Skip to content
Snippets Groups Projects
Commit d9bfa627 authored by i-robot's avatar i-robot Committed by Gitee
Browse files

!58 [上交大大学][高校贡献][Mindspore][ssd_resnet50]-高性能预训练模型提交+功能

Merge pull request !58 from 陈良宇/master
parents ca69ac96 f7d8be25
No related branches found
No related tags found
No related merge requests found
Showing
with 1175 additions and 1 deletion
...@@ -57,7 +57,7 @@ ...@@ -57,7 +57,7 @@
"models/official/cv/posenet/infer/mxbase/src/Posenet.h" "runtime/references" "models/official/cv/posenet/infer/mxbase/src/Posenet.h" "runtime/references"
"models/official/cv/posenet/infer/mxbase/src/Posenet.cpp" "runtime/references" "models/official/cv/posenet/infer/mxbase/src/Posenet.cpp" "runtime/references"
"models/official/cv/posenet/infer/mxbase/src/main.cpp" "runtime/references" "models/official/cv/posenet/infer/mxbase/src/main.cpp" "runtime/references"
"models/research/cv/ssd_resnet50/infer/mxbase/C++/SSDResNet50.h" "runtime/references"
"models/research/cv/ibnnet/infer/mxbase/src/IbnnetOpencv.h" "runtime/references" "models/research/cv/ibnnet/infer/mxbase/src/IbnnetOpencv.h" "runtime/references"
"models/official/cv/nasnet/infer/mxbase/NASNet_A_MobileClassifyOpencv.h" "runtime/references" "models/official/cv/nasnet/infer/mxbase/NASNet_A_MobileClassifyOpencv.h" "runtime/references"
......
ARG FROM_IMAGE_NAME
# BUGFIX: "FORM" is not a Dockerfile instruction; the base image must be
# selected with FROM, otherwise the build fails immediately.
FROM ${FROM_IMAGE_NAME}
COPY requirements.txt .
RUN pip3.7 install -r requirements.txt
\ No newline at end of file
# Copyright (C) 2021.Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
aipp_op {
aipp_mode: static
input_format : RGB888_U8
csc_switch : false
rbuv_swap_switch : true
mean_chn_0 : 124
mean_chn_1 : 117
mean_chn_2 : 104
var_reci_chn_0 : 0.0171247538316637
var_reci_chn_1 : 0.0175070028011204
var_reci_chn_2 : 0.0174291938997821
}
\ No newline at end of file
#!/bin/bash
# Copyright (c) 2021. Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
input_air_path=$1
output_om_path=$2
aipp_cfg=$3
# Environment for the ATC (Ascend Tensor Compiler) tool chain.
export ASCEND_ATC_PATH=/usr/local/Ascend/atc/bin/atc
export LD_LIBRARY_PATH=/usr/local/Ascend/atc/lib64:$LD_LIBRARY_PATH
export PATH=/usr/local/python3.7.5/bin:/usr/local/Ascend/atc/ccec_compiler/bin:/usr/local/Ascend/atc/bin:$PATH
export PYTHONPATH=/usr/local/Ascend/atc/python/site-packages:/usr/local/Ascend/atc/python/site-packages/auto_tune.egg/auto_tune:/usr/local/Ascend/atc/python/site-packages/schedule_search.egg
export ASCEND_OPP_PATH=/usr/local/Ascend/opp
export ASCEND_SLOG_PRINT_TO_STDOUT=1
# Fail fast with a usage hint instead of invoking atc with empty arguments.
if [ -z "${input_air_path}" ] || [ -z "${output_om_path}" ] || [ -z "${aipp_cfg}" ]; then
    echo "Usage: $0 <input_air_path> <output_om_path> <aipp_cfg>"
    exit 1
fi
echo "Input AIR file path: ${input_air_path}"
echo "Output OM file path: ${output_om_path}"
echo "AIPP cfg file path: ${aipp_cfg}"
# Quote the path arguments so paths containing spaces survive word splitting.
atc --input_format=NCHW --framework=1 \
    --model="${input_air_path}" \
    --output="${output_om_path}" \
    --soc_version=Ascend310 \
    --disable_reuse_memory=0 \
    --insert_op_conf="${aipp_cfg}" \
    --precision_mode=allow_fp32_to_fp16 \
    --op_select_implmode=high_precision
\ No newline at end of file
#!/bin/bash
#ascendhub.huawei.com/public-ascendhub/infer-modelzoo:21.0.2
docker_image=$1
model_dir=$2
echo "$1"
echo "$2"
if [ -z "${docker_image}" ]; then
    echo "please input docker_image"
    exit 1
fi
if [ ! -d "${model_dir}" ]; then
    echo "please input model_dir"
    exit 1
fi
# Quote the mount paths and image name so values containing spaces are passed
# to docker as single arguments.
docker run -it \
    --device=/dev/davinci0 \
    --device=/dev/davinci_manager \
    --device=/dev/devmm_svm \
    --device=/dev/hisi_hdc \
    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
    -v "${model_dir}":"${model_dir}" \
    "${docker_image}" \
    /bin/bash
\ No newline at end of file
cmake_minimum_required(VERSION 3.10.0)
project(ssd_ms)
set(TARGET ssd_resnet50)
set(CMAKE_BUILD_TYPE "Debug")
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb")
set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
add_definitions(-DENABLE_DVPP_INTERFACE)
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
add_definitions(-Dgoogle=mindxsdk_private)
add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -pie)
# Check required environment variables up front so failures are explicit.
if(NOT DEFINED ENV{ASCEND_HOME})
    message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
endif()
if(NOT DEFINED ENV{ASCEND_VERSION})
    message(WARNING "please define environment variable:ASCEND_VERSION")
endif()
if(NOT DEFINED ENV{ARCH_PATTERN})
    message(WARNING "please define environment variable:ARCH_PATTERN")
endif()
if(NOT DEFINED ENV{MX_SDK_HOME})
    message(FATAL_ERROR "please define environment variable:MX_SDK_HOME")
endif()
# BUGFIX: ASCEND_VERSION / ARCH_PATTERN are *environment* variables (that is
# what the checks above test), but the original path expressions read them as
# CMake variables, which are normally undefined and silently produced wrong
# include/library paths. Copy env -> CMake variables before use.
set(ASCEND_VERSION $ENV{ASCEND_VERSION})
set(ARCH_PATTERN $ENV{ARCH_PATTERN})
# ACL (Ascend Compute Language) headers and libraries.
set(ACL_INC_DIR $ENV{ASCEND_HOME}/${ASCEND_VERSION}/${ARCH_PATTERN}/acllib/include)
set(ACL_LIB_DIR $ENV{ASCEND_HOME}/${ASCEND_VERSION}/${ARCH_PATTERN}/acllib/lib64)
# MxBase headers and libraries.
set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
# Bundled open-source dependencies (OpenCV, glog, ...).
set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource)
# link_directories must stay directory-scoped: target_link_directories needs
# CMake >= 3.13 and this project declares a 3.10 minimum.
link_directories(${ACL_LIB_DIR})
link_directories(${OPENSOURCE_DIR}/lib)
link_directories(${MXBASE_LIB_DIR})
link_directories(${MXBASE_POST_LIB_DIR})
# Sources of the local inference executable.
add_executable(${TARGET} ResNet50_main.cpp ResNet50Base.cpp)
# Scope include paths to the target instead of the whole directory.
target_include_directories(${TARGET} PRIVATE
    ${ACL_INC_DIR}
    ${OPENSOURCE_DIR}/include
    ${OPENSOURCE_DIR}/include/opencv4
    ${MXBASE_INC}
    ${MXBASE_POST_PROCESS_DIR}
)
# Post-processing library: SsdMobilenetFpn_MindsporePost (shared with the SSD
# MobileNet-FPN model family).
target_link_libraries(${TARGET} PRIVATE
    glog cpprest mxbase SsdMobilenetFpn_MindsporePost opencv_world stdc++fs)
install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
\ No newline at end of file
/*
* Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// 主代码逻辑
#include "SSDResNet50.h"
#include <unistd.h>
#include <sys/stat.h>
#include <map>
#include <vector>
#include <string>
#include <memory>
#include <fstream>
#include "MxBase/DeviceManager/DeviceManager.h"
#include "MxBase/Log/Log.h"
using MxBase::TensorBase;
using MxBase::ObjectInfo;
using MxBase::ResizedImageInfo;
using MxBase::DeviceManager;
using MxBase::TensorContext;
using MxBase::DvppWrapper;
using MxBase::ModelInferenceProcessor;
using MxBase::ConfigData;
using MxBase::SsdMobilenetFpnMindsporePost;
using MxBase::YUV444_RGB_WIDTH_NU;
using MxBase::MemoryData;
using MxBase::MemoryHelper;
using MxBase::TENSOR_DTYPE_UINT8;
using MxBase::DynamicInfo;
using MxBase::DynamicType;
using MxBase::RESIZER_STRETCHING;
// Initialize the whole inference stack in dependency order:
// devices -> tensor context -> DVPP wrapper -> model -> post-processor.
// Returns APP_ERR_OK on success, otherwise the error of the first failing step.
APP_ERROR SSDResNet50::Init(const InitParam &initParam) {
deviceId_ = initParam.deviceId;
APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
if (ret != APP_ERR_OK) {
LogError << "Init devices failed, ret=" << ret << ".";
return ret;
}
// Bind subsequent tensor operations to the selected device.
ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
if (ret != APP_ERR_OK) {
LogError << "Set context failed, ret=" << ret << ".";
return ret;
}
dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
ret = dvppWrapper_->Init();
if (ret != APP_ERR_OK) {
LogError << "DvppWrapper init failed, ret=" << ret << ".";
return ret;
}
// Load the OM model; modelDesc_ is filled with its input/output layout.
model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
ret = model_->Init(initParam.modelPath, modelDesc_);
if (ret != APP_ERR_OK) {
LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
return ret;
}
// Forward the post-processing thresholds to the SSD post-processor as JSON.
MxBase::ConfigData configData;
const std::string checkTensor = initParam.checkTensor ? "true" : "false";
configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum));
configData.SetJsonValue("SCORE_THRESH", std::to_string(initParam.score_thresh));
configData.SetJsonValue("IOU_THRESH", std::to_string(initParam.iou_thresh));
configData.SetJsonValue("CHECK_MODEL", checkTensor);
auto jsonStr = configData.GetCfgJson().serialize();
std::map<std::string, std::shared_ptr<void>> config;
config["postProcessConfigContent"] = std::make_shared<std::string>(jsonStr);
config["labelPath"] = std::make_shared<std::string>(initParam.labelPath);
post_ = std::make_shared<MxBase::SsdMobilenetFpnMindsporePost>();
ret = post_->Init(config);
if (ret != APP_ERR_OK) {
LogError << "SSDResNet50 init failed, ret=" << ret << ".";
return ret;
}
return APP_ERR_OK;
}
// Best-effort teardown. The wrappers are released before the devices are
// destroyed because they still reference the device context. Individual
// DeInit return codes are deliberately ignored; this always reports success.
APP_ERROR SSDResNet50::DeInit() {
dvppWrapper_->DeInit();
model_->DeInit();
post_->DeInit();
MxBase::DeviceManager::GetInstance()->DestroyDevices();
return APP_ERR_OK;
}
// Load an image from disk as a 3-channel BGR cv::Mat.
// BUGFIX: cv::imread does not throw on failure -- it returns an empty Mat
// when the file is missing or undecodable. The original reported success
// unconditionally and the empty Mat crashed later pipeline stages.
APP_ERROR SSDResNet50::ReadImage(const std::string &imgPath, cv::Mat &imageMat) {
imageMat = cv::imread(imgPath, cv::IMREAD_COLOR);
if (imageMat.empty()) {
LogError << "Failed to read image: " << imgPath << ".";
return APP_ERR_COMM_FAILURE;
}
return APP_ERR_OK;
}
// Stretch-resize the input image to the fixed 640x640 network input size.
// srcImageMat and dstImageMat may alias (cv::resize supports in-place use).
APP_ERROR SSDResNet50::ResizeImage(const cv::Mat &srcImageMat, cv::Mat &dstImageMat) {
static constexpr uint32_t kModelInputHeight = 640;
static constexpr uint32_t kModelInputWidth = 640;
cv::resize(srcImageMat, dstImageMat, cv::Size(kModelInputWidth, kModelInputHeight));
return APP_ERR_OK;
}
// Copy the packed pixel buffer of a cv::Mat into device memory and wrap it as
// an MxBase TensorBase of dtype uint8.
// NOTE(review): dataSize assumes a dense cols*rows*YUV444_RGB_WIDTH_NU layout
// (presumably 3 bytes per pixel -- confirm against the MxBase constant).
APP_ERROR SSDResNet50::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase) {
const uint32_t dataSize = imageMat.cols * imageMat.rows * YUV444_RGB_WIDTH_NU;
LogInfo << "image size after crop" << imageMat.cols << " " << imageMat.rows;
// Destination is device memory on the device chosen in Init(); the source
// wraps the Mat's host buffer without copying until MxbsMallocAndCopy runs.
MemoryData memoryDataDst(dataSize, MemoryData::MEMORY_DEVICE, deviceId_);
MemoryData memoryDataSrc(imageMat.data, dataSize, MemoryData::MEMORY_HOST_MALLOC);
APP_ERROR ret = MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
if (ret != APP_ERR_OK) {
LogError << GetError(ret) << "Memory malloc failed.";
return ret;
}
// 2-D shape {rows * channels, cols}: the channel dimension is folded into
// the first axis rather than kept as a separate rank.
std::vector<uint32_t> shape = {imageMat.rows * YUV444_RGB_WIDTH_NU, static_cast<uint32_t>(imageMat.cols)};
tensorBase = TensorBase(memoryDataDst, false, shape, TENSOR_DTYPE_UINT8);
return APP_ERR_OK;
}
// Allocate one device-side output tensor per model output (shapes and dtypes
// taken from modelDesc_ / the model processor) and run a static-batch
// inference with batch size 1 on the given inputs.
APP_ERROR SSDResNet50::Inference(const std::vector<MxBase::TensorBase> &inputs,
std::vector<MxBase::TensorBase> &outputs) {
auto dtypes = model_->GetOutputDataType();
for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
// Build the shape vector for output i from the model description.
std::vector<uint32_t> shape = {};
for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
shape.push_back((uint32_t) modelDesc_.outputTensors[i].tensorDims[j]);
}
TensorBase tensor(shape, dtypes[i], MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
APP_ERROR ret = TensorBase::TensorBaseMalloc(tensor);
if (ret != APP_ERR_OK) {
LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
return ret;
}
outputs.push_back(tensor);
}
// The model was compiled for a fixed batch; declare STATIC_BATCH of size 1.
DynamicInfo dynamicInfo = {};
dynamicInfo.dynamicType = DynamicType::STATIC_BATCH;
dynamicInfo.batchSize = 1;
APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo);
if (ret != APP_ERR_OK) {
LogError << "ModelInference failed, ret=" << ret << ".";
return ret;
}
return APP_ERR_OK;
}
// Thin forwarding wrapper around the SSD post-processor: converts raw model
// outputs into per-image ObjectInfo lists, mapped back to original image
// coordinates via resizedImageInfos.
APP_ERROR SSDResNet50::PostProcess(const std::vector<MxBase::TensorBase> &inputs,
std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos,
const std::vector<MxBase::ResizedImageInfo> &resizedImageInfos,
const std::map<std::string, std::shared_ptr<void>> &configParamMap) {
const APP_ERROR status = post_->Process(inputs, objectInfos, resizedImageInfos, configParamMap);
if (status != APP_ERR_OK) {
LogError << "Process failed, ret=" << status << ".";
}
return status;
}
// Write the detections of one image to ./results_<name>.txt, one line per box
// (x0, y0, width, height, confidence, classId, className). Expects a
// non-empty objectInfos (the caller checks this in Process()).
APP_ERROR SSDResNet50::SaveResult(const std::string &imgPath,
std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos) {
std::string fileName = imgPath.substr(imgPath.find_last_of("/") + 1);
size_t dot = fileName.find_last_of(".");
std::string resFileName = "./results_" + fileName.substr(0, dot) + ".txt";
std::ofstream outfile(resFileName);
if (outfile.fail()) {
// BUGFIX: the original message ended with a dangling "file: " and never
// named the file that could not be opened.
LogError << "Failed to open result file: " << resFileName;
return APP_ERR_COMM_FAILURE;
}
// Const references avoid copying the vector and every ObjectInfo element.
const std::vector<ObjectInfo> &objects = objectInfos.at(0);
std::string resultStr;
for (size_t i = 0; i < objects.size(); i++) {
const ObjectInfo &obj = objects.at(i);
std::string info = "BBox[" + std::to_string(i) + "]:[x0=" + std::to_string(obj.x0)
+ ", y0=" + std::to_string(obj.y0) + ", w=" + std::to_string(obj.x1 - obj.x0) + ", h="
+ std::to_string(obj.y1 - obj.y0) + "], confidence=" + std::to_string(obj.confidence)
+ ", classId=" + std::to_string(obj.classId) + ", className=" + obj.className;
LogInfo << info;
resultStr += info + "\n";
}
outfile << resultStr << std::endl;
outfile.close();
return APP_ERR_OK;
}
// End-to-end pipeline for a single image:
// read -> resize -> tensor upload -> inference -> post-process -> save.
APP_ERROR SSDResNet50::Process(const std::string &imgPath) {
cv::Mat imageMat;
APP_ERROR ret = ReadImage(imgPath, imageMat);
// BUGFIX: check the read result before touching the Mat; the original read
// rows/cols of a possibly-empty image first.
if (ret != APP_ERR_OK) {
LogError << "ReadImage failed, ret=" << ret << ".";
return ret;
}
// Original resolution, needed later to map boxes back from 640x640.
const uint32_t originHeight = imageMat.rows;
const uint32_t originWidth = imageMat.cols;
LogInfo << "image shape, size=" << originWidth << "," << originHeight << ".";
// BUGFIX: the original discarded ResizeImage's return value.
ret = ResizeImage(imageMat, imageMat);
if (ret != APP_ERR_OK) {
LogError << "ResizeImage failed, ret=" << ret << ".";
return ret;
}
TensorBase tensorBase;
ret = CVMatToTensorBase(imageMat, tensorBase);
if (ret != APP_ERR_OK) {
// BUGFIX: the original logged "Resize failed" for this step.
LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
return ret;
}
std::vector<MxBase::TensorBase> inputs = {};
std::vector<MxBase::TensorBase> outputs = {};
inputs.push_back(tensorBase);
ret = Inference(inputs, outputs);
if (ret != APP_ERR_OK) {
LogError << "Inference failed, ret=" << ret << ".";
return ret;
}
LogInfo << "Inference success, ret=" << ret << ".";
// Describe how the image was resized so post-processing can rescale boxes
// to the original resolution.
std::vector<MxBase::ResizedImageInfo> resizedImageInfos = {};
ResizedImageInfo imgInfo;
imgInfo.widthOriginal = originWidth;
imgInfo.heightOriginal = originHeight;
imgInfo.widthResize = 640;
imgInfo.heightResize = 640;
imgInfo.resizeType = MxBase::RESIZER_STRETCHING;
resizedImageInfos.push_back(imgInfo);
std::vector<std::vector<MxBase::ObjectInfo>> objectInfos = {};
std::map<std::string, std::shared_ptr<void>> configParamMap = {};
ret = PostProcess(outputs, objectInfos, resizedImageInfos, configParamMap);
if (ret != APP_ERR_OK) {
LogError << "PostProcess failed, ret=" << ret << ".";
return ret;
}
if (objectInfos.empty()) {
LogInfo << "No object detected." << std::endl;
return APP_ERR_OK;
}
ret = SaveResult(imgPath, objectInfos);
if (ret != APP_ERR_OK) {
LogError << "Save infer results into file failed. ret = " << ret << ".";
return ret;
}
return APP_ERR_OK;
}
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// c++的入口文件
#include <unistd.h>
#include <dirent.h>
#include <iostream>
#include <fstream>
#include <vector>
#include "SSDResNet50.h"
#include "MxBase/Log/Log.h"
namespace {
const uint32_t CLASS_NUM = 81;
}
int main(int argc, char *argv[]) {
if (argc <= 2) {
LogWarn << "Please input image path, such as './ssd_resnet50 ssd_resnet50.om test.jpg'.";
return APP_ERR_OK;
}
InitParam initParam = {};
initParam.deviceId = 0;
initParam.classNum = CLASS_NUM;
initParam.labelPath = "../models/coco.names";
initParam.iou_thresh = 0.6;
initParam.score_thresh = 0.6;
initParam.checkTensor = true;
initParam.modelPath = argv[1];
auto ssdResnet50 = std::make_shared<SSDResNet50>();
APP_ERROR ret = ssdResnet50->Init(initParam);
if (ret != APP_ERR_OK) {
LogError << "SsdResnet50 init failed, ret=" << ret << ".";
return ret;
}
std::string imgPath = argv[2];
ret = ssdResnet50->Process(imgPath);
if (ret != APP_ERR_OK) {
LogError << "SsdResnet50 process failed, ret=" << ret << ".";
ssdResnet50->DeInit();
return ret;
}
ssdResnet50->DeInit();
return APP_ERR_OK;
}
/*
* Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// 作为标识,防止重复编译报错
#ifndef SSD_ResNet50
#define SSD_ResNet50
#include <memory>
#include <utility>
#include <vector>
#include <string>
#include <map>
#include <opencv2/opencv.hpp>
#include "MxBase/DvppWrapper/DvppWrapper.h"
#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
#include "ObjectPostProcessors/SsdMobilenetFpnMindsporePost.h"
#include "MxBase/Tensor/TensorContext/TensorContext.h"
// Aggregated configuration consumed by SSDResNet50::Init().
struct InitParam {
// Ascend device index to run inference on.
uint32_t deviceId;
// Path to the class-name (label) file.
std::string labelPath;
// Number of detection classes (81 in main.cpp -- presumably COCO's 80
// classes plus background; confirm against the model).
uint32_t classNum;
// Forwarded to the post-processor as "IOU_THRESH".
float iou_thresh;
// Forwarded to the post-processor as "SCORE_THRESH".
float score_thresh;
// Forwarded to the post-processor as "CHECK_MODEL".
bool checkTensor;
// Path to the compiled .om model file.
std::string modelPath;
};
// MxBase-based SSD-ResNet50 detector: wraps device setup, preprocessing,
// model inference, post-processing and result saving for single images.
class SSDResNet50 {
public:
// One-time setup of device, DVPP wrapper, model and post-processor.
APP_ERROR Init(const InitParam &initParam);
// Releases everything acquired in Init().
APP_ERROR DeInit();
// Loads imgPath from disk into imageMat.
APP_ERROR ReadImage(const std::string &imgPath, cv::Mat &imageMat);
// Stretch-resizes to the fixed model input size.
APP_ERROR ResizeImage(const cv::Mat &srcImageMat, cv::Mat &dstImageMat);
// Copies the image into device memory wrapped as a TensorBase.
APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase);
// Runs the model; output tensors are allocated according to modelDesc_.
APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
// Converts raw model outputs into per-image ObjectInfo lists.
APP_ERROR PostProcess(const std::vector<MxBase::TensorBase> &inputs,
std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos,
const std::vector<MxBase::ResizedImageInfo> &resizedImageInfos,
const std::map<std::string, std::shared_ptr<void>> &configParamMap);
// Writes detections to ./results_<image>.txt.
APP_ERROR SaveResult(const std::string &imgPath,
std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos);
// Full read -> infer -> save pipeline for a single image path.
APP_ERROR Process(const std::string &imgPath);
private:
std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
std::shared_ptr<MxBase::SsdMobilenetFpnMindsporePost> post_;
// Filled by model_->Init(); describes the model's input/output tensors.
MxBase::ModelDesc modelDesc_;
// Device selected in Init(); defaults to device 0.
uint32_t deviceId_ = 0;
};
#endif
#!/bin/bash
# Copyright (C) 2021.Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BUGFIX: stop at the first failing step; previously a cmake or make failure
# still ran "make install" and copied a stale binary.
set -e
export ASCEND_HOME=/usr/local/Ascend
export ARCH_PATTERN=x86_64-linux
export ASCEND_VERSION=nnrt/latest
# Fresh out-of-source build directory.
rm -rf dist
mkdir dist
cd dist
cmake ..
make -j
make install
cp ./ssd_resnet50 ../
absl-py
\ No newline at end of file
{
"detection": {
"stream_config": {
"deviceId": "0"
},
"appsrc0": {
"props": {
"blocksize": "409600"
},
"factory": "appsrc",
"next": "mxpi_imagedecoder0"
},
"mxpi_imagedecoder0": {
"props": {
"handleMethod": "opencv"
},
"factory": "mxpi_imagedecoder",
"next": "mxpi_imageresize0"
},
"mxpi_imageresize0": {
"props": {
"parentName": "mxpi_imagedecoder0",
"handleMethod": "opencv",
"resizeHeight": "640",
"resizeWidth": "640",
"resizeType": "Resizer_Stretch"
},
"factory": "mxpi_imageresize",
"next": "mxpi_tensorinfer0"
},
"mxpi_tensorinfer0": {
"props": {
"waitingTime": "3000",
"dataSource": "mxpi_imageresize0",
"modelPath": "/home/test/ssd_resnet50/ssd_resnet50.om"
},
"factory": "mxpi_tensorinfer",
"next": "mxpi_objectpostprocessor0"
},
"mxpi_objectpostprocessor0": {
"props": {
"dataSource": "mxpi_tensorinfer0",
"postProcessConfigPath": "/home/test/ssd_resnet50/infer/sdk/conf/ssd_mobilenet_v1_fpn_ms_on_coco_postprocess.cfg",
"labelPath": "/home/test/ssd_resnet50/infer/sdk/conf/coco.names",
"postProcessLibPath": "/usr/local/sdk_home/mxManufacture/lib/modelpostprocessors/libSsdMobilenetFpn_MindsporePost.so"
},
"factory": "mxpi_objectpostprocessor",
"next": "mxpi_dataserialize0"
},
"mxpi_dataserialize0": {
"props": {
"outputDataKeys": "mxpi_objectpostprocessor0"
},
"factory": "mxpi_dataserialize",
"next": "appsink0"
},
"appsink0": {
"factory": "appsink"
}
}
}
\ No newline at end of file
CLASS_NUM=81
SCORE_THRESH=0.1
IOU_THRESH=0.6
CHECK_MODEL=true
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""infer_by_sdk"""
import argparse
import json
import os
from StreamManagerApi import MxDataInput
from StreamManagerApi import StreamManagerApi
# Image suffixes accepted by the inference loop.
SUPPORT_IMG_SUFFIX = (".jpg", ".JPG", ".jpeg", ".JPEG")
# Absolute directory of this script; used to resolve the default paths below.
current_path = os.path.abspath(os.path.dirname(__file__))
# Command-line interface for the SDK-based inference run.
parser = argparse.ArgumentParser(
    description="SSD ResNet50 infer " "example.",
    fromfile_prefix_chars="@",
)
# Pipeline config consumed by the stream manager.
parser.add_argument(
    "--pipeline_path",
    type=str,
    help="mxManufacture pipeline file path",
    default=os.path.join(current_path, "../conf/ssd_resnet50.pipeline"),
)
parser.add_argument(
    "--stream_name",
    type=str,
    help="Infer stream name in the pipeline config file",
    default="detection",
)
parser.add_argument(
    "--img_path",
    type=str,
    help="Image pathname, can be a image file or image directory",
    default=os.path.join(current_path, "../coco/val2017"),
)
# Where inference results are written; defaults to ./infer_res (see infer()).
parser.add_argument(
    "--res_path",
    type=str,
    help="Directory to store the inferred result",
    default=None,
    required=False,
)
args = parser.parse_args()
# Run inference over one image or a directory of images via the SDK stream.
def infer():
    """Infer images by DVPP + OM. """
    pipeline_path = args.pipeline_path
    # Stream names are passed to the SDK as UTF-8 bytes.
    stream_name = args.stream_name.encode()
    img_path = os.path.abspath(args.img_path)
    res_dir_name = args.res_path
    # StreamManagerApi loads the pipeline config, creates streams and moves
    # data in and out of them.
    stream_manager_api = StreamManagerApi()
    ret = stream_manager_api.InitManager()
    if ret != 0:
        print("Failed to init Stream manager, ret=%s" % str(ret))
        exit()
    # Create streams from the pipeline config file.
    with open(pipeline_path, "rb") as f:
        pipeline_str = f.read()
    ret = stream_manager_api.CreateMultipleStreams(pipeline_str)
    if ret != 0:
        print("Failed to create Stream, ret=%s" % str(ret))
        exit()
    # Id of the input plugin (appsrc0) that receives the raw image bytes.
    in_plugin_id = 0
    # MxDataInput is the container for data sent into a stream.
    data_input = MxDataInput()
    # Either a single supported image file, or every supported image found
    # directly inside the given directory.
    if os.path.isfile(img_path) and img_path.endswith(SUPPORT_IMG_SUFFIX):
        file_list = [os.path.abspath(img_path)]
    else:
        file_list = os.listdir(img_path)
        file_list = [
            # Keep only files with a supported suffix, joined to full paths.
            os.path.join(img_path, img)
            for img in file_list
            if img.endswith(SUPPORT_IMG_SUFFIX)
        ]
    if not res_dir_name:
        res_dir_name = os.path.join(".", "infer_res")
    print(f"res_dir_name={res_dir_name}")
    # exist_ok avoids FileExistsError when the directory already exists.
    os.makedirs(res_dir_name, exist_ok=True)
    pic_infer_dict_list = []
    for file_name in file_list:
        # Read the raw encoded image bytes.
        with open(file_name, "rb") as f:
            img_data = f.read()
        if not img_data:
            print(f"read empty data from img:{file_name}")
            continue
        data_input.data = img_data
        # Send the image to the input plugin; the returned unique id pairs
        # this request with its eventual result.
        unique_id = stream_manager_api.SendDataWithUniqueId(
            stream_name, in_plugin_id, data_input
        )
        if unique_id < 0:
            print("Failed to send data to stream.")
            exit()
        # Fetch the serialized result from appsink, waiting up to 3000 ms.
        infer_result = stream_manager_api.GetResultWithUniqueId(
            stream_name, unique_id, 3000
        )
        if infer_result.errorCode != 0:
            print(
                "GetResultWithUniqueId error. errorCode=%d, errorMsg=%s"
                % (infer_result.errorCode, infer_result.data.decode())
            )
            exit()
        # Accumulate the per-image detections for the final JSON report.
        pic_infer_dict_list.extend(
            parse_img_infer_result(file_name, infer_result)
        )
        print(f"Inferred image:{file_name} success!")
    # Dump all detections as a single JSON array.
    with open(os.path.join(res_dir_name, "det_result.json"), "w") as fw:
        fw.write(json.dumps(pic_infer_dict_list))
    stream_manager_api.DestroyAllStreams()
def parse_img_infer_result(file_name, infer_result):
    """Convert one image's raw SDK result into COCO-style detection dicts.

    Args:
        file_name: path of the inferred image; its basename without the
            suffix becomes the image_id.
        infer_result: SDK result object whose ``data`` attribute holds UTF-8
            JSON with an optional "MxpiObject" list of detections.

    Returns:
        A list of dicts with image_id, bbox ([x, y, w, h]), category_id
        and score.
    """
    detections = json.loads(infer_result.data.decode()).get("MxpiObject", [])
    # The image id is the file name without directory and suffix.
    image_id = os.path.basename(file_name).split(".")[0]
    parsed = []
    for det in detections:
        # Box corner coordinates, rounded to 4 decimal places.
        left = round(det.get("x0"), 4)
        top = round(det.get("y0"), 4)
        right = round(det.get("x1"), 4)
        bottom = round(det.get("y1"), 4)
        first_class = det.get("classVec")[0]
        parsed.append(
            dict(
                image_id=image_id,
                # COCO bbox format: [x, y, width, height].
                bbox=[left, top, right - left, bottom - top],
                category_id=first_class.get("classId"),
                score=first_class.get("confidence"),
            )
        )
    return parsed
if __name__ == "__main__":
infer()
# Copyright (C) 2020.Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""generate"""
import os
from datetime import datetime
import json
from absl import flags
from absl import app
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# Template matching the 12 rows of COCOeval.stats, filled in generate_main().
PRINT_LINES_TEMPLATE = """
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = %.3f
"""
FLAGS = flags.FLAGS
# Ground-truth COCO annotations file (instances_*.json).
flags.DEFINE_string(
    name="annotations_json",
    default=None,
    help="annotations_json file path name",
)
# Detection results produced by infer_by_sdk.py (det_result.json).
flags.DEFINE_string(
    name="det_result_json", default=None, help="det_result json file"
)
# COCOeval annotation type; this model evaluates bounding boxes.
flags.DEFINE_enum(
    name="anno_type",
    default="bbox",
    enum_values=["segm", "bbox", "keypoints"],
    help="Annotation type",
)
# Report file the AP/AR table is appended to.
flags.DEFINE_string(
    name="output_path_name",
    default=None,
    help="Where to out put the result files.",
)
flags.mark_flag_as_required("annotations_json")
flags.mark_flag_as_required("det_result_json")
flags.mark_flag_as_required("output_path_name")
def get_category_id(k):
    """
    :param: class id which corresponding coco.names
    :return: category id is used in instances_val2014.json
    """
    # COCO category ids are sparse: annotation files skip some ids, so the
    # contiguous model class id must be shifted by a range-dependent offset.
    # Each entry is (low, high, offset) for an inclusive id range.
    range_offsets = (
        (12, 24, 1),
        (25, 26, 2),
        (27, 40, 4),
        (41, 60, 5),
        (61, 61, 6),
        (62, 62, 8),
        (63, 73, 9),
        (74, 80, 10),
    )
    for low, high, offset in range_offsets:
        if low <= k <= high:
            return k + offset
    # Ids below 12 (and anything outside the table) map to themselves.
    return k
def get_dict_from_file(file_path):
    """
    :param: file_path contain all infer result
    :return: dict_list contain infer result of every images
    """
    # BUGFIX: the original opened the file without ever closing it; use a
    # context manager so the handle is released even if json.load raises.
    with open(file_path) as f:
        ban_list = json.load(f)
    ls = []
    for item in ban_list:
        # Copy so the loaded JSON objects are not mutated in place.
        item_copy = item.copy()
        # Map the contiguous model class id to the sparse COCO category id.
        item_copy['category_id'] = get_category_id(int(item['category_id']))
        item_copy['image_id'] = int(item['image_id'])
        ls.append(item_copy)
    return ls
def generate_main(unused_arg):
    """Compute COCO mAP for the detections and append a report.

    Reads FLAGS.annotations_json (ground truth) and FLAGS.det_result_json
    (detections), runs COCOeval over images without "iscrowd" annotations and
    appends the formatted AP/AR table to FLAGS.output_path_name.
    """
    del unused_arg
    out_put_dir = os.path.dirname(FLAGS.output_path_name)
    # BUGFIX: dirname() returns "" for a bare file name and makedirs("")
    # raises FileNotFoundError; only create the directory when there is one.
    if out_put_dir and not os.path.exists(out_put_dir):
        os.makedirs(out_put_dir)
    # BUGFIX: keep the report file in a context manager so it is closed even
    # if evaluation fails part-way (the original leaked the handle).
    with open(FLAGS.output_path_name, "a+") as fw:
        now_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        head_info = f"{'-'*50}mAP Test starts @ {now_time_str}{'-'*50}\n"
        fw.write(head_info)
        fw.flush()
        # Keep only images that have no "iscrowd" annotation at all.
        cocoGt = COCO(FLAGS.annotations_json)
        image_ids = cocoGt.getImgIds()
        need_img_ids = []
        for img_id in image_ids:
            iscrowd = False
            anno_ids = cocoGt.getAnnIds(imgIds=img_id, iscrowd=None)
            anno = cocoGt.loadAnns(anno_ids)
            for label in anno:
                iscrowd = iscrowd or label["iscrowd"]
            if iscrowd:
                continue
            need_img_ids.append(img_id)
        # Remap category/image ids and persist for COCO's loadRes().
        result_dict = get_dict_from_file(FLAGS.det_result_json)
        json_file_name = './result.json'
        with open(json_file_name, 'w') as f:
            json.dump(result_dict, f)
        cocoDt = cocoGt.loadRes(json_file_name)
        cocoEval = COCOeval(cocoGt, cocoDt, iouType=FLAGS.anno_type)
        cocoEval.params.imgIds = sorted(need_img_ids)
        print(cocoEval.params.imgIds)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        # Fill each metric row of the template with the matching stat value.
        format_lines = [
            line for line in PRINT_LINES_TEMPLATE.splitlines() if line.strip()
        ]
        for i, line in enumerate(format_lines):
            fw.write(line % cocoEval.stats[i] + "\n")
        end_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        tail_info = f"{'-'*50}mAP Test ends @ {end_time_str}{'-'*50}\n"
        fw.write(tail_info)
if __name__ == "__main__":
app.run(generate_main)
#!/bin/bash
# Copyright (C) 2021.Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PY=/usr/bin/python3.7
export PYTHONPATH=${PYTHONPATH}:.
annotations_json=$1
det_result_json=$2
output_path_name=$3
# Require all three arguments so the report generator fails fast with a hint.
if [ -z "${annotations_json}" ] || [ -z "${det_result_json}" ] || [ -z "${output_path_name}" ]; then
    echo "Usage: $0 <annotations_json> <det_result_json> <output_path_name>"
    exit 1
fi
# Quote path arguments so locations containing spaces survive word splitting.
${PY} generate_map_report.py \
    --annotations_json="${annotations_json}" \
    --det_result_json="${det_result_json}" \
    --output_path_name="${output_path_name}" \
    --anno_type=bbox
\ No newline at end of file
#!/bin/bash
# Copyright (C) 2021.Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Run SDK-based inference via infer_by_sdk.py on an MxStream pipeline.
# Usage: bash <this_script> <pipeline_path> <img_path> <res_path> <stream_name>
set -e

# Simple log helper functions
info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }

# Fail fast on missing arguments instead of passing empty values downstream.
if [ $# -ne 4 ]; then
    warn "Usage: bash $0 <pipeline_path> <img_path> <res_path> <stream_name>"
    exit 1
fi

# MindX SDK / Ascend runtime libraries and GStreamer plugin locations.
export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins

# To set PYTHONPATH so StreamManagerApi.py can be imported.
export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python

pipeline_path=$1
img_path=$2
res_path=$3
stream_name=$4

# Quote every expansion so paths containing spaces are passed intact.
python3.7 infer_by_sdk.py \
    --pipeline_path "${pipeline_path}" \
    --img_path "${img_path}" \
    --res_path "${res_path}" \
    --stream_name "${stream_name}"
exit 0
\ No newline at end of file
# SSD_Resnet50-Ascend (目标检测/MindSpore)
## 1.概述
SSD-ResNet50 将边界框的输出空间离散为一组默认框,每个特征图位置的纵横比和比例不同。在预测时,网络为每个默认框中每个对象类别生成分数,并对该框进行调整,以更好地匹配对象形状。此外,该网络结合了来自不同分辨率的多个特征图的预测,从而自然地处理不同尺寸的物体。
## 2.训练
### 2.1.算法基本信息
- 任务类型: 目标检测
- 支持的框架引擎: Ascend-Powered-Engine-MindSpore-1.1.1-python3.7-aarch64
- 算法输入:
- obs数据集路径,下面存放使用coco2017数据集。数据集的格式见训练手册说明。
- 算法输出:
- 训练生成的ckpt模型
### 2.2.训练参数说明
名称|默认值|类型|是否必填|描述
---|---|---|---|---
lr|0.05|float|True|初始学习率
dataset|coco|string|True|数据集格式,可选值coco、voc、other
epoch_size|500|int|True|训练轮数
batch_size|32|int|True|一次训练所抓取的数据样本数量
save_checkpoint_epochs|10|int|False|保存checkpoint的轮数。
num_classes|81|int|True|数据集类别数+1。
voc_json|-|string|False|dataset为voc时,用于指定数据集标注文件,填相对于data_url的路径。
anno_path|-|string|False|dataset为other时,用于指定数据集标注文件,填相对于data_url的路径。
pre_trained|-|string|False|迁移学习时,预训练模型路径,模型放在data_url下,填相对于data_url的路径。
loss_scale|1024|int|False|Loss scale.
filter_weight|False|Boolean|False|Filter head weight parameters,迁移学习时需要设置为True。
### 2.3. 训练输出文件
训练完成后的输出文件如下
```js
训练输出目录 V000X
├── ssd-10_12.ckpt
├── ssd-10_12.ckpt.air
├── ssd-graph.meta
├── kernel_meta
├── ApplyMomentum_13796921261177776697_0.info
├── AddN_4688903218960634315_0.json
├── ...
```
## 3.迁移学习指导
### 3.1. 数据集准备:
参考训练手册:`迁移学习指导`->`数据集准备`
### 3.2. 上传预训练模型ckpt文件到obs数据目录pretrain_model中,示例如下:
```js
MicrocontrollerDetection # obs数据目录
|- train # 训练图片数据集目录
|- IMG_20181228_102033.jpg
|- IMG_20181228_102041.jpg
|- ..
|- train_labels.txt # 训练图片数据标注
|- pretrain_model
|- ssd-3-61.ckpt # 预训练模型 ckpt文件
```
classes_label_path参数对应的train_labels.txt的内容如下所示:
```js
background
Arduino Nano
ESP8266
Raspberry Pi 3
Heltec ESP32 Lora
```
### 3.3. 修改调优参数
目前迁移学习支持修改数据集类别,订阅算法创建训练任务,创建训练作业时需要修改如下调优参数:
1. dataset改为other。
2. num_classes改为迁移学习数据集的类别数+1。
3. anno_path指定迁移学习数据集的标注文件路径。
4. filter_weight改为True。
5. pre_trained指定预训练模型路径。
以上参数的说明见`训练参数说明`
### 3.4. 创建训练作业
指定数据存储位置、模型输出位置和作业日志路径,创建训练作业进行迁移学习。
\ No newline at end of file
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment