diff --git a/.jenkins/check/config/filter_cpplint.txt b/.jenkins/check/config/filter_cpplint.txt index 2d8fb85c63fef7a80f011c7651e672fcb630ee99..a4916b2a5a6b788b6a9975bfff691f7b7c18e379 100644 --- a/.jenkins/check/config/filter_cpplint.txt +++ b/.jenkins/check/config/filter_cpplint.txt @@ -57,7 +57,7 @@ "models/official/cv/posenet/infer/mxbase/src/Posenet.h" "runtime/references" "models/official/cv/posenet/infer/mxbase/src/Posenet.cpp" "runtime/references" "models/official/cv/posenet/infer/mxbase/src/main.cpp" "runtime/references" - +"models/research/cv/ssd_resnet50/infer/mxbase/C++/SSDResNet50.h" "runtime/references" "models/research/cv/ibnnet/infer/mxbase/src/IbnnetOpencv.h" "runtime/references" "models/official/cv/nasnet/infer/mxbase/NASNet_A_MobileClassifyOpencv.h" "runtime/references" diff --git a/research/cv/ssd_resnet50/infer/Dockerfile b/research/cv/ssd_resnet50/infer/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..0769bd8f4a4f2f5e42452470d4393d06546a4f4b --- /dev/null +++ b/research/cv/ssd_resnet50/infer/Dockerfile @@ -0,0 +1,5 @@ +ARG FROM_IMAGE_NAME +FROM ${FROM_IMAGE_NAME} + +COPY requirements.txt . +RUN pip3.7 install -r requirements.txt \ No newline at end of file diff --git a/research/cv/ssd_resnet50/infer/__init__.py b/research/cv/ssd_resnet50/infer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f099fb0fdac063a3b5bd2e1175b4b753c6cc10d2 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/__init__.py @@ -0,0 +1,13 @@ +# Copyright (C) 2021.Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/research/cv/ssd_resnet50/infer/convert/aipp.cfg b/research/cv/ssd_resnet50/infer/convert/aipp.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f40bc1b108ec4fd1f400ca916b91c524bfb162b1 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/convert/aipp.cfg @@ -0,0 +1,12 @@ +aipp_op { +aipp_mode: static +input_format : RGB888_U8 +csc_switch : false +rbuv_swap_switch : true +mean_chn_0 : 124 +mean_chn_1 : 117 +mean_chn_2 : 104 + var_reci_chn_0 : 0.0171247538316637 + var_reci_chn_1 : 0.0175070028011204 + var_reci_chn_2 : 0.0174291938997821 +} \ No newline at end of file diff --git a/research/cv/ssd_resnet50/infer/convert/convert_om.sh b/research/cv/ssd_resnet50/infer/convert/convert_om.sh new file mode 100644 index 0000000000000000000000000000000000000000..009ee1ea69fc1175135808cea2b4ad039240f995 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/convert/convert_om.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# Copyright (c) 2021. Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +input_air_path=$1 +output_om_path=$2 +aipp_cfg=$3 + +export ASCEND_ATC_PATH=/usr/local/Ascend/atc/bin/atc +export LD_LIBRARY_PATH=/usr/local/Ascend/atc/lib64:$LD_LIBRARY_PATH +export PATH=/usr/local/python3.7.5/bin:/usr/local/Ascend/atc/ccec_compiler/bin:/usr/local/Ascend/atc/bin:$PATH +export PYTHONPATH=/usr/local/Ascend/atc/python/site-packages:/usr/local/Ascend/atc/python/site-packages/auto_tune.egg/auto_tune:/usr/local/Ascend/atc/python/site-packages/schedule_search.egg +export ASCEND_OPP_PATH=/usr/local/Ascend/opp + +export ASCEND_SLOG_PRINT_TO_STDOUT=1 + +echo "Input AIR file path: ${input_air_path}" +echo "Output OM file path: ${output_om_path}" +echo "AIPP cfg file path: ${aipp_cfg}" + +atc --input_format=NCHW --framework=1 \ +--model=${input_air_path} \ +--output=${output_om_path} \ +--soc_version=Ascend310 \ +--disable_reuse_memory=0 \ +--insert_op_conf=${aipp_cfg} \ +--precision_mode=allow_fp32_to_fp16 \ +--op_select_implmode=high_precision \ No newline at end of file diff --git a/research/cv/ssd_resnet50/infer/docker_start_infer.sh b/research/cv/ssd_resnet50/infer/docker_start_infer.sh new file mode 100644 index 0000000000000000000000000000000000000000..7dfa5d41a122d469940cd31545b3de83efa5960a --- /dev/null +++ b/research/cv/ssd_resnet50/infer/docker_start_infer.sh @@ -0,0 +1,25 @@ +#!/bin/bash +#ascendhub.huawei.com/public-ascendhub/infer-modelzoo:21.0.2 +docker_image=$1 +model_dir=$2 +echo "$1" +echo "$2" +if [ -z "${docker_image}" ]; then + echo "please input docker_image" + exit 1 +fi + +if [ ! 
-d "${model_dir}" ]; then + echo "please input model_dir" + exit 1 +fi + +docker run -it \ + --device=/dev/davinci0 \ + --device=/dev/davinci_manager \ + --device=/dev/devmm_svm \ + --device=/dev/hisi_hdc \ + -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \ + -v ${model_dir}:${model_dir} \ + ${docker_image} \ + /bin/bash \ No newline at end of file diff --git a/research/cv/ssd_resnet50/infer/mxbase/C++/CMakeLists.txt b/research/cv/ssd_resnet50/infer/mxbase/C++/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f509e7d8bf1de35d7e0557cb230cf34a1010f9ae --- /dev/null +++ b/research/cv/ssd_resnet50/infer/mxbase/C++/CMakeLists.txt @@ -0,0 +1,59 @@ +cmake_minimum_required(VERSION 3.10.0) +project(ssd_ms) + +set(TARGET ssd_resnet50) + +SET(CMAKE_BUILD_TYPE "Debug") +SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb") +SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall") + +add_definitions(-DENABLE_DVPP_INTERFACE) +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) +add_definitions(-Dgoogle=mindxsdk_private) +add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall) +add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -pie) + +# Check environment variable +if(NOT DEFINED ENV{ASCEND_HOME}) + message(FATAL_ERROR "please define environment variable:ASCEND_HOME") +endif() +if(NOT DEFINED ENV{ASCEND_VERSION}) + message(WARNING "please define environment variable:ASCEND_VERSION") +endif() +if(NOT DEFINED ENV{ARCH_PATTERN}) + message(WARNING "please define environment variable:ARCH_PATTERN") +endif() + +# 璁剧疆acllib鐨勫ご鏂囦欢鍜屽姩鎬侀摼鎺ュ簱 +set(ACL_INC_DIR $ENV{ASCEND_HOME}/${ASCEND_VERSION}/${ARCH_PATTERN}/acllib/include) +set(ACL_LIB_DIR $ENV{ASCEND_HOME}/${ASCEND_VERSION}/${ARCH_PATTERN}/acllib/lib64) + +# 璁剧疆MxBase鐨勫ご鏂囦欢鍜屽姩鎬侀摼鎺ュ簱 +set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME}) +set(MXBASE_INC ${MXBASE_ROOT_DIR}/include) +set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib) +set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors) 
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include) + +# 璁剧疆opensource鐨勫ご鏂囦欢鍜屽姩鎬侀摼鎺ュ簱 +# 涓昏鍖呭惈OpenCV銆丟oogle log绛夊紑婧愬簱 +set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource) + +include_directories(${ACL_INC_DIR}) +include_directories(${OPENSOURCE_DIR}/include) +include_directories(${OPENSOURCE_DIR}/include/opencv4) + +include_directories(${MXBASE_INC}) +include_directories(${MXBASE_POST_PROCESS_DIR}) +link_directories(${ACL_LIB_DIR}) +link_directories(${OPENSOURCE_DIR}/lib) +link_directories(${MXBASE_LIB_DIR}) +link_directories(${MXBASE_POST_LIB_DIR}) + +# 鏈湴缂栬瘧閾炬帴鏂囦欢锛屾牴鎹甿xbase鐩綍涓嬫枃浠舵坊鍔犱慨鏀� +add_executable(${TARGET} ResNet50_main.cpp ResNet50Base.cpp) + +# 閾炬帴鍔ㄦ€侀摼鎺ュ簱锛屽悗澶勭悊lib:yolov3postprocess鏍规嵁瀹為檯鎯呭喌淇敼 +target_link_libraries(${TARGET} glog cpprest mxbase SsdMobilenetFpn_MindsporePost opencv_world stdc++fs) + +install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/) \ No newline at end of file diff --git a/research/cv/ssd_resnet50/infer/mxbase/C++/ResNet50Base.cpp b/research/cv/ssd_resnet50/infer/mxbase/C++/ResNet50Base.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c76c40e1d61ce098a45b33ef86d58e78e8cf8261 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/mxbase/C++/ResNet50Base.cpp @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// 涓讳唬鐮侀€昏緫 +#include "SSDResNet50.h" +#include <unistd.h> +#include <sys/stat.h> +#include <map> +#include <vector> +#include <string> +#include <memory> +#include <fstream> +#include "MxBase/DeviceManager/DeviceManager.h" +#include "MxBase/Log/Log.h" + +using MxBase::TensorBase; +using MxBase::ObjectInfo; +using MxBase::ResizedImageInfo; +using MxBase::DeviceManager; +using MxBase::TensorContext; +using MxBase::DvppWrapper; +using MxBase::ModelInferenceProcessor; +using MxBase::ConfigData; +using MxBase::SsdMobilenetFpnMindsporePost; +using MxBase::YUV444_RGB_WIDTH_NU; +using MxBase::MemoryData; +using MxBase::MemoryHelper; +using MxBase::TENSOR_DTYPE_UINT8; +using MxBase::DynamicInfo; +using MxBase::DynamicType; +using MxBase::RESIZER_STRETCHING; + +APP_ERROR SSDResNet50::Init(const InitParam &initParam) { + deviceId_ = initParam.deviceId; + APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices(); + if (ret != APP_ERR_OK) { + LogError << "Init devices failed, ret=" << ret << "."; + return ret; + } + ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId); + if (ret != APP_ERR_OK) { + LogError << "Set context failed, ret=" << ret << "."; + return ret; + } + dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>(); + ret = dvppWrapper_->Init(); + if (ret != APP_ERR_OK) { + LogError << "DvppWrapper init failed, ret=" << ret << "."; + return ret; + } + model_ = std::make_shared<MxBase::ModelInferenceProcessor>(); + ret = model_->Init(initParam.modelPath, modelDesc_); + if (ret != APP_ERR_OK) { + LogError << "ModelInferenceProcessor init failed, ret=" << ret << "."; + return ret; + } + MxBase::ConfigData configData; + const std::string checkTensor = initParam.checkTensor ? 
"true" : "false"; + + configData.SetJsonValue("CLASS_NUM", std::to_string(initParam.classNum)); + configData.SetJsonValue("SCORE_THRESH", std::to_string(initParam.score_thresh)); + configData.SetJsonValue("IOU_THRESH", std::to_string(initParam.iou_thresh)); + configData.SetJsonValue("CHECK_MODEL", checkTensor); + + auto jsonStr = configData.GetCfgJson().serialize(); + std::map<std::string, std::shared_ptr<void>> config; + config["postProcessConfigContent"] = std::make_shared<std::string>(jsonStr); + config["labelPath"] = std::make_shared<std::string>(initParam.labelPath); + + post_ = std::make_shared<MxBase::SsdMobilenetFpnMindsporePost>(); + ret = post_->Init(config); + if (ret != APP_ERR_OK) { + LogError << "SSDResNet50 init failed, ret=" << ret << "."; + return ret; + } + return APP_ERR_OK; +} + +APP_ERROR SSDResNet50::DeInit() { + dvppWrapper_->DeInit(); + model_->DeInit(); + post_->DeInit(); + MxBase::DeviceManager::GetInstance()->DestroyDevices(); + return APP_ERR_OK; +} + + +APP_ERROR SSDResNet50::ReadImage(const std::string &imgPath, cv::Mat &imageMat) { + imageMat = cv::imread(imgPath, cv::IMREAD_COLOR); + return APP_ERR_OK; +} + +APP_ERROR SSDResNet50::ResizeImage(const cv::Mat &srcImageMat, cv::Mat &dstImageMat) { + static constexpr uint32_t resizeHeight = 640; + static constexpr uint32_t resizeWidth = 640; + + cv::resize(srcImageMat, dstImageMat, cv::Size(resizeWidth, resizeHeight)); + return APP_ERR_OK; +} + +APP_ERROR SSDResNet50::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase) { + const uint32_t dataSize = imageMat.cols * imageMat.rows * YUV444_RGB_WIDTH_NU; + LogInfo << "image size after crop" << imageMat.cols << " " << imageMat.rows; + MemoryData memoryDataDst(dataSize, MemoryData::MEMORY_DEVICE, deviceId_); + MemoryData memoryDataSrc(imageMat.data, dataSize, MemoryData::MEMORY_HOST_MALLOC); + + APP_ERROR ret = MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc); + if (ret != APP_ERR_OK) { + LogError << 
GetError(ret) << "Memory malloc failed."; + return ret; + } + + std::vector<uint32_t> shape = {imageMat.rows * YUV444_RGB_WIDTH_NU, static_cast<uint32_t>(imageMat.cols)}; + tensorBase = TensorBase(memoryDataDst, false, shape, TENSOR_DTYPE_UINT8); + return APP_ERR_OK; +} + +APP_ERROR SSDResNet50::Inference(const std::vector<MxBase::TensorBase> &inputs, + std::vector<MxBase::TensorBase> &outputs) { + auto dtypes = model_->GetOutputDataType(); + for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) { + std::vector<uint32_t> shape = {}; + for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) { + shape.push_back((uint32_t) modelDesc_.outputTensors[i].tensorDims[j]); + } + TensorBase tensor(shape, dtypes[i], MemoryData::MemoryType::MEMORY_DEVICE, deviceId_); + APP_ERROR ret = TensorBase::TensorBaseMalloc(tensor); + if (ret != APP_ERR_OK) { + LogError << "TensorBaseMalloc failed, ret=" << ret << "."; + return ret; + } + outputs.push_back(tensor); + } + DynamicInfo dynamicInfo = {}; + dynamicInfo.dynamicType = DynamicType::STATIC_BATCH; + dynamicInfo.batchSize = 1; + + APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo); + if (ret != APP_ERR_OK) { + LogError << "ModelInference failed, ret=" << ret << "."; + return ret; + } + return APP_ERR_OK; +} + +APP_ERROR SSDResNet50::PostProcess(const std::vector<MxBase::TensorBase> &inputs, + std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos, + const std::vector<MxBase::ResizedImageInfo> &resizedImageInfos, + const std::map<std::string, std::shared_ptr<void>> &configParamMap) { + APP_ERROR ret = post_->Process(inputs, objectInfos, resizedImageInfos, configParamMap); + if (ret != APP_ERR_OK) { + LogError << "Process failed, ret=" << ret << "."; + return ret; + } + return APP_ERR_OK; +} + +APP_ERROR SSDResNet50::SaveResult(const std::string &imgPath, + std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos) { + std::string fileName = imgPath.substr(imgPath.find_last_of("/") + 1); 
+ size_t dot = fileName.find_last_of("."); + std::string resFileName = "./results_" + fileName.substr(0, dot) + ".txt"; + std::ofstream outfile(resFileName); + if (outfile.fail()) { + LogError << "Failed to open result file: "; + return APP_ERR_COMM_FAILURE; + } + + std::vector<ObjectInfo> objects = objectInfos.at(0); + std::string resultStr; + + for (size_t i = 0; i < objects.size(); i++) { + ObjectInfo obj = objects.at(i); + std::string info = "BBox[" + std::to_string(i) + "]:[x0=" + std::to_string(obj.x0) + + ", y0=" + std::to_string(obj.y0) + ", w=" + std::to_string(obj.x1 - obj.x0) + ", h=" + + std::to_string(obj.y1 - obj.y0) + "], confidence=" + std::to_string(obj.confidence) + + ", classId=" + std::to_string(obj.classId) + ", className=" + obj.className; + LogInfo << info; + resultStr += info + "\n"; + } + outfile << resultStr << std::endl; + outfile.close(); + + return APP_ERR_OK; +} + +APP_ERROR SSDResNet50::Process(const std::string &imgPath) { + cv::Mat imageMat; + APP_ERROR ret = ReadImage(imgPath, imageMat); + + const uint32_t originHeight = imageMat.rows; + const uint32_t originWidth = imageMat.cols; + + LogInfo << "image shape, size=" << originWidth << "," << originHeight << "."; + if (ret != APP_ERR_OK) { + LogError << "ReadImage failed, ret=" << ret << "."; + return ret; + } + + ResizeImage(imageMat, imageMat); + + TensorBase tensorBase; + ret = CVMatToTensorBase(imageMat, tensorBase); + + if (ret != APP_ERR_OK) { + LogError << "Resize failed, ret=" << ret << "."; + return ret; + } + std::vector<MxBase::TensorBase> inputs = {}; + std::vector<MxBase::TensorBase> outputs = {}; + inputs.push_back(tensorBase); + ret = Inference(inputs, outputs); + if (ret != APP_ERR_OK) { + LogError << "Inference failed, ret=" << ret << "."; + return ret; + } + LogInfo << "Inference success, ret=" << ret << "."; + std::vector<MxBase::ResizedImageInfo> resizedImageInfos = {}; + + ResizedImageInfo imgInfo; + + imgInfo.widthOriginal = originWidth; + imgInfo.heightOriginal 
= originHeight; + imgInfo.widthResize = 640; + imgInfo.heightResize = 640; + imgInfo.resizeType = MxBase::RESIZER_STRETCHING; + + resizedImageInfos.push_back(imgInfo); + std::vector<std::vector<MxBase::ObjectInfo>> objectInfos = {}; + std::map<std::string, std::shared_ptr<void>> configParamMap = {}; + + ret = PostProcess(outputs, objectInfos, resizedImageInfos, configParamMap); + if (ret != APP_ERR_OK) { + LogError << "PostProcess failed, ret=" << ret << "."; + return ret; + } + if (objectInfos.empty()) { + LogInfo << "No object detected." << std::endl; + return APP_ERR_OK; + } + + ret = SaveResult(imgPath, objectInfos); + if (ret != APP_ERR_OK) { + LogError << "Save infer results into file failed. ret = " << ret << "."; + return ret; + } + + return APP_ERR_OK; +} diff --git a/research/cv/ssd_resnet50/infer/mxbase/C++/ResNet50_main.cpp b/research/cv/ssd_resnet50/infer/mxbase/C++/ResNet50_main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f884f09a3328d005450255fd8ec986e4f40398ae --- /dev/null +++ b/research/cv/ssd_resnet50/infer/mxbase/C++/ResNet50_main.cpp @@ -0,0 +1,62 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// c++鐨勫叆鍙f枃浠� +#include <unistd.h> +#include <dirent.h> +#include <iostream> +#include <fstream> +#include <vector> +#include "SSDResNet50.h" +#include "MxBase/Log/Log.h" + +namespace { + const uint32_t CLASS_NUM = 81; +} + +int main(int argc, char *argv[]) { + if (argc <= 2) { + LogWarn << "Please input image path, such as './ssd_resnet50 ssd_resnet50.om test.jpg'."; + return APP_ERR_OK; + } + + InitParam initParam = {}; + initParam.deviceId = 0; + initParam.classNum = CLASS_NUM; + initParam.labelPath = "../models/coco.names"; + + initParam.iou_thresh = 0.6; + initParam.score_thresh = 0.6; + initParam.checkTensor = true; + + initParam.modelPath = argv[1]; + auto ssdResnet50 = std::make_shared<SSDResNet50>(); + APP_ERROR ret = ssdResnet50->Init(initParam); + if (ret != APP_ERR_OK) { + LogError << "SsdResnet50 init failed, ret=" << ret << "."; + return ret; + } + + std::string imgPath = argv[2]; + ret = ssdResnet50->Process(imgPath); + if (ret != APP_ERR_OK) { + LogError << "SsdResnet50 process failed, ret=" << ret << "."; + ssdResnet50->DeInit(); + return ret; + } + ssdResnet50->DeInit(); + return APP_ERR_OK; +} diff --git a/research/cv/ssd_resnet50/infer/mxbase/C++/SSDResNet50.h b/research/cv/ssd_resnet50/infer/mxbase/C++/SSDResNet50.h new file mode 100644 index 0000000000000000000000000000000000000000..daec8e4f6e62d2c398689720154684ce45083595 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/mxbase/C++/SSDResNet50.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// 浣滀负鏍囪瘑锛岄槻姝㈤噸澶嶇紪璇戞姤閿� +#ifndef SSD_ResNet50 +#define SSD_ResNet50 + + +#include <memory> +#include <utility> +#include <vector> +#include <string> +#include <map> +#include <opencv2/opencv.hpp> +#include "MxBase/DvppWrapper/DvppWrapper.h" +#include "MxBase/ModelInfer/ModelInferenceProcessor.h" +#include "ObjectPostProcessors/SsdMobilenetFpnMindsporePost.h" +#include "MxBase/Tensor/TensorContext/TensorContext.h" + +struct InitParam { + uint32_t deviceId; + std::string labelPath; + uint32_t classNum; + float iou_thresh; + float score_thresh; + bool checkTensor; + std::string modelPath; +}; + +class SSDResNet50 { + public: + APP_ERROR Init(const InitParam &initParam); + APP_ERROR DeInit(); + APP_ERROR ReadImage(const std::string &imgPath, cv::Mat &imageMat); + APP_ERROR ResizeImage(const cv::Mat &srcImageMat, cv::Mat &dstImageMat); + APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase &tensorBase); + APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs); + APP_ERROR PostProcess(const std::vector<MxBase::TensorBase> &inputs, + std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos, + const std::vector<MxBase::ResizedImageInfo> &resizedImageInfos, + const std::map<std::string, std::shared_ptr<void>> &configParamMap); + APP_ERROR SaveResult(const std::string &imgPath, + std::vector<std::vector<MxBase::ObjectInfo>> &objectInfos); + APP_ERROR Process(const std::string &imgPath); + private: + std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_; + 
std::shared_ptr<MxBase::ModelInferenceProcessor> model_; + std::shared_ptr<MxBase::SsdMobilenetFpnMindsporePost> post_; + MxBase::ModelDesc modelDesc_; + uint32_t deviceId_ = 0; +}; +#endif diff --git a/research/cv/ssd_resnet50/infer/mxbase/C++/run.sh b/research/cv/ssd_resnet50/infer/mxbase/C++/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..8bc66e96ece97665a1bfca5c628b764d8523bc5e --- /dev/null +++ b/research/cv/ssd_resnet50/infer/mxbase/C++/run.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Copyright (C) 2021.Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export ASCEND_HOME=/usr/local/Ascend +export ARCH_PATTERN=x86_64-linux +export ASCEND_VERSION=nnrt/latest + +rm -rf dist +mkdir dist +cd dist +cmake .. 
+make -j +make install + +cp ./ssd_resnet50 ../ diff --git a/research/cv/ssd_resnet50/infer/requirements.txt b/research/cv/ssd_resnet50/infer/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..15f64936074aa40d5ff1d65d577f2bc9bfd89b29 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/requirements.txt @@ -0,0 +1 @@ +absl-py \ No newline at end of file diff --git a/research/cv/ssd_resnet50/infer/sdk/conf/ssd-resnet50.pipeline b/research/cv/ssd_resnet50/infer/sdk/conf/ssd-resnet50.pipeline new file mode 100644 index 0000000000000000000000000000000000000000..43ea1c394a13baa6831679bb83cdd5eaa53d4de2 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/sdk/conf/ssd-resnet50.pipeline @@ -0,0 +1,61 @@ +{ + "detection": { + "stream_config": { + "deviceId": "0" + }, + "appsrc0": { + "props": { + "blocksize": "409600" + }, + "factory": "appsrc", + "next": "mxpi_imagedecoder0" + }, + "mxpi_imagedecoder0": { + "props": { + "handleMethod": "opencv" + }, + "factory": "mxpi_imagedecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "parentName": "mxpi_imagedecoder0", + "handleMethod": "opencv", + "resizeHeight": "640", + "resizeWidth": "640", + "resizeType": "Resizer_Stretch" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_tensorinfer0" + }, + "mxpi_tensorinfer0": { + "props": { + "waitingTime": "3000", + "dataSource": "mxpi_imageresize0", + "modelPath": "/home/test/ssd_resnet50/ssd_resnet50.om" + }, + "factory": "mxpi_tensorinfer", + "next": "mxpi_objectpostprocessor0" + }, + "mxpi_objectpostprocessor0": { + "props": { + "dataSource": "mxpi_tensorinfer0", + "postProcessConfigPath": "/home/test/ssd_resnet50/infer/sdk/conf/ssd_mobilenet_v1_fpn_ms_on_coco_postprocess.cfg", + "labelPath": "/home/test/ssd_resnet50/infer/sdk/conf/coco.names", + "postProcessLibPath": "/usr/local/sdk_home/mxManufacture/lib/modelpostprocessors/libSsdMobilenetFpn_MindsporePost.so" + }, + "factory": "mxpi_objectpostprocessor", + "next": 
"mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_objectpostprocessor0" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "factory": "appsink" + } + } +} \ No newline at end of file diff --git a/research/cv/ssd_resnet50/infer/sdk/conf/ssd_mobilenet_v1_fpn_ms_on_coco_postprocess.cfg b/research/cv/ssd_resnet50/infer/sdk/conf/ssd_mobilenet_v1_fpn_ms_on_coco_postprocess.cfg new file mode 100644 index 0000000000000000000000000000000000000000..9d8bb9f25bfadace3c9207252169db51d461fa42 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/sdk/conf/ssd_mobilenet_v1_fpn_ms_on_coco_postprocess.cfg @@ -0,0 +1,4 @@ +CLASS_NUM=81 +SCORE_THRESH=0.1 +IOU_THRESH=0.6 +CHECK_MODEL=true diff --git a/research/cv/ssd_resnet50/infer/sdk/infer_by_sdk.py b/research/cv/ssd_resnet50/infer/sdk/infer_by_sdk.py new file mode 100644 index 0000000000000000000000000000000000000000..4947a523ab81d8d82d8d5831c395a1c42cbdfc93 --- /dev/null +++ b/research/cv/ssd_resnet50/infer/sdk/infer_by_sdk.py @@ -0,0 +1,196 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""infer_by_sdk""" +import argparse +import json +import os +from StreamManagerApi import MxDataInput +from StreamManagerApi import StreamManagerApi + +# 鏀寔鐨勫浘鐗囧悗缂€锛屽垎鍒槸杩欏洓涓悗缂€鍚� +SUPPORT_IMG_SUFFIX = (".jpg", ".JPG", ".jpeg", ".JPEG") + +# os.path.dirname(__file__)鑾峰彇褰撳墠鑴氭湰鐨勫畬鏁磋矾寰勶紝os.path.abspath()鑾峰彇褰撳墠鑴氭湰鐨勫畬鏁磋矾寰� +current_path = os.path.abspath(os.path.dirname(__file__)) + +# argparse鏄釜瑙f瀽鍣紝argparse鍧楀彲浠ヨ浜鸿交鏉剧紪鍐欑敤鎴峰弸濂界殑鍛戒护琛屾帴鍙�,浣跨敤argparse棣栧厛瑕佸垱寤篈rgumentParser瀵硅薄锛� +parser = argparse.ArgumentParser( + description="SSD ResNet50 infer " "example.", + fromfile_prefix_chars="@", +) + +# name or flags锛屼竴涓懡浠ゆ垨涓€涓€夐」瀛楃涓茬殑鍒楄〃 +# str灏嗘暟鎹己鍒惰浆鎹负瀛楃涓层€傛瘡绉嶆暟鎹被鍨嬮兘鍙互寮哄埗杞崲涓哄瓧绗︿覆 +# help 涓€涓閫夐」浣滅敤鐨勭畝鍗曟弿杩� +# default 褰撳弬鏁版湭鍦ㄥ懡浠よ涓嚭鐜版椂浣跨敤鐨勫€笺€� +parser.add_argument( + "--pipeline_path", + type=str, + help="mxManufacture pipeline file path", + default=os.path.join(current_path, "conf/ssd-resnet50.pipeline"), +) +parser.add_argument( + "--stream_name", + type=str, + help="Infer stream name in the pipeline config file", + default="detection", +) +parser.add_argument( + "--img_path", + type=str, + help="Image pathname, can be a image file or image directory", + default=os.path.join(current_path, "../coco/val2017"), +) +# 鐩殑鐢ㄤ簬瀛樻斁鎺ㄧ悊鍚庣殑缁撴灉 +parser.add_argument( + "--res_path", + type=str, + help="Directory to store the inferred result", + default=None, + required=False, +) + +# 璧嬪€肩劧鍚庤В鏋愬弬鏁� +args = parser.parse_args() + +# 鎺ㄧ悊鍥惧儚 +def infer(): + """Infer images by DVPP + OM. 
""" + pipeline_path = args.pipeline_path + # 灏唖tream_name缂栫爜涓簎tf-8鐨勬牸寮� + stream_name = args.stream_name.encode() + img_path = os.path.abspath(args.img_path) + res_dir_name = args.res_path + + # StreamManagerApi()鐢ㄤ簬瀵规祦绋嬬殑鍩烘湰绠$悊锛氬姞杞芥祦绋嬮厤缃€佸垱寤烘祦绋嬨€佸悜娴佺▼涓婂彂閫佹暟鎹€佽幏寰楁墽琛岀粨鏋� + stream_manager_api = StreamManagerApi() + # InitManager鍒濆鍖栦竴涓猄treamManagerApi + ret = stream_manager_api.InitManager() + if ret != 0: + print("Failed to init Stream manager, ret=%s" % str(ret)) + exit() + + # create streams by pipeline config file + # 璇诲彇pipeline鏂囦欢 + with open(pipeline_path, "rb") as f: + pipeline_str = f.read() + + # CreateMultipleStreams锛屾牴鎹寚瀹氱殑pipeline閰嶇疆鍒涘缓Stream + ret = stream_manager_api.CreateMultipleStreams(pipeline_str) + if ret != 0: + print("Failed to create Stream, ret=%s" % str(ret)) + exit() + + # 鎻掍欢鐨刬d + in_plugin_id = 0 + # Construct the input of the stream + # 鏋勯€爏tream鐨勮緭鍏ワ紝MxDataInput鐢ㄤ簬Stream鎺ユ敹鐨勬暟鎹粨鏋勫畾涔夈€� + data_input = MxDataInput() + + # os.path.isfile()鐢ㄤ簬鍒ゆ柇鏌愪竴瀵硅薄(闇€鎻愪緵缁濆璺緞)鏄惁涓烘枃浠� + # endswith鐢ㄤ簬鍒ゆ柇鏄惁涓烘寚瀹氱殑鍥剧墖鐨勫瓧绗︿覆缁撳熬 + if os.path.isfile(img_path) and img_path.endswith(SUPPORT_IMG_SUFFIX): + file_list = [os.path.abspath(img_path)] + else: + # os.path.isdir()鐢ㄤ簬鍒ゆ柇瀵硅薄鏄惁涓轰竴涓洰褰� + file_list = os.listdir(img_path) + file_list = [ + # 灏嗗浘鐗囪矾寰勫拰鍥剧墖杩炴帴锛宖or in if 杩囨护鎺夐偅浜涗笉绗﹀悎鐓х墖鍚庣紑鐨勫浘鐗� + os.path.join(img_path, img) + for img in file_list + if img.endswith(SUPPORT_IMG_SUFFIX) + ] + + if not res_dir_name: + res_dir_name = os.path.join(".", "infer_res") + print(f"res_dir_name={res_dir_name}") + # 鍒涘缓鐩綍锛宔鐩爣鐩綍宸插瓨鍦ㄧ殑鎯呭喌涓嬩笉浼氳Е鍙慒ileExistsError寮傚父銆� + os.makedirs(res_dir_name, exist_ok=True) + pic_infer_dict_list = [] + # 寮€濮嬪file_list杩涜閬嶅巻 + for file_name in file_list: + # 渚濇璇诲嚭姣忓紶鐓х墖 + with open(file_name, "rb") as f: + img_data = f.read() + if not img_data: + print(f"read empty data from img:{file_name}") + continue + # data_input杩欎釜瀵硅薄鐨刣ata鍏冪礌鍊间负img_data + data_input.data = img_data + # SendDataWithUniqueId鍚戞寚瀹氱殑鍏冧欢鍙戦€佹暟鎹紝杈撳叆in_plugin_id鐩爣杈撳叆鎻掍欢id锛宒ata_input 
def parse_img_infer_result(file_name, infer_result):
    """Convert one image's SDK inference output into COCO-style detection dicts.

    Args:
        file_name: path of the inferred image; its basename without the
            extension becomes the ``image_id``.
        infer_result: SDK result object whose ``data`` attribute holds the
            plugin output as UTF-8 JSON bytes.

    Returns:
        list[dict]: one dict per detected object with keys ``image_id``,
        ``bbox`` ([x, y, width, height]), ``category_id`` and ``score``.
    """
    # The plugin reports detections under "MxpiObject"; default to an empty
    # list when the image produced no detections.
    obj_list = json.loads(infer_result.data.decode()).get("MxpiObject", [])
    # Loop-invariant: the image id is the file basename without its suffix.
    image_id = os.path.basename(file_name).split(".")[0]
    det_obj_list = []
    for obj in obj_list:
        # Bounding-box corner coordinates, rounded to 4 decimal places.
        x0 = round(obj.get("x0"), 4)
        y0 = round(obj.get("y0"), 4)
        x1 = round(obj.get("x1"), 4)
        y1 = round(obj.get("y1"), 4)
        # COCO bbox format is [x, y, width, height].
        bbox_for_map = [x0, y0, x1 - x0, y1 - y0]
        class_info = obj.get("classVec")[0]
        det_obj_list.append(
            dict(
                image_id=image_id,
                bbox=bbox_for_map,
                category_id=class_info.get("classId"),
                # Confidence score; low-confidence boxes are already filtered
                # by the SCORE_THRESH setting in the postprocess config file.
                score=class_info.get("confidence"),
            )
        )
    return det_obj_list


if __name__ == "__main__":
    infer()
# ============================================================================
"""Generate a COCO mAP report from a detection-result JSON file."""
import os
from datetime import datetime
import json

from absl import flags
from absl import app
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

PRINT_LINES_TEMPLATE = """
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = %.3f
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = %.3f
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = %.3f
"""

FLAGS = flags.FLAGS
flags.DEFINE_string(
    name="annotations_json",
    default=None,
    help="annotations_json file path name",
)

flags.DEFINE_string(
    name="det_result_json", default=None, help="det_result json file"
)

flags.DEFINE_enum(
    name="anno_type",
    default="bbox",
    enum_values=["segm", "bbox", "keypoints"],
    help="Annotation type",
)

flags.DEFINE_string(
    name="output_path_name",
    default=None,
    help="Where to out put the result files.",
)

flags.mark_flag_as_required("annotations_json")
flags.mark_flag_as_required("det_result_json")
flags.mark_flag_as_required("output_path_name")


def get_category_id(k):
    """Map a contiguous class id (coco.names order) to the sparse
    category id used by the COCO instances annotation file.

    :param k: class id corresponding to coco.names
    :return: category id used in the instances_val json annotations
    """
    kk = k
    if 12 <= k <= 24:
        kk = k + 1
    elif 25 <= k <= 26:
        kk = k + 2
    elif 27 <= k <= 40:
        kk = k + 4
    elif 41 <= k <= 60:
        kk = k + 5
    elif k == 61:
        kk = k + 6
    elif k == 62:
        kk = k + 8
    elif 63 <= k <= 73:
        kk = k + 9
    elif 74 <= k <= 80:
        kk = k + 10
    return kk


def get_dict_from_file(file_path):
    """Load the detection results and normalise their ids.

    :param file_path: JSON file containing the list of all infer results
    :return: list of dicts, one per detection, with ``category_id``
        remapped to COCO ids and ``image_id`` cast to int
    """
    ls = []
    # "with" guarantees the file handle is closed (the original leaked it).
    with open(file_path) as f:
        ban_list = json.load(f)

    for item in ban_list:
        item_copy = item.copy()
        item_copy['category_id'] = get_category_id(int(item['category_id']))
        item_copy['image_id'] = int(item['image_id'])
        ls.append(item_copy)

    return ls


def generate_main(unused_arg):
    """Run COCO evaluation and append a formatted mAP report to the
    output file named by ``--output_path_name``."""
    del unused_arg
    out_put_dir = os.path.dirname(FLAGS.output_path_name)
    if not os.path.exists(out_put_dir):
        os.makedirs(out_put_dir)

    with open(FLAGS.output_path_name, "a+") as fw:
        now_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        head_info = f"{'-'*50}mAP Test starts @ {now_time_str}{'-'*50}\n"
        fw.write(head_info)
        fw.flush()

        # Load ground truth and keep only images without crowd annotations.
        cocoGt = COCO(FLAGS.annotations_json)
        need_img_ids = []
        for img_id in cocoGt.getImgIds():
            anno_ids = cocoGt.getAnnIds(imgIds=img_id, iscrowd=None)
            anno = cocoGt.loadAnns(anno_ids)
            if any(label["iscrowd"] for label in anno):
                continue
            need_img_ids.append(img_id)

        result_dict = get_dict_from_file(FLAGS.det_result_json)
        json_file_name = './result.json'
        with open(json_file_name, 'w') as f:
            json.dump(result_dict, f)

        cocoDt = cocoGt.loadRes(json_file_name)
        cocoEval = COCOeval(cocoGt, cocoDt, iouType=FLAGS.anno_type)
        cocoEval.params.imgIds = sorted(need_img_ids)
        print(cocoEval.params.imgIds)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()

        # cocoEval.stats holds the 12 AP/AR numbers in template order.
        format_lines = [
            line for line in PRINT_LINES_TEMPLATE.splitlines() if line.strip()
        ]
        for i, line in enumerate(format_lines):
            fw.write(line % cocoEval.stats[i] + "\n")

        end_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        tail_info = f"{'-'*50}mAP Test ends @ {end_time_str}{'-'*50}\n"
        fw.write(tail_info)


if __name__ == "__main__":
    app.run(generate_main)
annotations_json="$1"
det_result_json="$2"
output_path_name="$3"

# Quote every expansion so paths containing spaces survive word splitting.
"${PY}" generate_map_report.py \
    --annotations_json="${annotations_json}" \
    --det_result_json="${det_result_json}" \
    --output_path_name="${output_path_name}" \
    --anno_type=bbox
set -e

# Simple log helper functions
info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }

export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins

# To set PYTHONPATH, import the StreamManagerApi.py
export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python

pipeline_path="$1"
img_path="$2"
res_path="$3"
stream_name="$4"

# Quote the arguments so paths with spaces are passed through intact.
python3.7 infer_by_sdk.py --pipeline_path "${pipeline_path}" --img_path "${img_path}" --res_path "${res_path}" --stream_name "${stream_name}"
exit 0
+voc_json|-|string|False|dataset涓簐oc鏃讹紝鐢ㄤ簬鎸囧畾鏁版嵁闆嗘爣娉ㄦ枃浠讹紝濉浉瀵逛簬data_url鐨勮矾寰勩€� +anno_path|-|string|False|dataset涓簅ther鏃讹紝鐢ㄤ簬鎸囧畾鏁版嵁闆嗘爣娉ㄦ枃浠讹紝濉浉瀵逛簬data_url鐨勮矾寰勩€� +pre_trained|-|string|False|杩佺Щ瀛︿範鏃讹紝棰勮缁冩ā鍨嬭矾寰勶紝妯″瀷鏀惧湪data_url涓嬶紝濉浉瀵逛簬data_url鐨勮矾寰勩€� +loss_scale|1024|int|False|Loss scale. +filter_weight|False|Boolean|False|Filter head weight parameters锛岃縼绉诲涔犳椂闇€瑕佽缃负True銆� + +### 2.3. 璁粌杈撳嚭鏂囦欢 + +璁粌瀹屾垚鍚庣殑杈撳嚭鏂囦欢濡備笅 + +```js +璁粌杈撳嚭鐩綍 V000X +鈹溾攢鈹€ ssd-10_12.ckpt +鈹溾攢鈹€ ssd-10_12.ckpt.air +鈹溾攢鈹€ ssd-graph.meta +鈹溾攢鈹€ kernel_meta +鈹偮犅� 鈹溾攢鈹€ ApplyMomentum_13796921261177776697_0.info +鈹偮犅� 鈹溾攢鈹€ AddN_4688903218960634315_0.json +鈹偮犅� 鈹溾攢鈹€ ... + +``` + +## 3.杩佺Щ瀛︿範鎸囧 + +### 3.1. 鏁版嵁闆嗗噯澶囷細 + +鍙傝€冭缁冩墜鍐岋細`杩佺Щ瀛︿範鎸囧`->`鏁版嵁闆嗗噯澶嘸 + +### 3.2. 涓婁紶棰勮缁冩ā鍨媍kpt鏂囦欢鍒皁bs鏁版嵁鐩綍pretrain_model涓紝绀轰緥濡備笅锛� + +```js +MicrocontrollerDetection # obs鏁版嵁鐩綍 + |- train # 璁粌鍥剧墖鏁版嵁闆嗙洰褰� + |- IMG_20181228_102033.jpg + |- IMG_20181228_102041.jpg + |- ..銆� + |- train_labels.txt # 璁粌鍥剧墖鏁版嵁鏍囨敞 + |- pretrain_model + |- ssd-3-61.ckpt # 棰勮缁冩ā鍨� ckpt鏂囦欢 +``` + +classes_label_path鍙傛暟瀵瑰簲鐨則rain_labels.txt鐨勫唴瀹瑰涓嬫墍绀猴細 + +```js +background +Arduino Nano +ESP8266 +Raspberry Pi 3 +Heltec ESP32 Lora +``` + +### 3.3. 淇敼璋冧紭鍙傛暟 + +鐩墠杩佺Щ瀛︿範鏀寔淇敼鏁版嵁闆嗙被鍒紝璁㈤槄绠楁硶鍒涘缓璁粌浠诲姟锛屽垱寤鸿缁冧綔涓氭椂闇€瑕佷慨鏀瑰涓嬭皟浼樺弬鏁帮細 + +1. dataset鏀逛负other銆� +2. num_classes鏀逛负杩佺Щ瀛︿範鏁版嵁闆嗙殑绫诲埆鏁�+1銆� +3. anno_path鎸囧畾杩佺Щ瀛︿範鏁版嵁闆嗙殑鏍囨敞鏂囦欢璺緞銆� +4. filter_weight鏀逛负True銆� +5. pre_trained鎸囧畾棰勮缁冩ā鍨嬭矾寰勩€� + +浠ヤ笂鍙傛暟鐨勮鏄庤`璁粌鍙傛暟璇存槑`銆� + +### 3.4. 
鍒涘缓璁粌浣滀笟 + +鎸囧畾鏁版嵁瀛樺偍浣嶇疆銆佹ā鍨嬭緭鍑轰綅缃拰浣滀笟鏃ュ織璺緞锛屽垱寤鸿缁冧綔涓氳繘琛岃縼绉诲涔犮€� \ No newline at end of file diff --git a/research/cv/ssd_resnet50/modelarts/start.py b/research/cv/ssd_resnet50/modelarts/start.py new file mode 100644 index 0000000000000000000000000000000000000000..bf49cf2f2f9561a33a760dc3e0770e13329d7cd6 --- /dev/null +++ b/research/cv/ssd_resnet50/modelarts/start.py @@ -0,0 +1,347 @@ +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""start""" +import sys +import os +import argparse +import logging +import ast +import glob +import numpy as np +import mindspore +import mindspore.nn as nn +from mindspore import context, Tensor +from mindspore.communication.management import init, get_rank +from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor +from mindspore.train import Model +from mindspore.context import ParallelMode +from mindspore.train.serialization import export as export_model +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.common import dtype + +CACHE_TRAIN_DATA_URL = "/cache/train_data_url" +CACHE_TRAIN_OUT_URL = "/cache/train_out_url" + +if CACHE_TRAIN_OUT_URL != '': + sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../')) + + from src.ssd import SSDWithLossCell, TrainingWrapper, ssd_resnet50 + from src.config import 
def get_args():
    """Parse command-line arguments for SSD-ResNet50 ModelArts training.

    Returns:
        argparse.Namespace: parsed arguments; defaults mirror the values
        documented in the training readme.
    """
    parser = argparse.ArgumentParser(description="SSD training")
    parser.add_argument("--run_platform", type=str, default="Ascend", choices=("Ascend", "GPU", "CPU"),
                        help="run platform, support Ascend, GPU and CPU.")
    # Model output directory (OBS URL the final artefacts are copied back to).
    parser.add_argument("--train_url",
                        type=str, default='', help='the path model saved')
    # Dataset directory (OBS URL the training data is pulled from).
    parser.add_argument("--data_url",
                        type=str, default='', help='the training data')
    parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False,
                        help="If set it true, only create Mindrecord, default is False.")
    parser.add_argument("--distribute", type=ast.literal_eval, default=False,
                        help="Run distribute, default is False.")
    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
    parser.add_argument("--lr", type=float, default=0.05, help="Learning rate, default is 0.05.")
    parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink.")
    parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.")
    parser.add_argument("--epoch_size", type=int, default=10, help="Epoch size, default is 500.")
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size, default is 32.")
    parser.add_argument("--pre_trained", type=str, default=None, help="Pretrained Checkpoint file path.")
    parser.add_argument("--pre_trained_epoch_size", type=int, default=0, help="Pretrained epoch size.")
    parser.add_argument("--save_checkpoint_epochs", type=int, default=10,
                        help="Save checkpoint epochs, default is 10.")
    parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.")
    parser.add_argument("--filter_weight", type=ast.literal_eval, default=False,
                        help="Filter head weight parameters, default is False.")
    parser.add_argument('--freeze_layer', type=str, default="none", choices=["none", "backbone"],
                        help="freeze the weights of network, support freeze the backbone's weights, "
                             "default is not freezing.")
    # Overrides for values declared in src/config.py.
    parser.add_argument("--feature_extractor_base_param", type=str, default="")
    parser.add_argument("--coco_root", type=str, default="")
    parser.add_argument("--num_classes", type=int, default=81)
    parser.add_argument("--voc_root", type=str, default="")
    parser.add_argument("--voc_json", type=str, default="")
    parser.add_argument("--image_dir", type=str, default="")
    parser.add_argument("--anno_path", type=str, default="coco_labels.txt")

    args_opt = parser.parse_args()
    return args_opt


def update_config(args_opt):
    """Rewrite the dataset paths in the global ``config`` so they point
    into the local cache directory the OBS data was copied to.

    Args:
        args_opt: parsed command-line arguments.
    """
    if config.num_ssd_boxes == -1:
        # Derive the total number of default boxes from the feature-map
        # grid sizes and the per-level default-box counts.
        num = 0
        h, w = config.img_shape
        for i in range(len(config.steps)):
            num += (h // config.steps[i]) * (w // config.steps[i]) * \
                config.num_default[i]
        config.num_ssd_boxes = num

    data_dir = CACHE_TRAIN_DATA_URL

    # The MindRecord format dataset path is updated to the cache directory.
    config.mindrecord_dir = data_dir

    # Complete the path of the selected dataset flavour.
    dataset = args_opt.dataset
    if dataset == 'coco':
        coco_root = args_opt.coco_root
        config.coco_root = os.path.join(data_dir, coco_root)
        print(f"update config.coco_root {coco_root} to {config.coco_root}")
    elif dataset == 'voc':
        voc_root = args_opt.voc_root
        config.voc_root = os.path.join(data_dir, voc_root)
        print(f"update config.voc_root {voc_root} to {config.voc_root}")
    else:
        image_dir = args_opt.image_dir
        anno_path = args_opt.anno_path
        config.image_dir = os.path.join(data_dir, image_dir)
        config.anno_path = os.path.join(data_dir, anno_path)
        print(f"update config.image_dir {image_dir} to {config.image_dir}")
        print(f"update config.anno_path {anno_path} to {config.anno_path}")

    config.num_classes = args_opt.num_classes
    print(f"config: {config}")


def get_last_ckpt():
    """Return the most recently written checkpoint in the output cache,
    or ``None`` when no checkpoint exists."""
    ckpt_pattern = os.path.join(CACHE_TRAIN_OUT_URL, "*.ckpt")
    ckpts = glob.glob(ckpt_pattern)
    if not ckpts:
        print(f"Can't find ckpt in {CACHE_TRAIN_OUT_URL}")
        return None
    # Newest checkpoint by modification time.
    return max(ckpts, key=os.path.getmtime)


def export(net, device_id, ckpt_file, file_format="AIR", batch_size=1):
    """Load ``ckpt_file`` into ``net`` and export the frozen graph.

    Args:
        net: network instance matching the checkpoint structure.
        device_id: Ascend device to run the export on.
        ckpt_file: checkpoint path; the exported file is named after it.
        file_format: target format accepted by MindSpore ``export``.
        batch_size: batch dimension of the dummy tracing input.
    """
    print(f"start export {ckpt_file} to {file_format}, device_id {device_id}")
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    context.set_context(device_id=device_id)

    param_dict = load_checkpoint(ckpt_file)
    net.init_parameters_data()
    load_param_into_net(net, param_dict)
    net.set_train(False)

    # A random input with the training image shape drives graph tracing.
    input_shp = [batch_size, 3] + config.img_shape
    input_array = Tensor(np.random.uniform(-1.0, 1.0, size=input_shp),
                         mindspore.float32)
    export_model(net, input_array, file_name=ckpt_file, file_format=file_format)
    print(f"export {ckpt_file} to {file_format} success.")


def export_air(net, args):
    """Export the newest checkpoint in the output cache to AIR format.

    Bug fix: the original call passed ``CACHE_TRAIN_OUT_URL + "ssd_resnet50"``
    positionally as ``file_format`` and ``"AIR"`` as ``batch_size``, which
    crashed inside ``export`` when building the dummy input shape.  The
    exported file is named after the checkpoint itself.
    """
    ckpt = get_last_ckpt()
    if not ckpt:
        return
    export(net, args.device_id, ckpt, "AIR")


def ssd_model_build(args_opt):
    """Build the SSD-ResNet50 network, optionally loading backbone weights.

    Raises:
        ValueError: when ``config.model`` is not ``ssd_resnet50``.
    """
    if config.model != "ssd_resnet50":
        # Bug fix: the message previously read args_opt.model, an attribute
        # that does not exist, turning the error into an AttributeError.
        raise ValueError(f'config.model: {config.model} is not supported')

    ssd = ssd_resnet50(config=config)
    init_net_param(ssd)
    if args_opt.feature_extractor_base_param != "":
        print("args_opt.feature_extractor_base_param鐨勫€兼槸****=", args_opt.feature_extractor_base_param)
        print("args_opt.pre_trained****=", args_opt.pre_trained)
        param_dict = load_checkpoint(args_opt.feature_extractor_base_param)
        # Re-key the backbone parameters to match their location inside SSD.
        for x in list(param_dict.keys()):
            param_dict["network.feature_extractor.resnet." + x] = param_dict[x]
            del param_dict[x]
        load_param_into_net(ssd.feature_extractor.resnet, param_dict)
    return ssd


def file_create(args_opt, mindrecord_dir, mindrecord_file, prefix):
    """Create the MindRecord dataset files if they do not exist yet.

    Args:
        args_opt: parsed arguments (``dataset`` selects the converter).
        mindrecord_dir: directory the MindRecord files are written to.
        mindrecord_file: first expected output file, used as existence probe.
        prefix: file-name prefix of the generated MindRecord shards.
    """
    if not os.path.exists(mindrecord_file):
        if not os.path.isdir(mindrecord_dir):
            os.makedirs(mindrecord_dir)
        if args_opt.dataset == "coco":
            if os.path.isdir(config.coco_root):
                print("Create Mindrecord.")
                data_to_mindrecord_byte_image("coco", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("coco_root not exists.")
        elif args_opt.dataset == "voc":
            if os.path.isdir(config.voc_dir):
                print("Create Mindrecord.")
                voc_data_to_mindrecord(mindrecord_dir, True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("voc_dir not exists.")
        else:
            if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):
                print("Create Mindrecord.")
                data_to_mindrecord_byte_image("other", True, prefix)
                print("Create Mindrecord Done, at {}".format(mindrecord_dir))
            else:
                print("image_dir or anno_path not exists.")

    print("*********mindrecord_file", mindrecord_file)


def _setup_context(args_opt):
    """Configure the MindSpore context; return ``(rank, device_num)``.

    Bug fix: the original code initialised the distributed context twice on
    non-CPU distribute runs (``init()`` was called both before and after
    ``set_context``); it is now done exactly once per platform branch.
    """
    rank, device_num = 0, 1
    if args_opt.run_platform == "CPU":
        print(args_opt)
        context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
        if args_opt.distribute:
            device_num = args_opt.device_num
            context.reset_auto_parallel_context()
            context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                              device_num=device_num)
            init()
            rank = args_opt.device_id % device_num
    else:
        context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.run_platform,
                            device_id=args_opt.device_id)
        if args_opt.distribute:
            device_num = args_opt.device_num
            context.reset_auto_parallel_context()
            context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                              device_num=device_num)
            init()
            context.set_auto_parallel_context(all_reduce_fusion_config=[29, 58, 89])
            rank = get_rank()
    return rank, device_num


def start_main():
    """Entry point: copy data from OBS, build the dataset and network,
    train, export the final checkpoint to AIR and copy results back."""
    args_opt = get_args()
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    print("Training setting args:", args_opt)

    # Pull the training data from OBS into the local cache.
    os.makedirs(CACHE_TRAIN_DATA_URL, exist_ok=True)
    mox.file.copy_parallel(args_opt.data_url, CACHE_TRAIN_DATA_URL)

    update_config(args_opt)

    print("Start create dataset!")
    rank, device_num = _setup_context(args_opt)

    print("****args_opt.dataset=", args_opt.dataset)
    prefix = "ssd.mindrecord"
    mindrecord_dir = config.mindrecord_dir
    mindrecord_file = os.path.join(mindrecord_dir, prefix + "0")
    file_create(args_opt, mindrecord_dir, mindrecord_file, prefix)

    if args_opt.only_create_dataset:
        return

    loss_scale = float(args_opt.loss_scale)
    if args_opt.run_platform == "CPU":
        loss_scale = 1.0

    # When creating MindDataset, use the first mindrecord file (ssd.mindrecord0).
    use_multiprocessing = (args_opt.run_platform != "CPU")
    dataset = create_ssd_dataset(mindrecord_file, repeat_num=1, batch_size=args_opt.batch_size,
                                 device_num=device_num, rank=rank, use_multiprocessing=use_multiprocessing)
    print("********dataset", dataset)
    dataset_size = dataset.get_dataset_size()
    print("****-----****dataset_size", dataset)
    print(f"Create dataset done! dataset size is {dataset_size}")

    ssd = ssd_model_build(args_opt)
    print("finish ssd model building ...............")

    if ("use_float16" in config and config.use_float16) or args_opt.run_platform == "GPU":
        ssd.to_float(dtype.float16)
    net = SSDWithLossCell(ssd, config)

    ckpt_config = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs,
                                   keep_checkpoint_max=60)
    ckpoint_cb = ModelCheckpoint(prefix="ssd", directory=CACHE_TRAIN_OUT_URL, config=ckpt_config)

    if args_opt.pre_trained:
        param_dict = load_checkpoint(args_opt.pre_trained)
        if args_opt.filter_weight:
            # Drop head weights whose shape depends on num_classes so a
            # checkpoint trained on a different class count can be reused.
            filter_checkpoint_parameter_by_list(param_dict, config.checkpoint_filter_list)
        load_param_into_net(net, param_dict, True)

    lr = Tensor(get_lr(global_step=args_opt.pre_trained_epoch_size * dataset_size,
                       lr_init=config.lr_init, lr_end=config.lr_end_rate * args_opt.lr, lr_max=args_opt.lr,
                       warmup_epochs=config.warmup_epochs,
                       total_epochs=args_opt.epoch_size,
                       steps_per_epoch=dataset_size))

    if "use_global_norm" in config and config.use_global_norm:
        opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                          config.momentum, config.weight_decay, 1.0)
        net = TrainingWrapper(net, opt, loss_scale, True)
    else:
        opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                          config.momentum, config.weight_decay, loss_scale)
        net = TrainingWrapper(net, opt, loss_scale)

    callback = [TimeMonitor(data_size=dataset_size), LossMonitor(), ckpoint_cb]
    model = Model(net)
    dataset_sink_mode = False
    if args_opt.mode == "sink" and args_opt.run_platform != "CPU":
        print("In sink mode, one epoch return a loss.")
        dataset_sink_mode = True
    print("Start train SSD, the first epoch will be slower because of the graph compilation.")

    # Checkpoints are written to the current working directory.
    os.makedirs(CACHE_TRAIN_OUT_URL, exist_ok=True)
    os.chdir(CACHE_TRAIN_OUT_URL)
    model.train(args_opt.epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode)

    # Re-create a clean inference network for export, then push results to OBS.
    net = ssd_resnet50(config=config)
    export_air(net, args_opt)
    mox.file.copy_parallel(CACHE_TRAIN_OUT_URL, args_opt.train_url)


if __name__ == '__main__':
    start_main()