diff --git a/official/cv/yolov5/infer/mxbase/CMakeLists.txt b/official/cv/yolov5/infer/mxbase/CMakeLists.txt
index c11a1e17e9d55c1109086fe71772d41cb5cd6bff..86df663487d3ab2fa1719bc0782b7f34b98f1f41 100644
--- a/official/cv/yolov5/infer/mxbase/CMakeLists.txt
+++ b/official/cv/yolov5/infer/mxbase/CMakeLists.txt
@@ -9,7 +9,7 @@ add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
 set(TARGET_LIBRARY yolov5postprocessor)
 set(TARGET_MAIN yolov5)
 
-set(ACL_LIB_PATH $ENV{ASCEND_HOME}/nnrt/latest/acllib)
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/ascend-toolkit/latest/acllib)
 
 include_directories(${CMAKE_CURRENT_BINARY_DIR})
 
@@ -19,6 +19,7 @@ include_directories($ENV{MX_SDK_HOME}/opensource/include/opencv4)
 include_directories($ENV{MX_SDK_HOME}/opensource/include/gstreamer-1.0)
 include_directories($ENV{MX_SDK_HOME}/opensource/include/glib-2.0)
 include_directories($ENV{MX_SDK_HOME}/opensource/lib/glib-2.0/include)
+include_directories($ENV{MX_SDK_HOME}/ascend-toolkit/latest/include)
 
 link_directories($ENV{MX_SDK_HOME}/lib)
 link_directories($ENV{MX_SDK_HOME}/opensource/lib/)
diff --git a/official/cv/yolov5/infer/mxbase/src/PostProcess/Yolov5MindSporePost.cpp b/official/cv/yolov5/infer/mxbase/src/PostProcess/Yolov5MindSporePost.cpp
index 4ba481b543cfe57ac3022c30464cf3cddfb09d54..9ee175b19c2081f67b4df6d143562266dc775b7a 100644
--- a/official/cv/yolov5/infer/mxbase/src/PostProcess/Yolov5MindSporePost.cpp
+++ b/official/cv/yolov5/infer/mxbase/src/PostProcess/Yolov5MindSporePost.cpp
@@ -38,6 +38,14 @@ namespace {
     auto uint8Deleter = [] (uint8_t* p) { };
 }  // namespace
 
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
 namespace MxBase {
     Yolov5PostProcess& Yolov5PostProcess::operator=(const Yolov5PostProcess &other) {
         if (this == &other) {
@@ -95,13 +103,14 @@ namespace MxBase {
         if (yoloVersion_ == YOLOV5_VERSION) {
             for (size_t i = 0; i < tensors.size(); i++) {
                 auto shape = tensors[i].GetShape();
-                if (shape.size() < VECTOR_FIFTH_INDEX) {
-                    LogError << "dimensions of tensor [" << i << "] is less than " << VECTOR_FIFTH_INDEX << ".";
+                if (shape.size() < localParameter::VECTOR_FIFTH_INDEX) {
+                    LogError << "dimensions of tensor [" << i << "] is less than " <<
+                        localParameter::VECTOR_FIFTH_INDEX << ".";
                     return false;
                 }
                 uint32_t channelNumber = 1;
-                int startIndex = modelType_ ? VECTOR_SECOND_INDEX : VECTOR_FOURTH_INDEX;
-                int endIndex = modelType_ ? (shape.size() - VECTOR_THIRD_INDEX) : shape.size();
+                int startIndex = modelType_ ? localParameter::VECTOR_SECOND_INDEX : localParameter::VECTOR_FOURTH_INDEX;
+                int endIndex = modelType_ ? (shape.size() - localParameter::VECTOR_THIRD_INDEX) : shape.size();
                 for (int j = startIndex; j < endIndex; j++) {
                     channelNumber *= shape[j];
                 }
diff --git a/official/cv/yolov5/infer/mxbase/src/PostProcess/Yolov5MindSporePost.h b/official/cv/yolov5/infer/mxbase/src/PostProcess/Yolov5MindSporePost.h
index fd1b5d3e64cb5cc812cb637b7c3c18f27680d586..91d31501688d78ac2b15a0bd2627b7feaf805d9d 100644
--- a/official/cv/yolov5/infer/mxbase/src/PostProcess/Yolov5MindSporePost.h
+++ b/official/cv/yolov5/infer/mxbase/src/PostProcess/Yolov5MindSporePost.h
@@ -70,7 +70,7 @@ class Yolov5PostProcess : public ObjectPostProcessBase {
                            const std::map<std::string, std::shared_ptr<void>> &configParamMap = {}) override;
 
  protected:
-    bool IsValidTensors(const std::vector<TensorBase> &tensors) const override;
+    bool IsValidTensors(const std::vector<TensorBase> &tensors) const;
 
     void ObjectDetectionOutput(const std::vector<TensorBase> &tensors,
                                std::vector<std::vector<ObjectInfo>> *objectInfos,
diff --git a/official/cv/yolov5/infer/sdk/run.sh b/official/cv/yolov5/infer/sdk/run.sh
index 121dd35e773c1c8c535d7fd5c6a25d3bd045f185..7b58b4fe60ab0361372b905f5bcba1ca3b91c50a 100644
--- a/official/cv/yolov5/infer/sdk/run.sh
+++ b/official/cv/yolov5/infer/sdk/run.sh
@@ -56,7 +56,7 @@ if [[ $ann_file == "" ]];then
     exit 1
 fi
 
-python3.7 main.py --pipeline_path=$pipeline_path \
+python3 main.py --pipeline_path=$pipeline_path \
     --dataset_path=$dataset_path \
     --ann_file=$ann_file \
    --result_files=$result_files
diff --git a/research/cv/HRNetW48_seg/infer/mxbase/src/hrnet.cpp b/research/cv/HRNetW48_seg/infer/mxbase/src/hrnet.cpp
index aac859b0ac967c679e5a3365f785a41bf3a62982..34e861d4224adfb31624a2c8e5837223982244fe 100644
--- a/research/cv/HRNetW48_seg/infer/mxbase/src/hrnet.cpp
+++ b/research/cv/HRNetW48_seg/infer/mxbase/src/hrnet.cpp
@@ -24,6 +24,14 @@
 #include "MxBase/DvppWrapper/DvppWrapper.h"
 #include "MxBase/Log/Log.h"
 
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
 APP_ERROR HRNetW48Seg::Init(const InitParam& initParam) {
     deviceId_ = initParam.deviceId;
     APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
@@ -151,9 +159,9 @@ APP_ERROR HRNetW48Seg::PostProcess(std::vector<MxBase::TensorBase>& inputs,
     }
     uint32_t imgHeight = resizedInfo.heightOriginal;
     uint32_t imgWidth = resizedInfo.widthOriginal;
-    uint32_t outputModelWidth = tensor.GetShape()[MxBase::VECTOR_FOURTH_INDEX];
-    uint32_t outputModelHeight = tensor.GetShape()[MxBase::VECTOR_THIRD_INDEX];
-    uint32_t outputModelChannel = tensor.GetShape()[MxBase::VECTOR_SECOND_INDEX];
+    uint32_t outputModelWidth = tensor.GetShape()[localParameter::VECTOR_FOURTH_INDEX];
+    uint32_t outputModelHeight = tensor.GetShape()[localParameter::VECTOR_THIRD_INDEX];
+    uint32_t outputModelChannel = tensor.GetShape()[localParameter::VECTOR_SECOND_INDEX];
     auto data = reinterpret_cast<float(*)[outputModelHeight][outputModelWidth]>(tensor.GetBuffer());
     std::vector<cv::Mat> tensorChannels = {};
     for (size_t c = 0; c < outputModelChannel; ++c) {
diff --git a/research/cv/HRNetW48_seg/infer/sdk/do_infer.sh b/research/cv/HRNetW48_seg/infer/sdk/do_infer.sh
index 40e507d82f61dd9d25aea5e3defbf31219a3c6b3..6e4b968679af4ed5bf72aaa5daef1e3f74126050 100644
--- a/research/cv/HRNetW48_seg/infer/sdk/do_infer.sh
+++ b/research/cv/HRNetW48_seg/infer/sdk/do_infer.sh
@@ -48,7 +48,7 @@ mkdir ./inferResults
 echo "Inference results will be stored in ./inferResults/."
"Inference results will be stored in ./inferResults/." -python3.7 main.py --pipeline="../data/config/hrnetw48seg.pipeline" \ +python3 main.py --pipeline="../data/config/hrnetw48seg.pipeline" \ --data_path=$PATH1 \ --data_lst=$PATH2 \ --infer_result_path="./inferResults/" diff --git a/research/cv/SE_ResNeXt50/infer/mxbase/src/SEResNeXtClassifyOpencv.cpp b/research/cv/SE_ResNeXt50/infer/mxbase/src/SEResNeXtClassifyOpencv.cpp index ee298f74733bdbd4d70787d9198e8017e46ba30e..598a3f3c9350b221a772cc59defcb9fe7be30dd7 100644 --- a/research/cv/SE_ResNeXt50/infer/mxbase/src/SEResNeXtClassifyOpencv.cpp +++ b/research/cv/SE_ResNeXt50/infer/mxbase/src/SEResNeXtClassifyOpencv.cpp @@ -19,6 +19,14 @@ #include "MxBase/Log/Log.h" #include "SEResNeXtClassifyOpencv.h" +namespace localParameter { + const uint32_t VECTOR_FIRST_INDEX = 0; + const uint32_t VECTOR_SECOND_INDEX = 0; + const uint32_t VECTOR_THIRD_INDEX = 0; + const uint32_t VECTOR_FOURTH_INDEX = 0; + const uint32_t VECTOR_FIFTH_INDEX = 0; +} + APP_ERROR SEResNeXtClassifyOpencv::Init(const InitParam &initParam) { deviceId_ = initParam.deviceId; APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices(); @@ -110,10 +118,10 @@ APP_ERROR SEResNeXtClassifyOpencv::Crop(const cv::Mat &srcImageMat, cv::Mat &dst APP_ERROR SEResNeXtClassifyOpencv::Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs) { - uint32_t first = inputs[0].GetShape()[MxBase::VECTOR_FIRST_INDEX]; - uint32_t second = inputs[0].GetShape()[MxBase::VECTOR_SECOND_INDEX]; - uint32_t third = inputs[0].GetShape()[MxBase::VECTOR_THIRD_INDEX]; - uint32_t fourth = inputs[0].GetShape()[MxBase::VECTOR_FOURTH_INDEX]; + uint32_t first = inputs[0].GetShape()[localParameter::VECTOR_FIRST_INDEX]; + uint32_t second = inputs[0].GetShape()[localParameter::VECTOR_SECOND_INDEX]; + uint32_t third = inputs[0].GetShape()[localParameter::VECTOR_THIRD_INDEX]; + uint32_t fourth = inputs[0].GetShape()[localParameter::VECTOR_FOURTH_INDEX]; std::cout << "++ inputs: " << inputs.size() << " " << first << " " << second << " " << third << " " << fourth << std::endl; diff --git a/research/cv/SE_ResNeXt50/infer/sdk/run.sh b/research/cv/SE_ResNeXt50/infer/sdk/run.sh index dbce3fb478a68e81465e1f4988259fe672f5da0e..18c2b1bb901d8001389526be6abdd21fb26cd124 100644 --- a/research/cv/SE_ResNeXt50/infer/sdk/run.sh +++ b/research/cv/SE_ResNeXt50/infer/sdk/run.sh @@ -22,12 +22,12 @@ set -e info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; } warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; } -export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH} export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins #to set PYTHONPATH, import the StreamManagerApi.py export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python -python main.py $image_path $result_dir +python3 main.py $image_path $result_dir exit 0 diff --git a/research/cv/squeezenet1_1/infer/convert/convert_om.sh b/research/cv/squeezenet1_1/infer/convert/convert_om.sh index 60265c96935e42aa7dda96574a8ca3f2fc095de4..07a8f47dd1b208625bf37244b11c85943be0c7d9 100644 --- 
+++ b/research/cv/squeezenet1_1/infer/convert/convert_om.sh
@@ -18,7 +18,7 @@ model_path=$1
 output_model_name=$2
 aipp_cfg=$3
 
-/usr/local/Ascend/atc/bin/atc \
+atc \
 --model=$model_path \
 --framework=1 \
 --output=$output_model_name \
diff --git a/research/cv/squeezenet1_1/infer/mxbase/Squeezenet1_1ClassifyOpencv.cpp b/research/cv/squeezenet1_1/infer/mxbase/Squeezenet1_1ClassifyOpencv.cpp
index 6b047d4d1c96f0bd349fa581131f94d8663986c6..87b8ee74c877557cefd349816c53fd57e7bb0023 100644
--- a/research/cv/squeezenet1_1/infer/mxbase/Squeezenet1_1ClassifyOpencv.cpp
+++ b/research/cv/squeezenet1_1/infer/mxbase/Squeezenet1_1ClassifyOpencv.cpp
@@ -19,6 +19,14 @@
 #include "MxBase/Log/Log.h"
 #include "Squeezenet1_1ClassifyOpencv.h"
 
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
 APP_ERROR Squeezenet1_1ClassifyOpencv::Init(const InitParam &initParam) {
     deviceId_ = initParam.deviceId;
     APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
@@ -110,10 +118,10 @@ APP_ERROR Squeezenet1_1ClassifyOpencv::Crop(const cv::Mat &srcImageMat, cv::Mat
 
 APP_ERROR Squeezenet1_1ClassifyOpencv::Inference(const std::vector<MxBase::TensorBase> &inputs,
                                                  std::vector<MxBase::TensorBase> &outputs) {
-    uint32_t first = inputs[0].GetShape()[MxBase::VECTOR_FIRST_INDEX];
-    uint32_t second = inputs[0].GetShape()[MxBase::VECTOR_SECOND_INDEX];
-    uint32_t third = inputs[0].GetShape()[MxBase::VECTOR_THIRD_INDEX];
-    uint32_t fourth = inputs[0].GetShape()[MxBase::VECTOR_FOURTH_INDEX];
+    uint32_t first = inputs[0].GetShape()[localParameter::VECTOR_FIRST_INDEX];
+    uint32_t second = inputs[0].GetShape()[localParameter::VECTOR_SECOND_INDEX];
+    uint32_t third = inputs[0].GetShape()[localParameter::VECTOR_THIRD_INDEX];
+    uint32_t fourth = inputs[0].GetShape()[localParameter::VECTOR_FOURTH_INDEX];
     std::cout << "++ inputs: " << inputs.size() << " "
               << first << " " << second << " " << third << " " << fourth
               << std::endl;
diff --git a/research/cv/squeezenet1_1/infer/sdk/run.sh b/research/cv/squeezenet1_1/infer/sdk/run.sh
index 270b7cd59b3e8f5fc05b551aea19c08933a82c7f..97e059f7b9bd9b126b86b755eb501ae97315caff 100644
--- a/research/cv/squeezenet1_1/infer/sdk/run.sh
+++ b/research/cv/squeezenet1_1/infer/sdk/run.sh
@@ -22,12 +22,12 @@ set -e
 info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
 warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
 
-export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
 export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
 export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
 
 #to set PYTHONPATH, import the StreamManagerApi.py
 export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
 
-python3.7 main_squeezenet.py $image_path $result_dir
+python3 main_squeezenet.py $image_path $result_dir
 exit 0
diff --git a/research/cv/ssd_ghostnet/infer/mxbase/C++/CMakeLists.txt b/research/cv/ssd_ghostnet/infer/mxbase/C++/CMakeLists.txt
index 58229e4b28c73f66401ca98eb178a8d444b6061d..8c2ad5fba6f0121ee1c7589edce8d48427a1b52b 100644
--- a/research/cv/ssd_ghostnet/infer/mxbase/C++/CMakeLists.txt
+++ b/research/cv/ssd_ghostnet/infer/mxbase/C++/CMakeLists.txt
@@ -34,7 +34,7 @@ endif()
 # Set up ACLLIB header files and dynamic link libraries
 set(ACL_INC_DIR $ENV{ASCEND_HOME}/${ASCEND_VERSION}/${ARCH_PATTERN}/acllib/include)
 set(ACL_LIB_DIR $ENV{ASCEND_HOME}/${ASCEND_VERSION}/${ARCH_PATTERN}/acllib/lib64)
-set(ACL_LIB_PATH $ENV{ASCEND_HOME}/nnrt/latest/acllib)
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/ascend-toolkit/latest/acllib)
 
 # Set the header file and dynamic link library of MXBase
 set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
diff --git a/research/cv/ssd_ghostnet/infer/mxbase/C++/PostProcess/SsdGhost_MindsporePost.cpp b/research/cv/ssd_ghostnet/infer/mxbase/C++/PostProcess/SsdGhost_MindsporePost.cpp
index b924c2587cb3290a47116b419124b159acaf4668..02544223effb14ee869f87a538cfeb421ca1df53 100644
--- a/research/cv/ssd_ghostnet/infer/mxbase/C++/PostProcess/SsdGhost_MindsporePost.cpp
+++ b/research/cv/ssd_ghostnet/infer/mxbase/C++/PostProcess/SsdGhost_MindsporePost.cpp
@@ -25,6 +25,15 @@ namespace {
     const int RIGHTBOTY = 2;
     const int RIGHTBOTX = 3;
 }
+
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
+
 namespace MxBase {
     float SsdGhostPostProcess::CalcIou(DetectBox boxA, DetectBox boxB, IOUMethod method) {
         float left = std::max(boxA.x - boxA.width / 2.f, boxB.x - boxB.width / 2.f);
@@ -119,39 +128,43 @@ namespace MxBase {
     bool SsdGhostPostProcess::IsValidTensors(const std::vector<TensorBase> &tensors) const {
         auto shape = tensors[0].GetShape();
-        if (tensors.size() < VECTOR_THIRD_INDEX) {
+        if (tensors.size() < localParameter::VECTOR_THIRD_INDEX) {
             LogError << "number of tensors (" << tensors.size() << ") " << "is less than required ("
-                     << VECTOR_THIRD_INDEX << ")";
+                     << localParameter::VECTOR_THIRD_INDEX << ")";
             return false;
         }
-        if (shape.size() != VECTOR_FOURTH_INDEX) {
+        if (shape.size() != localParameter::VECTOR_FOURTH_INDEX) {
             LogError << "number of tensor[0] dimensions (" << shape.size() << ") " << "is not equal to ("
-                     << VECTOR_FOURTH_INDEX << ")";
+                     << localParameter::VECTOR_FOURTH_INDEX << ")";
             return false;
         }
-        if (shape[VECTOR_SECOND_INDEX] != (uint32_t)objectNum_) {
-            LogError << "dimension of tensor[0][1] (" << shape[VECTOR_SECOND_INDEX] << ") " << "is not equal to ("
+        if (shape[localParameter::VECTOR_SECOND_INDEX] != (uint32_t)objectNum_) {
+            LogError << "dimension of tensor[0][1] (" << shape[localParameter::VECTOR_SECOND_INDEX]
+                     << ") " << "is not equal to ("
                      << objectNum_ << ")";
             return false;
         }
-        if (shape[VECTOR_THIRD_INDEX] != BOX_DIM) {
-            LogError << "dimension of tensor[0][2] (" << shape[VECTOR_THIRD_INDEX] << ") " << "is not equal to ("
+        if (shape[localParameter::VECTOR_THIRD_INDEX] != BOX_DIM) {
+            LogError << "dimension of tensor[0][2] (" << shape[localParameter::VECTOR_THIRD_INDEX]
+                     << ") " << "is not equal to ("
                      << BOX_DIM << ")";
             return false;
         }
         shape = tensors[1].GetShape();
-        if (shape.size() != VECTOR_FOURTH_INDEX) {
+        if (shape.size() != localParameter::VECTOR_FOURTH_INDEX) {
             LogError << "number of tensor[1] dimensions (" << shape.size() << ") " << "is not equal to ("
-                     << VECTOR_FOURTH_INDEX << ")";
+                     << localParameter::VECTOR_FOURTH_INDEX << ")";
             return false;
         }
-        if (shape[VECTOR_SECOND_INDEX] != (uint32_t)objectNum_) {
-            LogError << "dimension of tensor[1][1] (" << shape[VECTOR_SECOND_INDEX] << ") " << "is not equal to ("
LogError << "dimension of tensor[1][1] (" << shape[VECTOR_SECOND_INDEX] << ") " << "is not equal to (" + if (shape[localParameter::VECTOR_SECOND_INDEX] != (uint32_t)objectNum_) { + LogError << "dimension of tensor[1][1] (" << shape[localParameter::VECTOR_SECOND_INDEX] << ") " + << "is not equal to (" << objectNum_ << ")"; return false; } - if (shape[VECTOR_THIRD_INDEX] != (uint32_t)classNum_) { - LogError << "dimension of tensor[1][2] (" << shape[VECTOR_THIRD_INDEX] << ") " << "is not equal to (" + if (shape[localParameter::VECTOR_THIRD_INDEX] != (uint32_t)classNum_) { + LogError << "dimension of tensor[1][2] (" << shape[localParameter::VECTOR_THIRD_INDEX] << ") " + << "is not equal to (" << classNum_ << ")"; return false; } diff --git a/research/cv/ssd_ghostnet/infer/mxbase/C++/PostProcess/SsdGhost_MindsporePost.h b/research/cv/ssd_ghostnet/infer/mxbase/C++/PostProcess/SsdGhost_MindsporePost.h index bbd21a66baca580e39f2f136ccde47f9ffb3ed26..ecdf92be6a8b43cdd1efe42e08830c59727d545c 100644 --- a/research/cv/ssd_ghostnet/infer/mxbase/C++/PostProcess/SsdGhost_MindsporePost.h +++ b/research/cv/ssd_ghostnet/infer/mxbase/C++/PostProcess/SsdGhost_MindsporePost.h @@ -52,7 +52,7 @@ class SsdGhostPostProcess:public ObjectPostProcessBase { const std::vector<ResizedImageInfo> &resizedImageInfos = {}, const std::map<std::string, std::shared_ptr<void>> &configParamMap = {}) override; - bool IsValidTensors(const std::vector<TensorBase> &tensors) const override; + bool IsValidTensors(const std::vector<TensorBase> &tensors) const; private: void ObjectDetectionOutput(const std::vector<TensorBase> &tensors, diff --git a/research/cv/ssd_ghostnet/infer/sdk/conf/ssd_ghost.pipeline b/research/cv/ssd_ghostnet/infer/sdk/conf/ssd_ghost.pipeline index 42cbe5e9e45c7425e290e24e0fdb321216f60017..a310c675e185c707def323769b0553ea52151b14 100644 --- a/research/cv/ssd_ghostnet/infer/sdk/conf/ssd_ghost.pipeline +++ b/research/cv/ssd_ghostnet/infer/sdk/conf/ssd_ghost.pipeline @@ -31,7 +31,7 @@ "mxpi_tensorinfer0": { "props": { "dataSource": "mxpi_imageresize0", - "modelPath": "../../ssd_ghostnet.om" + "modelPath": "../ssd_ghostnet.om" }, "factory": "mxpi_tensorinfer", "next": "mxpi_objectpostprocessor0" @@ -41,7 +41,7 @@ "dataSource": "mxpi_tensorinfer0", "postProcessConfigPath": "./ssd_ghost_fpn_ms_on_coco_postprocess.cfg", "labelPath": "./coco.names", - "postProcessLibPath": "../mxManufacture/lib/libssdghostprocessor.so" + "postProcessLibPath": "libssdghostprocessor.so" }, "factory": "mxpi_objectpostprocessor", "next": "mxpi_dataserialize0" diff --git a/research/cv/ssd_ghostnet/infer/sdk/perf/run_map_test.sh b/research/cv/ssd_ghostnet/infer/sdk/perf/run_map_test.sh index 5c671b65b44360bfdd6305d8949903cff5e27fbc..e8ae4a925570416202bc45f35301183d278c5a02 100644 --- a/research/cv/ssd_ghostnet/infer/sdk/perf/run_map_test.sh +++ b/research/cv/ssd_ghostnet/infer/sdk/perf/run_map_test.sh @@ -14,15 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -PY=/usr/bin/python3.7 - export PYTHONPATH=${PYTHONPATH}:. 
 annotations_json=$1
 det_result_json=$2
 output_path_name=$3
 
-${PY} generate_map_report.py \
+python3 generate_map_report.py \
     --annotations_json=${annotations_json} \
     --det_result_json=${det_result_json} \
     --output_path_name=${output_path_name} \
diff --git a/research/cv/ssd_ghostnet/infer/sdk/run.sh b/research/cv/ssd_ghostnet/infer/sdk/run.sh
index b84ade2f8c5be06e421e3a12701ec0bbc9be15f0..a8e54d7f90979c0bf0f0d792c143fc06569af470 100644
--- a/research/cv/ssd_ghostnet/infer/sdk/run.sh
+++ b/research/cv/ssd_ghostnet/infer/sdk/run.sh
@@ -20,7 +20,7 @@ set -e
 info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
 warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
 
-export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
 export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
 export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
 
@@ -32,5 +32,5 @@ img_path=$2
 res_path=$3
 stream_name=$4
 
-python3.7 infer_by_sdk.py --pipeline_path ${pipeline_path} --img_path ${img_path} --res_path ${res_path} --stream_name ${stream_name}
+python3 infer_by_sdk.py --pipeline_path ${pipeline_path} --img_path ${img_path} --res_path ${res_path} --stream_name ${stream_name}
 exit 0
\ No newline at end of file
diff --git a/research/cv/wdsr/infer/convert/convert_om.sh b/research/cv/wdsr/infer/convert/convert_om.sh
index 244b880d9b2785c454a014e1b188da3d57cc3117..23cf058f621b4f455bdfa0b528b705404ce1524d 100644
--- a/research/cv/wdsr/infer/convert/convert_om.sh
+++ b/research/cv/wdsr/infer/convert/convert_om.sh
@@ -19,7 +19,7 @@
 aipp_cfg_path=$2
 output_model_name=$3
 
-/usr/local/Ascend/atc/bin/atc \
+atc \
 --model=$model_path \
 --input_format=NCHW \
 --framework=1 \
diff --git a/research/cv/wdsr/infer/mxbase/WdsrSuperresolution.cpp b/research/cv/wdsr/infer/mxbase/WdsrSuperresolution.cpp
index e823e23512bbb41d706e1732f3fd964754c1d202..f98299468033f8a49a97357f6b934446de4f396b 100644
--- a/research/cv/wdsr/infer/mxbase/WdsrSuperresolution.cpp
+++ b/research/cv/wdsr/infer/mxbase/WdsrSuperresolution.cpp
@@ -23,6 +23,13 @@
 #include "MxBase/DvppWrapper/DvppWrapper.h"
 #include "MxBase/Log/Log.h"
 
+namespace localParameter {
+    const uint32_t VECTOR_FIRST_INDEX = 0;
+    const uint32_t VECTOR_SECOND_INDEX = 1;
+    const uint32_t VECTOR_THIRD_INDEX = 2;
+    const uint32_t VECTOR_FOURTH_INDEX = 3;
+    const uint32_t VECTOR_FIFTH_INDEX = 4;
+}
 
 APP_ERROR WdsrSuperresolution::Init(const InitParam &initParam) {
     deviceId_ = initParam.deviceId;
@@ -43,9 +50,9 @@ APP_ERROR WdsrSuperresolution::Init(const InitParam &initParam) {
         return ret;
     }
 
-    uint32_t outputModelHeight = modelDesc_.outputTensors[0].tensorDims[MxBase::VECTOR_THIRD_INDEX];
-    uint32_t inputModelHeight = modelDesc_.inputTensors[0].tensorDims[MxBase::VECTOR_SECOND_INDEX];
-    uint32_t inputModelWidth = modelDesc_.inputTensors[0].tensorDims[MxBase::VECTOR_THIRD_INDEX];
+    uint32_t outputModelHeight = modelDesc_.outputTensors[0].tensorDims[localParameter::VECTOR_THIRD_INDEX];
+    uint32_t inputModelHeight = modelDesc_.inputTensors[0].tensorDims[localParameter::VECTOR_SECOND_INDEX];
+    uint32_t inputModelWidth = modelDesc_.inputTensors[0].tensorDims[localParameter::VECTOR_THIRD_INDEX];
     scale_ = outputModelHeight/inputModelHeight;
     maxEdge_ = inputModelWidth > inputModelHeight ? inputModelWidth:inputModelHeight;
@@ -122,9 +129,9 @@ APP_ERROR WdsrSuperresolution::PostProcess(std::vector<MxBase::TensorBase> *inpu
         LogError << GetError(ret) << "Tensor deploy to host failed.";
         return ret;
     }
-    uint32_t outputModelChannel = tensor.GetShape()[MxBase::VECTOR_SECOND_INDEX];
-    uint32_t outputModelHeight = tensor.GetShape()[MxBase::VECTOR_THIRD_INDEX];
-    uint32_t outputModelWidth = tensor.GetShape()[MxBase::VECTOR_FOURTH_INDEX];
+    uint32_t outputModelChannel = tensor.GetShape()[localParameter::VECTOR_SECOND_INDEX];
+    uint32_t outputModelHeight = tensor.GetShape()[localParameter::VECTOR_THIRD_INDEX];
+    uint32_t outputModelWidth = tensor.GetShape()[localParameter::VECTOR_FOURTH_INDEX];
     LogInfo << "Channel:" << outputModelChannel << " Height:" << outputModelHeight << " Width:" <<outputModelWidth;
diff --git a/research/cv/wdsr/infer/sdk/run.sh b/research/cv/wdsr/infer/sdk/run.sh
index 50ef0cfe684d1627be1690b0450a2c5c9b483c49..5a3aec5aa202bb3646dd14e09b804ea4e6a31a8c 100644
--- a/research/cv/wdsr/infer/sdk/run.sh
+++ b/research/cv/wdsr/infer/sdk/run.sh
@@ -54,7 +54,7 @@ echo "enter $CUR_PATH"
 info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
 warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
 
-export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:${LD_LIBRARY_PATH}
+export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${MX_SDK_HOME}/opensource/lib64:/usr/local/Ascend/${ASCEND_VERSION}/latest/acllib/lib64:${LD_LIBRARY_PATH}
 
 #to set PYTHONPATH, import the StreamManagerApi.py
 export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python