Unverified Commit 5b9c924b authored by i-robot, committed by Gitee

!2927 ONNX:advanced_east

Merge pull request !2927 from yang-how/advancedeast_onnx
parents 6fb232bf b02939fd
@@ -9,6 +9,8 @@
- [Data Preprocess](#data-preprocess)
- [Training Process](#training-process)
- [Evaluation Process](#evaluation-process)
- [Evaluation](#evaluation)
- [ONNX Evaluation](#onnx-evaluation)
- [Performance](#performance)
- [Training Performance](#training-performance)
- [Evaluation Performance](#evaluation-performance)
@@ -61,8 +63,10 @@ This project is inherited from [huoyijie/AdvancedEAST](https://github.com/huoyijie
├── run_distribute_train_ascend.sh # launch ascend distributed training(8 pcs)
├── run_standalone_train_ascend.sh # launch ascend standalone training(1 pcs)
├── run_distribute_train_gpu.sh # launch gpu distributed training(8 pcs)
└── run_standalone_train_gpu.sh # launch gpu standalone training(1 pcs)
└── eval.sh # evaluate model(1 pcs)
├── run_standalone_train_gpu.sh # launch gpu standalone training(1 pcs)
├── run_eval_ascend.sh # evaluate model(1 pcs)
├── run_eval_gpu.sh # evaluate model(1 pcs)
└── run_eval_onnx.sh # evaluate model(1 pcs)
├── src
├── cfg.py # parameter configuration
├── dataset.py # data preprocessing
@@ -76,6 +80,7 @@ This project is inherited from [huoyijie/AdvancedEAST](https://github.com/huoyijie
└── vgg.py # vgg model
├── export.py # export model for inference
├── prepare_data.py # exec data preprocessing
├── eval_onnx.py # eval onnx
├── eval.py # eval net
├── train.py # train net on multi-size input
└── train_single_size.py # train net on fixed-size input
@@ -181,6 +186,8 @@ config.py:
## [Evaluation Process](#contents)
### Evaluation
The above Python command runs in the background; you can view the results in the file output.eval.log. You will get the accuracy as follows.
You can get the loss, accuracy, recall, F1 score, and the box vertices of an image.
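The F1 score reported here combines precision and recall; the repository's own scoring logic lives in src/score.py (eval_pre_rec_f1). For reference, a minimal sketch of the standard definitions (illustrative only, not the repository's implementation):

```python
# Illustrative only: standard precision/recall/F1 definitions.
# The actual scoring used by eval.py and eval_onnx.py lives in src/score.py.
def precision_recall_f1(tp, fp, fn):
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1
```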
@@ -205,6 +212,22 @@ bash scripts/run_distribute_train_gpu.sh
bash run_eval.sh 0_8-24_1012.ckpt pred ./demo/001.png
```
### ONNX Evaluation
- Export your model to ONNX (a quick sanity check of the exported file is sketched after this list):
```shell
python export.py --ckpt_file /path/to/AdvancedEast.ckpt --file_name /path/to/AdvancedEast --file_format ONNX --device_target CPU
```
- Run ONNX evaluation from advanced_east directory:
```shell
bash scripts/run_eval_onnx.sh ./icpr/ GPU AdvancedEast.onnx
```
- You can view the results in the file eval_onnx.log.
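Before launching the full evaluation, it can help to sanity-check the exported file with onnxruntime. A minimal sketch, assuming a batch size of 1 and the 448x448 input size from the export.py defaults (adjust to whatever size you exported with):

```python
import numpy as np
import onnxruntime as ort

# Load the exported model on CPU and inspect its input signature.
session = ort.InferenceSession("AdvancedEast.onnx", providers=["CPUExecutionProvider"])
input_meta = session.get_inputs()[0]
print("input:", input_meta.name, input_meta.shape)

# Run one dummy forward pass; 1x3x448x448 assumes the export.py default width/height.
dummy = np.random.rand(1, 3, 448, 448).astype(np.float32)
outputs = session.run(None, {input_meta.name: dummy})
print("output shape:", outputs[0].shape)
```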
## Inference Process
### [Export MindIR](#contents)
......
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run evaluation for a model exported to ONNX"""
import argparse
import os
import numpy as np
import onnxruntime as ort
from mindspore import Tensor
from PIL import Image
from tqdm import tqdm
from src.config import config as cfg
from src.score import eval_pre_rec_f1
def create_session(checkpoint_path, target_device):
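    # pick the onnxruntime execution provider that matches the requested device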
if target_device == 'GPU':
providers = ['CUDAExecutionProvider']
elif target_device == 'CPU':
providers = ['CPUExecutionProvider']
else:
raise ValueError(
f'Unsupported target device {target_device}, '
f'Expected one of: "CPU", "GPU"'
)
session = ort.InferenceSession(checkpoint_path, providers=providers)
input_name = session.get_inputs()[0].name
return session, input_name
def parse_args():
"""parameters"""
parser = argparse.ArgumentParser('AdvancedEast ONNX evaluation')
parser.add_argument('--device_target', type=str, default='GPU', choices=['CPU', 'GPU'],
help='device where the code will be implemented. (Default: GPU)')
parser.add_argument('--onnx_path', type=str, default='AdvancedEast.onnx', help='onnx save location')
parser.add_argument('--data_dir', type=str, default='./icpr/', help='images and labels save location')
args_opt = parser.parse_args()
args_opt.batch_size = 1
args_opt.train_image_dir_name = args_opt.data_dir + cfg.train_image_dir_name
args_opt.train_label_dir_name = args_opt.data_dir + cfg.train_label_dir_name
args_opt.val_fname = cfg.val_fname
args_opt.max_predict_img_size = cfg.max_predict_img_size
return args_opt
def eval_score(eval_arg):
"""get network and init"""
session, input_name = create_session(eval_arg.onnx_path, eval_arg.device_target)
obj = eval_pre_rec_f1()
with open(os.path.join(eval_arg.data_dir, eval_arg.val_fname), 'r') as f_val:
f_list = f_val.readlines()
img_h, img_w = eval_arg.max_predict_img_size, eval_arg.max_predict_img_size
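    # preallocate an NCHW input buffer at the maximum prediction size; it is reused for every batch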
x = np.zeros((eval_arg.batch_size, 3, img_h, img_w), dtype=np.float32)
batch_list = np.arange(0, len(f_list), eval_arg.batch_size)
for idx in tqdm(batch_list):
gt_list = []
for i in range(idx, min(idx + eval_arg.batch_size, len(f_list))):
item = f_list[i]
img_filename = str(item).strip().split(',')[0][:-4]
img_path = os.path.join(eval_arg.train_image_dir_name, img_filename) + '.jpg'
img = Image.open(img_path)
img = img.resize((img_w, img_h), Image.NEAREST).convert('RGB')
img = np.asarray(img)
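            # cast to float and subtract the per-channel RGB means before feeding the network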
img = img / 1.
mean = np.array((123.68, 116.779, 103.939)).reshape([1, 1, 3])
img = ((img - mean)).astype(np.float32)
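            # HWC -> CHW so the array matches the model's NCHW input layout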
img = img.transpose((2, 0, 1))
x[i - idx] = img
gt_list.append(np.load(os.path.join(eval_arg.train_label_dir_name, img_filename) + '.npy'))
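        # the final batch may hold fewer images than batch_size, so trim the padded buffer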
if idx + eval_arg.batch_size >= len(f_list):
x = x[:len(f_list) - idx]
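        # run ONNX inference and wrap the prediction in a MindSpore Tensor for eval_pre_rec_f1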
y = Tensor(session.run(None, {input_name: x})[0])
obj.add(y, gt_list)
print(obj.val())
if __name__ == '__main__':
args = parse_args()
eval_score(args)
@@ -30,7 +30,7 @@ parser.add_argument('--width', type=int, default=448, help='input width')
parser.add_argument('--height', type=int, default=448, help='input height')
parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format")
parser.add_argument("--device_target", type=str, default="Ascend",
choices=["Ascend", "GPU"], help="device target(default: Ascend)")
choices=["Ascend", "GPU", "CPU"], help="device target(default: Ascend)")
args = parser.parse_args()
args.is_train = False
......
#!/bin/bash
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
if [ $# -ne 3 ]; then
echo "=============================================================================================================="
echo "Please run the script as: "
echo "bash scripts/run_onnx_eval.sh DATA_PATH DEVICE_TYPE ONNX_MODEL_PATH"
echo "for example: bash scripts/run_onnx_eval.sh /path/icpr GPU /path/AdvancedEast.onnx "
echo "=============================================================================================================="
exit 1
fi
DATA_PATH=$1
DEVICE_TARGET=$2
ONNX_MODEL_PATH=$3
python eval_onnx.py \
--data_dir=$DATA_PATH \
--device_target=$DEVICE_TARGET \
--onnx_path=$ONNX_MODEL_PATH > eval_onnx.log 2>&1 &
@@ -23,6 +23,7 @@ from mindspore.ops import ResizeNearestNeighbor
from mindspore import Tensor, ParameterTuple, Parameter
from mindspore.common.initializer import initializer, TruncatedNormal
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore._checkparam import twice
import numpy as np
from src.vgg import Vgg
@@ -152,7 +153,7 @@ class AdvancedEast(nn.Cell):
out_channels, in_channels, filter_size, _ = filters.shape
else:
out_channels, in_channels, filter_size, _ = shape_dict[name]
self.conv2d = P.Conv2D(out_channels, filter_size, pad_mode='same', mode=1)
self.conv2d = P.Conv2D(out_channels, twice(filter_size), pad_mode='same', mode=1)
self.bias_add = P.BiasAdd()
self.weight = Parameter(initializer(filters if args.is_train else TruncatedNormal(),
[out_channels, in_channels, filter_size, filter_size]),
......