diff --git a/official/cv/cnnctc/default_config.yaml b/official/cv/cnnctc/default_config.yaml index 7cd5cd952787838bb053c42e920fc33c18472ebc..d9e10754f920921332afae4d3321c7f05e95cf7e 100644 --- a/official/cv/cnnctc/default_config.yaml +++ b/official/cv/cnnctc/default_config.yaml @@ -68,6 +68,6 @@ data_path: "The location of input data" output_pah: "The location of the output file" device_target: "device id of GPU or Ascend. (Default: None)" enable_profiling: "Whether enable profiling while training default: False" -file_name: "CNN&CTC output air name" +file_name: "CNN&CTC output mindir name" file_format: "choices [AIR, MINDIR]" ckpt_file: "CNN&CTC ckpt file" diff --git a/official/cv/crnn/default_config.yaml b/official/cv/crnn/default_config.yaml index 925cb048277e2b1ee5e6a6cc2948452386e68f80..c66f65fa2acd368ca26f2256dd59c5c1c006d59b 100644 --- a/official/cv/crnn/default_config.yaml +++ b/official/cv/crnn/default_config.yaml @@ -77,7 +77,7 @@ data_path: "The location of input data" output_pah: "The location of the output file" device_target: "device id of GPU or Ascend. (Default: None)" enable_profiling: "Whether enable profiling while training default: False" -file_name: "CNN&CTC output air name" +file_name: "CRNN output mindir name" file_format: "choices [AIR, MINDIR]" ckpt_file: "Checkpoint file path." run_distribute: "Run distribute, default is false." diff --git a/official/cv/cspdarknet53/default_config.yaml b/official/cv/cspdarknet53/default_config.yaml index 76ed77265f76fb03b0437fa2ef33e31c33e95c7e..644ed92e1c981c61412c7efd905d6eba78d19d0f 100644 --- a/official/cv/cspdarknet53/default_config.yaml +++ b/official/cv/cspdarknet53/default_config.yaml @@ -64,7 +64,7 @@ graph_ckpt: "graph ckpt or feed ckpt" # export options export_batch_size: "batch size for export" ckpt_file: "cspdarknet53 ckpt file" -file_name: "output air name." +file_name: "output mindir name." 
file_format: "file format, choices in ['AIR', 'ONNX', 'MINDIR']" width: "input width" height: "input height" \ No newline at end of file diff --git a/official/cv/ctpn/default_config.yaml b/official/cv/ctpn/default_config.yaml index 40958e477d87fac2b67f016e212aa96a6bd2816c..f6a7c60cdd69551d01fa7656150e86d7cee286f4 100644 --- a/official/cv/ctpn/default_config.yaml +++ b/official/cv/ctpn/default_config.yaml @@ -163,7 +163,7 @@ data_path: "The location of input data" output_pah: "The location of the output file" device_target: "device id of GPU or Ascend. (Default: None)" enable_profiling: "Whether enable profiling while training default: False" -file_name: "CNN&CTC output air name" +file_name: "CTPN output mindir name" file_format: "choices [AIR, MINDIR]" ckpt_file: "CNN&CTC ckpt file" run_distribute: "Run distribute, default: false." diff --git a/official/cv/darknet53/export.py b/official/cv/darknet53/export.py index ccb66fdec2c0bd1699983009601b1b8cdeb10a06..09bfd1927bca633989f51849e6a1028c8538d74b 100644 --- a/official/cv/darknet53/export.py +++ b/official/cv/darknet53/export.py @@ -32,7 +32,7 @@ parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint fil parser.add_argument("--file_name", type=str, default="darknet53", help="output file name.") parser.add_argument('--width', type=int, default=256, help='input width') parser.add_argument('--height', type=int, default=256, help='input height') -parser.add_argument("--file_format", type=str, choices=["AIR", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, default="GPU", choices=["GPU", "CPU"], help="device target(default: GPU)") args = parser.parse_args() diff --git a/official/cv/depthnet/export.py b/official/cv/depthnet/export.py index 017bd24cb5468af7406a83bef310d71c24951a1c..8fb2369825648516beab6201d51314797cca10b5 100644 --- 
a/official/cv/depthnet/export.py +++ b/official/cv/depthnet/export.py @@ -57,7 +57,7 @@ if __name__ == "__main__": export(coarse_net, Tensor(input_rgb_coarsenet), file_name=os.path.join(mindir_dir, "FinalCoarseNet"), file_format='MINDIR') export(coarse_net, Tensor(input_rgb_coarsenet), file_name=os.path.join(air_dir, "FinalCoarseNet"), - file_format='MINDIR') + file_format='AIR') else: fine_net = FineNet() fine_net_file_name = os.path.join(ckpt_dir, "FinalFineNet.ckpt") @@ -68,4 +68,4 @@ if __name__ == "__main__": export(fine_net, Tensor(input_rgb_finenet), Tensor(input_coarse_depth), file_name=os.path.join(mindir_dir, "FinalFineNet"), file_format='MINDIR') export(fine_net, Tensor(input_rgb_finenet), Tensor(input_coarse_depth), - file_name=os.path.join(air_dir, "FinalFineNet"), file_format='MINDIR') + file_name=os.path.join(air_dir, "FinalFineNet"), file_format='AIR') diff --git a/official/cv/faster_rcnn/README.md b/official/cv/faster_rcnn/README.md index b8401aba55a6aa891e50984d696bc06440a346f4..8044109c2376aa0c55c6eea38605bbdaf867898c 100644 --- a/official/cv/faster_rcnn/README.md +++ b/official/cv/faster_rcnn/README.md @@ -122,7 +122,7 @@ bash run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_MODEL] [BACKBO bash run_eval_ascend.sh [VALIDATION_JSON_FILE] [CHECKPOINT_PATH] [BACKBONE] [COCO_ROOT] [MINDRECORD_DIR](optional) # inference (the values of IMAGE_WIDTH and IMAGE_HEIGHT must be set or use default at the same time.) 
-bash run_infer_310.sh [AIR_PATH] [DATA_PATH] [ANN_FILE_PATH] [IMAGE_WIDTH](optional) [IMAGE_HEIGHT](optional) [DEVICE_ID](optional) +bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [ANN_FILE_PATH] [IMAGE_WIDTH](optional) [IMAGE_HEIGHT](optional) [DEVICE_ID](optional) ``` ## Run on GPU diff --git a/official/cv/fastscnn/export.py b/official/cv/fastscnn/export.py index 719430f7f85fac1d0816796b8cfbafb2677cfecc..750a348cdf3a94777f0a79cb497dbc572f28fe95 100644 --- a/official/cv/fastscnn/export.py +++ b/official/cv/fastscnn/export.py @@ -33,7 +33,7 @@ parser.add_argument("--image_height", type=int, default=768, help="Image height. parser.add_argument("--image_width", type=int, default=768, help="Image width.") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="fastscnn", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument('--device_target', type=str, default='Ascend' , help='device where the code will be implemented. 
(Default: Ascend)') parser.add_argument("--device_id", type=int, default=0, help="Device id") diff --git a/official/cv/googlenet/README.md b/official/cv/googlenet/README.md index 04708b39f50d41079eb64ca0435082a4330a6753..50d0b7fab0a65496d973e5562525820ef4647e9d 100644 --- a/official/cv/googlenet/README.md +++ b/official/cv/googlenet/README.md @@ -319,7 +319,7 @@ Parameters for both training and evaluation can be set in config.py 'keep_checkpoint_max': 10 # only keep the last keep_checkpoint_max checkpoint 'checkpoint_path': './train_googlenet_cifar10-125_390.ckpt' # the absolute full path to save the checkpoint file 'onnx_filename': 'googlenet.onnx' # file name of the onnx model used in export.py - 'air_filename': 'googlenet.air' # file name of the air model used in export.py + 'air_filename': 'googlenet.air' # file name of the air/mindir model used in export.py ``` - config for GoogleNet, ImageNet dataset @@ -341,7 +341,7 @@ Parameters for both training and evaluation can be set in config.py 'keep_checkpoint_max': 10 # only keep the last keep_checkpoint_max checkpoint 'checkpoint_path': './train_googlenet_cifar10-125_390.ckpt' # the absolute full path to save the checkpoint file 'onnx_filename': 'googlenet.onnx' # file name of the onnx model used in export.py - 'air_filename': 'googlenet.air' # file name of the air model used in export.py + 'air_filename': 'googlenet.air' # file name of the air/mindir model used in export.py 'lr_scheduler': 'exponential' # learning rate scheduler 'lr_epochs': [70, 140, 210, 280] # epoch of lr changing 'lr_gamma': 0.3 # decrease lr by a factor of exponential lr_scheduler diff --git a/official/cv/inceptionv3/default_config_cpu.yaml b/official/cv/inceptionv3/default_config_cpu.yaml index a4fce7baef65d5dc6ddb73777ec6ff52320ec942..c736725f7907b5a6ea0e199c5149efa034ef65a4 100644 --- a/official/cv/inceptionv3/default_config_cpu.yaml +++ b/official/cv/inceptionv3/default_config_cpu.yaml @@ -65,7 +65,7 @@ output_path: 'Training output path 
for local' device_target: 'Target device type' enable_profiling: 'Whether enable profiling while training, default: False' -file_name: 'inceptionv3 output air name.' +file_name: 'inceptionv3 output mindir name.' file_format: 'file format' --- diff --git a/official/cv/inceptionv4/default_config_cpu.yaml b/official/cv/inceptionv4/default_config_cpu.yaml index c60ed10db407c1902a2999cee5dcdf8d4e4cff6f..9d7699ec86596341c4ec144baa2b2bfa5d16770b 100644 --- a/official/cv/inceptionv4/default_config_cpu.yaml +++ b/official/cv/inceptionv4/default_config_cpu.yaml @@ -67,7 +67,7 @@ output_path: 'Training output path for local' device_target: 'Target device type' enable_profiling: 'Whether enable profiling while training, default: False' -file_name: 'inceptionv3 output air name.' +file_name: 'inceptionv4 output mindir name.' file_format: 'file format' result_path: "result file path" label_file: "label file" diff --git a/official/cv/maskrcnn/README.md b/official/cv/maskrcnn/README.md index 790cd660f10e4c3c76f005c5cd47b197519c715c..a8fb24b3ffbc1e98b4e7a4127d4ec60ddb4d60b4 100644 --- a/official/cv/maskrcnn/README.md +++ b/official/cv/maskrcnn/README.md @@ -144,7 +144,7 @@ pip install mmcv=0.2.14 ``` Note: - 1. AIR_PATH is a model file, exported by export script file on the Ascend910 environment. + 1. MINDIR_PATH is a model file, exported by export script file on the Ascend910 environment. 2. ANN_FILE_PATH is a annotation file for inference. # Run in docker @@ -228,9 +228,9 @@ bash run_eval.sh [VALIDATION_JSON_FILE] [CHECKPOINT_PATH] [DATA_PATH] # (5) Set the code directory to "/path/maskrcnn" on the website UI interface. # (6) Set the startup file to "train.py" on the website UI interface. # (7) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface. 
- # (8) Create your job.[AIR_PATH] [DATA_PATH] [ANN_FILE_PATH] + # (8) Create your job.[MINDIR_PATH] [DATA_PATH] [ANN_FILE_PATH] # - # Train 1p with Ascend[AIR_PATH] [DATA_PATH] [ANN_FILE_PATH] + # Train 1p with Ascend[MINDIR_PATH] [DATA_PATH] [ANN_FILE_PATH] # (1) Perform a or b. # a. Set "enable_modelarts=True" on default_config.yaml file. # Set "need_modelarts_dataset_unzip=True" on default_config.yaml file. diff --git a/official/cv/maskrcnn/README_CN.md b/official/cv/maskrcnn/README_CN.md index 1bbee945c5f571cb6d27390735e60c50a1f8cd01..cb503fb3ed43bf790c10448251cb17cdbeaa0166 100644 --- a/official/cv/maskrcnn/README_CN.md +++ b/official/cv/maskrcnn/README_CN.md @@ -138,7 +138,7 @@ pip install mmcv=0.2.14 ``` 注: - 1. AIR_PATH是在910上使用export脚本导出的模型。 + 1. MINDIR_PATH是在910上使用export脚本导出的模型。 2. ANN_FILE_PATH是推理使用的标注文件。 # 在docker上运行 diff --git a/official/cv/resnet_thor/export.py b/official/cv/resnet_thor/export.py index 22632e6028e92618af6eca2c19e96feae47baf6e..1bf0facca4fb3a2c59715c8202f113a37e64d9c8 100644 --- a/official/cv/resnet_thor/export.py +++ b/official/cv/resnet_thor/export.py @@ -27,7 +27,7 @@ parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint fil parser.add_argument('--width', type=int, default=224, help='input width') parser.add_argument('--height', type=int, default=224, help='input height') parser.add_argument("--file_name", type=str, default="resnet_thor", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU", "CPU"], help="device target (default: Ascend)") args = parser.parse_args() diff --git a/official/cv/tinydarknet/README.md b/official/cv/tinydarknet/README.md index 
8fdfc76eb6d615e4f99290c3510c77bbaafe5d48..5db7523330930e3c5eaf8ff7e0557d86a975db50 100644 --- a/official/cv/tinydarknet/README.md +++ b/official/cv/tinydarknet/README.md @@ -245,7 +245,7 @@ Parameters for both training and evaluation can be set in `imagenet_config.yaml` keep_checkpoint_max: 10 # only keep the last keep_checkpoint_max checkpoint checkpoint_path: '/train_tinydarknet.ckpt' # the absolute full path to save the checkpoint file onnx_filename: 'tinydarknet.onnx' # file name of the onnx model used in export.py - air_filename: 'tinydarknet.air' # file name of the air model used in export.py + air_filename: 'tinydarknet.air' # file name of the air/mindir model used in export.py lr_scheduler: 'exponential' # learning rate scheduler lr_epochs: [70, 140, 210, 280] # epoch of lr changing lr_gamma: 0.3 # decrease lr by a factor of exponential lr_scheduler diff --git a/official/cv/tinydarknet/README_CN.md b/official/cv/tinydarknet/README_CN.md index c648fd1a1cefa506db0ef38fc55a62102e4fad49..a828249c3c9e1ddb9e8c2f95b8db131b028c7195 100644 --- a/official/cv/tinydarknet/README_CN.md +++ b/official/cv/tinydarknet/README_CN.md @@ -252,7 +252,7 @@ Tiny-DarkNet是Joseph Chet Redmon等人提出的一个16层的针对于经典的 keep_checkpoint_max: 10 # 仅仅保持最新的keep_checkpoint_max个checkpoint文件 checkpoint_path: '/train_tinydarknet.ckpt' # 保存checkpoint文件的绝对路径 onnx_filename: 'tinydarknet.onnx' # 用于export.py 文件中的onnx模型的文件名 - air_filename: 'tinydarknet.air' # 用于export.py 文件中的air模型的文件名 + air_filename: 'tinydarknet.air' # 用于export.py 文件中的air/mindir模型的文件名 lr_scheduler: 'exponential' # 学习率策略 lr_epochs: [70, 140, 210, 280] # 学习率进行变化的epoch数 lr_gamma: 0.3 # lr_scheduler为exponential时的学习率衰减因子 diff --git a/official/gnn/gcn/export.py b/official/gnn/gcn/export.py index aa91b622bdd377861f141086c1e00b5fcf77e73c..612fe3eedab14b76bdf15b70e680534fb769d963 100644 --- a/official/gnn/gcn/export.py +++ b/official/gnn/gcn/export.py @@ -26,7 +26,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") 
parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--dataset", type=str, default="cora", choices=["cora", "citeseer"], help="Dataset.") parser.add_argument("--file_name", type=str, default="gcn", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU", "CPU"], help="device target (default: Ascend)") args = parser.parse_args() diff --git a/official/nlp/bert/task_classifier_config.yaml b/official/nlp/bert/task_classifier_config.yaml index 82aaf3d9174f5ab5b987d6a0e25930d287f493c6..6b75084a74e54eff433e5a9fe588b5910570bcfe 100644 --- a/official/nlp/bert/task_classifier_config.yaml +++ b/official/nlp/bert/task_classifier_config.yaml @@ -100,7 +100,7 @@ schema_file_path: "Schema path, it is better to use absolute path" export_batch_size: "export batch size." export_ckpt_file: "Bert ckpt file." -export_file_name: "bert output air name." +export_file_name: "bert output mindir name." file_format: "file format" --- # chocies diff --git a/official/nlp/bert/task_ner_config.yaml b/official/nlp/bert/task_ner_config.yaml index 2b228f92b973dca99a19643c1e43f9e70273b1e2..7243bf2ef780ae76b14583ed190ebf1819764035 100644 --- a/official/nlp/bert/task_ner_config.yaml +++ b/official/nlp/bert/task_ner_config.yaml @@ -107,7 +107,7 @@ schema_file_path: "Schema path, it is better to use absolute path" export_batch_size: "export batch size." export_ckpt_file: "Bert ckpt file." -export_file_name: "bert output air name." +export_file_name: "bert output mindir name." 
file_format: "file format" --- # chocies diff --git a/official/nlp/bert/task_squad_config.yaml b/official/nlp/bert/task_squad_config.yaml index fe800c68ad3372d9ee3035756029d092b75a2455..f0195e8e4856fb437fc1668a6059d4a92ddb2656 100644 --- a/official/nlp/bert/task_squad_config.yaml +++ b/official/nlp/bert/task_squad_config.yaml @@ -100,7 +100,7 @@ schema_file_path: "Schema path, it is better to use absolute path" export_batch_size: "export batch size." export_ckpt_file: "Bert ckpt file." -export_file_name: "bert output air name." +export_file_name: "bert output mindir name." file_format: "file format" --- # chocies diff --git a/official/nlp/dgu/export.py b/official/nlp/dgu/export.py index d756ad7dbc66e1ba8434f613cf184fb280636539..5b2992d082723639501e0e1eabcdad3faa63342d 100644 --- a/official/nlp/dgu/export.py +++ b/official/nlp/dgu/export.py @@ -26,8 +26,8 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--number_labels", type=int, default=26, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Bert ckpt file.") -parser.add_argument("--file_name", type=str, default="Bert", help="bert output air name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_name", type=str, default="Bert", help="bert output mindir name.") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU", "CPU"], help="device target (default: Ascend)") args = parser.parse_args() diff --git a/official/nlp/emotect/export.py b/official/nlp/emotect/export.py index 335e21155b0767dd844db919b1ca734296900a7f..242a7940ac190d0c337619e8936bcad9da26d4f0 100644 --- a/official/nlp/emotect/export.py +++ 
b/official/nlp/emotect/export.py @@ -26,7 +26,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=32, help="batch size") parser.add_argument("--number_labels", type=int, default=3, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Bert ckpt file.") -parser.add_argument("--file_name", type=str, default="emotect", help="bert output air name.") +parser.add_argument("--file_name", type=str, default="emotect", help="bert output mindir name.") parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help="file format") parser.add_argument("--device_target", type=str, default="Ascend", diff --git a/official/nlp/emotect/modelart/start.py b/official/nlp/emotect/modelart/start.py index 1e08cb09fe477b10f7362af7fb1c58d29e924587..918539d9ced68834b26c68f54f8be224181a2dce 100644 --- a/official/nlp/emotect/modelart/start.py +++ b/official/nlp/emotect/modelart/start.py @@ -1,237 +1,237 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -''' -Ernie finetune and evaluation script. 
-''' - -import os -import time -import argparse -import numpy as np - -import mindspore.common.dtype as mstype -from mindspore import Tensor, context, export -from mindspore import log as logger -from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell -from mindspore.nn.optim import Adam, AdamWeightDecay, Adagrad -from mindspore.train.model import Model -from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor -from mindspore.train.serialization import load_checkpoint, load_param_into_net - -from src.ernie_for_finetune import ErnieFinetuneCell, ErnieCLS -from src.finetune_eval_config import optimizer_cfg, ernie_net_cfg -from src.dataset import create_classification_dataset -from src.assessment_method import Accuracy -from src.utils import make_directory, LossCallBack, LoadNewestCkpt, ErnieLearningRate - -_cur_dir = os.getcwd() -CACHE_TRAINING_URL = "/cache/training/" - -if not os.path.isdir(CACHE_TRAINING_URL): - os.makedirs(CACHE_TRAINING_URL) - -def do_train(dataset=None, network=None, load_checkpoint_path="", save_checkpoint_path="", epoch_num=1): - """ do train """ - if load_checkpoint_path == "": - raise ValueError("Pretrain model missed, finetune task must load pretrain model!") - steps_per_epoch = 500 - # optimizer - if optimizer_cfg.optimizer == 'AdamWeightDecay': - lr_schedule = ErnieLearningRate(learning_rate=optimizer_cfg.AdamWeightDecay.learning_rate, - end_learning_rate=optimizer_cfg.AdamWeightDecay.end_learning_rate, - warmup_steps=int(steps_per_epoch * epoch_num * 0.1), - decay_steps=steps_per_epoch * epoch_num, - power=optimizer_cfg.AdamWeightDecay.power) - params = network.trainable_params() - decay_params = list(filter(optimizer_cfg.AdamWeightDecay.decay_filter, params)) - other_params = list(filter(lambda x: not optimizer_cfg.AdamWeightDecay.decay_filter(x), params)) - group_params = [{'params': decay_params, 'weight_decay': optimizer_cfg.AdamWeightDecay.weight_decay}, - {'params': other_params, 
'weight_decay': 0.0}] - - optimizer = AdamWeightDecay(group_params, lr_schedule, eps=optimizer_cfg.AdamWeightDecay.eps) - elif optimizer_cfg.optimizer == 'Adam': - optimizer = Adam(network.trainable_params(), learning_rate=optimizer_cfg.Adam.learning_rate) - elif optimizer_cfg.optimizer == 'Adagrad': - optimizer = Adagrad(network.trainable_params(), learning_rate=optimizer_cfg.Adagrad.learning_rate) - # load checkpoint into network - ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch, keep_checkpoint_max=10) - ckpoint_cb = ModelCheckpoint(prefix="classifier", - directory=None if save_checkpoint_path == "" else save_checkpoint_path, - config=ckpt_config) - param_dict = load_checkpoint(load_checkpoint_path) - unloaded_params = load_param_into_net(network, param_dict) - if len(unloaded_params) > 2: - print(unloaded_params) - logger.warning('Loading ernie model failed, please check the checkpoint file.') - - update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2 ** 32, scale_factor=2, scale_window=1000) - netwithgrads = ErnieFinetuneCell(network, optimizer=optimizer, scale_update_cell=update_cell) - model = Model(netwithgrads) - callbacks = [TimeMonitor(dataset.get_dataset_size()), LossCallBack(dataset.get_dataset_size()), ckpoint_cb] - model.train(epoch_num, dataset, callbacks=callbacks) - - -def do_eval(dataset=None, network=None, num_class=2, load_checkpoint_path=""): - """ do eval """ - if load_checkpoint_path == "": - raise ValueError("Finetune model missed, evaluation task must load finetune model!") - net_for_pretraining = network(ernie_net_cfg, False, num_class) - net_for_pretraining.set_train(False) - param_dict = load_checkpoint(load_checkpoint_path) - load_param_into_net(net_for_pretraining, param_dict) - - callback = Accuracy() - - evaluate_times = [] - columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"] - for data in dataset.create_dict_iterator(num_epochs=1): - input_data = [] - for i in columns_list: - 
input_data.append(data[i]) - input_ids, input_mask, token_type_id, label_ids = input_data - time_begin = time.time() - logits = net_for_pretraining(input_ids, input_mask, token_type_id, label_ids) - time_end = time.time() - evaluate_times.append(time_end - time_begin) - callback.update(logits, label_ids) - print("==============================================================") - print("acc_num {} , total_num {}, accuracy {:.6f}".format(callback.acc_num, callback.total_num, - callback.acc_num / callback.total_num)) - print("(w/o first and last) elapsed time: {}, per step time : {}".format( - sum(evaluate_times[1:-1]), sum(evaluate_times[1:-1])/(len(evaluate_times) - 2))) - print("==============================================================") - - -def run_classifier(): - """run classifier task""" - parser = argparse.ArgumentParser(description="run classifier") - parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU"], - help="Device type, default is Ascend") - parser.add_argument("--do_train", type=str, default="false", choices=["true", "false"], - help="Enable train, default is false") - parser.add_argument("--do_eval", type=str, default="false", choices=["true", "false"], - help="Enable eval, default is false") - parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") - parser.add_argument("--epoch_num", type=int, default=3, help="Epoch number, default is 3.") - parser.add_argument("--num_class", type=int, default=3, help="The number of class, default is 3.") - parser.add_argument("--train_data_shuffle", type=str, default="true", choices=["true", "false"], - help="Enable train data shuffle, default is true") - parser.add_argument("--eval_data_shuffle", type=str, default="false", choices=["true", "false"], - help="Enable eval data shuffle, default is false") - parser.add_argument("--train_batch_size", type=int, default=32, help="Train batch size, default is 32") - 
parser.add_argument("--eval_batch_size", type=int, default=1, help="Eval batch size, default is 1") - parser.add_argument("--save_finetune_checkpoint_path", type=str, default="", help="Save checkpoint path") - parser.add_argument("--load_pretrain_checkpoint_path", type=str, default="", help="Load checkpoint file path") - parser.add_argument("--local_pretrain_checkpoint_path", type=str, default="", - help="Local pretrain checkpoint file path") - parser.add_argument("--load_finetune_checkpoint_path", type=str, default="", help="Load checkpoint file path") - parser.add_argument("--train_data_file_path", type=str, default="", - help="Data path, it is better to use absolute path") - parser.add_argument("--eval_data_file_path", type=str, default="", - help="Data path, it is better to use absolute path") - parser.add_argument("--schema_file_path", type=str, default="", - help="Schema path, it is better to use absolute path") - parser.add_argument('--data_url', type=str, default=None, help='Dataset path for ModelArts') - parser.add_argument('--train_url', type=str, default=None, help='Train output path for ModelArts') - parser.add_argument('--modelarts', type=str, default='false', - help='train on modelarts or not, default is false') - args_opt = parser.parse_args() - - epoch_num = args_opt.epoch_num - load_pretrain_checkpoint_path = args_opt.load_pretrain_checkpoint_path - save_finetune_checkpoint_path = args_opt.save_finetune_checkpoint_path - load_finetune_checkpoint_path = args_opt.load_finetune_checkpoint_path - - if args_opt.modelarts.lower() == 'true': - import moxing as mox - mox.file.copy_parallel(args_opt.data_url, '/cache/data') - mox.file.copy_parallel(args_opt.load_pretrain_checkpoint_path, args_opt.local_pretrain_checkpoint_path) - load_pretrain_checkpoint_path = args_opt.local_pretrain_checkpoint_path - if args_opt.do_train.lower() == "false" and args_opt.do_eval.lower() == "true": - mox.file.copy_parallel(args_opt.save_finetune_checkpoint_path, 
args_opt.load_finetune_checkpoint_path) - - if args_opt.do_train.lower() == "false" and args_opt.do_eval.lower() == "false": - raise ValueError("At least one of 'do_train' or 'do_eval' must be true") - if args_opt.do_train.lower() == "true" and args_opt.train_data_file_path == "": - raise ValueError("'train_data_file_path' must be set when do finetune task") - if args_opt.do_eval.lower() == "true" and args_opt.eval_data_file_path == "": - raise ValueError("'eval_data_file_path' must be set when do evaluation task") - - target = args_opt.device_target - if target == "Ascend": - context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id) - elif target == "GPU": - context.set_context(mode=context.GRAPH_MODE, device_target="GPU") - if ernie_net_cfg.compute_type != mstype.float32: - logger.warning('GPU only support fp32 temporarily, run with fp32.') - ernie_net_cfg.compute_type = mstype.float32 - else: - raise Exception("Target error, GPU or Ascend is supported.") - - netwithloss = ErnieCLS(ernie_net_cfg, True, num_labels=args_opt.num_class, dropout_prob=0.1) - - if args_opt.do_train.lower() == "true": - ds = create_classification_dataset(batch_size=args_opt.train_batch_size, repeat_count=1, - data_file_path=args_opt.train_data_file_path, - schema_file_path=args_opt.schema_file_path, - do_shuffle=(args_opt.train_data_shuffle.lower() == "true")) - do_train(ds, netwithloss, load_pretrain_checkpoint_path, save_finetune_checkpoint_path, epoch_num) - - if save_finetune_checkpoint_path == "": - load_finetune_checkpoint_dir = _cur_dir - else: - load_finetune_checkpoint_dir = make_directory(save_finetune_checkpoint_path) - load_finetune_checkpoint_path = LoadNewestCkpt(load_finetune_checkpoint_dir, - ds.get_dataset_size(), epoch_num, "classifier") - #frozen_to_air - ckpt_model = load_finetune_checkpoint_path - frozen_to_air_args = {'ckpt_file': ckpt_model, - 'batch_size': 1, - 'file_name': CACHE_TRAINING_URL + 'emotect.air', - 
'file_format': 'AIR'} - net = ErnieCLS(ernie_net_cfg, False, num_labels=args_opt.num_class) - frozen_to_air(net, frozen_to_air_args) - - mox.file.copy_parallel(CACHE_TRAINING_URL, args_opt.train_url) - - if args_opt.do_eval.lower() == "true": - ds = create_classification_dataset(batch_size=args_opt.eval_batch_size, repeat_count=1, - data_file_path=args_opt.eval_data_file_path, - schema_file_path=args_opt.schema_file_path, - do_shuffle=(args_opt.eval_data_shuffle.lower() == "true"), - drop_remainder=False) - do_eval(ds, ErnieCLS, args_opt.num_class, load_finetune_checkpoint_path) - - if args_opt.modelarts.lower() == 'true' and args_opt.do_train.lower() == "true": - mox.file.copy_parallel(load_finetune_checkpoint_path, - args_opt.train_url + load_finetune_checkpoint_path.split('/')[-1]) - - -def frozen_to_air(net, args): - """frozen model to air""" - load_checkpoint(args.get("ckpt_file"), net=net) - net.set_train(False) - - batch_size = args.get("batch_size") - input_ids = Tensor(np.zeros([batch_size, ernie_net_cfg.seq_length]), mstype.int32) - input_mask = Tensor(np.zeros([batch_size, ernie_net_cfg.seq_length]), mstype.int32) - token_type_id = Tensor(np.zeros([batch_size, ernie_net_cfg.seq_length]), mstype.int32) - - input_data = [input_ids, input_mask, token_type_id] - export(net.ernie, *input_data, file_name=args.get("file_name"), file_format=args.get("file_format")) - -if __name__ == "__main__": - run_classifier() +# Copyright 2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +''' +Ernie finetune and evaluation script. +''' + +import os +import time +import argparse +import numpy as np + +import mindspore.common.dtype as mstype +from mindspore import Tensor, context, export +from mindspore import log as logger +from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell +from mindspore.nn.optim import Adam, AdamWeightDecay, Adagrad +from mindspore.train.model import Model +from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor +from mindspore.train.serialization import load_checkpoint, load_param_into_net + +from src.ernie_for_finetune import ErnieFinetuneCell, ErnieCLS +from src.finetune_eval_config import optimizer_cfg, ernie_net_cfg +from src.dataset import create_classification_dataset +from src.assessment_method import Accuracy +from src.utils import make_directory, LossCallBack, LoadNewestCkpt, ErnieLearningRate + +_cur_dir = os.getcwd() +CACHE_TRAINING_URL = "/cache/training/" + +if not os.path.isdir(CACHE_TRAINING_URL): + os.makedirs(CACHE_TRAINING_URL) + +def do_train(dataset=None, network=None, load_checkpoint_path="", save_checkpoint_path="", epoch_num=1): + """ do train """ + if load_checkpoint_path == "": + raise ValueError("Pretrain model missed, finetune task must load pretrain model!") + steps_per_epoch = 500 + # optimizer + if optimizer_cfg.optimizer == 'AdamWeightDecay': + lr_schedule = ErnieLearningRate(learning_rate=optimizer_cfg.AdamWeightDecay.learning_rate, + end_learning_rate=optimizer_cfg.AdamWeightDecay.end_learning_rate, + warmup_steps=int(steps_per_epoch * epoch_num * 0.1), + decay_steps=steps_per_epoch * epoch_num, + power=optimizer_cfg.AdamWeightDecay.power) + params = network.trainable_params() + decay_params = list(filter(optimizer_cfg.AdamWeightDecay.decay_filter, params)) + 
other_params = list(filter(lambda x: not optimizer_cfg.AdamWeightDecay.decay_filter(x), params)) + group_params = [{'params': decay_params, 'weight_decay': optimizer_cfg.AdamWeightDecay.weight_decay}, + {'params': other_params, 'weight_decay': 0.0}] + + optimizer = AdamWeightDecay(group_params, lr_schedule, eps=optimizer_cfg.AdamWeightDecay.eps) + elif optimizer_cfg.optimizer == 'Adam': + optimizer = Adam(network.trainable_params(), learning_rate=optimizer_cfg.Adam.learning_rate) + elif optimizer_cfg.optimizer == 'Adagrad': + optimizer = Adagrad(network.trainable_params(), learning_rate=optimizer_cfg.Adagrad.learning_rate) + # load checkpoint into network + ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch, keep_checkpoint_max=10) + ckpoint_cb = ModelCheckpoint(prefix="classifier", + directory=None if save_checkpoint_path == "" else save_checkpoint_path, + config=ckpt_config) + param_dict = load_checkpoint(load_checkpoint_path) + unloaded_params = load_param_into_net(network, param_dict) + if len(unloaded_params) > 2: + print(unloaded_params) + logger.warning('Loading ernie model failed, please check the checkpoint file.') + + update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2 ** 32, scale_factor=2, scale_window=1000) + netwithgrads = ErnieFinetuneCell(network, optimizer=optimizer, scale_update_cell=update_cell) + model = Model(netwithgrads) + callbacks = [TimeMonitor(dataset.get_dataset_size()), LossCallBack(dataset.get_dataset_size()), ckpoint_cb] + model.train(epoch_num, dataset, callbacks=callbacks) + + +def do_eval(dataset=None, network=None, num_class=2, load_checkpoint_path=""): + """ do eval """ + if load_checkpoint_path == "": + raise ValueError("Finetune model missed, evaluation task must load finetune model!") + net_for_pretraining = network(ernie_net_cfg, False, num_class) + net_for_pretraining.set_train(False) + param_dict = load_checkpoint(load_checkpoint_path) + load_param_into_net(net_for_pretraining, param_dict) + + 
callback = Accuracy() + + evaluate_times = [] + columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"] + for data in dataset.create_dict_iterator(num_epochs=1): + input_data = [] + for i in columns_list: + input_data.append(data[i]) + input_ids, input_mask, token_type_id, label_ids = input_data + time_begin = time.time() + logits = net_for_pretraining(input_ids, input_mask, token_type_id, label_ids) + time_end = time.time() + evaluate_times.append(time_end - time_begin) + callback.update(logits, label_ids) + print("==============================================================") + print("acc_num {} , total_num {}, accuracy {:.6f}".format(callback.acc_num, callback.total_num, + callback.acc_num / callback.total_num)) + print("(w/o first and last) elapsed time: {}, per step time : {}".format( + sum(evaluate_times[1:-1]), sum(evaluate_times[1:-1])/(len(evaluate_times) - 2))) + print("==============================================================") + + +def run_classifier(): + """run classifier task""" + parser = argparse.ArgumentParser(description="run classifier") + parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU"], + help="Device type, default is Ascend") + parser.add_argument("--do_train", type=str, default="false", choices=["true", "false"], + help="Enable train, default is false") + parser.add_argument("--do_eval", type=str, default="false", choices=["true", "false"], + help="Enable eval, default is false") + parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") + parser.add_argument("--epoch_num", type=int, default=3, help="Epoch number, default is 3.") + parser.add_argument("--num_class", type=int, default=3, help="The number of class, default is 3.") + parser.add_argument("--train_data_shuffle", type=str, default="true", choices=["true", "false"], + help="Enable train data shuffle, default is true") + parser.add_argument("--eval_data_shuffle", type=str, 
default="false", choices=["true", "false"], + help="Enable eval data shuffle, default is false") + parser.add_argument("--train_batch_size", type=int, default=32, help="Train batch size, default is 32") + parser.add_argument("--eval_batch_size", type=int, default=1, help="Eval batch size, default is 1") + parser.add_argument("--save_finetune_checkpoint_path", type=str, default="", help="Save checkpoint path") + parser.add_argument("--load_pretrain_checkpoint_path", type=str, default="", help="Load checkpoint file path") + parser.add_argument("--local_pretrain_checkpoint_path", type=str, default="", + help="Local pretrain checkpoint file path") + parser.add_argument("--load_finetune_checkpoint_path", type=str, default="", help="Load checkpoint file path") + parser.add_argument("--train_data_file_path", type=str, default="", + help="Data path, it is better to use absolute path") + parser.add_argument("--eval_data_file_path", type=str, default="", + help="Data path, it is better to use absolute path") + parser.add_argument("--schema_file_path", type=str, default="", + help="Schema path, it is better to use absolute path") + parser.add_argument('--data_url', type=str, default=None, help='Dataset path for ModelArts') + parser.add_argument('--train_url', type=str, default=None, help='Train output path for ModelArts') + parser.add_argument('--modelarts', type=str, default='false', + help='train on modelarts or not, default is false') + args_opt = parser.parse_args() + + epoch_num = args_opt.epoch_num + load_pretrain_checkpoint_path = args_opt.load_pretrain_checkpoint_path + save_finetune_checkpoint_path = args_opt.save_finetune_checkpoint_path + load_finetune_checkpoint_path = args_opt.load_finetune_checkpoint_path + + if args_opt.modelarts.lower() == 'true': + import moxing as mox + mox.file.copy_parallel(args_opt.data_url, '/cache/data') + mox.file.copy_parallel(args_opt.load_pretrain_checkpoint_path, args_opt.local_pretrain_checkpoint_path) + 
load_pretrain_checkpoint_path = args_opt.local_pretrain_checkpoint_path + if args_opt.do_train.lower() == "false" and args_opt.do_eval.lower() == "true": + mox.file.copy_parallel(args_opt.save_finetune_checkpoint_path, args_opt.load_finetune_checkpoint_path) + + if args_opt.do_train.lower() == "false" and args_opt.do_eval.lower() == "false": + raise ValueError("At least one of 'do_train' or 'do_eval' must be true") + if args_opt.do_train.lower() == "true" and args_opt.train_data_file_path == "": + raise ValueError("'train_data_file_path' must be set when do finetune task") + if args_opt.do_eval.lower() == "true" and args_opt.eval_data_file_path == "": + raise ValueError("'eval_data_file_path' must be set when do evaluation task") + + target = args_opt.device_target + if target == "Ascend": + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id) + elif target == "GPU": + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + if ernie_net_cfg.compute_type != mstype.float32: + logger.warning('GPU only support fp32 temporarily, run with fp32.') + ernie_net_cfg.compute_type = mstype.float32 + else: + raise Exception("Target error, GPU or Ascend is supported.") + + netwithloss = ErnieCLS(ernie_net_cfg, True, num_labels=args_opt.num_class, dropout_prob=0.1) + + if args_opt.do_train.lower() == "true": + ds = create_classification_dataset(batch_size=args_opt.train_batch_size, repeat_count=1, + data_file_path=args_opt.train_data_file_path, + schema_file_path=args_opt.schema_file_path, + do_shuffle=(args_opt.train_data_shuffle.lower() == "true")) + do_train(ds, netwithloss, load_pretrain_checkpoint_path, save_finetune_checkpoint_path, epoch_num) + + if save_finetune_checkpoint_path == "": + load_finetune_checkpoint_dir = _cur_dir + else: + load_finetune_checkpoint_dir = make_directory(save_finetune_checkpoint_path) + load_finetune_checkpoint_path = LoadNewestCkpt(load_finetune_checkpoint_dir, + ds.get_dataset_size(), 
epoch_num, "classifier") + #frozen_to_air + ckpt_model = load_finetune_checkpoint_path + frozen_to_air_args = {'ckpt_file': ckpt_model, + 'batch_size': 1, + 'file_name': CACHE_TRAINING_URL + 'emotect.air', + 'file_format': 'AIR'} + net = ErnieCLS(ernie_net_cfg, False, num_labels=args_opt.num_class) + frozen_to_air(net, frozen_to_air_args) + + mox.file.copy_parallel(CACHE_TRAINING_URL, args_opt.train_url) + + if args_opt.do_eval.lower() == "true": + ds = create_classification_dataset(batch_size=args_opt.eval_batch_size, repeat_count=1, + data_file_path=args_opt.eval_data_file_path, + schema_file_path=args_opt.schema_file_path, + do_shuffle=(args_opt.eval_data_shuffle.lower() == "true"), + drop_remainder=False) + do_eval(ds, ErnieCLS, args_opt.num_class, load_finetune_checkpoint_path) + + if args_opt.modelarts.lower() == 'true' and args_opt.do_train.lower() == "true": + mox.file.copy_parallel(load_finetune_checkpoint_path, + args_opt.train_url + load_finetune_checkpoint_path.split('/')[-1]) + + +def frozen_to_air(net, args): + """frozen model to air""" + load_checkpoint(args.get("ckpt_file"), net=net) + net.set_train(False) + + batch_size = args.get("batch_size") + input_ids = Tensor(np.zeros([batch_size, ernie_net_cfg.seq_length]), mstype.int32) + input_mask = Tensor(np.zeros([batch_size, ernie_net_cfg.seq_length]), mstype.int32) + token_type_id = Tensor(np.zeros([batch_size, ernie_net_cfg.seq_length]), mstype.int32) + + input_data = [input_ids, input_mask, token_type_id] + export(net.ernie, *input_data, file_name=args.get("file_name"), file_format=args.get("file_format")) + +if __name__ == "__main__": + run_classifier() diff --git a/official/nlp/ernie/export.py b/official/nlp/ernie/export.py index e56a863eb97be5f6718e368a9350298232ccd8b7..11d563e0631f6b2a2699fc74eb32d351653ef4b2 100644 --- a/official/nlp/ernie/export.py +++ b/official/nlp/ernie/export.py @@ -29,7 +29,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") 
parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--number_labels", type=int, default=3, help="number of labels ") parser.add_argument("--ckpt_file", type=str, required=True, help="Ernie ckpt file.") -parser.add_argument("--file_name", type=str, default="ernie_finetune", help="Ernie output air name.") +parser.add_argument("--file_name", type=str, default="ernie_finetune", help="Ernie output mindir name.") parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help="file format") parser.add_argument("--device_target", type=str, default="Ascend", diff --git a/official/nlp/gnmt_v2/README.md b/official/nlp/gnmt_v2/README.md index caf0b4d8a1406be7ed228132fd68af3487580070..398c612b71298b87c5733d559ba1a14472097945 100644 --- a/official/nlp/gnmt_v2/README.md +++ b/official/nlp/gnmt_v2/README.md @@ -237,7 +237,7 @@ The GNMT network script and code result are as follows: ├── create_dataset.py // Dataset preparation. ├── eval.py // Infer API entry. ├── eval_onnx.py // ONNX infer API entry. - ├── export.py // Export checkpoint file into air models. + ├── export.py // Export checkpoint file into air/mindir models. ├── mindspore_hub_conf.py // Hub config. ├── pip-requirements.txt // Requirements of third party package for modelarts. ├── requirements.txt // Requirements of third party package. diff --git a/research/audio/dscnn/default_config.yaml b/research/audio/dscnn/default_config.yaml index 212cfc4a55f6eed15c3a8ed0a018d1893a05d101..fd4aa275d32c615a88eb43c1cf15b0ae5c081301 100644 --- a/research/audio/dscnn/default_config.yaml +++ b/research/audio/dscnn/default_config.yaml @@ -98,7 +98,7 @@ data_path: "The location of input data" output_pah: "The location of the output file" device_target: "device id of GPU or Ascend. 
(Default: None)" enable_profiling: "Whether enable profiling while training default: False" -file_name: "CNN&CTC output air name" +file_name: "DSCNN output mindir name" file_format: "choices [AIR, MINDIR]" val_ckpt_path: "checkpoint path." is_distributed: "distributed training" diff --git a/research/audio/fcn-4/README.md b/research/audio/fcn-4/README.md index 10b81360b3235f052dc3ece523c1df38143617bd..460c8f10a802e9b0c152a65166fb30fe0b5fa65e 100644 --- a/research/audio/fcn-4/README.md +++ b/research/audio/fcn-4/README.md @@ -203,7 +203,7 @@ SLOG_PRINT_TO_STDOUT=1 python eval.py --device_id 0 | └─moxing_adapter.py // Parameter processing ├── train.py // training script ├── eval.py // evaluation script - ├── export.py // export model in air format + ├── export.py // export model in air/mindir format ├─default_config.yaml // Training parameter profile └─train.py // Train net ``` diff --git a/research/audio/wavenet/README.md b/research/audio/wavenet/README.md index f6bf345677ffc19b53eef0107932f2620ba1115e..3124160d9ed8f302bc029f32b0118cbb96030dab 100644 --- a/research/audio/wavenet/README.md +++ b/research/audio/wavenet/README.md @@ -80,7 +80,7 @@ Dataset used: [The LJ Speech Dataset](<https://keithito.com/LJ-Speech-Dataset>) ├── audio.py // Audio utils. Note this script should be downloaded from the above link ├── compute-meanvar-stats.py // Compute mean-variance Normalization stats. Note this script should be downloaded from the above link ├── evaluate.py // Evaluation - ├── export.py // Convert mindspore model to air model + ├── export.py // Convert mindspore model to air/mindir model ├── hparams.py // Hyper-parameter configuration. Note this script should be downloaded from the above link ├── mksubset.py // Make subset of dataset. Note this script should be downloaded from the above link ├── preprocess.py // Preprocess dataset.
Note this script should be downloaded from the above link diff --git a/research/cv/AttGAN/export.py b/research/cv/AttGAN/export.py index 0f95421780dce31efbf29a72554358146bdc43c4..5239b8a9e788bab40f1d4d15c1bcb5fcd5612e2b 100644 --- a/research/cv/AttGAN/export.py +++ b/research/cv/AttGAN/export.py @@ -28,7 +28,7 @@ parser = argparse.ArgumentParser(description='Attribute Edit') parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument('--gen_ckpt_name', type=str, default='') -parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='MINDIR', help='file format') parser.add_argument('--experiment_name', dest='experiment_name', required=True) args_ = parser.parse_args() diff --git a/research/cv/FaceAttribute/README.md b/research/cv/FaceAttribute/README.md index 82904fdd62fc0b099e556fe55b17238cbfbd16fb..f3ff641baa32e002558a6aa0b3e27d891b9bce4b 100644 --- a/research/cv/FaceAttribute/README.md +++ b/research/cv/FaceAttribute/README.md @@ -135,7 +135,7 @@ The entire code structure is as following: ├─ run_distribute_train_gpu.sh # launch distributed training(8p) in GPU ├─ run_eval.sh # launch evaluating in ascend ├─ run_eval_gpu.sh # launch evaluating in gpu - └─ run_export.sh # launch exporting air model + └─ run_export.sh # launch exporting air/mindir model ├─ run_infer_310.sh # shell script for 310 inference ├─ src ├─ FaceAttribute @@ -158,7 +158,7 @@ The entire code structure is as following: ├─ preprocess.py # preprocess scripts ├─ train.py # training scripts ├─ eval.py # evaluation scripts - └─ export.py # export air model + └─ export.py # export air/mindir model ``` ## [Running Example](#contents) @@ -386,7 +386,7 @@ mask f1: 0.9992691394116572 ### Convert model -If you want to infer the network on Ascend 310, you should 
convert the model to AIR: +If you want to infer the network on Ascend 310, you should convert the model to AIR/MINDIR: ```bash Ascend cd ./scripts diff --git a/research/cv/FaceAttribute/export.py b/research/cv/FaceAttribute/export.py index d4cb0d8c934342858ee7d68fb71f952cbb0bd281..65f09ed307566c2bd908360eee88409b2b76165d 100644 --- a/research/cv/FaceAttribute/export.py +++ b/research/cv/FaceAttribute/export.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -"""Convert ckpt to air.""" +"""Convert ckpt to air/mindir.""" import os import numpy as np diff --git a/research/cv/FaceDetection/README.md b/research/cv/FaceDetection/README.md index a225a28025f64e497ee3f56ce807b213dab84f7c..330739f9e6b908bf656798b0fc757ea465fb6b20 100644 --- a/research/cv/FaceDetection/README.md +++ b/research/cv/FaceDetection/README.md @@ -101,7 +101,7 @@ The entire code structure is as following: ├─ run_distribute_train_gpu.sh # launch distributed training(8p) in GPU ├─ run_eval.sh # launch evaluating in ascend ├─ run_infer_310.sh # launch inference on Ascend310 - └─ run_export.sh # launch exporting air model + └─ run_export.sh # launch exporting air/mindir model ├─ src ├─ FaceDetection ├─ voc_wrapper.py # get detection results @@ -122,7 +122,7 @@ The entire code structure is as following: ├─ postprocess.py # postprocess script ├─ preprocess.py # preprocess script ├─ bin.py # bin script - └─ export.py # export air model + └─ export.py # export air/mindir model ``` ## [Running Example](#contents) diff --git a/research/cv/FaceDetection/export.py b/research/cv/FaceDetection/export.py index c46ed4572c8063f282556abe2983cc1ba8b5a899..cfbd726575fa19e118f1071666bafa196d170155 100644 --- a/research/cv/FaceDetection/export.py +++ b/research/cv/FaceDetection/export.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions 
and # limitations under the License. # ============================================================================ -"""Convert ckpt to air.""" +"""Convert ckpt to air/mindir.""" import os import numpy as np diff --git a/research/cv/FaceQualityAssessment/README.md b/research/cv/FaceQualityAssessment/README.md index 78b4fd4941cb9685c453819f95ed1cb2091594e8..cc4705702d7f1bd1b283843244bfe0c4ae7702e8 100644 --- a/research/cv/FaceQualityAssessment/README.md +++ b/research/cv/FaceQualityAssessment/README.md @@ -99,7 +99,7 @@ The entire code structure is as following: ├─ run_standalone_train.sh # launch standalone training(1p) in ascend ├─ run_distribute_train.sh # launch distributed training(8p) in ascend ├─ run_eval.sh # launch evaluating in ascend - ├─ run_export.sh # launch exporting air model + ├─ run_export.sh # launch exporting air/mindir model ├─ run_standalone_train_gpu.sh # launch standalone training(1p) in gpu ├─ run_distribute_train_gpu.sh # launch distributed training(8p) in gpu ├─ run_eval_gpu.sh # launch evaluating in gpu @@ -116,7 +116,7 @@ The entire code structure is as following: ├─ default_config.yaml # Configurations ├─ train.py # training scripts ├─ eval.py # evaluation scripts - └─ export.py # export air model + └─ export.py # export air/mindir model ``` ## [Running Example](#contents) @@ -411,7 +411,7 @@ MAE of elur:17.69762644062826 ### Convert model -If you want to infer the network on Ascend 310, you should convert the model to AIR: +If you want to infer the network on Ascend 310, you should convert the model to AIR/MINDIR: ```bash Ascend diff --git a/research/cv/FaceRecognition/README.md b/research/cv/FaceRecognition/README.md index 1e4b20b184e4265ff1f415a0e60276dbc2905e7a..8c95ad9b1dfa7ccd1807ef1654a82b187be7c005 100644 --- a/research/cv/FaceRecognition/README.md +++ b/research/cv/FaceRecognition/README.md @@ -81,7 +81,7 @@ The entire code structure is as following: │ ├── run_eval.sh // shell script for evaluation on Ascend │ ├── 
run_eval_cpu.sh // shell script for evaluation on CPU │ ├── run_eval_gpu.sh // shell script for evaluation on gpu - │ ├── run_export.sh // shell script for exporting air model + │ ├── run_export.sh // shell script for exporting air/mindir model │ ├── run_standalone_train_base.sh // shell script for standalone training on Ascend │ ├── run_standalone_train_beta.sh // shell script for standalone training on Ascend │ ├── run_standalone_train_for_gpu.sh // shell script for standalone training on GPU @@ -115,7 +115,7 @@ The entire code structure is as following: ├─ inference_config_cpu.yaml // parameter configuration ├─ train.py // training scripts ├─ eval.py // evaluation scripts - └─ export.py // export air model + └─ export.py // export air/mindir model ``` ## [Running Example](#contents) @@ -351,7 +351,7 @@ If you want to run in modelarts, please check the official documentation of [mod ### Convert model -If you want to infer the network on Ascend 310, you should convert the model to AIR: +If you want to infer the network on Ascend 310, you should convert the model to AIR/MINDIR: ```bash cd ./scripts diff --git a/research/cv/FaceRecognition/export.py b/research/cv/FaceRecognition/export.py index ea598ce676771fb663ce06f04a66a4ec7b489725..7d5f9259a0eeef0fe0b31f9a8d157b768ae35502 100644 --- a/research/cv/FaceRecognition/export.py +++ b/research/cv/FaceRecognition/export.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -"""Convert ckpt to air.""" +"""Convert ckpt to air/mindir.""" import os import numpy as np diff --git a/research/cv/Inception-v2/export.py b/research/cv/Inception-v2/export.py index 06002d198a091a28af1e30f56aae1567873679d5..3799dcc757644ab262910f8b222857f2a47721b2 100644 --- a/research/cv/Inception-v2/export.py +++ b/research/cv/Inception-v2/export.py @@ -39,7 +39,7 @@ def run_export(): parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="inceptionv2", help="output file name.") - parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='AIR', help='file format') + parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--platform", type=str, choices=["Ascend", "GPU"], default="Ascend", help="platform") args = parser.parse_args() diff --git a/research/cv/LightCNN/export.py b/research/cv/LightCNN/export.py index 6ebc21fc88198346cb4c0070bb61a4c0b60fc1a7..df2e84c5314c716b3afd3380e6c3e50743b837c7 100644 --- a/research/cv/LightCNN/export.py +++ b/research/cv/LightCNN/export.py @@ -26,7 +26,7 @@ parser.add_argument("--device_id", type=int, default=4, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="lightcnn", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", 
"CPU"], default="Ascend", help="device target") args = parser.parse_args() diff --git a/research/cv/STGAN/src/utils/args.py b/research/cv/STGAN/src/utils/args.py index c4ba346487e2465475a9259ae9a9b1269774629d..f1bd1d9c3b628875a8ef499a7b7d086633069f3b 100644 --- a/research/cv/STGAN/src/utils/args.py +++ b/research/cv/STGAN/src/utils/args.py @@ -255,7 +255,7 @@ def get_args(phase): parser.add_argument('--outputs_dir', type=str, default='./outputs', \ help='models are saved here, default is ./outputs.') parser.add_argument("--dataroot", type=str, default='./dataset') - parser.add_argument('--file_format', type=str, choices=['AIR', 'ONNX', 'MINDIR'], default='AIR', \ + parser.add_argument('--file_format', type=str, choices=['AIR', 'ONNX', 'MINDIR'], default='MINDIR', \ help='file format') parser.add_argument('--file_name', type=str, default='STGAN', help='output file name prefix.') parser.add_argument('--ckpt_path', default=None, help='path of checkpoint file.') diff --git a/research/cv/arcface/export.py b/research/cv/arcface/export.py index 16f8fb0e162018439afb519ba3c9939d51fd1d5a..52ca1d9b26c68d3bb2163205fc401ce26cbc5fbc 100644 --- a/research/cv/arcface/export.py +++ b/research/cv/arcface/export.py @@ -29,7 +29,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=64, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="arcface", help="output file name.") -parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", help="device target") parser.add_argument('--dataset_name', type=str, default='MS1MV2', 
choices=['MS1MV2'], diff --git a/research/cv/centernet/README.md b/research/cv/centernet/README.md index e9d54d857aa93b782364a181f11ac975e536429b..b0c90aada2fdc711ed1127c66ac2cde836c50e78 100644 --- a/research/cv/centernet/README.md +++ b/research/cv/centernet/README.md @@ -210,7 +210,7 @@ Dataset used: [COCO2017](https://cocodataset.org/) ├── centernet ├── train.py // training scripts ├── eval.py // testing and evaluation outputs - ├── export.py // convert mindspore model to air model + ├── export.py // convert mindspore model to air/mindir model ├── README.md // descriptions about CenterNet ├── default_config.yaml // parameter configuration ├── scripts @@ -342,9 +342,9 @@ eval_config: ```text config for export. - input_res: dataset_config.input_res // input resolution of the model air, default is [512, 512] + input_res: dataset_config.input_res // input resolution of the model air/mindir, default is [512, 512] ckpt_file: "./ckpt_file.ckpt" // checkpoint file, default is "./ckkt_file.ckpt" - export_format: "MINDIR" // the exported format of model air, default is MINDIR + export_format: "MINDIR" // the exported format of model air/mindir, default is MINDIR export_name: "CenterNet_MultiPose" // the exported file name, default is "CentNet_MultiPose" ``` diff --git a/research/cv/hardnet/export.py b/research/cv/hardnet/export.py index be33c2cc8cd303a186409770f0ccf15ee5f2f688..72a8cf7bc6683e9cc84409e5bde6bf7ed5a19f05 100644 --- a/research/cv/hardnet/export.py +++ b/research/cv/hardnet/export.py @@ -30,7 +30,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="hardnet", help="output file name.") -parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format') 
+parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", help="device target") diff --git a/research/cv/ibnnet/export.py b/research/cv/ibnnet/export.py index 73f6533d2705882525bbc6cf7a6df9f7ccd72443..e456a2d1e7784d50c8427b8f5c320b91dc5b3464 100644 --- a/research/cv/ibnnet/export.py +++ b/research/cv/ibnnet/export.py @@ -30,7 +30,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="ibnnet", help="output file name.") -parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", help="device target") parser.add_argument('--dataset_name', type=str, default='imagenet', choices=['imagenet'], diff --git a/research/cv/inception_resnet_v2/export.py b/research/cv/inception_resnet_v2/export.py index 2fe2e862f1bc11d5ba2acf13acbf8463be33e469..fbb7b267ea7d646fb4ed669295465b28014dafed 100644 --- a/research/cv/inception_resnet_v2/export.py +++ b/research/cv/inception_resnet_v2/export.py @@ -25,8 +25,8 @@ from src.inception_resnet_v2 import Inception_resnet_v2 parser = argparse.ArgumentParser(description='inception_resnet_v2 export') parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument('--ckpt_file', type=str, required=True, help='inception_resnet_v2 ckpt file.') -parser.add_argument('--file_name', type=str, default='inception_resnet_v2', help='inception_resnet_v2 
output air name.') -parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_name', type=str, default='inception_resnet_v2', help='output air or mindir name.') +parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help='file format') parser.add_argument('--width', type=int, default=299, help='input width') parser.add_argument('--height', type=int, default=299, help='input height') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", diff --git a/research/cv/lresnet100e_ir/export.py b/research/cv/lresnet100e_ir/export.py index 06d9a03eed239f51893b0c4e0354d373125741b4..4016d6925a4c1131e484cd2bdff2f97406c23057 100644 --- a/research/cv/lresnet100e_ir/export.py +++ b/research/cv/lresnet100e_ir/export.py @@ -30,7 +30,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=64, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="arcface", help="output file name.") -parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="GPU", help="device target") parser.add_argument('--dataset_name', type=str, default='MS1MV2', choices=['MS1MV2'], diff --git a/research/cv/relationnet/export.py b/research/cv/relationnet/export.py index 49ce8dc415ee50d4041ad16cd693dfff4be53e7c..c3bc4bd86ea4243004e589461127ca4edd4d7bdc 100644 --- a/research/cv/relationnet/export.py +++ b/research/cv/relationnet/export.py @@ -28,7 +28,7 @@ parser.add_argument("--device_id", 
type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="relationnet", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, choices=["Ascend"], default="Ascend", help="device target") args = parser.parse_args() diff --git a/research/cv/res2net_faster_rcnn/README.md b/research/cv/res2net_faster_rcnn/README.md index a2dd044e9b2d63440b42bf3b3d2be71ffc665b90..3e6559f674296b8b0a978bdb18d8666401c2b6ff 100644 --- a/research/cv/res2net_faster_rcnn/README.md +++ b/research/cv/res2net_faster_rcnn/README.md @@ -125,7 +125,7 @@ bash run_distribute_train_ascend.sh [RANK_TABLE_FILE] [PRETRAINED_MODEL] [BACKBO bash run_eval_ascend.sh [VALIDATION_JSON_FILE] [CHECKPOINT_PATH] [BACKBONE] [COCO_ROOT] [MINDRECORD_DIR](option) # inference -bash run_infer_310.sh [AIR_PATH] [DATA_PATH] [ANN_FILE_PATH] +bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [ANN_FILE_PATH] ``` ## Run on GPU diff --git a/research/cv/resnet50_bam/README.md b/research/cv/resnet50_bam/README.md index 9367f89f3bdcea82793d32481271a6aa77b5883d..5a9b4e871ae4902b3f741db65dcd4e4508f0535f 100644 --- a/research/cv/resnet50_bam/README.md +++ b/research/cv/resnet50_bam/README.md @@ -133,7 +133,7 @@ After installing MindSpore through the official website, you can follow the step ├── README_CN.md // resnet50_bam description in Chinese ├── create_imagenet2012_label.py // create imagenet 2012 label ├── eval.py // evaluation script - ├── export.py // export a ckpt to air/mindi + ├── export.py // export a ckpt to air/mindir ├── postprocess.py // Ascend310 postrocess ├── 
train.py // trainig script ``` @@ -239,7 +239,7 @@ python export.py --ckpt_file [CKPT_FILE] --device_target [DEVICE_TARGET] --file_ ### Inference -Before inference, we need to export the model first. Mindir can be exported in any environment, and the air model can only be exported in the Shengteng 910 environment. The following shows an example of using the mindir model to perform inference. +Before inference, we need to export the model first. Mindir can be exported in any environment, and the air model can only be exported in the Ascend 910 environment. The following shows an example of using the mindir model to perform inference. - Use ImageNet2012 data set for inference on Shengteng 31 diff --git a/research/cv/resnetv2/export.py b/research/cv/resnetv2/export.py index 22245a6c28879b2c03ff0a751e5414d2cd8ff5f1..3848082ad9a2ab3b13079170c2f7c54f37d67f6c 100644 --- a/research/cv/resnetv2/export.py +++ b/research/cv/resnetv2/export.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License.
# ============================================================================ -"""Convert ckpt to air.""" +"""Convert ckpt to air/mindir.""" import argparse import numpy as np @@ -28,7 +28,7 @@ parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint fil parser.add_argument("--file_name", type=str, default="resnetv2", help="output file name.") parser.add_argument('--width', type=int, default=32, help='input width') parser.add_argument('--height', type=int, default=32, help='input height') -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU", "CPU"], help="device target(default: Ascend)") args = parser.parse_args() diff --git a/research/cv/sknet/export.py b/research/cv/sknet/export.py index f5b668e1ab0410a7f456b6449d6a169da19d542b..ef9a9fdcab45ab33a29c076ea6369eb36699c769 100644 --- a/research/cv/sknet/export.py +++ b/research/cv/sknet/export.py @@ -32,7 +32,7 @@ parser.add_argument("--ckpt_file", type=str, default="/path/to/sknet-90_195.ckpt parser.add_argument("--file_name", type=str, default="sknet_export", help="output file name.") parser.add_argument('--width', type=int, default=224, help='input width') parser.add_argument('--height', type=int, default=224, help='input height') -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU"], help="device target(default: Ascend)") args = parser.parse_args() diff --git a/research/cv/squeezenet1_1/export.py b/research/cv/squeezenet1_1/export.py index 
3d34979dea8ee00b24428e39cdbacb4d7e1fa1d5..67e604736e58170b33af0a0face3d2856de141d6 100644 --- a/research/cv/squeezenet1_1/export.py +++ b/research/cv/squeezenet1_1/export.py @@ -32,7 +32,7 @@ parser.add_argument('--net', type=str, default='squeezenet', help='Model.') parser.add_argument('--dataset', type=str, default='imagenet', help='Dataset.') parser.add_argument("--file_name", type=str, default="squeezenet", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU", "CPU"], help="device target (default: Ascend)") args = parser.parse_args() diff --git a/research/cv/ssd_inceptionv2/export.py b/research/cv/ssd_inceptionv2/export.py index 321040b819697e15a5a6122012e00a686ebfb7a8..47274a9a3af9e7013255c6e031d26bd46a2b2fd8 100644 --- a/research/cv/ssd_inceptionv2/export.py +++ b/research/cv/ssd_inceptionv2/export.py @@ -30,7 +30,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="ssd", help="output file name.") -parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend"], default="Ascend", help="device target,currently only Ascend is supported") args = parser.parse_args() diff --git a/research/cv/ssd_resnet34/export.py b/research/cv/ssd_resnet34/export.py index 
5ba73de32319f64863c98ab84bbedbfa2371ce7d..27f872d70384fd1c144d59db76ce51d8890e6778 100644 --- a/research/cv/ssd_resnet34/export.py +++ b/research/cv/ssd_resnet34/export.py @@ -30,7 +30,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="ssd", help="output file name.") -parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", help="device target") args = parser.parse_args() diff --git a/research/cv/ssd_resnet50/export.py b/research/cv/ssd_resnet50/export.py index 290ed3763a306a1d515742fa9e8e3855742299f0..2dfb6dcb3696ebe0a4cf16d81fabdca63848bf35 100644 --- a/research/cv/ssd_resnet50/export.py +++ b/research/cv/ssd_resnet50/export.py @@ -26,7 +26,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="ssd", help="output file name.") -parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", help="device target") args = parser.parse_args() diff --git a/research/cv/stgcn/export.py b/research/cv/stgcn/export.py index 
fa413340d38b4e7694acc94b2008c7c92ad5701b..04d614d2b06c0d0d0130447588b0a76c77a6369f 100644 --- a/research/cv/stgcn/export.py +++ b/research/cv/stgcn/export.py @@ -39,7 +39,7 @@ parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint fil parser.add_argument("--n_pred", type=int, default=3, help="The number of time interval for predcition.") parser.add_argument("--graph_conv_type", type=str, default="chebconv", help="Grapg convolution type.") parser.add_argument("--file_name", type=str, default="stgcn", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") args = parser.parse_args() context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, device_id=args.device_id) diff --git a/research/cv/textfusenet/README.md b/research/cv/textfusenet/README.md index b76f7935d3b4ad4885339f294ff7786de3740aad..a8443144835233ca6658d0f5ce8ac51bbbee5f29 100755 --- a/research/cv/textfusenet/README.md +++ b/research/cv/textfusenet/README.md @@ -111,11 +111,11 @@ Shapely==1.5.9 ```shell # inference - bash run_infer_310.sh [AIR_PATH] [DATA_PATH] [ANN_FILE_PATH] + bash run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [ANN_FILE_PATH] ``` Note: - 1. AIR_PATH is a model file, exported by export script file on the Ascend910 environment. + 1. MINDIR_PATH is a model file, exported by export script file on the Ascend910 environment. 2. ANN_FILE_PATH is a annotation file for inference. # [Script Description](#contents) diff --git a/research/cv/textfusenet/README_CN.md b/research/cv/textfusenet/README_CN.md index 63d33edff4d8ddf6c61877eff59a5b52ca1497a0..cf4c2ed50f6109fe51cf9371772b167d4089fe00 100755 --- a/research/cv/textfusenet/README_CN.md +++ b/research/cv/textfusenet/README_CN.md @@ -121,7 +121,7 @@ Shapely==1.5.9 ``` 注: - 1. 
AIR_PATH是在910上使用export脚本导出的模型。 + 1. MINDIR_PATH是在910上使用export脚本导出的模型。 2. ANN_FILE_PATH是推理使用的标注文件。 # 脚本说明 diff --git a/research/cv/vnet/export.py b/research/cv/vnet/export.py index caa2098c4e35e2772f9306e3ed5b5e7a15b71fa4..266ae374107e5e34e7cb69a80bdca8e92822261d 100644 --- a/research/cv/vnet/export.py +++ b/research/cv/vnet/export.py @@ -26,7 +26,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=1, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="vnet", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", help="device target") args = parser.parse_args() diff --git a/research/cv/wideresnet/export.py b/research/cv/wideresnet/export.py index dcbd6d63f8614346edfb3461152666c2427d1da1..64ee8239e4e9c796bc0611af2216ec564f0863f3 100644 --- a/research/cv/wideresnet/export.py +++ b/research/cv/wideresnet/export.py @@ -33,7 +33,7 @@ parser.add_argument('--data_url', default=None, help='Directory contains cifar10 parser.add_argument('--train_url', default=None, help='Directory contains checkpoint file') parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file name.") parser.add_argument("--batch_size", type=int, default=1, help="batch size") -parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help='file format') args = parser.parse_args() context.set_context(mode=context.GRAPH_MODE, 
device_target=args.device_target) diff --git a/research/gnn/sdne/export.py b/research/gnn/sdne/export.py index 6f755e6c573ec675bd23142abb683e6dc56f5e62..18cc5f8f9cd784da336c8d36e249c3115b8834fd 100644 --- a/research/gnn/sdne/export.py +++ b/research/gnn/sdne/export.py @@ -30,7 +30,7 @@ parser.add_argument("--device_id", type=int, default=0, help="Device id") parser.add_argument("--batch_size", type=int, default=256, help="batch size") parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.") parser.add_argument("--file_name", type=str, default="sdne", help="output file name.") -parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend", help="device target") parser.add_argument('--dataset', type=str, default='WIKI', diff --git a/research/hpc/molecular_dynamics/default_config.yaml b/research/hpc/molecular_dynamics/default_config.yaml index f8b12c2e16899c3a9392a8d98c14a9f10dd236aa..a1dcc32adb3b150ad2183fb74587136f9a129822 100644 --- a/research/hpc/molecular_dynamics/default_config.yaml +++ b/research/hpc/molecular_dynamics/default_config.yaml @@ -35,7 +35,7 @@ data_path: "The location of input data" output_pah: "The location of the output file" device_target: "device id of GPU or Ascend. 
(Default: None)" enable_profiling: "Whether enable profiling while training default: False" -file_name: "CNN&CTC output air name" +file_name: "CNN&CTC output mindir name" file_format: "choices [AIR, MINDIR]" ckpt_file: "CNN&CTC ckpt file" checkpoint_path: "Checkpoint file path" diff --git a/research/nlp/albert/task_classifier_config.yaml b/research/nlp/albert/task_classifier_config.yaml index d060e33ff66e98a448b31edd6cfb87972433acf0..56d7f0ffc3730e57cb50cb4f8c1be7ccc9a98995 100644 --- a/research/nlp/albert/task_classifier_config.yaml +++ b/research/nlp/albert/task_classifier_config.yaml @@ -105,7 +105,7 @@ schema_file_path: "Schema path, it is better to use absolute path" export_batch_size: "export batch size." export_ckpt_file: "Alert ckpt file." -export_file_name: "Albert output air name." +export_file_name: "Albert output mindir name." file_format: "file format" --- # chocies diff --git a/research/nlp/albert/task_squad_config.yaml b/research/nlp/albert/task_squad_config.yaml index cebdbb4debd8294b29a47da6991f1f71a559e739..aa1ca2f514d451dcbb3eeb10b55430b2709f7b3d 100644 --- a/research/nlp/albert/task_squad_config.yaml +++ b/research/nlp/albert/task_squad_config.yaml @@ -109,7 +109,7 @@ spm_model_file: "Spm path, it is better to use absolute path" export_batch_size: "export batch size." export_ckpt_file: "Bert ckpt file." -export_file_name: "bert output air name." +export_file_name: "bert output mindir name." 
file_format: "file format" --- # chocies diff --git a/research/nlp/ktnet/export.py b/research/nlp/ktnet/export.py index f1632f2280f6163f852e69309860a5ef311d4344..ba3569427089c131fb94a18f84efbd80e8e8a769 100644 --- a/research/nlp/ktnet/export.py +++ b/research/nlp/ktnet/export.py @@ -50,8 +50,8 @@ parser.add_argument("--train_wn_max_concept_length", type=int, default=8, help=" parser.add_argument("--train_nell_max_concept_length", type=int, default=8, help="nell_concept_length") parser.add_argument("--dataset", type=str, default="squard", help="target dataset") parser.add_argument("--ckpt_file", type=str, required=True, help="KTNET ckpt file for dataset.") -parser.add_argument("--file_name", type=str, default="KTNET", help="KTNET output air name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_name", type=str, default="KTNET", help="KTNET output mindir name.") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU", "CPU"], help="device target (default: Ascend)") args = parser.parse_args() diff --git a/research/nlp/senta/export.py b/research/nlp/senta/export.py index 8abed0973b5d4b922acafb3462390b1982c18ca0..4d78340ac09257acccc4780e19bdff5375e7ab62 100644 --- a/research/nlp/senta/export.py +++ b/research/nlp/senta/export.py @@ -33,7 +33,7 @@ parser.add_argument( "--file_name", type=str, default="Senta", - help="Senta output air name.") + help="Senta output mindir name.") parser.add_argument( "--file_format", type=str, diff --git a/research/nlp/seq2seq/README_CN.md b/research/nlp/seq2seq/README_CN.md index 45dc01a073e1e0430c41b073dc57a11e96d0857c..2f85be67c1f9b411aa18e694db3b7391446d4dc9 100644 --- a/research/nlp/seq2seq/README_CN.md +++ b/research/nlp/seq2seq/README_CN.md @@ -126,7 +126,7 @@ bash wmt14_en_fr.sh │ 
├──filter_dataset.py // dataset filter ├── create_dataset.py // Dataset preparation. ├── eval.py // Infer API entry. - ├── export.py // Export checkpoint file into air models. + ├── export.py // Export checkpoint file into air/mindir models. ├── mindspore_hub_conf.py // Hub config. ├── requirements.txt // Requirements of third party package. ├── train.py // Train API entry. diff --git a/research/nlp/seq2seq/export.py b/research/nlp/seq2seq/export.py index 07e22b3cf7ab0b64cb1f395940f1d027a0fc46ab..617caacf7830c191e40e79f8a6d9fec911614b52 100644 --- a/research/nlp/seq2seq/export.py +++ b/research/nlp/seq2seq/export.py @@ -29,7 +29,7 @@ from src.utils.load_weights import load_infer_weights parser = argparse.ArgumentParser(description="seq2seq export") parser.add_argument("--file_name", type=str, default="seq2seq", help="output file name.") -parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format") +parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format") parser.add_argument('--infer_config', type=str, required=True, help='seq2seq config file') parser.add_argument("--existed_ckpt", type=str, required=True, help="existed checkpoint address.") parser.add_argument('--vocab_file', type=str, required=True, help='vocabulary file') diff --git a/research/nlp/skipgram/export.py b/research/nlp/skipgram/export.py index 719bf0cdf575d4cfac58a8118b6deb40cd029ed6..cda64f3ed70f43af104d7e0c014b33da5e4ffc42 100644 --- a/research/nlp/skipgram/export.py +++ b/research/nlp/skipgram/export.py @@ -26,7 +26,7 @@ parser = argparse.ArgumentParser(description='SkipGram export') parser.add_argument("--device_id", type=int, default=0, help="device id") parser.add_argument("--checkpoint_path", type=str, required=True, help="checkpoint file path.") parser.add_argument("--file_name", type=str, default="skipgram", help="output file name.") -parser.add_argument('--file_format', 
type=str, choices=["AIR", "ONNX", "MINDIR"], default='AIR', help='file format') +parser.add_argument('--file_format', type=str, choices=["AIR", "ONNX", "MINDIR"], default='MINDIR', help='file format') parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU"], default="Ascend", help="device target") args = parser.parse_args()