diff --git a/README.md b/README.md
index e2d10efaed0fefc7cfaca91f862378c91c032621..88587dffd281927319eb733ab93bca2afe93146d 100644
--- a/README.md
+++ b/README.md
@@ -16,308 +16,420 @@ In order to facilitate developers to enjoy the benefits of MindSpore framework,
 
 |  Domain | Sub Domain    | Network  | Ascend  | GPU | CPU |
 |:------   |:------| :-----------  |:------:   |:------:  |:-----: |
-| Audio | Speech Synthesis | [LPCNet](https://gitee.com/mindspore/models/tree/master/official/audio/lpcnet) | ✔ |   |   |
-| Audio | Speech Synthesis | [MelGAN](https://gitee.com/mindspore/models/tree/master/official/audio/melgan) | ✔ |   |   |
-| Audio | Speech Synthesis | [Tacotron2](https://gitee.com/mindspore/models/tree/master/official/audio/tacotron2) | ✔ |   |   |
-| Computer Vision (CV) | Point Cloud Model | [OctSqueeze](https://gitee.com/mindspore/models/tree/master/official/cv/octsqueeze) | ✔ |   |   |
-| Computer Vision (CV) | Optical Flow Estimation | [PWCNet](https://gitee.com/mindspore/models/tree/master/official/cv/pwcnet) | ✔ |   |   |
-| Computer Vision (CV) | Object Tracking | [Deepsort](https://gitee.com/mindspore/models/tree/master/official/cv/Deepsort) | ✔ |   |   |
-| Computer Vision (CV) | Object Tracking | [ADNet](https://gitee.com/mindspore/models/tree/master/official/cv/ADNet) | ✔ |   |   |
-|Computer Vision (CV) | Image Classification  | [AlexNet](https://gitee.com/mindspore/models/tree/master/official/cv/alexnet)          |  ✔ |  ✔ |   |
-| Computer Vision (CV)  | Image Classification  | [CNN](https://gitee.com/mindspore/models/tree/master/official/cv/cnn_direction_model)  |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [DenseNet100](https://gitee.com/mindspore/models/tree/master/official/cv/densenet) |    |    | ✔ |
-| Computer Vision (CV)  | Image Classification  | [DenseNet121](https://gitee.com/mindspore/models/tree/master/official/cv/densenet) |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [DPN](https://gitee.com/mindspore/models/tree/master/official/cv/dpn) |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [EfficientNet-B0](https://gitee.com/mindspore/models/tree/master/official/cv/efficientnet) |    |  ✔ |   |
-| Computer Vision (CV)  | Image Classification  | [GoogLeNet](https://gitee.com/mindspore/models/tree/master/official/cv/googlenet)    |  ✔  | ✔ |   |
-| Computer Vision (CV)  | Image Classification  | [InceptionV3](https://gitee.com/mindspore/models/tree/master/official/cv/inceptionv3)   |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [InceptionV4](https://gitee.com/mindspore/models/tree/master/official/cv/inceptionv4)    |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [LeNet](https://gitee.com/mindspore/models/tree/master/official/cv/lenet)              |  ✔ |  ✔ | ✔ |
-| Computer Vision (CV)  | Image Classification  | [MobileNetV1](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv1)      |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [MobileNetV2](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv2)      |  ✔ |  ✔ | ✔ |
-| Computer Vision (CV)  | Image Classification  | [MobileNetV3](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv3)  |    |  ✔ |   |
-| Computer Vision (CV)  | Image Classification  | [NASNet](https://gitee.com/mindspore/models/tree/master/official/cv/nasnet) | ✔ | ✔ |   |
-| Computer Vision (CV)  | Image Classification  | [ResNet-18](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)          |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [ResNet-34](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)          |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [ResNet-50](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)          |  ✔ |  ✔ | ✔ |
-|Computer Vision (CV)  | Image Classification  | [ResNet-101](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)       |  ✔ | ✔ |   |
-|Computer Vision (CV)  | Image Classification  | [ResNet-152](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)       |  ✔ |   |   |
-|Computer Vision (CV)  | Image Classification  | [ResNeXt50](https://gitee.com/mindspore/models/tree/master/official/cv/resnext)     |  ✔ | ✔ |   |
-|Computer Vision (CV)  | Image Classification  | [ResNeXt101](https://gitee.com/mindspore/models/tree/master/official/cv/resnext)     |  ✔ |   |   |
-|Computer Vision (CV)  | Image Classification  | [SE-ResNet50](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)      |  ✔ |   |   |
-| Computer Vision (CV)  | Image Classification  | [SE-ResNext50](https://gitee.com/mindspore/models/tree/master/official/cv/se_resnext50) |  ✔ |  |  |
-| Computer Vision (CV)  | Image Classification  | [ShuffleNetV1](https://gitee.com/mindspore/models/tree/master/official/cv/shufflenetv1)  |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [ShuffleNetV2](https://gitee.com/mindspore/models/tree/master/official/cv/shufflenetv2) |    |  ✔ |   |
-| Computer Vision (CV)  | Image Classification  | [SqueezeNet](https://gitee.com/mindspore/models/tree/master/official/cv/squeezenet) |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [Tiny-DarkNet](https://gitee.com/mindspore/models/tree/master/official/cv/tinydarknet)       |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification  | [VGG16](https://gitee.com/mindspore/models/tree/master/official/cv/vgg16)                |  ✔ |  ✔ |   |
-| Computer Vision (CV)  | Image Classification  | [Xception](https://gitee.com/mindspore/models/tree/master/official/cv/xception)             |  ✔ |    |   |
-| Computer Vision (CV)  | Image Classification | [CspDarkNet53](https://gitee.com/mindspore/models/tree/master/official/cv/cspdarknet53) | ✔ |   |   |
-| Computer Vision (CV)  | Image Classification | [ErfNet](https://gitee.com/mindspore/models/tree/master/official/cv/erfnet) | ✔ |   |   |
-| Computer Vision (CV)  | Image Classification | [SimCLR](https://gitee.com/mindspore/models/tree/master/official/cv/simclr) | ✔ |   |   |
-| Computer Vision (CV)  | Image Classification | [Vit](https://gitee.com/mindspore/models/tree/master/official/cv/vit) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection  | [CenterFace](https://gitee.com/mindspore/models/tree/master/official/cv/centerface)     |  ✔ |    |   |
-| Computer Vision (CV) | Object Detection  | [CTPN](https://gitee.com/mindspore/models/tree/master/official/cv/ctpn)     |  ✔ |    |   |
-| Computer Vision (CV)  | Object Detection  | [Faster R-CNN](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn)   |  ✔ |  ✔ |   |
-| Computer Vision (CV)  | Object Detection  | [Mask R-CNN](https://gitee.com/mindspore/models/tree/master/official/cv/maskrcnn)         |  ✔ |    |   |
-| Computer Vision (CV)  | Object Detection  | [Mask R-CNN (MobileNetV1)](https://gitee.com/mindspore/models/tree/master/official/cv/maskrcnn_mobilenetv1)    |  ✔ |    |   |
-| Computer Vision (CV)  | Object Detection  | [SSD](https://gitee.com/mindspore/models/tree/master/official/cv/ssd)                   |  ✔ | ✔ | ✔ |
-| Computer Vision (CV)  | Object Detection  | [SSD-MobileNetV1-FPN](https://gitee.com/mindspore/models/tree/master/official/cv/ssd)         |  ✔ |    |   |
-| Computer Vision (CV)  | Object Detection  | [SSD-Resnet50-FPN](https://gitee.com/mindspore/models/tree/master/official/cv/ssd)                   |  ✔ |   |   |
-| Computer Vision (CV)  | Object Detection  | [SSD-VGG16](https://gitee.com/mindspore/models/tree/master/official/cv/ssd)                   |  ✔ |   |   |
-| Computer Vision (CV) | Object Detection  | [WarpCTC](https://gitee.com/mindspore/models/tree/master/official/cv/warpctc)                    |  ✔ |  ✔ |   |
-| Computer Vision (CV)  | Object Detection  | [YOLOv3-ResNet18](https://gitee.com/mindspore/models/tree/master/official/cv/yolov3_resnet18)    | ✔ |    |   |
-| Computer Vision (CV)  | Object Detection  | [YOLOv3-DarkNet53](https://gitee.com/mindspore/models/tree/master/official/cv/yolov3_darknet53)         |  ✔ |  ✔ |   |
-| Computer Vision (CV)  | Object Detection  | [YOLOv4](https://gitee.com/mindspore/models/tree/master/official/cv/yolov4)         |  ✔ |    |   |
-| Computer Vision (CV) | Object Detection  |[YOLOv5](https://gitee.com/mindspore/models/tree/master/official/cv/yolov5) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection  |[RetinaNet](https://gitee.com/mindspore/models/tree/master/official/cv/retinanet) | ✔ |   |   |
-| Computer Vision (CV) | Text Detection  | [DeepText](https://gitee.com/mindspore/models/tree/master/official/cv/deeptext)   |  ✔ |    |   |
-| Computer Vision (CV) | Text Detection  | [PSENet](https://gitee.com/mindspore/models/tree/master/official/cv/psenet)   |  ✔ |    |   |
-| Computer Vision (CV) | Text Recognition  | [CNN+CTC](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc)                |  ✔ |    |   |
-| Computer Vision (CV) | Semantic Segmentation  | [DeepLabV3](https://gitee.com/mindspore/models/tree/master/official/cv/deeplabv3)     |  ✔ |    | ✔ |
-| Computer Vision (CV) | Semantic Segmentation  | [DeepLabV3+](https://gitee.com/mindspore/models/tree/master/research/cv/deeplabv3plus)     |  ✔ |    |   |
-| Computer Vision (CV) | Semantic Segmentation  | [U-Net2D (Medical)](https://gitee.com/mindspore/models/tree/master/official/cv/unet)                |  ✔ |    |   |
-| Computer Vision (CV) | Semantic Segmentation  | [U-Net3D (Medical)](https://gitee.com/mindspore/models/tree/master/official/cv/unet3d)                |  ✔ |    |   |
-| Computer Vision (CV) | Semantic Segmentation  | [U-Net++](https://gitee.com/mindspore/models/tree/master/official/cv/unet)                |  ✔ |    |   |
-| Computer Vision (CV) | Semantic Segmentation  | [Fast-SCNN](https://gitee.com/mindspore/models/tree/master/official/cv/fastscnn)                |  ✔ |    |   |
-| Computer Vision (CV) | Semantic Segmentation  | [FCN8s](https://gitee.com/mindspore/models/tree/master/official/cv/FCN8s) | ✔ |   |   |
-| Computer Vision (CV) | 6DoF Pose Estimation | [PVNet](https://gitee.com/mindspore/models/tree/master/official/cv/pvnet) | ✔ |   |   |
-| Computer Vision (CV) | Keypoint Detection  | [OpenPose](https://gitee.com/mindspore/models/tree/master/official/cv/openpose)                |  ✔ |    |   |
-| Computer Vision (CV) | Keypoint Detection  | [SimplePoseNet](https://gitee.com/mindspore/models/tree/master/official/cv/simple_pose)                |  ✔ |    |   |
-| Computer Vision (CV) | Scene Text Detection  | [East](https://gitee.com/mindspore/models/tree/master/official/cv/east) | ✔ |   |   |
-| Computer Vision (CV) | Scene Text Detection  | [PSENet](https://gitee.com/mindspore/models/tree/master/official/cv/psenet) | ✔ |   |   |
-| Computer Vision (CV) | Scene Text Recognition | [CRNN](https://gitee.com/mindspore/models/tree/master/official/cv/crnn) |  ✔ |    |   |
-| Computer Vision (CV) | Scene Text Recognition |[CNN+CTC](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc) |  ✔ |   |  |
-| Computer Vision (CV) | Scene Text Recognition |[CRNN-Seq2Seq-OCR](https://gitee.com/mindspore/models/tree/master/official/cv/crnn_seq2seq_ocr) | ✔ |   |   |
-| Computer Vision (CV) | Scene Text Recognition |[WarpCTC](https://gitee.com/mindspore/models/tree/master/official/cv/warpctc) | ✔ |   |   |
-| Computer Vision (CV) | Defect Detection |[ssim-ae](https://gitee.com/mindspore/models/tree/master/official/cv/ssim-ae) | ✔ |   |   |
-| Computer Vision (CV) | Defect Detection |[PatchCore](https://gitee.com/mindspore/models/tree/master/official/cv/patchcore) | ✔ |   |   |
-| Computer Vision (CV) | Face Detection | [RetinaFace-ResNet50](https://gitee.com/mindspore/models/tree/master/official/cv/retinaface_resnet50)     | ✔ | ✔ |   |
-| Computer Vision (CV) | Face Detection | [CenterFace](https://gitee.com/mindspore/models/tree/master/official/cv/centerface) | ✔ |   |   |
-| Computer Vision (CV) | Face Detection | [SphereFace](https://gitee.com/mindspore/models/tree/master/official/cv/sphereface) | ✔ |   |   |
-| Computer Vision (CV) | Crowd Counting | [MCNN](https://gitee.com/mindspore/models/tree/master/official/cv/MCNN) | ✔ |   |   |
-| Computer Vision (CV) | Depth Estimation | [DepthNet](https://gitee.com/mindspore/models/tree/master/official/cv/depthnet) | ✔ |   |   |
-| Computer Vision (CV) | Camera Relocalization | [PoseNet](https://gitee.com/mindspore/models/tree/master/official/cv/posenet) | ✔ |   |   |
-| Computer Vision (CV) | Image Matting | [Semantic Human Matting](https://gitee.com/mindspore/models/tree/master/official/cv/semantic_human_matting) | ✔ |   |   |
-| Computer Vision (CV) | Video Classification | [C3D](https://gitee.com/mindspore/models/tree/master/official/cv/c3d) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [SRCNN](https://gitee.com/mindspore/models/tree/master/official/cv/srcnn) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[RDN](https://gitee.com/mindspore/models/tree/master/official/cv/RDN) | ✔ | ✔ |   |
-| Computer Vision (CV) | Image Denoising | [BRDNet](https://gitee.com/mindspore/models/tree/master/official/cv/brdnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Denoising | [DnCNN](https://gitee.com/mindspore/models/tree/master/official/cv/dncnn) | ✔ |   |   |
-| Computer Vision (CV) | Image Denoising | [Learning-to-See-in-the-Dark](https://gitee.com/mindspore/models/tree/master/official/cv/LearningToSeeInTheDark) | ✔ |   |   |
-| Computer Vision (CV) | Image Quality Assessment | [NIMA](https://gitee.com/mindspore/models/tree/master/official/cv/nima) | ✔ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [BERT](https://gitee.com/mindspore/models/tree/master/official/nlp/bert)        |  ✔ |  ✔ |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [FastText](https://gitee.com/mindspore/models/tree/master/official/nlp/fasttext)        |  ✔ |    |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [GNMT v2](https://gitee.com/mindspore/models/tree/master/official/nlp/gnmt_v2)        |  ✔ |    |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [GRU](https://gitee.com/mindspore/models/tree/master/official/nlp/gru)                |  ✔ |    |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [MASS](https://gitee.com/mindspore/models/tree/master/official/nlp/mass)   |  ✔ |  ✔ |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [SentimentNet](https://gitee.com/mindspore/models/tree/master/official/nlp/lstm)      |  ✔ |  ✔ | ✔ |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [Transformer](https://gitee.com/mindspore/models/tree/master/official/nlp/transformer)   |  ✔ |  ✔ |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [TinyBERT](https://gitee.com/mindspore/models/tree/master/official/nlp/tinybert)       |  ✔ |  ✔ |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [TextCNN](https://gitee.com/mindspore/models/tree/master/official/nlp/textcnn)                |  ✔ |    |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [CPM](https://gitee.com/mindspore/models/tree/master/official/nlp/cpm) | ✔ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [ERNIE](https://gitee.com/mindspore/models/tree/master/official/nlp/ernie) | ✔ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [GPT-3](https://gitee.com/mindspore/models/tree/master/official/nlp/gpt) | ✔ |   |   |
-| Natural Language Processing (NLP) | Emotion Classification | [EmoTect](https://gitee.com/mindspore/models/tree/master/official/nlp/emotect)                | ✔ |    |   |
-| Natural Language Processing (NLP) | Emotion Classification | [LSTM](https://gitee.com/mindspore/models/tree/master/official/nlp/lstm)                | ✔ |    |   |
-| Natural Language Processing (NLP) | Dialogue Generation | [DGU](https://gitee.com/mindspore/models/tree/master/official/nlp/dgu)                |  ✔ |    |   |
-| Natural Language Processing (NLP) | Dialogue Generation | [DuConv](https://gitee.com/mindspore/models/tree/master/official/nlp/duconv)                |  ✔ |    |   |
-| Recommender | Recommender System, CTR prediction  | [DeepFM](https://gitee.com/mindspore/models/tree/master/official/recommend/deepfm)                               |  ✔ |  ✔ | ✔ |
-| Recommender | Recommender System, Search, Ranking  | [Wide&Deep](https://gitee.com/mindspore/models/tree/master/official/recommend/wide_and_deep)             |  ✔ |  ✔ |   |
-| Recommender | Recommender System  | [NAML](https://gitee.com/mindspore/models/tree/master/official/recommend/naml)             |  ✔ |    |   |
-| Recommender | Recommender System  | [NCF](https://gitee.com/mindspore/models/tree/master/official/recommend/ncf)             |  ✔ |   |   |
-| Graph Neural Networks (GNN) | Text Classification  | [GCN](https://gitee.com/mindspore/models/tree/master/official/gnn/gcn)  |  ✔ |    |   |
-| Graph Neural Networks (GNN) | Text Classification  | [GAT](https://gitee.com/mindspore/models/tree/master/official/gnn/gat)  |  ✔ |    |   |
-| Graph Neural Networks (GNN) | Recommender System | [BGCF](https://gitee.com/mindspore/models/tree/master/official/gnn/bgcf) |  ✔ |    |   |
+| Audio | Speaker Recognition | [ecapa_tdnn](https://gitee.com/mindspore/models/tree/master/official/audio/ecapa_tdnn) |✔|   |   |
+| Audio | Speech Synthesis | [lpcnet](https://gitee.com/mindspore/models/tree/master/official/audio/lpcnet) |✔| ✔ |   |
+| Audio | Speech Synthesis | [melgan](https://gitee.com/mindspore/models/tree/master/official/audio/melgan) |✔| ✔ |   |
+| Audio | Speech Synthesis | [tacotron2](https://gitee.com/mindspore/models/tree/master/research/audio/tacotron2) |✔|   |   |
+| Graph Neural Network | Recommender System | [bgcf](https://gitee.com/mindspore/models/tree/master/official/gnn/bgcf) |✔| ✔ |   |
+| Graph Neural Network | Text Classification | [gat](https://gitee.com/mindspore/models/tree/master/official/gnn/gat) |✔| ✔ |   |
+| Graph Neural Network | Text Classification | [gcn](https://gitee.com/mindspore/models/tree/master/official/gnn/gcn) |✔| ✔ |   |
+| Recommendation | Recommender System | [naml](https://gitee.com/mindspore/models/tree/master/official/recommend/naml) |✔| ✔ |   |
+| Recommendation | Recommender System | [ncf](https://gitee.com/mindspore/models/tree/master/official/recommend/ncf) |✔| ✔ |   |
+| Recommendation | Recommender System | [tbnet](https://gitee.com/mindspore/models/tree/master/official/recommend/tbnet) |✔| ✔ |   |
+| Image | Image Classification | [alexnet](https://gitee.com/mindspore/models/tree/master/official/cv/alexnet) |✔| ✔ |   |
+| Image | Image Denoise | [brdnet](https://gitee.com/mindspore/models/tree/master/official/cv/brdnet) |✔|   |   |
+| Image | Object Detection | [centerface](https://gitee.com/mindspore/models/tree/master/official/cv/centerface) |✔| ✔ | ✔ |
+| Image | Image Classification | [cnn_direction_model](https://gitee.com/mindspore/models/tree/master/official/cv/cnn_direction_model) |✔| ✔ |   |
+| Image | Scene Text Recognition | [cnnctc](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc) |✔| ✔ | ✔ |
+| Image | Scene Text Recognition | [crnn](https://gitee.com/mindspore/models/tree/master/official/cv/crnn) |✔| ✔ | ✔ |
+| Image | Scene Text Recognition | [crnn_seq2seq_ocr](https://gitee.com/mindspore/models/tree/master/official/cv/crnn_seq2seq_ocr) |✔|   |   |
+| Image | Image Classification | [cspdarknet53](https://gitee.com/mindspore/models/tree/master/official/cv/cspdarknet53) |✔|   |   |
+| Image | Object Detection | [ctpn](https://gitee.com/mindspore/models/tree/master/official/cv/ctpn) |✔| ✔ |   |
+| Image | Object Detection | [darknet53](https://gitee.com/mindspore/models/tree/master/official/cv/darknet53) | | ✔ |   |
+| Image | Semantic Segmentation | [deeplabv3](https://gitee.com/mindspore/models/tree/master/official/cv/deeplabv3) |✔| ✔ | ✔ |
+| Image | Text Detection | [deeptext](https://gitee.com/mindspore/models/tree/master/official/cv/deeptext) |✔| ✔ |   |
+| Image | Image Classification | [densenet100](https://gitee.com/mindspore/models/tree/master/official/cv/densenet) |✔| ✔ |   |
+| Image | Image Classification | [densenet121](https://gitee.com/mindspore/models/tree/master/official/cv/densenet) |✔| ✔ |   |
+| Image | Depth Estimation | [depthnet](https://gitee.com/mindspore/models/tree/master/official/cv/depthnet) |✔|   |   |
+| Image | Image Denoise | [dncnn](https://gitee.com/mindspore/models/tree/master/official/cv/dncnn) | | ✔ |   |
+| Image | Image Classification | [dpn](https://gitee.com/mindspore/models/tree/master/official/cv/dpn) |✔| ✔ |   |
+| Image | Scene Text Detection | [east](https://gitee.com/mindspore/models/tree/master/official/cv/east) |✔| ✔ |   |
+| Image | Image Classification | [efficientnet](https://gitee.com/mindspore/models/tree/master/official/cv/efficientnet) | | ✔ | ✔ |
+| Image | Image Classification | [erfnet](https://gitee.com/mindspore/models/tree/master/official/cv/erfnet) |✔| ✔ |   |
+| Image | Scene Text Recognition | [essay-recogination](https://gitee.com/mindspore/models/tree/master/official/cv/essay-recogination) | | ✔ |   |
+| Image | Object Detection | [FasterRCNN_Inception_Resnetv2](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |✔| ✔ |   |
+| Image | Object Detection | [FasterRCNN_ResNetV1.5_50](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |✔| ✔ |   |
+| Image | Object Detection | [FasterRCNN_ResNetV1_101](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |✔| ✔ |   |
+| Image | Object Detection | [FasterRCNN_ResNetV1_152](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |✔| ✔ |   |
+| Image | Object Detection | [FasterRCNN_ResNetV1_50](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |✔| ✔ |   |
+| Image | Semantic Segmentation | [fastscnn](https://gitee.com/mindspore/models/tree/master/official/cv/fastscnn) |✔|   |   |
+| Image | Semantic Segmentation | [FCN8s](https://gitee.com/mindspore/models/tree/master/official/cv/FCN8s) |✔| ✔ |   |
+| Image | Image Classification | [googlenet](https://gitee.com/mindspore/models/tree/master/official/cv/googlenet) |✔| ✔ |   |
+| Image | Image Classification | [inceptionv3](https://gitee.com/mindspore/models/tree/master/official/cv/inceptionv3) |✔| ✔ | ✔ |
+| Image | Image Classification | [inceptionv4](https://gitee.com/mindspore/models/tree/master/official/cv/inceptionv4) |✔| ✔ | ✔ |
+| Image | Image Denoise | [LearningToSeeInTheDark](https://gitee.com/mindspore/models/tree/master/research/cv/LearningToSeeInTheDark) |✔|   |   |
+| Image | Image Classification | [lenet](https://gitee.com/mindspore/models/tree/master/official/cv/lenet) |✔| ✔ | ✔ |
+| Image | Object Detection | [maskrcnn_resnet50](https://gitee.com/mindspore/models/tree/master/official/cv/maskrcnn) |✔| ✔ |   |
+| Image | Object Detection | [maskrcnn_mobilenetv1](https://gitee.com/mindspore/models/tree/master/official/cv/maskrcnn_mobilenetv1) |✔| ✔ | ✔ |
+| Image | Crowd Counting | [MCNN](https://gitee.com/mindspore/models/tree/master/official/cv/MCNN) |✔| ✔ |   |
+| Image | Image Classification | [mobilenetv1](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv1) |✔| ✔ |   |
+| Image | Image Classification | [mobilenetv2](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv2) |✔| ✔ | ✔ |
+| Image | Image Classification | [mobilenetv3](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv3) |✔| ✔ | ✔ |
+| Image | Image Classification | [nasnet](https://gitee.com/mindspore/models/tree/master/official/cv/nasnet) |✔| ✔ |   |
+| Image | Image Quality Assessment | [nima](https://gitee.com/mindspore/models/tree/master/official/cv/nima) |✔| ✔ |   |
+| Image | Point Cloud Model | [octsqueeze](https://gitee.com/mindspore/models/tree/master/official/cv/octsqueeze) |✔| ✔ |   |
+| Image | Keypoint Detection | [openpose](https://gitee.com/mindspore/models/tree/master/official/cv/openpose) |✔|   |   |
+| Image | Defect Detection | [patchcore](https://gitee.com/mindspore/models/tree/master/official/cv/patchcore) |✔| ✔ |   |
+| Image | Camera Relocalization | [posenet](https://gitee.com/mindspore/models/tree/master/official/cv/posenet) |✔| ✔ |   |
+| Image | Video Predictive Learning | [predrnn++](https://gitee.com/mindspore/models/tree/master/official/cv/predrnn++) |✔|   |   |
+| Image | Scene Text Detection | [psenet](https://gitee.com/mindspore/models/tree/master/official/cv/psenet) |✔| ✔ |   |
+| Image | Pose Estimation | [pvnet](https://gitee.com/mindspore/models/tree/master/official/cv/pvnet) |✔|   |   |
+| Image | Optical Flow Estimation | [pwcnet](https://gitee.com/mindspore/models/tree/master/official/cv/pwcnet) |✔| ✔ |   |
+| Image | Image Super Resolution | [RDN](https://gitee.com/mindspore/models/tree/master/official/cv/RDN) |✔| ✔ |   |
+| Image | Image Classification | [resnet101](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |✔| ✔ | ✔ |
+| Image | Image Classification | [resnet152](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |✔| ✔ | ✔ |
+| Image | Image Classification | [resnet18](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |✔| ✔ | ✔ |
+| Image | Image Classification | [resnet34](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |✔| ✔ | ✔ |
+| Image | Image Classification | [resnet50](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |✔| ✔ | ✔ |
+| Image | Image Classification | [resnet50_thor](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |✔| ✔ |   |
+| Image | Image Classification | [resnext101](https://gitee.com/mindspore/models/tree/master/official/cv/resnext) |✔| ✔ |   |
+| Image | Image Classification | [resnext50](https://gitee.com/mindspore/models/tree/master/official/cv/resnext) |✔| ✔ |   |
+| Image | Object Detection | [retinaface_resnet50](https://gitee.com/mindspore/models/tree/master/official/cv/retinaface_resnet50) | | ✔ |   |
+| Image | Object Detection | [retinanet](https://gitee.com/mindspore/models/tree/master/official/cv/retinanet) |✔| ✔ |   |
+| Image | Image Classification | [se_resnext50](https://gitee.com/mindspore/models/tree/master/official/cv/se_resnext50) |✔|   |   |
+| Image | Image Matting | [semantic_human_matting](https://gitee.com/mindspore/models/tree/master/official/cv/semantic_human_matting) |✔|   |   |
+| Image | Image Classification | [se-resnet50](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |✔| ✔ | ✔ |
+| Image | Image Classification | [shufflenetv1](https://gitee.com/mindspore/models/tree/master/official/cv/shufflenetv1) |✔| ✔ | ✔ |
+| Image | Image Classification | [shufflenetv2](https://gitee.com/mindspore/models/tree/master/official/cv/shufflenetv2) |✔| ✔ | ✔ |
+| Image | Image Classification | [simclr](https://gitee.com/mindspore/models/tree/master/official/cv/simclr) |✔| ✔ |   |
+| Image | Keypoint Detection | [simple_pose](https://gitee.com/mindspore/models/tree/master/official/cv/simple_pose) |✔| ✔ |   |
+| Image | Object Detection | [sphereface](https://gitee.com/mindspore/models/tree/master/official/cv/sphereface) |✔| ✔ |   |
+| Image | Image Classification | [squeezenet](https://gitee.com/mindspore/models/tree/master/official/cv/squeezenet) |✔| ✔ |   |
+| Image | Image Classification | [SqueezeNet_Residual](https://gitee.com/mindspore/models/tree/master/official/cv/squeezenet) |✔| ✔ |   |
+| Image | Image Super Resolution | [srcnn](https://gitee.com/mindspore/models/tree/master/official/cv/srcnn) |✔| ✔ |   |
+| Image | Object Detection | [ssd_mobilenet-v1-fpn](https://gitee.com/mindspore/models/tree/master/official/cv/ssd) |✔| ✔ | ✔ |
+| Image | Object Detection | [ssd-mobilenet-v2](https://gitee.com/mindspore/models/tree/master/official/cv/ssd) |✔| ✔ | ✔ |
+| Image | Object Detection | [ssd-resnet50-fpn](https://gitee.com/mindspore/models/tree/master/official/cv/ssd) |✔| ✔ | ✔ |
+| Image | Object Detection | [ssd-vgg16](https://gitee.com/mindspore/models/tree/master/official/cv/ssd) |✔| ✔ | ✔ |
+| Image | Defect Detection | [ssim-ae](https://gitee.com/mindspore/models/tree/master/official/cv/ssim-ae) |✔|   |   |
+| Image | Image Classification | [tinydarknet](https://gitee.com/mindspore/models/tree/master/official/cv/tinydarknet) |✔| ✔ | ✔ |
+| Image | Semantic Segmentation | [UNet_nested](https://gitee.com/mindspore/models/tree/master/official/cv/unet) |✔| ✔ |   |
+| Image | Semantic Segmentation | [unet2d](https://gitee.com/mindspore/models/tree/master/official/cv/unet) |✔| ✔ |   |
+| Image | Semantic Segmentation | [unet3d](https://gitee.com/mindspore/models/tree/master/official/cv/unet3d) |✔| ✔ |   |
+| Image | Image Classification | [vgg16](https://gitee.com/mindspore/models/tree/master/official/cv/vgg16) |✔| ✔ | ✔ |
+| Image | Image Classification | [vit](https://gitee.com/mindspore/models/tree/master/official/cv/vit) |✔| ✔ |   |
+| Image | Scene Text Recognition | [warpctc](https://gitee.com/mindspore/models/tree/master/official/cv/warpctc) |✔| ✔ |   |
+| Image | Image Classification | [xception](https://gitee.com/mindspore/models/tree/master/official/cv/xception) |✔| ✔ |   |
+| Image | Object Detection | [yolov3_darknet53](https://gitee.com/mindspore/models/tree/master/official/cv/yolov3_darknet53) |✔| ✔ |   |
+| Image | Object Detection | [yolov3_resnet18](https://gitee.com/mindspore/models/tree/master/official/cv/yolov3_resnet18) |✔|   |   |
+| Image | Object Detection | [yolov4](https://gitee.com/mindspore/models/tree/master/official/cv/yolov4) |✔|   |   |
+| Image | Object Detection | [yolov5s](https://gitee.com/mindspore/models/tree/master/official/cv/yolov5) |✔| ✔ |   |
+| Recommendation | Click-Through Rate Prediction | [deep_and_cross](https://gitee.com/mindspore/models/tree/master/official/recommend/deep_and_cross) | | ✔ |   |
+| Recommendation | Click-Through Rate Prediction | [deepfm](https://gitee.com/mindspore/models/tree/master/official/recommend/deepfm) |✔| ✔ |   |
+| Recommendation | Click-Through Rate Prediction | [fibinet](https://gitee.com/mindspore/models/tree/master/official/recommend/fibinet) | | ✔ |   |
+| Recommendation | Click-Through Rate Prediction | [wide_and_deep](https://gitee.com/mindspore/models/tree/master/official/recommend/wide_and_deep) |✔| ✔ |   |
+| Recommendation | Click-Through Rate Prediction | [wide_and_deep_multitable](https://gitee.com/mindspore/models/tree/master/official/recommend/wide_and_deep_multitable) |✔| ✔ |   |
+| Text | Natural Language Understanding | [bert_base](https://gitee.com/mindspore/models/tree/master/official/nlp/bert) |✔| ✔ |   |
+| Text | Natural Language Understanding | [bert_bilstm_crf](https://gitee.com/mindspore/models/tree/master/official/nlp/bert) |✔| ✔ |   |
+| Text | Natural Language Understanding | [bert_finetuning](https://gitee.com/mindspore/models/tree/master/official/nlp/bert) |✔| ✔ |   |
+| Text | Natural Language Understanding | [bert_large](https://gitee.com/mindspore/models/tree/master/official/nlp/bert_thor) |✔|   |   |
+| Text | Natural Language Understanding | [bert_nezha](https://gitee.com/mindspore/models/tree/master/official/nlp/bert) |✔| ✔ |   |
+| Text | Natural Language Understanding | [cpm](https://gitee.com/mindspore/models/tree/master/official/nlp/cpm) |✔| ✔ |   |
+| Text | Dialogue | [dgu](https://gitee.com/mindspore/models/tree/master/official/nlp/dgu) |✔| ✔ |   |
+| Text | Dialogue | [duconv](https://gitee.com/mindspore/models/tree/master/official/nlp/duconv) |✔| ✔ |   |
+| Text | Emotion Classification | [emotect](https://gitee.com/mindspore/models/tree/master/official/nlp/emotect) |✔| ✔ |   |
+| Text | Natural Language Understanding | [ernie](https://gitee.com/mindspore/models/tree/master/official/nlp/ernie) |✔| ✔ |   |
+| Text | Natural Language Understanding | [fasttext](https://gitee.com/mindspore/models/tree/master/official/nlp/fasttext) |✔| ✔ |   |
+| Text | Natural Language Understanding | [gnmt_v2](https://gitee.com/mindspore/models/tree/master/official/nlp/gnmt_v2) |✔| ✔ |   |
+| Text | Natural Language Understanding | [gpt3](https://gitee.com/mindspore/models/tree/master/official/nlp/gpt) |✔|   |   |
+| Text | Natural Language Understanding | [gru](https://gitee.com/mindspore/models/tree/master/official/nlp/gru) |✔| ✔ |   |
+| Text | Emotion Classification | [lstm](https://gitee.com/mindspore/models/tree/master/official/nlp/lstm) |✔| ✔ |   |
+| Text | Natural Language Understanding | [mass](https://gitee.com/mindspore/models/tree/master/official/nlp/mass) |✔| ✔ |   |
+| Text | Pre Training | [pangu_alpha](https://gitee.com/mindspore/models/tree/master/official/nlp/pangu_alpha) |✔| ✔ |   |
+| Text | Natural Language Understanding | [textcnn](https://gitee.com/mindspore/models/tree/master/official/nlp/textcnn) |✔| ✔ |   |
+| Text | Natural Language Understanding | [tinybert](https://gitee.com/mindspore/models/tree/master/official/nlp/tinybert) |✔| ✔ |   |
+| Text | Natural Language Understanding | [transformer](https://gitee.com/mindspore/models/tree/master/official/nlp/transformer) |✔| ✔ |   |
+| Video | Object Tracking | [ADNet](https://gitee.com/mindspore/models/tree/master/official/cv/ADNet) |✔|   |   |
+| Video | Video Classification | [c3d](https://gitee.com/mindspore/models/tree/master/official/cv/c3d) |✔| ✔ |   |
+| Video | Object Tracking | [Deepsort](https://gitee.com/mindspore/models/tree/master/official/cv/Deepsort) |✔| ✔ |   |
 
 ### Research
 
-|  Domain | Sub Domain    | Network  | Ascend | GPU | CPU |
-|:------   |:------| :-----------   |:------:   |:------:  |:-----:  |
-| Computer Vision (CV) | Image Classification |[3D Densenet](https://gitee.com/mindspore/models/tree/master/research/cv/3D_DenseNet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Auto Augment](https://gitee.com/mindspore/models/tree/master/research/cv/autoaugment) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[AVA](https://gitee.com/mindspore/models/tree/master/research/cv/AVA_cifar) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[CCT](https://gitee.com/mindspore/models/tree/master/research/cv/cct) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[dnet-nas](https://gitee.com/mindspore/models/tree/master/research/cv/dnet_nas) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Efficientnet-b0](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b0) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Efficientnet-b1](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b1) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Efficientnet-b2](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b2) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Efficientnet-b3](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b3) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[FDA-BNN](https://gitee.com/mindspore/models/tree/master/research/cv/FDA-BNN) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[fishnet99](https://gitee.com/mindspore/models/tree/master/research/cv/fishnet99) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[GENET](https://gitee.com/mindspore/models/tree/master/research/cv/GENet_Res50) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[GhostNet](https://gitee.com/mindspore/models/tree/master/research/cv/ghostnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Glore_res200](https://gitee.com/mindspore/models/tree/master/research/cv/glore_res) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Glore_res50](https://gitee.com/mindspore/models/tree/master/research/cv/glore_res) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[HarDNet](https://gitee.com/mindspore/models/tree/master/research/cv/hardnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[HourNAS](https://gitee.com/mindspore/models/tree/master/research/cv/HourNAS) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[HRNetW48-cls](https://gitee.com/mindspore/models/tree/master/research/cv/HRNetW48_cls) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ibn-net](https://gitee.com/mindspore/models/tree/master/research/cv/ibnnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Inception ResNet V2](https://gitee.com/mindspore/models/tree/master/research/cv/inception_resnet_v2) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Resnetv2_50_frn](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2_50_frn) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[META-Baseline](https://gitee.com/mindspore/models/tree/master/research/cv/meta-baseline) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[MNasNet](https://gitee.com/mindspore/models/tree/master/research/cv/mnasnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[MobilenetV3-Large](https://gitee.com/mindspore/models/tree/master/research/cv/mobilenetv3_large) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[MobilenetV3-Small](https://gitee.com/mindspore/models/tree/master/research/cv/mobilenetV3_small_x1_0) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[NFNet-F0](https://gitee.com/mindspore/models/tree/master/research/cv/NFNet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ntsnet](https://gitee.com/mindspore/models/tree/master/research/cv/ntsnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Pdarts](https://gitee.com/mindspore/models/tree/master/research/cv/PDarts) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[PNASNet-5](https://gitee.com/mindspore/models/tree/master/research/cv/pnasnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ProtoNet](https://gitee.com/mindspore/models/tree/master/research/cv/ProtoNet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Proxylessnas](https://gitee.com/mindspore/models/tree/master/research/cv/proxylessnas) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[RelationNet](https://gitee.com/mindspore/models/tree/master/research/cv/relationnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[renas](https://gitee.com/mindspore/models/tree/master/research/cv/renas) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Res2net](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ResNeSt-50](https://gitee.com/mindspore/models/tree/master/research/cv/ResNeSt50) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ResNet50-BAM](https://gitee.com/mindspore/models/tree/master/research/cv/resnet50_bam) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ResNet50-quadruplet](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ResNet50-triplet](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ResNetV2](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[ResNeXt152_vd_64x4d](https://gitee.com/mindspore/models/tree/master/research/cv/resnext152_64x4d) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[SE-Net](https://gitee.com/mindspore/models/tree/master/research/cv/SE-Net) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[SERes2Net50](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[SinglePathNas](https://gitee.com/mindspore/models/tree/master/research/cv/single_path_nas) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[SKNet-50](https://gitee.com/mindspore/models/tree/master/research/cv/sknet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[SPPNet](https://gitee.com/mindspore/models/tree/master/research/cv/SPPNet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[SqueezeNet](https://gitee.com/mindspore/models/tree/master/research/cv/squeezenet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[SqueezeNet1_1](https://gitee.com/mindspore/models/tree/master/research/cv/squeezenet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Swin Transformer](https://gitee.com/mindspore/models/tree/master/research/cv/swin_transformer) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[TNT](https://gitee.com/mindspore/models/tree/master/research/cv/TNT) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[VGG19](https://gitee.com/mindspore/models/tree/master/research/cv/vgg19) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Vit-Base](https://gitee.com/mindspore/models/tree/master/research/cv/vit_base) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification |[Wide ResNet](https://gitee.com/mindspore/models/tree/master/research/cv/wideresnet) | ✔ |   |   |
-| Computer Vision (CV) | Image Classification  | [FaceAttributes](https://gitee.com/mindspore/models/tree/master/research/cv/FaceAttribute)     |  ✔ |    |   |
-| Computer Vision (CV) | Image Classification  | [FaceQualityAssessment](https://gitee.com/mindspore/models/tree/master/research/cv/FaceQualityAssessment)     |  ✔ |    |   |
-| Computer Vision (CV) | Re-Identification |[Aligned-ReID](https://gitee.com/mindspore/models/tree/master/research/cv/AlignedReID) | ✔ |   |   |
-| Computer Vision (CV) | Re-Identification |[DDAG](https://gitee.com/mindspore/models/tree/master/research/cv/DDAG) | ✔ |   |   |
-| Computer Vision (CV) | Re-Identification |[MVD](https://gitee.com/mindspore/models/tree/master/research/cv/MVD) | ✔ |   |   |
-| Computer Vision (CV) | Re-Identification |[OSNet](https://gitee.com/mindspore/models/tree/master/research/cv/osnet) | ✔ |   |   |
-| Computer Vision (CV) | Re-Identification |[PAMTRI](https://gitee.com/mindspore/models/tree/master/research/cv/PAMTRI) | ✔ |   |   |
-| Computer Vision (CV) | Re-Identification |[VehicleNet](https://gitee.com/mindspore/models/tree/master/research/cv/VehicleNet) | ✔ |   |   |
-| Computer Vision (CV) | Face Detection  | [FaceDetection](https://gitee.com/mindspore/models/tree/master/research/cv/FaceDetection)     |  ✔ |    |   |
-| Computer Vision (CV) | Face Detection  | [FaceBoxes](https://gitee.com/mindspore/models/tree/master/research/cv/faceboxes) | ✔ |   |   |
-| Computer Vision (CV) | Face Detection  | [RetinaFace](https://gitee.com/mindspore/models/tree/master/research/cv/retinaface) | ✔ |   |   |
-| Computer Vision (CV) | Face Recognition | [Arcface](https://gitee.com/mindspore/models/tree/master/research/cv/arcface) | ✔ |   |   |
-| Computer Vision (CV) | Face Recognition | [DeepID](https://gitee.com/mindspore/models/tree/master/research/cv/DeepID) | ✔ |   |   |
-| Computer Vision (CV) | Face Recognition |[FaceRecognition](https://gitee.com/mindspore/models/tree/master/research/cv/FaceRecognition)     |  ✔ |   |  |
-| Computer Vision (CV) | Face Recognition |[FaceRecognitionForTracking](https://gitee.com/mindspore/models/tree/master/research/cv/FaceRecognitionForTracking)     |  ✔ |   |  |
-| Computer Vision (CV) | Face Recognition | [LightCNN](https://gitee.com/mindspore/models/tree/master/research/cv/LightCNN) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection  | [Spnas](https://gitee.com/mindspore/models/tree/master/research/cv/Spnas)     |  ✔ |    |   |
-| Computer Vision (CV) | Object Detection  | [SSD-GhostNet](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_ghostnet)               |  ✔ |    |   |
-| Computer Vision (CV) | Object Detection | [EGNet](https://gitee.com/mindspore/models/tree/master/research/cv/EGnet)  | ✔ |   |  |
-| Computer Vision (CV) | Object Detection | [FasterRCNN-FPN-DCN](https://gitee.com/mindspore/models/tree/master/research/cv/faster_rcnn_dcn) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [NAS-FPN](https://gitee.com/mindspore/models/tree/master/research/cv/nas-fpn) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [RAS](https://gitee.com/mindspore/models/tree/master/research/cv/ras) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [r-cnn](https://gitee.com/mindspore/models/tree/master/research/cv/rcnn) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [RefineDet](https://gitee.com/mindspore/models/tree/master/research/cv/RefineDet) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [Res2net_fasterrcnn](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_faster_rcnn) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [Res2net_yolov3](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_yolov3) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [Retinanet_resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/retinanet_resnet101) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [SSD_MobilenetV2_fpnlite](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_mobilenetV2_FPNlite) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [ssd_mobilenet_v2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_mobilenetV2) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [ssd_resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet50) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [ssd_inceptionv2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_inception_v2) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [ssd_resnet34](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet34) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [U-2-Net](https://gitee.com/mindspore/models/tree/master/research/cv/u2net) | ✔ |   |   |
-| Computer Vision (CV) | Object Detection | [YOLOV3-tiny](https://gitee.com/mindspore/models/tree/master/research/cv/yolov3_tiny) | ✔ |   |   |
-| Computer Vision (CV) | Object Tracking |[SiamFC](https://gitee.com/mindspore/models/tree/master/research/cv/SiamFC) | ✔ |   |   |
-| Computer Vision (CV) | Object Tracking |[SiamRPN](https://gitee.com/mindspore/models/tree/master/research/cv/siamRPN) | ✔ |   |   |
-| Computer Vision (CV) | Object Tracking |[FairMOT](https://gitee.com/mindspore/models/tree/master/research/cv/fairmot) | ✔ |   |   |
-| Computer Vision (CV) | Key Point Detection | [CenterNet](https://gitee.com/mindspore/models/tree/master/research/cv/centernet)               |  ✔ |   | ✔ |
-| Computer Vision (CV) | Key Point Detection | [CenterNet-hourglass](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_det) | ✔ |   |   |
-| Computer Vision (CV) | Key Point Detection | [CenterNet-resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_resnet101) | ✔ |   |   |
-| Computer Vision (CV) | Key Point Detection | [CenterNet-resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_resnet50_v1) | ✔ |   |   |
-| Computer Vision (CV) | Point Cloud Model |[PointNet](https://gitee.com/mindspore/models/tree/master/research/cv/pointnet) | ✔ |   |   |
-| Computer Vision (CV) | Point Cloud Model |[PointNet++](https://gitee.com/mindspore/models/tree/master/research/cv/pointnet2) | ✔ |   |   |
-| Computer Vision (CV) | Depth Estimation | [midas](https://gitee.com/mindspore/models/tree/master/research/cv/midas) | ✔ |   |   |
-| Computer Vision (CV) | Sequential Image Classification | [TCN](https://gitee.com/mindspore/models/tree/master/research/cv/TCN) | ✔ |   |   |
-| Computer Vision (CV) | Temporal Localization | [TALL](https://gitee.com/mindspore/models/tree/master/research/cv/tall) | ✔ |   |   |
-| Computer Vision (CV) | Image Matting | [FCA-net](https://gitee.com/mindspore/models/tree/master/research/cv/FCANet) | ✔ |   |   |
-| Computer Vision (CV) | Video Classification | [Attention Cluster](https://gitee.com/mindspore/models/tree/master/research/cv/AttentionCluster) | ✔ |   |   |
-| Computer Vision (CV) | Video Classification | [ECO-lite](https://gitee.com/mindspore/models/tree/master/research/cv/ecolite) | ✔ |   |   |
-| Computer Vision (CV) | Video Classification | [R(2+1)D](https://gitee.com/mindspore/models/tree/master/research/cv/r2plus1d) | ✔ |   |   |
-| Computer Vision (CV) | Video Classification | [Resnet-3D](https://gitee.com/mindspore/models/tree/master/research/cv/resnet3d) | ✔ |   |   |
-| Computer Vision (CV) | Video Classification | [StNet](https://gitee.com/mindspore/models/tree/master/research/cv/stnet) | ✔ |   |   |
-| Computer Vision (CV) | Video Classification | [TSM](https://gitee.com/mindspore/models/tree/master/research/cv/tsm) | ✔ |   |   |
-| Computer Vision (CV) | Video Classification | [TSN](https://gitee.com/mindspore/models/tree/master/research/cv/tsn) | ✔ |   |   |
-| Computer Vision (CV) | Zero-Shot Learnning | [DEM](https://gitee.com/mindspore/models/tree/master/research/cv/dem) | ✔ |   |   |
-| Computer Vision (CV) | Style Transfer |[AECRNET](https://gitee.com/mindspore/models/tree/master/research/cv/aecrnet) | ✔ |   |   |
-| Computer Vision (CV) | Style Transfer |[APDrawingGAN](https://gitee.com/mindspore/models/tree/master/research/cv/APDrawingGAN) | ✔ |   |   |
-| Computer Vision (CV) | Style Transfer |[Arbitrary-image-stylization](https://gitee.com/mindspore/models/tree/master/research/cv/ArbitraryStyleTransfer) | ✔ |   |   |
-| Computer Vision (CV) | Style Transfer |[AttGAN](https://gitee.com/mindspore/models/tree/master/research/cv/AttGAN) | ✔ |   |   |
-| Computer Vision (CV) | Style Transfer |[CycleGAN](https://gitee.com/mindspore/models/tree/master/research/cv/CycleGAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[CSD](https://gitee.com/mindspore/models/tree/master/research/cv/csd) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[DBPN](https://gitee.com/mindspore/models/tree/master/research/cv/DBPN) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[EDSR](https://gitee.com/mindspore/models/tree/master/research/cv/EDSR) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[esr-ea](https://gitee.com/mindspore/models/tree/master/research/cv/esr_ea) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[ESRGAN](https://gitee.com/mindspore/models/tree/master/research/cv/ESRGAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[IRN](https://gitee.com/mindspore/models/tree/master/research/cv/IRN) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[RCAN](https://gitee.com/mindspore/models/tree/master/research/cv/RCAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[sr-ea](https://gitee.com/mindspore/models/tree/master/research/cv/sr_ea) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[SRGAN](https://gitee.com/mindspore/models/tree/master/research/cv/SRGAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Super-Resolution |[wdsr](https://gitee.com/mindspore/models/tree/master/research/cv/wdsr) | ✔ |   |   |
-| Computer Vision (CV) | Image Denoising |[Neighbor2Neighbor](https://gitee.com/mindspore/models/tree/master/research/cv/Neighbor2Neighbor) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[CGAN](https://gitee.com/mindspore/models/tree/master/research/cv/CGAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[DCGAN](https://gitee.com/mindspore/models/tree/master/research/cv/dcgan) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[GAN](https://gitee.com/mindspore/models/tree/master/research/cv/gan) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[IPT](https://gitee.com/mindspore/models/tree/master/research/cv/IPT) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[pgan](https://gitee.com/mindspore/models/tree/master/research/cv/PGAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[Photo2Cartoon](https://gitee.com/mindspore/models/tree/master/research/cv/U-GAT-IT) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[Pix2Pix](https://gitee.com/mindspore/models/tree/master/research/cv/Pix2Pix) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[SinGAN](https://gitee.com/mindspore/models/tree/master/research/cv/SinGAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[StarGAN](https://gitee.com/mindspore/models/tree/master/research/cv/StarGAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[STGAN](https://gitee.com/mindspore/models/tree/master/research/cv/STGAN) | ✔ |   |   |
-| Computer Vision (CV) | Image Generation |[WGAN](https://gitee.com/mindspore/models/tree/master/research/cv/wgan) | ✔ |   |   |
-| Computer Vision (CV) | Scene Text Detection | [AdvancedEast](https://gitee.com/mindspore/models/tree/master/research/cv/advanced_east) | ✔ |   |   |
-| Computer Vision (CV) | Scene Text Detection | [TextFuseNet](https://gitee.com/mindspore/models/tree/master/research/cv/textfusenet) | ✔ |   |   |
-| Computer Vision (CV) | Scene Text Recognition | [ManiDP](https://gitee.com/mindspore/models/tree/master/research/cv/ManiDP) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [3d-cnn](https://gitee.com/mindspore/models/tree/master/research/cv/3dcnn) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [adelaide_ea](https://gitee.com/mindspore/models/tree/master/research/cv/adelaide_ea) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [DDRNet](https://gitee.com/mindspore/models/tree/master/research/cv/DDRNet) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [E-Net](https://gitee.com/mindspore/models/tree/master/research/cv/E-NET) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [Hrnet](https://gitee.com/mindspore/models/tree/master/research/cv/HRNetW48_seg) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [ICNet](https://gitee.com/mindspore/models/tree/master/research/cv/ICNet) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [PSPnet](https://gitee.com/mindspore/models/tree/master/research/cv/PSPNet) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [RefineNet](https://gitee.com/mindspore/models/tree/master/research/cv/RefineNet) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [Res2net_deeplabv3](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_deeplabv3) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [UNet 3+](https://gitee.com/mindspore/models/tree/master/research/cv/UNet3+) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [V-net](https://gitee.com/mindspore/models/tree/master/research/cv/vnet) | ✔ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [Autodeeplab](https://gitee.com/mindspore/models/tree/master/research/cv/Auto-DeepLab) | ✔ |   |   |
-| Computer Vision (CV) | Pose Estimation | [AlphaPose](https://gitee.com/mindspore/models/tree/master/research/cv/AlphaPose) | ✔ |   |   |
-| Computer Vision (CV) | Pose Estimation | [Hourglass](https://gitee.com/mindspore/models/tree/master/research/cv/StackedHourglass) | ✔ |   |   |
-| Computer Vision (CV) | Pose Estimation | [Simple Baseline](https://gitee.com/mindspore/models/tree/master/research/cv/simple_baselines) | ✔ |   |   |
-| Computer Vision (CV) | Image Retrieval |[Delf](https://gitee.com/mindspore/models/tree/master/research/cv/delf) | ✔ |   |   |
-| Natural Language Processing (NLP) | Word Embedding | [Word2Vec Skip-Gram](https://gitee.com/mindspore/models/tree/master/research/nlp/skipgram) | ✔ |   |   |
-| Natural Language Processing (NLP) | Dialogue Generation | [DAM](https://gitee.com/mindspore/models/tree/master/research/nlp/dam) | ✔ |   |   |
-| Natural Language Processing (NLP) | Machine Translation | [Seq2Seq](https://gitee.com/mindspore/models/tree/master/research/nlp/seq2seq) | ✔ |   |   |
-| Natural Language Processing (NLP) | Emotion Classification | [Senta](https://gitee.com/mindspore/models/tree/master/research/nlp/senta) | ✔ |   |   |
-| Natural Language Processing (NLP) | Emotion Classification | [Attention LSTM](https://gitee.com/mindspore/models/tree/master/research/nlp/atae_lstm) | ✔ |   |   |
-| Natural Language Processing (NLP) | Named Entity Recognition | [LSTM_CRF](https://gitee.com/mindspore/models/tree/master/research/nlp/lstm_crf) | ✔ |   |   |
-| Natural Language Processing (NLP) | Text Classification | [HyperText](https://gitee.com/mindspore/models/tree/master/research/nlp/hypertext) | ✔ |   |   |
-| Natural Language Processing (NLP) | Text Classification | [TextRCNN](https://gitee.com/mindspore/models/tree/master/research/nlp/textrcnn) | ✔ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding | [ALBert](https://gitee.com/mindspore/models/tree/master/research/nlp/albert)          |  ✔ |   |  |
-| Natural Language Processing (NLP) | Natural Language Understanding | [KT-Net](https://gitee.com/mindspore/models/tree/master/research/nlp/ktnet) | ✔ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding | [LUKE](https://gitee.com/mindspore/models/tree/master/research/nlp/luke) | ✔ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding  | [TPRR](https://gitee.com/mindspore/models/tree/master/research/nlp/tprr)  |  ✔ |    |   |
-| Natural Language Processing (NLP) | Knowledge Graph Embedding | [RotatE](https://gitee.com/mindspore/models/tree/master/research/nlp/rotate) | ✔ |   |   |
-| Recommender | Recommender System, CTR prediction | [AutoDis](https://gitee.com/mindspore/models/tree/master/research/recommend/autodis)          |  ✔ |    |   |
-| Recommender | Recommender System, CTR prediction | [DeepFFM](https://gitee.com/mindspore/models/tree/master/research/recommend/Fat-DeepFFM) | ✔ |   |   |
-| Recommender | Recommender System, CTR prediction | [DIEN](https://gitee.com/mindspore/models/tree/master/research/recommend/DIEN) | ✔ |   |   |
-| Recommender | Recommender System, CTR prediction | [DLRM](https://gitee.com/mindspore/models/tree/master/research/recommend/dlrm) | ✔ |   |   |
-| Recommender | Recommender System, CTR prediction | [EDCN](https://gitee.com/mindspore/models/tree/master/research/recommend/EDCN) | ✔ |   |   |
-| Recommender | Recommender System, CTR prediction | [MMOE](https://gitee.com/mindspore/models/tree/master/research/recommend/mmoe) | ✔ |   |   |
-| Audio | Audio Tagging | [FCN-4](https://gitee.com/mindspore/models/tree/master/research/audio/fcn-4)   |  ✔ |    |   |
-| Audio | Keyword Spotting | [DS-CNN](https://gitee.com/mindspore/models/tree/master/research/audio/dscnn)   | ✔ |   |   |
-| Audio | Speech Recognition | [CTCModel](https://gitee.com/mindspore/models/tree/master/research/audio/ctcmodel) | ✔ |   |   |
-| Audio | Speech Synthesis | [Wavenet](https://gitee.com/mindspore/models/tree/master/research/audio/wavenet) | ✔ |   |   |
-| GNN | Traffic Prediction | [STGCN](https://gitee.com/mindspore/models/tree/master/research/cv/stgcn) | ✔ |   |   |
-| GNN | Traffic Prediction | [TGCN](https://gitee.com/mindspore/models/tree/master/research/cv/tgcn) | ✔ |   |   |
-| GNN | Social and Information Networks | [SGCN](https://gitee.com/mindspore/models/tree/master/research/gnn/sgcn) | ✔ |   |   |
-| GNN | Graph Classification | [DGCN](https://gitee.com/mindspore/models/tree/master/research/gnn/dgcn) | ✔ |   |   |
-| GNN | Graph Classification | [SDNE](https://gitee.com/mindspore/models/tree/master/research/gnn/sdne) | ✔ |   |   |
-| High Performance Computing | Molecular Dynamics | [DeepPotentialH2O](https://gitee.com/mindspore/models/tree/master/research/hpc/molecular_dynamics)   |  鉁� |    |   |
-| High Performance Computing | Ocean Model | [GOMO](https://gitee.com/mindspore/models/tree/master/research/hpc/ocean_model)   |    |  鉁� |   |
-| Reinforcement Learning | Recommender System, CTR prediction | [MMOE](https://gitee.com/mindspore/models/tree/master/research/recommend/mmoe) | 鉁� |   |   |
+|  Domain | Sub Domain    | Network  | Ascend  | GPU | CPU |
+|:------   |:------| :-----------  |:------:   |:------:  |:-----: |
+| 3D | 3D Reconstruction | [cmr](https://gitee.com/mindspore/models/tree/master/research/cv/cmr) |   | ✅ |   |
+| 3D | 3D Reconstruction | [DecoMR](https://gitee.com/mindspore/models/tree/master/research/cv/DecoMR) |   | ✅ |   |
+| 3D | 3D Reconstruction | [DeepLM](https://gitee.com/mindspore/models/tree/master/research/3d/DeepLM) |   | ✅ |   |
+| 3D | 3D Reconstruction | [eppmvsnet](https://gitee.com/mindspore/models/tree/master/research/cv/eppmvsnet) |   | ✅ |   |
+| 3D | 3D Object Detection | [pointpillars](https://gitee.com/mindspore/models/tree/master/research/cv/pointpillars) | ✅ | ✅ |   |
+| Audio | Speech Recognition | [ctcmodel](https://gitee.com/mindspore/models/tree/master/research/audio/ctcmodel) | ✅ |   |   |
+| Audio | Speech Recognition | [deepspeech2](https://gitee.com/mindspore/models/tree/master/research/audio/deepspeech2) |   | ✅ |   |
+| Audio | Keyword Spotting | [dscnn](https://gitee.com/mindspore/models/tree/master/research/audio/dscnn) | ✅ | ✅ |   |
+| Audio | Speech Synthesis | [FastSpeech](https://gitee.com/mindspore/models/tree/master/research/audio/FastSpeech) |   | ✅ |   |
+| Audio | Audio Tagging | [fcn-4](https://gitee.com/mindspore/models/tree/master/research/audio/fcn-4) | ✅ | ✅ |   |
+| Audio | Speech Recognition | [jasper](https://gitee.com/mindspore/models/tree/master/research/audio/jasper) | ✅ | ✅ |   |
+| Audio | Speech Synthesis | [wavenet](https://gitee.com/mindspore/models/tree/master/research/audio/wavenet) | ✅ | ✅ |   |
+| Graph Neural Network | Graph Classification | [dgcn](https://gitee.com/mindspore/models/tree/master/research/gnn/dgcn) | ✅ |   |   |
+| Graph Neural Network | Text Classification | [hypertext](https://gitee.com/mindspore/models/tree/master/research/nlp/hypertext) | ✅ | ✅ |   |
+| Graph Neural Network | Graph Classification | [sdne](https://gitee.com/mindspore/models/tree/master/research/gnn/sdne) | ✅ |   |   |
+| Graph Neural Network | Social and Information Networks | [sgcn](https://gitee.com/mindspore/models/tree/master/research/gnn/sgcn) | ✅ | ✅ |   |
+| Graph Neural Network | Text Classification | [textrcnn](https://gitee.com/mindspore/models/tree/master/research/nlp/textrcnn) | ✅ | ✅ |   |
+| High Performance Computing | High Performance Computing | [deepbsde](https://gitee.com/mindspore/models/tree/master/research/hpc/deepbsde) |   | ✅ |   |
+| High Performance Computing | High Performance Computing | [molecular_dynamics](https://gitee.com/mindspore/models/tree/master/research/hpc/molecular_dynamics) | ✅ |   |   |
+| High Performance Computing | High Performance Computing | [ocean_model](https://gitee.com/mindspore/models/tree/master/research/hpc/ocean_model) |   | ✅ |   |
+| High Performance Computing | High Performance Computing | [pafnucy](https://gitee.com/mindspore/models/tree/master/research/hpc/pafnucy) | ✅ | ✅ |   |
+| High Performance Computing | High Performance Computing | [pfnn](https://gitee.com/mindspore/models/tree/master/research/hpc/pfnn) |   | ✅ |   |
+| High Performance Computing | High Performance Computing | [pinns](https://gitee.com/mindspore/models/tree/master/research/hpc/pinns) |   | ✅ |   |
+| Image | Image Classification | [3D_DenseNet](https://gitee.com/mindspore/models/tree/master/research/cv/3D_DenseNet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [3dcnn](https://gitee.com/mindspore/models/tree/master/research/cv/3dcnn) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [adelaide_ea](https://gitee.com/mindspore/models/tree/master/research/cv/adelaide_ea) | ✅ |   |   |
+| Image | Scene Text Detection | [advanced_east](https://gitee.com/mindspore/models/tree/master/research/cv/advanced_east) | ✅ | ✅ |   |
+| Image | Style Transfer | [aecrnet](https://gitee.com/mindspore/models/tree/master/research/cv/aecrnet) | ✅ | ✅ |   |
+| Image | Re-Identification | [AlignedReID](https://gitee.com/mindspore/models/tree/master/research/cv/AlignedReID) |   | ✅ |   |
+| Image | Re-Identification | [AlignedReID++](https://gitee.com/mindspore/models/tree/master/research/cv/AlignedReID++) | ✅ | ✅ |   |
+| Image | Pose Estimation | [AlphaPose](https://gitee.com/mindspore/models/tree/master/research/cv/AlphaPose) | ✅ |   |   |
+| Image | Style Transfer | [APDrawingGAN](https://gitee.com/mindspore/models/tree/master/research/cv/APDrawingGAN) | ✅ | ✅ |   |
+| Image | Style Transfer | [ArbitraryStyleTransfer](https://gitee.com/mindspore/models/tree/master/research/cv/ArbitraryStyleTransfer) | ✅ | ✅ |   |
+| Image | Object Detection | [arcface](https://gitee.com/mindspore/models/tree/master/research/cv/arcface) | ✅ | ✅ |   |
+| Image | Keypoint Detection | [ArtTrack](https://gitee.com/mindspore/models/tree/master/research/cv/ArtTrack) |   | ✅ |   |
+| Image | Style Transfer | [AttGAN](https://gitee.com/mindspore/models/tree/master/research/cv/AttGAN) | ✅ | ✅ |   |
+| Image | Image Classification | [augvit](https://gitee.com/mindspore/models/tree/master/research/cv/augvit) |   | ✅ |   |
+| Image | Image Classification | [autoaugment](https://gitee.com/mindspore/models/tree/master/research/cv/autoaugment) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [Auto-DeepLab](https://gitee.com/mindspore/models/tree/master/research/cv/Auto-DeepLab) | ✅ |   |   |
+| Image | Neural Architecture Search | [AutoSlim](https://gitee.com/mindspore/models/tree/master/research/cv/AutoSlim) | ✅ | ✅ |   |
+| Image | Image Classification | [AVA_cifar](https://gitee.com/mindspore/models/tree/master/research/cv/AVA_cifar) | ✅ | ✅ |   |
+| Image | Image Classification | [AVA_hpa](https://gitee.com/mindspore/models/tree/master/research/cv/AVA_hpa) | ✅ | ✅ |   |
+| Image | Image Classification | [cait](https://gitee.com/mindspore/models/tree/master/research/cv/cait) | ✅ | ✅ |   |
+| Image | Object Detection | [CascadeRCNN](https://gitee.com/mindspore/models/tree/master/research/cv/CascadeRCNN) | ✅ | ✅ |   |
+| Image | Image Classification | [CBAM](https://gitee.com/mindspore/models/tree/master/research/cv/CBAM) | ✅ |   |   |
+| Image | Image Classification | [cct](https://gitee.com/mindspore/models/tree/master/research/cv/cct) | ✅ | ✅ |   |
+| Image | Keypoint Detection | [centernet](https://gitee.com/mindspore/models/tree/master/research/cv/centernet) | ✅ |   | ✅ |
+| Image | Keypoint Detection | [centernet_det](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_det) | ✅ |   |   |
+| Image | Keypoint Detection | [centernet_resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_resnet101) | ✅ | ✅ |   |
+| Image | Keypoint Detection | [centernet_resnet50_v1](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_resnet50_v1) | ✅ |   |   |
+| Image | Image Generation | [CGAN](https://gitee.com/mindspore/models/tree/master/research/cv/CGAN) | ✅ | ✅ |   |
+| Image | Image Classification | [convnext](https://gitee.com/mindspore/models/tree/master/research/cv/convnext) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [csd](https://gitee.com/mindspore/models/tree/master/research/cv/csd) | ✅ | ✅ |   |
+| Image | Image Generation | [CTSDG](https://gitee.com/mindspore/models/tree/master/research/cv/CTSDG) | ✅ | ✅ |   |
+| Image | Style Transfer | [CycleGAN](https://gitee.com/mindspore/models/tree/master/research/cv/CycleGAN) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [DBPN](https://gitee.com/mindspore/models/tree/master/research/cv/DBPN) | ✅ |   |   |
+| Image | Image Super Resolution | [DBPN_GAN](https://gitee.com/mindspore/models/tree/master/research/cv/DBPN) | ✅ |   |   |
+| Image | Image Generation | [dcgan](https://gitee.com/mindspore/models/tree/master/research/cv/dcgan) | ✅ | ✅ |   |
+| Image | Re-Identification | [DDAG](https://gitee.com/mindspore/models/tree/master/research/cv/DDAG) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [DDM](https://gitee.com/mindspore/models/tree/master/research/cv/DDM) | ✅ |   |   |
+| Image | Semantic Segmentation | [DDRNet](https://gitee.com/mindspore/models/tree/master/research/cv/DDRNet) | ✅ | ✅ |   |
+| Image | Object Detection | [DeepID](https://gitee.com/mindspore/models/tree/master/research/cv/DeepID) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [deeplabv3plus](https://gitee.com/mindspore/models/tree/master/research/cv/deeplabv3plus) | ✅ | ✅ |   |
+| Image | Image Retrieval | [delf](https://gitee.com/mindspore/models/tree/master/research/cv/delf) | ✅ |   |   |
+| Image | Zero-Shot Learning | [dem](https://gitee.com/mindspore/models/tree/master/research/cv/dem) | ✅ | ✅ |   |
+| Image | Object Detection | [detr](https://gitee.com/mindspore/models/tree/master/research/cv/detr) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [dgcnet_res101](https://gitee.com/mindspore/models/tree/master/research/cv/dgcnet_res101) |   | ✅ |   |
+| Image | Instance Segmentation | [dlinknet](https://gitee.com/mindspore/models/tree/master/research/cv/dlinknet) | ✅ |   |   |
+| Image | Image Denoise | [DnCNN](https://gitee.com/mindspore/models/tree/master/research/cv/DnCNN) | ✅ |   |   |
+| Image | Image Classification | [dnet_nas](https://gitee.com/mindspore/models/tree/master/research/cv/dnet_nas) | ✅ |   |   |
+| Image | Image Classification | [DRNet](https://gitee.com/mindspore/models/tree/master/research/cv/DRNet) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [EDSR](https://gitee.com/mindspore/models/tree/master/research/cv/EDSR) | ✅ |   |   |
+| Image | Object Detection | [EfficientDet_d0](https://gitee.com/mindspore/models/tree/master/research/cv/EfficientDet_d0) | ✅ |   |   |
+| Image | Image Classification | [efficientnet-b0](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b0) | ✅ |   |   |
+| Image | Image Classification | [efficientnet-b1](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b1) | ✅ |   |   |
+| Image | Image Classification | [efficientnet-b2](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b2) | ✅ | ✅ |   |
+| Image | Image Classification | [efficientnet-b3](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b3) | ✅ | ✅ |   |
+| Image | Image Classification | [efficientnetv2](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnetv2) | ✅ |   |   |
+| Image | Salient Object Detection | [EGnet](https://gitee.com/mindspore/models/tree/master/research/cv/EGnet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [E-NET](https://gitee.com/mindspore/models/tree/master/research/cv/E-NET) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [esr_ea](https://gitee.com/mindspore/models/tree/master/research/cv/esr_ea) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [ESRGAN](https://gitee.com/mindspore/models/tree/master/research/cv/ESRGAN) | ✅ | ✅ |   |
+| Image | Image Classification | [FaceAttribute](https://gitee.com/mindspore/models/tree/master/research/cv/FaceAttribute) | ✅ | ✅ |   |
+| Image | Object Detection | [faceboxes](https://gitee.com/mindspore/models/tree/master/research/cv/faceboxes) | ✅ |   |   |
+| Image | Object Detection | [FaceDetection](https://gitee.com/mindspore/models/tree/master/research/cv/FaceDetection) | ✅ | ✅ |   |
+| Image | Face Recognition | [FaceNet](https://gitee.com/mindspore/models/tree/master/research/cv/FaceNet) | ✅ | ✅ |   |
+| Image | Image Classification | [FaceQualityAssessment](https://gitee.com/mindspore/models/tree/master/research/cv/FaceQualityAssessment) | ✅ | ✅ | ✅ |
+| Image | Object Detection | [FaceRecognition](https://gitee.com/mindspore/models/tree/master/research/cv/FaceRecognition) | ✅ | ✅ |   |
+| Image | Object Detection | [FaceRecognitionForTracking](https://gitee.com/mindspore/models/tree/master/research/cv/FaceRecognitionForTracking) | ✅ | ✅ | ✅ |
+| Image | Object Detection | [faster_rcnn_dcn](https://gitee.com/mindspore/models/tree/master/research/cv/faster_rcnn_dcn) | ✅ | ✅ |   |
+| Image | Image Matting | [FCANet](https://gitee.com/mindspore/models/tree/master/research/cv/FCANet) | ✅ |   |   |
+| Image | Image Classification | [FDA-BNN](https://gitee.com/mindspore/models/tree/master/research/cv/FDA-BNN) | ✅ | ✅ |   |
+| Image | Image Classification | [fishnet99](https://gitee.com/mindspore/models/tree/master/research/cv/fishnet99) | ✅ | ✅ |   |
+| Image | Optical Flow Estimation | [flownet2](https://gitee.com/mindspore/models/tree/master/research/cv/flownet2) | ✅ |   |   |
+| Image | Image Generation | [gan](https://gitee.com/mindspore/models/tree/master/research/cv/gan) | ✅ | ✅ |   |
+| Image | Image Classification | [GENet_Res50](https://gitee.com/mindspore/models/tree/master/research/cv/GENet_Res50) | ✅ |   |   |
+| Image | Image Classification | [ghostnet](https://gitee.com/mindspore/models/tree/master/research/cv/ghostnet) | ✅ |   |   |
+| Image | Image Classification | [ghostnet_d](https://gitee.com/mindspore/models/tree/master/research/cv/ghostnet_d) | ✅ | ✅ |   |
+| Image | Image Classification | [glore_res200](https://gitee.com/mindspore/models/tree/master/research/cv/glore_res) | ✅ | ✅ |   |
+| Image | Image Classification | [glore_res50](https://gitee.com/mindspore/models/tree/master/research/cv/glore_res) | ✅ | ✅ |   |
+| Image | Image Classification | [hardnet](https://gitee.com/mindspore/models/tree/master/research/cv/hardnet) | ✅ | ✅ |   |
+| Image | Edge Detection | [hed](https://gitee.com/mindspore/models/tree/master/research/cv/hed) | ✅ | ✅ |   |
+| Image | Image Generation | [HiFaceGAN](https://gitee.com/mindspore/models/tree/master/research/cv/HiFaceGAN) |   | ✅ |   |
+| Image | Image Classification | [HourNAS](https://gitee.com/mindspore/models/tree/master/research/cv/HourNAS) |   | ✅ |   |
+| Image | Image Classification | [HRNetW48_cls](https://gitee.com/mindspore/models/tree/master/research/cv/HRNetW48_cls) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [HRNetW48_seg](https://gitee.com/mindspore/models/tree/master/research/cv/HRNetW48_seg) | ✅ |   |   |
+| Image | Image Classification | [ibnnet](https://gitee.com/mindspore/models/tree/master/research/cv/ibnnet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [ICNet](https://gitee.com/mindspore/models/tree/master/research/cv/ICNet) | ✅ |   |   |
+| Image | Image Classification | [inception_resnet_v2](https://gitee.com/mindspore/models/tree/master/research/cv/inception_resnet_v2) | ✅ | ✅ |   |
+| Image | Image Classification | [Inceptionv2](https://gitee.com/mindspore/models/tree/master/research/cv/Inception-v2) | ✅ | ✅ |   |
+| Image | Image Matting | [IndexNet](https://gitee.com/mindspore/models/tree/master/research/cv/IndexNet) |   | ✅ |   |
+| Image | Image Generation | [IPT](https://gitee.com/mindspore/models/tree/master/research/cv/IPT) | ✅ |   |   |
+| Image | Image Super Resolution | [IRN](https://gitee.com/mindspore/models/tree/master/research/cv/IRN) | ✅ | ✅ |   |
+| Image | Image Classification | [ISyNet](https://gitee.com/mindspore/models/tree/master/research/cv/ISyNet) | ✅ | ✅ |   |
+| Image | Image Classification | [ivpf](https://gitee.com/mindspore/models/tree/master/research/cv/ivpf) |   | ✅ |   |
+| Image | Image Denoise | [LearningToSeeInTheDark](https://gitee.com/mindspore/models/tree/master/research/cv/LearningToSeeInTheDark) | ✅ |   |   |
+| Image | Meta Learning | [LEO](https://gitee.com/mindspore/models/tree/master/research/cv/LEO) | ✅ | ✅ |   |
+| Image | Object Detection | [LightCNN](https://gitee.com/mindspore/models/tree/master/research/cv/LightCNN) | ✅ | ✅ | ✅ |
+| Image | Image Super Resolution | [lite-hrnet](https://gitee.com/mindspore/models/tree/master/research/cv/lite-hrnet) |   | ✅ |   |
+| Image | Image Classification | [lresnet100e_ir](https://gitee.com/mindspore/models/tree/master/research/cv/lresnet100e_ir) |   | ✅ |   |
+| Image | Object Detection | [m2det](https://gitee.com/mindspore/models/tree/master/research/cv/m2det) |   | ✅ |   |
+| Image | Autoencoder | [mae](https://gitee.com/mindspore/models/tree/master/research/cv/mae) | ✅ | ✅ |   |
+| Image | Meta Learning | [MAML](https://gitee.com/mindspore/models/tree/master/research/cv/MAML) | ✅ | ✅ |   |
+| Image | Scene Text Recognition | [ManiDP](https://gitee.com/mindspore/models/tree/master/research/cv/ManiDP) |   | ✅ |   |
+| Image | Face Recognition | [MaskedFaceRecognition](https://gitee.com/mindspore/models/tree/master/research/cv/MaskedFaceRecognition) | ✅ |   |   |
+| Image | Meta Learning | [meta-baseline](https://gitee.com/mindspore/models/tree/master/research/cv/meta-baseline) | ✅ | ✅ |   |
+| Image | Re-Identification | [MGN](https://gitee.com/mindspore/models/tree/master/research/cv/MGN) | ✅ | ✅ |   |
+| Image | Depth Estimation | [midas](https://gitee.com/mindspore/models/tree/master/research/cv/midas) | ✅ | ✅ |   |
+| Image | Image Denoise | [MIMO-UNet](https://gitee.com/mindspore/models/tree/master/research/cv/MIMO-UNet) |   | ✅ |   |
+| Image | Image Classification | [mnasnet](https://gitee.com/mindspore/models/tree/master/research/cv/mnasnet) | ✅ | ✅ |   |
+| Image | Image Classification | [mobilenetv3_large](https://gitee.com/mindspore/models/tree/master/research/cv/mobilenetv3_large) | ✅ |   | ✅ |
+| Image | Image Classification | [mobilenetV3_small_x1_0](https://gitee.com/mindspore/models/tree/master/research/cv/mobilenetV3_small_x1_0) | ✅ | ✅ | ✅ |
+| Image | Image Classification | [MultiTaskNet](https://gitee.com/mindspore/models/tree/master/research/cv/PAMTRI) | ✅ | ✅ |   |
+| Image | Re-Identification | [MVD](https://gitee.com/mindspore/models/tree/master/research/cv/MVD) | ✅ | ✅ |   |
+| Image | Object Detection | [nas-fpn](https://gitee.com/mindspore/models/tree/master/research/cv/nas-fpn) | ✅ |   |   |
+| Image | Image Denoise | [Neighbor2Neighbor](https://gitee.com/mindspore/models/tree/master/research/cv/Neighbor2Neighbor) | ✅ | ✅ |   |
+| Image | Image Classification | [NFNet](https://gitee.com/mindspore/models/tree/master/research/cv/NFNet) | ✅ | ✅ |   |
+| Image | Image Quality Assessment | [nima_vgg16](https://gitee.com/mindspore/models/tree/master/research/cv/nima_vgg16) |   | ✅ |   |
+| Image | Semantic Segmentation | [nnUNet](https://gitee.com/mindspore/models/tree/master/research/cv/nnUNet) | ✅ | ✅ |   |
+| Image | Image Classification | [ntsnet](https://gitee.com/mindspore/models/tree/master/research/cv/ntsnet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [OCRNet](https://gitee.com/mindspore/models/tree/master/research/cv/OCRNet) | ✅ | ✅ |   |
+| Image | Re-Identification | [osnet](https://gitee.com/mindspore/models/tree/master/research/cv/osnet) | ✅ | ✅ |   |
+| Image | Salient Object Detection | [PAGENet](https://gitee.com/mindspore/models/tree/master/research/cv/PAGENet) | ✅ | ✅ |   |
+| Image | Image Retrieval | [pcb](https://gitee.com/mindspore/models/tree/master/research/cv/pcb_rpp) |   | ✅ |   |
+| Image | Image Retrieval | [pcb_rpp](https://gitee.com/mindspore/models/tree/master/research/cv/pcb_rpp) |   | ✅ |   |
+| Image | Image Classification | [PDarts](https://gitee.com/mindspore/models/tree/master/research/cv/PDarts) | ✅ | ✅ |   |
+| Image | Image Generation | [PGAN](https://gitee.com/mindspore/models/tree/master/research/cv/PGAN) | ✅ | ✅ |   |
+| Image | Image Generation | [Pix2Pix](https://gitee.com/mindspore/models/tree/master/research/cv/Pix2Pix) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [Pix2PixHD](https://gitee.com/mindspore/models/tree/master/research/cv/Pix2PixHD) | ✅ |   |   |
+| Image | Image Classification | [pnasnet](https://gitee.com/mindspore/models/tree/master/research/cv/pnasnet) | ✅ | ✅ |   |
+| Image | Point Cloud Model | [pointnet](https://gitee.com/mindspore/models/tree/master/research/cv/pointnet) | ✅ | ✅ |   |
+| Image | Point Cloud Model | [pointnet2](https://gitee.com/mindspore/models/tree/master/research/cv/pointnet2) | ✅ | ✅ |   |
+| Image | Image Classification | [PoseEstNet](https://gitee.com/mindspore/models/tree/master/research/cv/PAMTRI) | ✅ | ✅ |   |
+| Image | Image Classification | [ProtoNet](https://gitee.com/mindspore/models/tree/master/research/cv/ProtoNet) | ✅ | ✅ |   |
+| Image | Image Classification | [proxylessnas](https://gitee.com/mindspore/models/tree/master/research/cv/proxylessnas) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [PSPNet](https://gitee.com/mindspore/models/tree/master/research/cv/PSPNet) | ✅ |   |   |
+| Image | Salient Object Detection | [ras](https://gitee.com/mindspore/models/tree/master/research/cv/ras) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [RCAN](https://gitee.com/mindspore/models/tree/master/research/cv/RCAN) | ✅ |   |   |
+| Image | Object Detection | [rcnn](https://gitee.com/mindspore/models/tree/master/research/cv/rcnn) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [REDNet30](https://gitee.com/mindspore/models/tree/master/research/cv/REDNet30) | ✅ | ✅ |   |
+| Image | Object Detection | [RefineDet](https://gitee.com/mindspore/models/tree/master/research/cv/RefineDet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [RefineNet](https://gitee.com/mindspore/models/tree/master/research/cv/RefineNet) | ✅ | ✅ |   |
+| Image | Re-Identification | [ReIDStrongBaseline](https://gitee.com/mindspore/models/tree/master/research/cv/ReIDStrongBaseline) | ✅ | ✅ |   |
+| Image | Image Classification | [relationnet](https://gitee.com/mindspore/models/tree/master/research/cv/relationnet) | ✅ | ✅ |   |
+| Image | Image Classification | [renas](https://gitee.com/mindspore/models/tree/master/research/cv/renas) | ✅ | ✅ | ✅ |
+| Image | Semantic Segmentation | [repvgg](https://gitee.com/mindspore/models/tree/master/research/cv/repvgg) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [res2net_deeplabv3](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_deeplabv3) | ✅ |   | ✅ |
+| Image | Object Detection | [res2net_faster_rcnn](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_faster_rcnn) | ✅ | ✅ |   |
+| Image | Object Detection | [res2net_yolov3](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_yolov3) | ✅ | ✅ |   |
+| Image | Image Classification | [res2net101](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) | ✅ | ✅ |   |
+| Image | Image Classification | [res2net152](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) | ✅ | ✅ |   |
+| Image | Image Classification | [res2net50](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) | ✅ | ✅ |   |
+| Image | Image Classification | [ResNeSt50](https://gitee.com/mindspore/models/tree/master/research/cv/ResNeSt50) | ✅ | ✅ |   |
+| Image | Image Classification | [resnet50_adv_pruning](https://gitee.com/mindspore/models/tree/master/research/cv/resnet50_adv_pruning) | ✅ | ✅ |   |
+| Image | Image Classification | [resnet50_bam](https://gitee.com/mindspore/models/tree/master/research/cv/resnet50_bam) | ✅ | ✅ |   |
+| Image | Image Classification | [ResNet50-Quadruplet](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) | ✅ | ✅ |   |
+| Image | Image Classification | [ResNet50-Triplet](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) | ✅ | ✅ |   |
+| Image | Image Classification | [ResnetV2_101](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2) | ✅ | ✅ |   |
+| Image | Image Classification | [ResnetV2_152](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2) | ✅ | ✅ |   |
+| Image | Image Classification | [ResnetV2_50](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2) | ✅ | ✅ |   |
+| Image | Image Classification | [resnetv2_50_frn](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2_50_frn) | ✅ | ✅ |   |
+| Image | Image Classification | [resnext152_64x4d](https://gitee.com/mindspore/models/tree/master/research/cv/resnext152_64x4d) | ✅ | ✅ |   |
+| Image | Object Detection | [retinaface_mobilenet0.25](https://gitee.com/mindspore/models/tree/master/research/cv/retinaface) | ✅ | ✅ |   |
+| Image | Object Detection | [retinanet_resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/retinanet_resnet101) | ✅ | ✅ |   |
+| Image | Object Detection | [retinanet_resnet152](https://gitee.com/mindspore/models/tree/master/research/cv/retinanet_resnet152) | ✅ | ✅ |   |
+| Image | Object Detection | [rfcn](https://gitee.com/mindspore/models/tree/master/research/cv/rfcn) |   | ✅ |   |
+| Image | Image Classification | [SE_ResNeXt50](https://gitee.com/mindspore/models/tree/master/research/cv/SE_ResNeXt50) | ✅ |   |   |
+| Image | Image Classification | [senet_resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/SE-Net) | ✅ | ✅ | ✅ |
+| Image | Image Classification | [senet_resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/SE-Net) | ✅ | ✅ | ✅ |
+| Image | Image Classification | [se-res2net50](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) | ✅ | ✅ |   |
+| Image | Image Classification | [S-GhostNet](https://gitee.com/mindspore/models/tree/master/research/cv/S-GhostNet) | ✅ |   |   |
+| Image | Pose Estimation | [simple_baselines](https://gitee.com/mindspore/models/tree/master/research/cv/simple_baselines) | ✅ | ✅ |   |
+| Image | Image Generation | [SinGAN](https://gitee.com/mindspore/models/tree/master/research/cv/SinGAN) | ✅ |   |   |
+| Image | Image Classification | [single_path_nas](https://gitee.com/mindspore/models/tree/master/research/cv/single_path_nas) | ✅ | ✅ |   |
+| Image | Image Classification | [sknet](https://gitee.com/mindspore/models/tree/master/research/cv/sknet) | ✅ | ✅ | ✅ |
+| Image | Image Classification | [snn_mlp](https://gitee.com/mindspore/models/tree/master/research/cv/snn_mlp) |   | ✅ |   |
+| Image | Object Detection | [Spnas](https://gitee.com/mindspore/models/tree/master/research/cv/Spnas) | ✅ |   |   |
+| Image | Image Classification | [SPPNet](https://gitee.com/mindspore/models/tree/master/research/cv/SPPNet) | ✅ | ✅ |   |
+| Image | Image Classification | [squeezenet1_1](https://gitee.com/mindspore/models/tree/master/research/cv/squeezenet1_1) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [sr_ea](https://gitee.com/mindspore/models/tree/master/research/cv/sr_ea) | ✅ |   |   |
+| Image | Image Super Resolution | [SRGAN](https://gitee.com/mindspore/models/tree/master/research/cv/SRGAN) | ✅ | ✅ |   |
+| Image | Image Classification | [ssc_resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/ssc_resnet50) | ✅ | ✅ |   |
+| Image | Object Detection | [ssd_ghostnet](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_ghostnet) | ✅ | ✅ | ✅ |
+| Image | Object Detection | [ssd_inception_v2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_inception_v2) |   | ✅ | ✅ |
+| Image | Object Detection | [ssd_inceptionv2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_inceptionv2) | ✅ |   |   |
+| Image | Object Detection | [ssd_mobilenetV2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_mobilenetV2) | ✅ | ✅ | ✅ |
+| Image | Object Detection | [ssd_mobilenetV2_FPNlite](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_mobilenetV2_FPNlite) | ✅ | ✅ | ✅ |
+| Image | Object Detection | [ssd_resnet_34](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet_34) |   | ✅ |   |
+| Image | Object Detection | [ssd_resnet34](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet34) | ✅ |   | ✅ |
+| Image | Object Detection | [ssd_resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet50) | ✅ |   |   |
+| Image | Pose Estimation | [StackedHourglass](https://gitee.com/mindspore/models/tree/master/research/cv/StackedHourglass) | ✅ |   |   |
+| Image | Image Generation | [StarGAN](https://gitee.com/mindspore/models/tree/master/research/cv/StarGAN) | ✅ | ✅ |   |
+| Image | Image Generation | [STGAN](https://gitee.com/mindspore/models/tree/master/research/cv/STGAN) | ✅ | ✅ |   |
+| Image | Traffic Prediction | [stgcn](https://gitee.com/mindspore/models/tree/master/research/cv/stgcn) | ✅ | ✅ |   |
+| Image | Image Classification | [stpm](https://gitee.com/mindspore/models/tree/master/research/cv/stpm) | ✅ | ✅ |   |
+| Image | Image Classification | [swin_transformer](https://gitee.com/mindspore/models/tree/master/research/cv/swin_transformer) | ✅ | ✅ |   |
+| Image | Temporal Localization | [tall](https://gitee.com/mindspore/models/tree/master/research/cv/tall) | ✅ |   |   |
+| Image | Image Classification | [TCN](https://gitee.com/mindspore/models/tree/master/research/cv/TCN) | ✅ | ✅ |   |
+| Image | Scene Text Detection | [textfusenet](https://gitee.com/mindspore/models/tree/master/research/cv/textfusenet) | ✅ |   |   |
+| Image | Traffic Prediction | [tgcn](https://gitee.com/mindspore/models/tree/master/research/cv/tgcn) | ✅ | ✅ |   |
+| Image | Image Classification | [tinynet](https://gitee.com/mindspore/models/tree/master/research/cv/tinynet) |   | ✅ |   |
+| Image | Image Classification | [TNT](https://gitee.com/mindspore/models/tree/master/research/cv/TNT) | ✅ | ✅ |   |
+| Image | Object Detection | [u2net](https://gitee.com/mindspore/models/tree/master/research/cv/u2net) | ✅ | ✅ |   |
+| Image | Image Generation | [U-GAT-IT](https://gitee.com/mindspore/models/tree/master/research/cv/U-GAT-IT) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [UNet3+](https://gitee.com/mindspore/models/tree/master/research/cv/UNet3+) | ✅ | ✅ |   |
+| Image | Re-Identification | [VehicleNet](https://gitee.com/mindspore/models/tree/master/research/cv/VehicleNet) | ✅ |   |   |
+| Image | Image Classification | [vgg19](https://gitee.com/mindspore/models/tree/master/research/cv/vgg19) | ✅ | ✅ |   |
+| Image | Image Classification | [ViG](https://gitee.com/mindspore/models/tree/master/research/cv/ViG) | ✅ | ✅ |   |
+| Image | Image Classification | [vit_cifar](https://gitee.com/mindspore/models/tree/master/research/cv/vit_base) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [vnet](https://gitee.com/mindspore/models/tree/master/research/cv/vnet) | ✅ | ✅ |   |
+| Image | Image Classification | [wave_mlp](https://gitee.com/mindspore/models/tree/master/research/cv/wave_mlp) | ✅ | ✅ |   |
+| Image | Image Super Resolution | [wdsr](https://gitee.com/mindspore/models/tree/master/research/cv/wdsr) | ✅ | ✅ |   |
+| Image | Image Generation | [wgan](https://gitee.com/mindspore/models/tree/master/research/cv/wgan) | ✅ |   |   |
+| Image | Image Classification | [wideresnet](https://gitee.com/mindspore/models/tree/master/research/cv/wideresnet) | ✅ | ✅ |   |
+| Image | Instance Segmentation | [Yolact++](https://gitee.com/mindspore/models/tree/master/research/cv/Yolact++) | ✅ |   |   |
+| Image | Object Detection | [yolov3_tiny](https://gitee.com/mindspore/models/tree/master/research/cv/yolov3_tiny) | ✅ | ✅ |   |
+| Image | Object Detection | [yolox](https://gitee.com/mindspore/models/tree/master/research/cv/yolox) | ✅ |   |   |
+| Multi Modal | Multi Modal | [opt](https://gitee.com/mindspore/models/tree/master/research/mm/opt) | ✅ | ✅ |   |
+| Multi Modal | Multi Modal | [TokenFusion](https://gitee.com/mindspore/models/tree/master/research/cv/TokenFusion) | ✅ | ✅ |   |
+| Multi Modal | Multi Modal | [wukong](https://gitee.com/mindspore/models/tree/master/research/mm/wukong) | ✅ |   |   |
+| Recommendation | Click-Through Rate Prediction | [autodis](https://gitee.com/mindspore/models/tree/master/research/recommend/autodis) | ✅ | ✅ |   |
+| Recommendation | Click-Through Rate Prediction | [DIEN](https://gitee.com/mindspore/models/tree/master/research/recommend/DIEN) | ✅ | ✅ |   |
+| Recommendation | Click-Through Rate Prediction | [dlrm](https://gitee.com/mindspore/models/tree/master/research/recommend/dlrm) | ✅ | ✅ |   |
+| Recommendation | Click-Through Rate Prediction | [EDCN](https://gitee.com/mindspore/models/tree/master/research/recommend/EDCN) | ✅ | ✅ |   |
+| Recommendation | Click-Through Rate Prediction | [Fat-DeepFFM](https://gitee.com/mindspore/models/tree/master/research/recommend/Fat-DeepFFM) | ✅ | ✅ |   |
+| Recommendation | Click-Through Rate Prediction | [mmoe](https://gitee.com/mindspore/models/tree/master/research/recommend/mmoe) | ✅ | ✅ |   |
+| Text | Natural Language Understanding | [albert](https://gitee.com/mindspore/models/tree/master/research/nlp/albert) | ✅ | ✅ |   |
+| Text | Emotion Classification | [atae_lstm](https://gitee.com/mindspore/models/tree/master/research/nlp/atae_lstm) | ✅ | ✅ |   |
+| Text | Dialogue | [dam](https://gitee.com/mindspore/models/tree/master/research/nlp/dam) | ✅ |   |   |
+| Text | Language Model | [gpt2](https://gitee.com/mindspore/models/tree/master/research/nlp/gpt2) | ✅ |   |   |
+| Text | Knowledge Graph Embedding | [hake](https://gitee.com/mindspore/models/tree/master/research/nlp/hake) |   | ✅ |   |
+| Text | Natural Language Understanding | [ktnet](https://gitee.com/mindspore/models/tree/master/research/nlp/ktnet) | ✅ | ✅ |   |
+| Text | Named Entity Recognition | [lstm_crf](https://gitee.com/mindspore/models/tree/master/research/nlp/lstm_crf) | ✅ |   |   |
+| Text | Natural Language Understanding | [luke](https://gitee.com/mindspore/models/tree/master/research/nlp/luke) | ✅ | ✅ |   |
+| Text | Knowledge Graph Embedding | [rotate](https://gitee.com/mindspore/models/tree/master/research/nlp/rotate) | ✅ | ✅ |   |
+| Text | Emotion Classification | [senta](https://gitee.com/mindspore/models/tree/master/research/nlp/senta) | ✅ | ✅ |   |
+| Text | Machine Translation | [seq2seq](https://gitee.com/mindspore/models/tree/master/research/nlp/seq2seq) | ✅ |   |   |
+| Text | Word Embedding | [skipgram](https://gitee.com/mindspore/models/tree/master/research/nlp/skipgram) | ✅ | ✅ |   |
+| Text | Machine Translation | [speech_transformer](https://gitee.com/mindspore/models/tree/master/research/nlp/speech_transformer) | ✅ |   |   |
+| Text | Pre Training | [ternarybert](https://gitee.com/mindspore/models/tree/master/research/nlp/ternarybert) | ✅ | ✅ |   |
+| Text | Natural Language Understanding | [tprr](https://gitee.com/mindspore/models/tree/master/research/nlp/tprr) | ✅ |   |   |
+| Text | Natural Language Understanding | [transformer_xl](https://gitee.com/mindspore/models/tree/master/research/nlp/transformer_xl) | ✅ | ✅ |   |
+| Text | Knowledge Graph Embedding | [transX](https://gitee.com/mindspore/models/tree/master/research/nlp/transX) |   | ✅ |   |
+| Video | Video Classification | [AttentionCluster](https://gitee.com/mindspore/models/tree/master/research/cv/AttentionCluster) | ✅ | ✅ |   |
+| Video | Others | [DYR](https://gitee.com/mindspore/models/tree/master/research/nlp/DYR) | ✅ |   |   |
+| Video | Video Classification | [ecolite](https://gitee.com/mindspore/models/tree/master/research/cv/ecolite) | ✅ |   |   |
+| Video | Object Tracking | [fairmot](https://gitee.com/mindspore/models/tree/master/research/cv/fairmot) | ✅ | ✅ |   |
+| Video | Video Classification | [I3D](https://gitee.com/mindspore/models/tree/master/research/cv/I3D) | ✅ |   |   |
+| Video | Object Tracking | [JDE](https://gitee.com/mindspore/models/tree/master/research/cv/JDE) |   | ✅ |   |
+| Video | Video Segmentation | [OSVOS](https://gitee.com/mindspore/models/tree/master/research/cv/OSVOS) |   | ✅ |   |
+| Video | Video Classification | [r2plus1d](https://gitee.com/mindspore/models/tree/master/research/cv/r2plus1d) | ✅ | ✅ |   |
+| Video | Video Super Resolution | [rbpn](https://gitee.com/mindspore/models/tree/master/research/cv/rbpn) | ✅ |   |   |
+| Video | Video Classification | [resnet3d](https://gitee.com/mindspore/models/tree/master/research/cv/resnet3d) | ✅ |   |   |
+| Video | Object Tracking | [SiamFC](https://gitee.com/mindspore/models/tree/master/research/cv/SiamFC) | ✅ |   |   |
+| Video | Object Tracking | [siamRPN](https://gitee.com/mindspore/models/tree/master/research/cv/siamRPN) | ✅ | ✅ |   |
+| Video | Video Classification | [slowfast](https://gitee.com/mindspore/models/tree/master/research/cv/slowfast) | ✅ | ✅ |   |
+| Video | Video Classification | [stnet](https://gitee.com/mindspore/models/tree/master/research/cv/stnet) | ✅ |   |   |
+| Video | Object Tracking | [tracktor](https://gitee.com/mindspore/models/tree/master/research/cv/tracktor) |   | ✅ |   |
+| Video | Object Tracking | [tracktor++](https://gitee.com/mindspore/models/tree/master/research/cv/tracktor++) | ✅ | ✅ |   |
+| Video | Video Classification | [trn](https://gitee.com/mindspore/models/tree/master/research/cv/trn) |   | ✅ |   |
+| Video | Video Classification | [tsm](https://gitee.com/mindspore/models/tree/master/research/cv/tsm) | ✅ | ✅ |   |
+| Video | Video Classification | [tsn](https://gitee.com/mindspore/models/tree/master/research/cv/tsn) | ✅ | ✅ |   |
 
 - [Community](https://gitee.com/mindspore/models/tree/master/community)
 
diff --git a/README_CN.md b/README_CN.md
index 92cb579daf7523ee270b5f69fed7aef4ec4ad5c3..d41404c12e80c789686e19054814522d63abe9d5 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -15,310 +15,423 @@
 ### Standard Networks
 
 |  Domain | Sub Domain  | Network   | Ascend | GPU | CPU |
-|:----  |:-------  |:----   |:----:    |:----: |:----: |
-| 闊抽锛圓udio) | 闊抽鍚堟垚锛圫peech Synthesis) | [LPCNet](https://gitee.com/mindspore/models/tree/master/official/audio/lpcnet) | 鉁� |   |   |
-| 闊抽锛圓udio) | 闊抽鍚堟垚锛圫peech Synthesis) | [MelGAN](https://gitee.com/mindspore/models/tree/master/official/audio/melgan) | 鉁� |   |   |
-| 闊抽锛圓udio) | 闊抽鍚堟垚锛圫peech Synthesis) | [Tacotron2](https://gitee.com/mindspore/models/tree/master/official/audio/tacotron2) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐐逛簯妯″瀷锛圥oint Cloud Model锛� | [OctSqueeze](https://gitee.com/mindspore/models/tree/master/official/cv/octsqueeze) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍏夋祦浼拌锛圤ptical Flow Estimation锛� | [PWCNet](https://gitee.com/mindspore/models/tree/master/official/cv/pwcnet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣璺熻釜锛圤bject Tracking锛� | [Deepsort](https://gitee.com/mindspore/models/tree/master/official/cv/Deepsort) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣璺熻釜锛圤bject Tracking锛� | [ADNet](https://gitee.com/mindspore/models/tree/master/official/cv/ADNet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍥惧儚鍒嗙被锛圛mage Classification锛� | [AlexNet](https://gitee.com/mindspore/models/tree/master/official/cv/alexnet)   |  鉁� |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [CNN](https://gitee.com/mindspore/models/tree/master/official/cv/cnn_direction_model)  |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [DenseNet100](https://gitee.com/mindspore/models/tree/master/official/cv/densenet) |   |   | 鉁� |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [DenseNet121](https://gitee.com/mindspore/models/tree/master/official/cv/densenet) |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [DPN](https://gitee.com/mindspore/models/tree/master/official/cv/dpn) |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [EfficientNet-B0](https://gitee.com/mindspore/models/tree/master/official/cv/efficientnet) |   |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [GoogLeNet](https://gitee.com/mindspore/models/tree/master/official/cv/googlenet)   |  鉁�     | 鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [InceptionV3](https://gitee.com/mindspore/models/tree/master/official/cv/inceptionv3) |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [InceptionV4](https://gitee.com/mindspore/models/tree/master/official/cv/inceptionv4) |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [LeNet](https://gitee.com/mindspore/models/tree/master/official/cv/lenet)    |  鉁� |  鉁� | 鉁� |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [MobileNetV1](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv1)        |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [MobileNetV2](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv2)        |  鉁� |  鉁� | 鉁� |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [MobileNetV3](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv3)        |   |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [NASNet](https://gitee.com/mindspore/models/tree/master/official/cv/nasnet) | 鉁� |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ResNet-18](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)   |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ResNet-34](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)   |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ResNet-50](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)   |  鉁� |  鉁� | 鉁� |
-|璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ResNet-101](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)        |  鉁� | 鉁� |  |
-|璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ResNet-152](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)        |  鉁� |   |  |
-|璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ResNeXt50](https://gitee.com/mindspore/models/tree/master/official/cv/resnext)    |  鉁� | 鉁� |  |
-|璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ResNeXt101](https://gitee.com/mindspore/models/tree/master/official/cv/resnext)    |  鉁� |   |  |
-|璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [SE-ResNet50](https://gitee.com/mindspore/models/tree/master/official/cv/resnet)       |  鉁� |  |  |
-|璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [SE-ResNext50](https://gitee.com/mindspore/models/tree/master/official/cv/se_resnext50)       |  鉁� |  |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ShuffleNetV1](https://gitee.com/mindspore/models/tree/master/official/cv/shufflenetv1)        |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [ShuffleNetV2](https://gitee.com/mindspore/models/tree/master/official/cv/shufflenetv2) |   |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  |[SqueezeNet](https://gitee.com/mindspore/models/tree/master/official/cv/squeezenet) |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [Tiny-DarkNet](https://gitee.com/mindspore/models/tree/master/official/cv/tinydarknet)  |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [VGG16](https://gitee.com/mindspore/models/tree/master/official/cv/vgg16)  |  鉁� |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛�  | [Xception](https://gitee.com/mindspore/models/tree/master/official/cv/xception) |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛� | [CspDarkNet53](https://gitee.com/mindspore/models/tree/master/official/cv/cspdarknet53) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛� | [ErfNet](https://gitee.com/mindspore/models/tree/master/official/cv/erfnet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛� | [SimCLR](https://gitee.com/mindspore/models/tree/master/official/cv/simclr) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鍥惧儚鍒嗙被锛圛mage Classification锛� | [Vit](https://gitee.com/mindspore/models/tree/master/official/cv/vit) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣妫€娴嬶紙Object Detection锛�  | [CenterFace](https://gitee.com/mindspore/models/tree/master/official/cv/centerface)     |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣妫€娴嬶紙Object Detection锛�  | [CTPN](https://gitee.com/mindspore/models/tree/master/official/cv/ctpn)     |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鐩爣妫€娴嬶紙Object Detection锛�  | [Faster R-CNN](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn)  |  鉁� |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鐩爣妫€娴嬶紙Object Detection锛�  | [Mask R-CNN](https://gitee.com/mindspore/models/tree/master/official/cv/maskrcnn)  |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣妫€娴嬶紙Object Detection锛�  |[Mask R-CNN (MobileNetV1)](https://gitee.com/mindspore/models/tree/master/official/cv/maskrcnn_mobilenetv1)         |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鐩爣妫€娴嬶紙Object Detection锛�  | [SSD](https://gitee.com/mindspore/models/tree/master/official/cv/ssd)                   |  鉁� | 鉁� | 鉁� |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鐩爣妫€娴嬶紙Object Detection锛�  | [SSD-MobileNetV1-FPN](https://gitee.com/mindspore/models/tree/master/official/cv/ssd)         |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鐩爣妫€娴嬶紙Object Detection锛�  | [SSD-Resnet50-FPN](https://gitee.com/mindspore/models/tree/master/official/cv/ssd)                   |  鉁� |  |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鐩爣妫€娴嬶紙Object Detection锛�  | [SSD-VGG16](https://gitee.com/mindspore/models/tree/master/official/cv/ssd)                   |  鉁� |  |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣妫€娴嬶紙Object Detection锛�  | [WarpCTC](https://gitee.com/mindspore/models/tree/master/official/cv/warpctc)                    |  鉁� |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鐩爣妫€娴嬶紙Object Detection锛�  | [YOLOv3-ResNet18](https://gitee.com/mindspore/models/tree/master/official/cv/yolov3_resnet18)   |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛�  | 鐩爣妫€娴嬶紙Object Detection锛�  | [YOLOv3-DarkNet53](https://gitee.com/mindspore/models/tree/master/official/cv/yolov3_darknet53)   |  鉁� |  鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣妫€娴嬶紙Object Detection锛�  |[YOLOv4](https://gitee.com/mindspore/models/tree/master/official/cv/yolov4)         |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣妫€娴嬶紙Object Detection锛�  |[YOLOv5](https://gitee.com/mindspore/models/tree/master/official/cv/yolov5) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩爣妫€娴嬶紙Object Detection锛�  |[RetinaNet](https://gitee.com/mindspore/models/tree/master/official/cv/retinanet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰妫€娴嬶紙Text Detection锛�  | [DeepText](https://gitee.com/mindspore/models/tree/master/official/cv/deeptext)                |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰妫€娴嬶紙Text Detection锛�  | [PSENet](https://gitee.com/mindspore/models/tree/master/official/cv/psenet)                |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰璇嗗埆锛圱ext Recognition锛�  | [CNN+CTC](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc)                |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 璇箟鍒嗗壊锛圫emantic Segmentation锛�  | [DeepLabV3](https://gitee.com/mindspore/models/tree/master/official/cv/deeplabv3)   |  鉁� |   | 鉁� |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 璇箟鍒嗗壊锛圫emantic Segmentation锛�  | [DeepLabV3+](https://gitee.com/mindspore/models/tree/master/research/cv/deeplabv3plus)   |  鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 璇箟鍒嗗壊锛圫emantic Segmentation锛�  | [U-Net2D (Medical)](https://gitee.com/mindspore/models/tree/master/official/cv/unet)   |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 璇箟鍒嗗壊锛圫emantic Segmentation锛�  | [U-Net3D (Medical)](https://gitee.com/mindspore/models/tree/master/official/cv/unet3d)   |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 璇箟鍒嗗壊锛圫emantic Segmentation锛�  | [U-Net++](https://gitee.com/mindspore/models/tree/master/official/cv/unet)                |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 璇箟鍒嗗壊锛圫emantic Segmentation锛�  | [Fast-SCNN](https://gitee.com/mindspore/models/tree/master/official/cv/fastscnn)                |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 璇箟鍒嗗壊锛圫emantic Segmentation锛�  | [FCN8s](https://gitee.com/mindspore/models/tree/master/official/cv/FCN8s)  | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 濮挎€佹娴嬶紙6DoF Pose Estimation锛� | [PVNet](https://gitee.com/mindspore/models/tree/master/official/cv/pvnet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍏抽敭鐐规娴嬶紙Keypoint Detection锛�  |[OpenPose](https://gitee.com/mindspore/models/tree/master/official/cv/openpose)                |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍏抽敭鐐规娴嬶紙Keypoint Detection锛�  |[SimplePoseNet](https://gitee.com/mindspore/models/tree/master/official/cv/simple_pose)                |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰妫€娴嬶紙Scene Text Detection锛�  | [East](https://gitee.com/mindspore/models/tree/master/official/cv/east) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰妫€娴嬶紙Scene Text Detection锛�  | [PSENet](https://gitee.com/mindspore/models/tree/master/official/cv/psenet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰璇嗗埆锛圫cene Text Recognition锛�  |[CRNN](https://gitee.com/mindspore/models/tree/master/official/cv/crnn) |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰璇嗗埆锛圫cene Text Recognition锛�  |[CNN+CTC](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc) |  鉁� |   |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰璇嗗埆锛圫cene Text Recognition锛�  |[CRNN-Seq2Seq-OCR](https://gitee.com/mindspore/models/tree/master/official/cv/crnn_seq2seq_ocr) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鏂囨湰璇嗗埆锛圫cene Text Recognition锛�  |[WarpCTC](https://gitee.com/mindspore/models/tree/master/official/cv/warpctc) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 缂洪櫡妫€娴嬶紙Defect Detection锛�  |[PatchCore](https://gitee.com/mindspore/models/tree/master/official/cv/patchcore) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 缂洪櫡妫€娴嬶紙Defect Detection锛�  |[ssim-ae](https://gitee.com/mindspore/models/tree/master/official/cv/ssim-ae) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 浜鸿劯妫€娴嬶紙Face Detection锛�  | [RetinaFace-ResNet50](https://gitee.com/mindspore/models/tree/master/official/cv/retinaface_resnet50) | 鉁� | 鉁� |  |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 浜鸿劯妫€娴嬶紙Face Detection锛�  | [CenterFace](https://gitee.com/mindspore/models/tree/master/official/cv/centerface) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 浜鸿劯妫€娴嬶紙Face Detection锛� | [SphereFace](https://gitee.com/mindspore/models/tree/master/official/cv/sphereface) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 浜虹兢璁℃暟锛圕rowd Counting锛� | [MCNN](https://gitee.com/mindspore/models/tree/master/official/cv/MCNN) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 娣卞害浼拌锛圖epth Estimation锛� | [DepthNet](https://gitee.com/mindspore/models/tree/master/official/cv/depthnet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鐩告満閲嶅畾浣嶏紙Camera Relocalization锛� | [PoseNet](https://gitee.com/mindspore/models/tree/master/official/cv/posenet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍥惧儚鎶犲浘锛圛mage Matting锛� | [Semantic Human Matting](https://gitee.com/mindspore/models/tree/master/official/cv/semantic_human_matting) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 瑙嗛鍒嗙被锛圴ideo Classification锛� | [C3D](https://gitee.com/mindspore/models/tree/master/official/cv/c3d) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍥惧儚瓒呭垎锛圛mage Super-Resolution锛� |[RDN](https://gitee.com/mindspore/models/tree/master/official/cv/RDN) | 鉁� | 鉁� |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍥惧儚瓒呭垎锛圛mage Super-Resolution锛� | [SRCNN](https://gitee.com/mindspore/models/tree/master/official/cv/srcnn) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍥惧儚鍘诲櫔锛圛mage Denoising锛� | [BRDNet](https://gitee.com/mindspore/models/tree/master/official/cv/brdnet) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍥惧儚鍘诲櫔锛圛mage Denoising锛� | [DnCNN](https://gitee.com/mindspore/models/tree/master/official/cv/dncnn) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍥惧儚鍘诲櫔锛圛mage Denoising锛� | [Learning-to-See-in-the-Dark](https://gitee.com/mindspore/models/tree/master/official/cv/LearningToSeeInTheDark) | 鉁� |   |   |
-| 璁$畻鏈鸿瑙夛紙CV锛� | 鍥惧儚璐ㄩ噺璇勪及锛圛mage Quality Assessment锛� | [NIMA](https://gitee.com/mindspore/models/tree/master/official/cv/nima) | 鉁� |   |   |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [BERT](https://gitee.com/mindspore/models/tree/master/official/nlp/bert)  |  鉁� |  鉁� |  |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [FastText](https://gitee.com/mindspore/models/tree/master/official/nlp/fasttext)    |  鉁� |   |  |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [GNMT v2](https://gitee.com/mindspore/models/tree/master/official/nlp/gnmt_v2)    |  鉁� |   |  |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [GRU](https://gitee.com/mindspore/models/tree/master/official/nlp/gru)            |  鉁� |   |  |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [MASS](https://gitee.com/mindspore/models/tree/master/official/nlp/mass)    |  鉁� |  鉁� |  |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [SentimentNet](https://gitee.com/mindspore/models/tree/master/official/nlp/lstm)    |  鉁� |  鉁� | 鉁� |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [Transformer](https://gitee.com/mindspore/models/tree/master/official/nlp/transformer)  |  鉁� |  鉁� |  |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [TinyBERT](https://gitee.com/mindspore/models/tree/master/official/nlp/tinybert)   |  鉁� |  鉁� |  |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [TextCNN](https://gitee.com/mindspore/models/tree/master/official/nlp/textcnn)            |  鉁� |   |  |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [CPM](https://gitee.com/mindspore/models/tree/master/official/nlp/cpm)            | 鉁� |   |   |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [ERNIE](https://gitee.com/mindspore/models/tree/master/official/nlp/ernie)            | 鉁� |   |   |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鑷劧璇█鐞嗚В锛圢atural Language Understanding锛�  | [GPT-3](https://gitee.com/mindspore/models/tree/master/official/nlp/gpt)            | 鉁� |   |   |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鎯呮劅鍒嗘瀽锛圗motion Classification) | [EmoTect](https://gitee.com/mindspore/models/tree/master/official/nlp/emotect)                | 鉁� |   |   |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 鎯呮劅鍒嗘瀽锛圗motion Classification) | [LSTM](https://gitee.com/mindspore/models/tree/master/official/nlp/lstm)                | 鉁� |   |   |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 瀵硅瘽绯荤粺锛圖ialogue Generation) | [DGU](https://gitee.com/mindspore/models/tree/master/official/nlp/dgu)                | 鉁� |   |   |
-| 鑷劧璇█澶勭悊锛圢LP锛� | 瀵硅瘽绯荤粺锛圖ialogue Generation) | [DuConv](https://gitee.com/mindspore/models/tree/master/official/nlp/duconv)                | 鉁� |   |   |
-| 鎺ㄨ崘锛圧ecommender锛� | 鎺ㄨ崘绯荤粺銆佺偣鍑荤巼棰勪及锛圧ecommender System, CTR prediction锛�  | [DeepFM](https://gitee.com/mindspore/models/tree/master/official/recommend/deepfm)    |  鉁� |  鉁� | 鉁� |
-| 鎺ㄨ崘锛圧ecommender锛� | 鎺ㄨ崘绯荤粺銆佹悳绱€€佹帓搴忥紙Recommender System, Search, Ranking锛�  | [Wide&Deep](https://gitee.com/mindspore/models/tree/master/official/recommend/wide_and_deep)      |  鉁� |  鉁� |  |
-| 鎺ㄨ崘锛圧ecommender锛� | 鎺ㄨ崘绯荤粺锛圧ecommender System锛�  | [NAML](https://gitee.com/mindspore/models/tree/master/official/recommend/naml)             |  鉁� |   |  |
-| 鎺ㄨ崘锛圧ecommender锛� | 鎺ㄨ崘绯荤粺锛圧ecommender System锛�  | [NCF](https://gitee.com/mindspore/models/tree/master/official/recommend/ncf)    |  鉁� |  |  |
-| 鍥剧缁忕綉缁滐紙GNN锛� | 鏂囨湰鍒嗙被锛圱ext Classification锛�  | [GCN](https://gitee.com/mindspore/models/tree/master/official/gnn/gcn)  |  鉁� |   |  |
-| 鍥剧缁忕綉缁滐紙GNN锛� | 鏂囨湰鍒嗙被锛圱ext Classification锛�  | [GAT](https://gitee.com/mindspore/models/tree/master/official/gnn/gat) |  鉁� |   |  |
-| 鍥剧缁忕綉缁滐紙GNN锛� | 鎺ㄨ崘绯荤粺锛圧ecommender System锛� | [BGCF](https://gitee.com/mindspore/models/tree/master/official/gnn/bgcf) |  鉁� |   |  |
+|:------   |:------| :-----------  |:------:   |:------:  |:-----: |
+| 璇煶 | 澹扮汗璇嗗埆 | [ecapa_tdnn](https://gitee.com/mindspore/models/tree/master/official/audio/ecapa_tdnn) |鉁厊   |   |
+| 璇煶 | 璇煶鍚堟垚 | [lpcnet](https://gitee.com/mindspore/models/tree/master/official/audio/lpcnet) |鉁厊 鉁� |   |
+| 璇煶 | 璇煶鍚堟垚 | [melgan](https://gitee.com/mindspore/models/tree/master/official/audio/melgan) |鉁厊 鉁� |   |
+| 璇煶 | 璇煶鍚堟垚 | [tacotron2](https://gitee.com/mindspore/models/tree/master/research/audio/tacotron2) |鉁厊   |   |
+| 鎺ㄨ崘 | 鎺ㄨ崘绯荤粺 | [bgcf](https://gitee.com/mindspore/models/tree/master/official/gnn/bgcf) |鉁厊 鉁� |   |
+| 鍥剧缁忕綉缁� | 鏂囨湰鍒嗙被 | [gat](https://gitee.com/mindspore/models/tree/master/official/gnn/gat) |鉁厊 鉁� |   |
+| 鍥剧缁忕綉缁� | 鏂囨湰鍒嗙被 | [gcn](https://gitee.com/mindspore/models/tree/master/official/gnn/gcn) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鎺ㄨ崘绯荤粺 | [naml](https://gitee.com/mindspore/models/tree/master/official/recommend/naml) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鎺ㄨ崘绯荤粺 | [ncf](https://gitee.com/mindspore/models/tree/master/official/recommend/ncf) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鎺ㄨ崘绯荤粺 | [tbnet](https://gitee.com/mindspore/models/tree/master/official/recommend/tbnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [alexnet](https://gitee.com/mindspore/models/tree/master/official/cv/alexnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍘诲櫔 | [brdnet](https://gitee.com/mindspore/models/tree/master/official/cv/brdnet) |鉁厊   |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [centerface](https://gitee.com/mindspore/models/tree/master/official/cv/centerface) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [cnn_direction_model](https://gitee.com/mindspore/models/tree/master/official/cv/cnn_direction_model) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鏂囨湰璇嗗埆 | [cnnctc](https://gitee.com/mindspore/models/tree/master/official/cv/cnnctc) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鏂囨湰璇嗗埆 | [crnn](https://gitee.com/mindspore/models/tree/master/official/cv/crnn) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鏂囨湰璇嗗埆 | [crnn_seq2seq_ocr](https://gitee.com/mindspore/models/tree/master/official/cv/crnn_seq2seq_ocr) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [cspdarknet53](https://gitee.com/mindspore/models/tree/master/official/cv/cspdarknet53) |鉁厊   |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [ctpn](https://gitee.com/mindspore/models/tree/master/official/cv/ctpn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [darknet53](https://gitee.com/mindspore/models/tree/master/official/cv/darknet53) | | 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [deeplabv3](https://gitee.com/mindspore/models/tree/master/official/cv/deeplabv3) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鏂囨湰妫€娴� | [deeptext](https://gitee.com/mindspore/models/tree/master/official/cv/deeptext) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [densenet100](https://gitee.com/mindspore/models/tree/master/official/cv/densenet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [densenet121](https://gitee.com/mindspore/models/tree/master/official/cv/densenet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 娣卞害浼拌 | [depthnet](https://gitee.com/mindspore/models/tree/master/official/cv/depthnet) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍘诲櫔 | [dncnn](https://gitee.com/mindspore/models/tree/master/official/cv/dncnn) | | 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [dpn](https://gitee.com/mindspore/models/tree/master/official/cv/dpn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鏂囨湰妫€娴� | [east](https://gitee.com/mindspore/models/tree/master/official/cv/east) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [efficientnet](https://gitee.com/mindspore/models/tree/master/official/cv/efficientnet) | | 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [erfnet](https://gitee.com/mindspore/models/tree/master/official/cv/erfnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鏂囨湰璇嗗埆 | [essay-recogination](https://gitee.com/mindspore/models/tree/master/official/cv/essay-recogination) | | 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [FasterRCNN_Inception_Resnetv2](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [FasterRCNN_ResNetV1.5_50](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [FasterRCNN_ResNetV1_101](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [FasterRCNN_ResNetV1_152](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [FasterRCNN_ResNetV1_50](https://gitee.com/mindspore/models/tree/master/official/cv/faster_rcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [fastscnn](https://gitee.com/mindspore/models/tree/master/official/cv/fastscnn) |鉁厊   |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [FCN8s](https://gitee.com/mindspore/models/tree/master/official/cv/FCN8s) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [googlenet](https://gitee.com/mindspore/models/tree/master/official/cv/googlenet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [inceptionv3](https://gitee.com/mindspore/models/tree/master/official/cv/inceptionv3) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [inceptionv4](https://gitee.com/mindspore/models/tree/master/official/cv/inceptionv4) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍘诲櫔 | [LearningToSeeInTheDark](https://gitee.com/mindspore/models/tree/master/research/cv/LearningToSeeInTheDark) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [lenet](https://gitee.com/mindspore/models/tree/master/official/cv/lenet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [maskrcnn_resnet50](https://gitee.com/mindspore/models/tree/master/official/cv/maskrcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [maskrcnn_mobilenetv1](https://gitee.com/mindspore/models/tree/master/official/cv/maskrcnn_mobilenetv1) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 浜虹兢璁℃暟 | [MCNN](https://gitee.com/mindspore/models/tree/master/official/cv/MCNN) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [mobilenetv1](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv1) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [mobilenetv2](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv2) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [mobilenetv3](https://gitee.com/mindspore/models/tree/master/official/cv/mobilenetv3) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [nasnet](https://gitee.com/mindspore/models/tree/master/official/cv/nasnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚璐ㄩ噺璇勪及 | [nima](https://gitee.com/mindspore/models/tree/master/official/cv/nima) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐐逛簯妯″瀷 | [octsqueeze](https://gitee.com/mindspore/models/tree/master/official/cv/octsqueeze) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍏抽敭鐐规娴� | [openpose](https://gitee.com/mindspore/models/tree/master/official/cv/openpose) |鉁厊   |   |
+| 鍥惧儚 | 缂洪櫡妫€娴� | [patchcore](https://gitee.com/mindspore/models/tree/master/official/cv/patchcore) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩告満閲嶅畾浣� | [posenet](https://gitee.com/mindspore/models/tree/master/official/cv/posenet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 瑙嗛棰勬祴瀛︿範 | [predrnn++](https://gitee.com/mindspore/models/tree/master/official/cv/predrnn++) |鉁厊   |   |
+| 鍥惧儚 | 鏂囨湰妫€娴� | [psenet](https://gitee.com/mindspore/models/tree/master/official/cv/psenet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 濮挎€佷及璁� | [pvnet](https://gitee.com/mindspore/models/tree/master/official/cv/pvnet) |鉁厊   |   |
+| 鍥惧儚 | 鍏夋祦浼拌 | [pwcnet](https://gitee.com/mindspore/models/tree/master/official/cv/pwcnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚瓒呭垎 | [RDN](https://gitee.com/mindspore/models/tree/master/official/cv/RDN) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnet101](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnet152](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnet18](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnet34](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnet50](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnet50_thor](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnext101](https://gitee.com/mindspore/models/tree/master/official/cv/resnext) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnext50](https://gitee.com/mindspore/models/tree/master/official/cv/resnext) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [retinaface_resnet50](https://gitee.com/mindspore/models/tree/master/official/cv/retinaface_resnet50) | | 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [retinanet](https://gitee.com/mindspore/models/tree/master/official/cv/retinanet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [se_resnext50](https://gitee.com/mindspore/models/tree/master/official/cv/se_resnext50) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鎶犲浘 | [semantic_human_matting](https://gitee.com/mindspore/models/tree/master/official/cv/semantic_human_matting) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [se-resnet50](https://gitee.com/mindspore/models/tree/master/official/cv/resnet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [shufflenetv1](https://gitee.com/mindspore/models/tree/master/official/cv/shufflenetv1) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [shufflenetv2](https://gitee.com/mindspore/models/tree/master/official/cv/shufflenetv2) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [simclr](https://gitee.com/mindspore/models/tree/master/official/cv/simclr) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍏抽敭鐐规娴� | [simple_pose](https://gitee.com/mindspore/models/tree/master/official/cv/simple_pose) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [sphereface](https://gitee.com/mindspore/models/tree/master/official/cv/sphereface) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [squeezenet](https://gitee.com/mindspore/models/tree/master/official/cv/squeezenet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [SqueezeNet_Residual](https://gitee.com/mindspore/models/tree/master/official/cv/squeezenet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚瓒呭垎 | [srcnn](https://gitee.com/mindspore/models/tree/master/official/cv/srcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_mobilenet-v1-fpn](https://gitee.com/mindspore/models/tree/master/official/cv/ssd) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_mobilenet-v2](https://gitee.com/mindspore/models/tree/master/official/cv/ssd) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd-resnet50-fpn](https://gitee.com/mindspore/models/tree/master/official/cv/ssd) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd-vgg16](https://gitee.com/mindspore/models/tree/master/official/cv/ssd) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 缂洪櫡妫€娴� | [ssim-ae](https://gitee.com/mindspore/models/tree/master/official/cv/ssim-ae) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [tinydarknet](https://gitee.com/mindspore/models/tree/master/official/cv/tinydarknet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [UNet_nested](https://gitee.com/mindspore/models/tree/master/official/cv/unet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [unet2d](https://gitee.com/mindspore/models/tree/master/official/cv/unet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [unet3d](https://gitee.com/mindspore/models/tree/master/official/cv/unet3d) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [vgg16](https://gitee.com/mindspore/models/tree/master/official/cv/vgg16) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [vit](https://gitee.com/mindspore/models/tree/master/official/cv/vit) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鏂囨湰璇嗗埆 | [warpctc](https://gitee.com/mindspore/models/tree/master/official/cv/warpctc) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [xception](https://gitee.com/mindspore/models/tree/master/official/cv/xception) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [yolov3_darknet53](https://gitee.com/mindspore/models/tree/master/official/cv/yolov3_darknet53) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [yolov3_resnet18](https://gitee.com/mindspore/models/tree/master/official/cv/yolov3_resnet18) |鉁厊   |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [yolov4](https://gitee.com/mindspore/models/tree/master/official/cv/yolov4) |鉁厊   |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [yolov5s](https://gitee.com/mindspore/models/tree/master/official/cv/yolov5) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [deep_and_cross](https://gitee.com/mindspore/models/tree/master/official/recommend/deep_and_cross) | | 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [deepfm](https://gitee.com/mindspore/models/tree/master/official/recommend/deepfm) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [fibinet](https://gitee.com/mindspore/models/tree/master/official/recommend/fibinet) | | 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [wide_and_deep](https://gitee.com/mindspore/models/tree/master/official/recommend/wide_and_deep) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [wide_and_deep_multitable](https://gitee.com/mindspore/models/tree/master/official/recommend/wide_and_deep_multitable) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [bert_base](https://gitee.com/mindspore/models/tree/master/official/nlp/bert) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [bert_bilstm_crf](https://gitee.com/mindspore/models/tree/master/official/nlp/bert) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [bert_finetuning](https://gitee.com/mindspore/models/tree/master/official/nlp/bert) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [bert_large](https://gitee.com/mindspore/models/tree/master/official/nlp/bert_thor) |鉁厊   |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [bert_nezha](https://gitee.com/mindspore/models/tree/master/official/nlp/bert) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [cpm](https://gitee.com/mindspore/models/tree/master/official/nlp/cpm) |鉁厊 鉁� |   |
+| 鏂囨湰 | 瀵硅瘽 | [dgu](https://gitee.com/mindspore/models/tree/master/official/nlp/dgu) |鉁厊 鉁� |   |
+| 鏂囨湰 | 瀵硅瘽 | [duconv](https://gitee.com/mindspore/models/tree/master/official/nlp/duconv) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鎯呯华鍒嗙被 | [emotect](https://gitee.com/mindspore/models/tree/master/official/nlp/emotect) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [ernie](https://gitee.com/mindspore/models/tree/master/official/nlp/ernie) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [fasttext](https://gitee.com/mindspore/models/tree/master/official/nlp/fasttext) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [gnmt_v2](https://gitee.com/mindspore/models/tree/master/official/nlp/gnmt_v2) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [gpt3](https://gitee.com/mindspore/models/tree/master/official/nlp/gpt) |鉁厊   |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [gru](https://gitee.com/mindspore/models/tree/master/official/nlp/gru) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鎯呯华鍒嗙被 | [lstm](https://gitee.com/mindspore/models/tree/master/official/nlp/lstm) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [mass](https://gitee.com/mindspore/models/tree/master/official/nlp/mass) |鉁厊 鉁� |   |
+| 鏂囨湰 | 棰勮缁� | [pangu_alpha](https://gitee.com/mindspore/models/tree/master/official/nlp/pangu_alpha) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [textcnn](https://gitee.com/mindspore/models/tree/master/official/nlp/textcnn) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [tinybert](https://gitee.com/mindspore/models/tree/master/official/nlp/tinybert) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [transformer](https://gitee.com/mindspore/models/tree/master/official/nlp/transformer) |鉁厊 鉁� |   |
+| 瑙嗛 | 鐩爣杩借釜 | [ADNet](https://gitee.com/mindspore/models/tree/master/official/cv/ADNet) |鉁厊   |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [c3d](https://gitee.com/mindspore/models/tree/master/official/cv/c3d) |鉁厊 鉁� |   |
+| 瑙嗛 | 鐩爣杩借釜 | [Deepsort](https://gitee.com/mindspore/models/tree/master/official/cv/Deepsort) |鉁厊 鉁� |   |
 
 ### Research Networks
 
 |  Domain | Sub Domain  | Network   | Ascend | GPU | CPU |
-|:----  |:-------  |:----   |:----:    |:----: |:----: |
-| Computer Vision (CV) | Image Classification | [3D Densenet](https://gitee.com/mindspore/models/tree/master/research/cv/3D_DenseNet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Auto Augment](https://gitee.com/mindspore/models/tree/master/research/cv/autoaugment) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [AVA](https://gitee.com/mindspore/models/tree/master/research/cv/AVA_cifar) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [CCT](https://gitee.com/mindspore/models/tree/master/research/cv/cct) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [dnet-nas](https://gitee.com/mindspore/models/tree/master/research/cv/dnet_nas) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Efficientnet-b0](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b0) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Efficientnet-b1](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b1) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Efficientnet-b2](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b2) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Efficientnet-b3](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b3) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [FDA-BNN](https://gitee.com/mindspore/models/tree/master/research/cv/FDA-BNN) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [fishnet99](https://gitee.com/mindspore/models/tree/master/research/cv/fishnet99) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [GENET](https://gitee.com/mindspore/models/tree/master/research/cv/GENet_Res50) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [GhostNet](https://gitee.com/mindspore/models/tree/master/research/cv/ghostnet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Glore_res200](https://gitee.com/mindspore/models/tree/master/research/cv/glore_res) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Glore_res50](https://gitee.com/mindspore/models/tree/master/research/cv/glore_res) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [HarDNet](https://gitee.com/mindspore/models/tree/master/research/cv/hardnet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [HourNAS](https://gitee.com/mindspore/models/tree/master/research/cv/HourNAS) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [HRNetW48-cls](https://gitee.com/mindspore/models/tree/master/research/cv/HRNetW48_cls) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ibn-net](https://gitee.com/mindspore/models/tree/master/research/cv/ibnnet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Inception ResNet V2](https://gitee.com/mindspore/models/tree/master/research/cv/inception_resnet_v2) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Resnetv2_50_frn](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2_50_frn) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [META-Baseline](https://gitee.com/mindspore/models/tree/master/research/cv/meta-baseline) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [MNasNet](https://gitee.com/mindspore/models/tree/master/research/cv/mnasnet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [MobilenetV3-Large](https://gitee.com/mindspore/models/tree/master/research/cv/mobilenetv3_large) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [MobilenetV3-Small](https://gitee.com/mindspore/models/tree/master/research/cv/mobilenetV3_small_x1_0) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [NFNet-F0](https://gitee.com/mindspore/models/tree/master/research/cv/NFNet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ntsnet](https://gitee.com/mindspore/models/tree/master/research/cv/ntsnet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Pdarts](https://gitee.com/mindspore/models/tree/master/research/cv/PDarts) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [PNASNet-5](https://gitee.com/mindspore/models/tree/master/research/cv/pnasnet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ProtoNet](https://gitee.com/mindspore/models/tree/master/research/cv/ProtoNet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Proxylessnas](https://gitee.com/mindspore/models/tree/master/research/cv/proxylessnas) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [RelationNet](https://gitee.com/mindspore/models/tree/master/research/cv/relationnet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [renas](https://gitee.com/mindspore/models/tree/master/research/cv/renas) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Res2net](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ResNeSt-50](https://gitee.com/mindspore/models/tree/master/research/cv/ResNeSt50) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ResNet50-BAM](https://gitee.com/mindspore/models/tree/master/research/cv/resnet50_bam) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ResNet50-quadruplet](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ResNet50-triplet](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ResNetV2](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [ResNeXt152_vd_64x4d](https://gitee.com/mindspore/models/tree/master/research/cv/resnext152_64x4d) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [SE-Net](https://gitee.com/mindspore/models/tree/master/research/cv/SE-Net) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [SERes2Net50](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [SinglePathNas](https://gitee.com/mindspore/models/tree/master/research/cv/single_path_nas) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [SKNet-50](https://gitee.com/mindspore/models/tree/master/research/cv/sknet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [SPPNet](https://gitee.com/mindspore/models/tree/master/research/cv/SPPNet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [SqueezeNet](https://gitee.com/mindspore/models/tree/master/research/cv/squeezenet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [SqueezeNet1_1](https://gitee.com/mindspore/models/tree/master/research/cv/squeezenet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Swin Transformer](https://gitee.com/mindspore/models/tree/master/research/cv/swin_transformer) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [TNT](https://gitee.com/mindspore/models/tree/master/research/cv/TNT) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [VGG19](https://gitee.com/mindspore/models/tree/master/research/cv/vgg19) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Vit-Base](https://gitee.com/mindspore/models/tree/master/research/cv/vit_base) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [Wide ResNet](https://gitee.com/mindspore/models/tree/master/research/cv/wideresnet) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [FaceAttributes](https://gitee.com/mindspore/models/tree/master/research/cv/FaceAttribute) | ✅ |   |   |
-| Computer Vision (CV) | Image Classification | [FaceQualityAssessment](https://gitee.com/mindspore/models/tree/master/research/cv/FaceQualityAssessment) | ✅ |   |   |
-| Computer Vision (CV) | Re-Identification | [Aligned-ReID](https://gitee.com/mindspore/models/tree/master/research/cv/AlignedReID) | ✅ |   |   |
-| Computer Vision (CV) | Re-Identification | [DDAG](https://gitee.com/mindspore/models/tree/master/research/cv/DDAG) | ✅ |   |   |
-| Computer Vision (CV) | Re-Identification | [MVD](https://gitee.com/mindspore/models/tree/master/research/cv/MVD) | ✅ |   |   |
-| Computer Vision (CV) | Re-Identification | [OSNet](https://gitee.com/mindspore/models/tree/master/research/cv/osnet) | ✅ |   |   |
-| Computer Vision (CV) | Re-Identification | [PAMTRI](https://gitee.com/mindspore/models/tree/master/research/cv/PAMTRI) | ✅ |   |   |
-| Computer Vision (CV) | Re-Identification | [VehicleNet](https://gitee.com/mindspore/models/tree/master/research/cv/VehicleNet) | ✅ |   |   |
-| Computer Vision (CV) | Face Detection | [FaceDetection](https://gitee.com/mindspore/models/tree/master/research/cv/FaceDetection) | ✅ |   |   |
-| Computer Vision (CV) | Face Detection | [FaceBoxes](https://gitee.com/mindspore/models/tree/master/research/cv/faceboxes) | ✅ |   |   |
-| Computer Vision (CV) | Face Detection | [RetinaFace](https://gitee.com/mindspore/models/tree/master/research/cv/retinaface) | ✅ |   |   |
-| Computer Vision (CV) | Face Recognition | [Arcface](https://gitee.com/mindspore/models/tree/master/research/cv/arcface) | ✅ |   |   |
-| Computer Vision (CV) | Face Recognition | [DeepID](https://gitee.com/mindspore/models/tree/master/research/cv/DeepID) | ✅ |   |   |
-| Computer Vision (CV) | Face Recognition | [FaceRecognition](https://gitee.com/mindspore/models/tree/master/research/cv/FaceRecognition) | ✅ |   |   |
-| Computer Vision (CV) | Face Recognition | [FaceRecognitionForTracking](https://gitee.com/mindspore/models/tree/master/research/cv/FaceRecognitionForTracking) | ✅ |   |   |
-| Computer Vision (CV) | Face Recognition | [LightCNN](https://gitee.com/mindspore/models/tree/master/research/cv/LightCNN) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [Spnas](https://gitee.com/mindspore/models/tree/master/research/cv/Spnas) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [SSD-GhostNet](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_ghostnet) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [EGNet](https://gitee.com/mindspore/models/tree/master/research/cv/EGnet) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [FasterRCNN-FPN-DCN](https://gitee.com/mindspore/models/tree/master/research/cv/faster_rcnn_dcn) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [NAS-FPN](https://gitee.com/mindspore/models/tree/master/research/cv/nas-fpn) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [RAS](https://gitee.com/mindspore/models/tree/master/research/cv/ras) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [r-cnn](https://gitee.com/mindspore/models/tree/master/research/cv/rcnn) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [RefineDet](https://gitee.com/mindspore/models/tree/master/research/cv/RefineDet) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [Res2net_fasterrcnn](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_faster_rcnn) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [Res2net_yolov3](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_yolov3) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [Retinanet_resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/retinanet_resnet101) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [SSD_MobilenetV2_fpnlite](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_mobilenetV2_FPNlite) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [ssd_mobilenet_v2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_mobilenetV2) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [ssd_resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet50) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [ssd_inceptionv2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_inception_v2) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [ssd_resnet34](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet34) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [U-2-Net](https://gitee.com/mindspore/models/tree/master/research/cv/u2net) | ✅ |   |   |
-| Computer Vision (CV) | Object Detection | [YOLOV3-tiny](https://gitee.com/mindspore/models/tree/master/research/cv/yolov3_tiny) | ✅ |   |   |
-| Computer Vision (CV) | Object Tracking | [SiamFC](https://gitee.com/mindspore/models/tree/master/research/cv/SiamFC) | ✅ |   |   |
-| Computer Vision (CV) | Object Tracking | [SiamRPN](https://gitee.com/mindspore/models/tree/master/research/cv/siamRPN) | ✅ |   |   |
-| Computer Vision (CV) | Object Tracking | [FairMOT](https://gitee.com/mindspore/models/tree/master/research/cv/fairmot) | ✅ |   |   |
-| Computer Vision (CV) | Key Point Detection | [CenterNet](https://gitee.com/mindspore/models/tree/master/research/cv/centernet) | ✅ |   | ✅ |
-| Computer Vision (CV) | Key Point Detection | [CenterNet-hourglass](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_det) | ✅ |   |   |
-| Computer Vision (CV) | Key Point Detection | [CenterNet-resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_resnet101) | ✅ |   |   |
-| Computer Vision (CV) | Key Point Detection | [CenterNet-resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_resnet50_v1) | ✅ |   |   |
-| Computer Vision (CV) | Point Cloud Model | [PointNet](https://gitee.com/mindspore/models/tree/master/research/cv/pointnet) | ✅ |   |   |
-| Computer Vision (CV) | Point Cloud Model | [PointNet++](https://gitee.com/mindspore/models/tree/master/research/cv/pointnet2) | ✅ |   |   |
-| Computer Vision (CV) | Depth Estimation | [midas](https://gitee.com/mindspore/models/tree/master/research/cv/midas) | ✅ |   |   |
-| Computer Vision (CV) | Sequential Image Classification | [TCN](https://gitee.com/mindspore/models/tree/master/research/cv/TCN) | ✅ |   |   |
-| Computer Vision (CV) | Temporal Localization | [TALL](https://gitee.com/mindspore/models/tree/master/research/cv/tall) | ✅ |   |   |
-| Computer Vision (CV) | Image Matting | [FCA-net](https://gitee.com/mindspore/models/tree/master/research/cv/FCANet) | ✅ |   |   |
-| Computer Vision (CV) | Video Classification | [Attention Cluster](https://gitee.com/mindspore/models/tree/master/research/cv/AttentionCluster) | ✅ |   |   |
-| Computer Vision (CV) | Video Classification | [ECO-lite](https://gitee.com/mindspore/models/tree/master/research/cv/ecolite) | ✅ |   |   |
-| Computer Vision (CV) | Video Classification | [R(2+1)D](https://gitee.com/mindspore/models/tree/master/research/cv/r2plus1d) | ✅ |   |   |
-| Computer Vision (CV) | Video Classification | [Resnet-3D](https://gitee.com/mindspore/models/tree/master/research/cv/resnet3d) | ✅ |   |   |
-| Computer Vision (CV) | Video Classification | [StNet](https://gitee.com/mindspore/models/tree/master/research/cv/stnet) | ✅ |   |   |
-| Computer Vision (CV) | Video Classification | [TSM](https://gitee.com/mindspore/models/tree/master/research/cv/tsm) | ✅ |   |   |
-| Computer Vision (CV) | Video Classification | [TSN](https://gitee.com/mindspore/models/tree/master/research/cv/tsn) | ✅ |   |   |
-| Computer Vision (CV) | Zero-Shot Learning | [DEM](https://gitee.com/mindspore/models/tree/master/research/cv/dem) | ✅ |   |   |
-| Computer Vision (CV) | Style Transfer | [AECRNET](https://gitee.com/mindspore/models/tree/master/research/cv/aecrnet) | ✅ |   |   |
-| Computer Vision (CV) | Style Transfer | [APDrawingGAN](https://gitee.com/mindspore/models/tree/master/research/cv/APDrawingGAN) | ✅ |   |   |
-| Computer Vision (CV) | Style Transfer | [Arbitrary-image-stylization](https://gitee.com/mindspore/models/tree/master/research/cv/ArbitraryStyleTransfer) | ✅ |   |   |
-| Computer Vision (CV) | Style Transfer | [AttGAN](https://gitee.com/mindspore/models/tree/master/research/cv/AttGAN) | ✅ |   |   |
-| Computer Vision (CV) | Style Transfer | [CycleGAN](https://gitee.com/mindspore/models/tree/master/research/cv/CycleGAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [CSD](https://gitee.com/mindspore/models/tree/master/research/cv/csd) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [DBPN](https://gitee.com/mindspore/models/tree/master/research/cv/DBPN) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [EDSR](https://gitee.com/mindspore/models/tree/master/research/cv/EDSR) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [esr-ea](https://gitee.com/mindspore/models/tree/master/research/cv/esr_ea) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [ESRGAN](https://gitee.com/mindspore/models/tree/master/research/cv/ESRGAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [IRN](https://gitee.com/mindspore/models/tree/master/research/cv/IRN) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [RCAN](https://gitee.com/mindspore/models/tree/master/research/cv/RCAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [sr-ea](https://gitee.com/mindspore/models/tree/master/research/cv/sr_ea) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [SRGAN](https://gitee.com/mindspore/models/tree/master/research/cv/SRGAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Super-Resolution | [wdsr](https://gitee.com/mindspore/models/tree/master/research/cv/wdsr) | ✅ |   |   |
-| Computer Vision (CV) | Image Denoising | [Neighbor2Neighbor](https://gitee.com/mindspore/models/tree/master/research/cv/Neighbor2Neighbor) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [CGAN](https://gitee.com/mindspore/models/tree/master/research/cv/CGAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [DCGAN](https://gitee.com/mindspore/models/tree/master/research/cv/dcgan) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [GAN](https://gitee.com/mindspore/models/tree/master/research/cv/gan) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [IPT](https://gitee.com/mindspore/models/tree/master/research/cv/IPT) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [pgan](https://gitee.com/mindspore/models/tree/master/research/cv/PGAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [Photo2Cartoon](https://gitee.com/mindspore/models/tree/master/research/cv/U-GAT-IT) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [Pix2Pix](https://gitee.com/mindspore/models/tree/master/research/cv/Pix2Pix) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [SinGAN](https://gitee.com/mindspore/models/tree/master/research/cv/SinGAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [StarGAN](https://gitee.com/mindspore/models/tree/master/research/cv/StarGAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [STGAN](https://gitee.com/mindspore/models/tree/master/research/cv/STGAN) | ✅ |   |   |
-| Computer Vision (CV) | Image Generation | [WGAN](https://gitee.com/mindspore/models/tree/master/research/cv/wgan) | ✅ |   |   |
-| Computer Vision (CV) | Scene Text Detection | [AdvancedEast](https://gitee.com/mindspore/models/tree/master/research/cv/advanced_east) | ✅ |   |   |
-| Computer Vision (CV) | Scene Text Detection | [TextFuseNet](https://gitee.com/mindspore/models/tree/master/research/cv/textfusenet) | ✅ |   |   |
-| Computer Vision (CV) | Scene Text Recognition | [ManiDP](https://gitee.com/mindspore/models/tree/master/research/cv/ManiDP) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [3d-cnn](https://gitee.com/mindspore/models/tree/master/research/cv/3dcnn) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [adelaide_ea](https://gitee.com/mindspore/models/tree/master/research/cv/adelaide_ea) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [DDRNet](https://gitee.com/mindspore/models/tree/master/research/cv/DDRNet) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [E-Net](https://gitee.com/mindspore/models/tree/master/research/cv/E-NET) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [Hrnet](https://gitee.com/mindspore/models/tree/master/research/cv/HRNetW48_seg) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [ICNet](https://gitee.com/mindspore/models/tree/master/research/cv/ICNet) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [PSPnet](https://gitee.com/mindspore/models/tree/master/research/cv/PSPNet) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [RefineNet](https://gitee.com/mindspore/models/tree/master/research/cv/RefineNet) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [Res2net_deeplabv3](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_deeplabv3) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [UNet 3+](https://gitee.com/mindspore/models/tree/master/research/cv/UNet3+) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [V-net](https://gitee.com/mindspore/models/tree/master/research/cv/vnet) | ✅ |   |   |
-| Computer Vision (CV) | Semantic Segmentation | [Autodeeplab](https://gitee.com/mindspore/models/tree/master/research/cv/Auto-DeepLab) | ✅ |   |   |
-| Computer Vision (CV) | Pose Estimation | [AlphaPose](https://gitee.com/mindspore/models/tree/master/research/cv/AlphaPose) | ✅ |   |   |
-| Computer Vision (CV) | Pose Estimation | [Hourglass](https://gitee.com/mindspore/models/tree/master/research/cv/StackedHourglass) | ✅ |   |   |
-| Computer Vision (CV) | Pose Estimation | [Simple Baseline](https://gitee.com/mindspore/models/tree/master/research/cv/simple_baselines) | ✅ |   |   |
-| Computer Vision (CV) | Image Retrieval | [Delf](https://gitee.com/mindspore/models/tree/master/research/cv/delf) | ✅ |   |   |
-| Natural Language Processing (NLP) | Word Embedding | [Word2Vec Skip-Gram](https://gitee.com/mindspore/models/tree/master/research/nlp/skipgram) | ✅ |   |   |
-| Natural Language Processing (NLP) | Dialogue Generation | [DAM](https://gitee.com/mindspore/models/tree/master/research/nlp/dam) | ✅ |   |   |
-| Natural Language Processing (NLP) | Machine Translation | [Seq2Seq](https://gitee.com/mindspore/models/tree/master/research/nlp/seq2seq) | ✅ |   |   |
-| Natural Language Processing (NLP) | Emotion Classification | [Senta](https://gitee.com/mindspore/models/tree/master/research/nlp/senta) | ✅ |   |   |
-| Natural Language Processing (NLP) | Emotion Classification | [Attention LSTM](https://gitee.com/mindspore/models/tree/master/research/nlp/atae_lstm) | ✅ |   |   |
-| Natural Language Processing (NLP) | Named Entity Recognition | [LSTM_CRF](https://gitee.com/mindspore/models/tree/master/research/nlp/lstm_crf) | ✅ |   |   |
-| Natural Language Processing (NLP) | Text Classification | [HyperText](https://gitee.com/mindspore/models/tree/master/research/nlp/hypertext) | ✅ |   |   |
-| Natural Language Processing (NLP) | Text Classification | [TextRCNN](https://gitee.com/mindspore/models/tree/master/research/nlp/textrcnn) | ✅ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding | [ALBert](https://gitee.com/mindspore/models/tree/master/research/nlp/albert) | ✅ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding | [KT-Net](https://gitee.com/mindspore/models/tree/master/research/nlp/ktnet) | ✅ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding | [LUKE](https://gitee.com/mindspore/models/tree/master/research/nlp/luke) | ✅ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding | [TextRCNN](https://gitee.com/mindspore/models/tree/master/research/nlp/textrcnn) | ✅ |   |   |
-| Natural Language Processing (NLP) | Natural Language Understanding | [TPRR](https://gitee.com/mindspore/models/tree/master/research/nlp/tprr) | ✅ |   |   |
-| Natural Language Processing (NLP) | Knowledge Graph Embedding | [RotatE](https://gitee.com/mindspore/models/tree/master/research/nlp/rotate) | ✅ |   |   |
-| Recommender | Recommender System, CTR prediction | [AutoDis](https://gitee.com/mindspore/models/tree/master/research/recommend/autodis) | ✅ |   |   |
-| Recommender | Recommender System, CTR prediction | [DeepFFM](https://gitee.com/mindspore/models/tree/master/research/recommend/Fat-DeepFFM) | ✅ |   |   |
-| Recommender | Recommender System, CTR prediction | [DIEN](https://gitee.com/mindspore/models/tree/master/research/recommend/DIEN) | ✅ |   |   |
-| Recommender | Recommender System, CTR prediction | [DLRM](https://gitee.com/mindspore/models/tree/master/research/recommend/dlrm) | ✅ |   |   |
-| Recommender | Recommender System, CTR prediction | [EDCN](https://gitee.com/mindspore/models/tree/master/research/recommend/EDCN) | ✅ |   |   |
-| Recommender | Recommender System, CTR prediction | [MMOE](https://gitee.com/mindspore/models/tree/master/research/recommend/mmoe) | ✅ |   |   |
-| Audio | Audio Tagging | [FCN-4](https://gitee.com/mindspore/models/tree/master/research/audio/fcn-4) | ✅ |   |   |
-| Audio | Keyword Spotting | [DS-CNN](https://gitee.com/mindspore/models/tree/master/research/audio/dscnn) | ✅ |   |   |
-| Audio | Speech Recognition | [CTCModel](https://gitee.com/mindspore/models/tree/master/research/audio/ctcmodel) | ✅ |   |   |
-| Audio | Speech Synthesis | [Wavenet](https://gitee.com/mindspore/models/tree/master/research/audio/wavenet) | ✅ |   |   |
-| Graph Neural Network (GNN) | Traffic Prediction | [STGCN](https://gitee.com/mindspore/models/tree/master/research/cv/stgcn) | ✅ |   |   |
-| Graph Neural Network (GNN) | Traffic Prediction | [TGCN](https://gitee.com/mindspore/models/tree/master/research/cv/tgcn) | ✅ |   |   |
-| Graph Neural Network (GNN) | Social and Information Networks | [SGCN](https://gitee.com/mindspore/models/tree/master/research/gnn/sgcn) | ✅ |   |   |
-| Graph Neural Network (GNN) | Graph Classification | [DGCN](https://gitee.com/mindspore/models/tree/master/research/gnn/dgcn) | ✅ |   |   |
-| Graph Neural Network (GNN) | Graph Classification | [SDNE](https://gitee.com/mindspore/models/tree/master/research/gnn/sdne) | ✅ |   |   |
-| High Performance Computing (HPC) | Molecular Dynamics | [DeepPotentialH2O](https://gitee.com/mindspore/models/tree/master/research/hpc/molecular_dynamics) | ✅ |   |   |
-| High Performance Computing (HPC) | Ocean Model | [GOMO](https://gitee.com/mindspore/models/tree/master/research/hpc/ocean_model) |   | ✅ |   |
+|:------   |:------| :-----------  |:------:   |:------:  |:-----: |
+| 3D | 3D Reconstruction | [cmr](https://gitee.com/mindspore/models/tree/master/research/cv/cmr) |   | ✅ |   |
+| 3D | 3D Reconstruction | [DecoMR](https://gitee.com/mindspore/models/tree/master/research/cv/DecoMR) |   | ✅ |   |
+| 3D | 3D Reconstruction | [DeepLM](https://gitee.com/mindspore/models/tree/master/research/3d/DeepLM) |   | ✅ |   |
+| 3D | 3D Reconstruction | [eppmvsnet](https://gitee.com/mindspore/models/tree/master/research/cv/eppmvsnet) |   | ✅ |   |
+| 3D | 3D Object Detection | [pointpillars](https://gitee.com/mindspore/models/tree/master/research/cv/pointpillars) | ✅ | ✅ |   |
+| Audio | Speech Recognition | [ctcmodel](https://gitee.com/mindspore/models/tree/master/research/audio/ctcmodel) | ✅ |   |   |
+| Audio | Speech Recognition | [deepspeech2](https://gitee.com/mindspore/models/tree/master/research/audio/deepspeech2) |   | ✅ |   |
+| Audio | Keyword Spotting | [dscnn](https://gitee.com/mindspore/models/tree/master/research/audio/dscnn) | ✅ | ✅ |   |
+| Audio | Speech Synthesis | [FastSpeech](https://gitee.com/mindspore/models/tree/master/research/audio/FastSpeech) |   | ✅ |   |
+| Audio | Audio Tagging | [fcn-4](https://gitee.com/mindspore/models/tree/master/research/audio/fcn-4) | ✅ | ✅ |   |
+| Audio | Speech Recognition | [jasper](https://gitee.com/mindspore/models/tree/master/research/audio/jasper) | ✅ | ✅ |   |
+| Audio | Speech Synthesis | [wavenet](https://gitee.com/mindspore/models/tree/master/research/audio/wavenet) | ✅ | ✅ |   |
+| Graph Neural Network | Graph Classification | [dgcn](https://gitee.com/mindspore/models/tree/master/research/gnn/dgcn) | ✅ |   |   |
+| Graph Neural Network | Text Classification | [hypertext](https://gitee.com/mindspore/models/tree/master/research/nlp/hypertext) | ✅ | ✅ |   |
+| Graph Neural Network | Graph Classification | [sdne](https://gitee.com/mindspore/models/tree/master/research/gnn/sdne) | ✅ |   |   |
+| Graph Neural Network | Social and Information Networks | [sgcn](https://gitee.com/mindspore/models/tree/master/research/gnn/sgcn) | ✅ | ✅ |   |
+| Graph Neural Network | Text Classification | [textrcnn](https://gitee.com/mindspore/models/tree/master/research/nlp/textrcnn) | ✅ | ✅ |   |
+| High Performance Computing | High Performance Computing | [deepbsde](https://gitee.com/mindspore/models/tree/master/research/hpc/deepbsde) |   | ✅ |   |
+| High Performance Computing | High Performance Computing | [molecular_dynamics](https://gitee.com/mindspore/models/tree/master/research/hpc/molecular_dynamics) | ✅ |   |   |
+| High Performance Computing | High Performance Computing | [ocean_model](https://gitee.com/mindspore/models/tree/master/research/hpc/ocean_model) |   | ✅ |   |
+| High Performance Computing | High Performance Computing | [pafnucy](https://gitee.com/mindspore/models/tree/master/research/hpc/pafnucy) | ✅ | ✅ |   |
+| High Performance Computing | High Performance Computing | [pfnn](https://gitee.com/mindspore/models/tree/master/research/hpc/pfnn) |   | ✅ |   |
+| High Performance Computing | High Performance Computing | [pinns](https://gitee.com/mindspore/models/tree/master/research/hpc/pinns) |   | ✅ |   |
+| Image | Image Classification | [3D_DenseNet](https://gitee.com/mindspore/models/tree/master/research/cv/3D_DenseNet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [3dcnn](https://gitee.com/mindspore/models/tree/master/research/cv/3dcnn) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [adelaide_ea](https://gitee.com/mindspore/models/tree/master/research/cv/adelaide_ea) | ✅ |   |   |
+| Image | Text Detection | [advanced_east](https://gitee.com/mindspore/models/tree/master/research/cv/advanced_east) | ✅ | ✅ |   |
+| Image | Style Transfer | [aecrnet](https://gitee.com/mindspore/models/tree/master/research/cv/aecrnet) | ✅ | ✅ |   |
+| Image | Re-Identification | [AlignedReID](https://gitee.com/mindspore/models/tree/master/research/cv/AlignedReID) |   | ✅ |   |
+| Image | Re-Identification | [AlignedReID++](https://gitee.com/mindspore/models/tree/master/research/cv/AlignedReID++) | ✅ | ✅ |   |
+| Image | Pose Estimation | [AlphaPose](https://gitee.com/mindspore/models/tree/master/research/cv/AlphaPose) | ✅ |   |   |
+| Image | Style Transfer | [APDrawingGAN](https://gitee.com/mindspore/models/tree/master/research/cv/APDrawingGAN) | ✅ | ✅ |   |
+| Image | Style Transfer | [ArbitraryStyleTransfer](https://gitee.com/mindspore/models/tree/master/research/cv/ArbitraryStyleTransfer) | ✅ | ✅ |   |
+| Image | Object Detection | [arcface](https://gitee.com/mindspore/models/tree/master/research/cv/arcface) | ✅ | ✅ |   |
+| Image | Keypoint Detection | [ArtTrack](https://gitee.com/mindspore/models/tree/master/research/cv/ArtTrack) |   | ✅ |   |
+| Image | Style Transfer | [AttGAN](https://gitee.com/mindspore/models/tree/master/research/cv/AttGAN) | ✅ | ✅ |   |
+| Image | Image Classification | [augvit](https://gitee.com/mindspore/models/tree/master/research/cv/augvit) |   | ✅ |   |
+| Image | Image Classification | [autoaugment](https://gitee.com/mindspore/models/tree/master/research/cv/autoaugment) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [Auto-DeepLab](https://gitee.com/mindspore/models/tree/master/research/cv/Auto-DeepLab) | ✅ |   |   |
+| Image | Neural Architecture Search | [AutoSlim](https://gitee.com/mindspore/models/tree/master/research/cv/AutoSlim) | ✅ | ✅ |   |
+| Image | Image Classification | [AVA_cifar](https://gitee.com/mindspore/models/tree/master/research/cv/AVA_cifar) | ✅ | ✅ |   |
+| Image | Image Classification | [AVA_hpa](https://gitee.com/mindspore/models/tree/master/research/cv/AVA_hpa) | ✅ | ✅ |   |
+| Image | Image Classification | [cait](https://gitee.com/mindspore/models/tree/master/research/cv/cait) | ✅ | ✅ |   |
+| Image | Object Detection | [CascadeRCNN](https://gitee.com/mindspore/models/tree/master/research/cv/CascadeRCNN) | ✅ | ✅ |   |
+| Image | Image Classification | [CBAM](https://gitee.com/mindspore/models/tree/master/research/cv/CBAM) | ✅ |   |   |
+| Image | Image Classification | [cct](https://gitee.com/mindspore/models/tree/master/research/cv/cct) | ✅ | ✅ |   |
+| Image | Keypoint Detection | [centernet](https://gitee.com/mindspore/models/tree/master/research/cv/centernet) | ✅ |   | ✅ |
+| Image | Keypoint Detection | [centernet_det](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_det) | ✅ |   |   |
+| Image | Keypoint Detection | [centernet_resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_resnet101) | ✅ | ✅ |   |
+| Image | Keypoint Detection | [centernet_resnet50_v1](https://gitee.com/mindspore/models/tree/master/research/cv/centernet_resnet50_v1) | ✅ |   |   |
+| Image | Image Generation | [CGAN](https://gitee.com/mindspore/models/tree/master/research/cv/CGAN) | ✅ | ✅ |   |
+| Image | Image Classification | [convnext](https://gitee.com/mindspore/models/tree/master/research/cv/convnext) | ✅ | ✅ |   |
+| Image | Image Super-Resolution | [csd](https://gitee.com/mindspore/models/tree/master/research/cv/csd) | ✅ | ✅ |   |
+| Image | Image Generation | [CTSDG](https://gitee.com/mindspore/models/tree/master/research/cv/CTSDG) | ✅ | ✅ |   |
+| Image | Style Transfer | [CycleGAN](https://gitee.com/mindspore/models/tree/master/research/cv/CycleGAN) | ✅ | ✅ |   |
+| Image | Image Super-Resolution | [DBPN](https://gitee.com/mindspore/models/tree/master/research/cv/DBPN) | ✅ |   |   |
+| Image | Image Super-Resolution | [DBPN_GAN](https://gitee.com/mindspore/models/tree/master/research/cv/DBPN) | ✅ |   |   |
+| Image | Image Generation | [dcgan](https://gitee.com/mindspore/models/tree/master/research/cv/dcgan) | ✅ | ✅ |   |
+| Image | Re-Identification | [DDAG](https://gitee.com/mindspore/models/tree/master/research/cv/DDAG) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [DDM](https://gitee.com/mindspore/models/tree/master/research/cv/DDM) | ✅ |   |   |
+| Image | Semantic Segmentation | [DDRNet](https://gitee.com/mindspore/models/tree/master/research/cv/DDRNet) | ✅ | ✅ |   |
+| Image | Object Detection | [DeepID](https://gitee.com/mindspore/models/tree/master/research/cv/DeepID) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [deeplabv3plus](https://gitee.com/mindspore/models/tree/master/research/cv/deeplabv3plus) | ✅ | ✅ |   |
+| Image | Image Retrieval | [delf](https://gitee.com/mindspore/models/tree/master/research/cv/delf) | ✅ |   |   |
+| Image | Zero-Shot Learning | [dem](https://gitee.com/mindspore/models/tree/master/research/cv/dem) | ✅ | ✅ |   |
+| Image | Object Detection | [detr](https://gitee.com/mindspore/models/tree/master/research/cv/detr) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [dgcnet_res101](https://gitee.com/mindspore/models/tree/master/research/cv/dgcnet_res101) |   | ✅ |   |
+| Image | Instance Segmentation | [dlinknet](https://gitee.com/mindspore/models/tree/master/research/cv/dlinknet) | ✅ |   |   |
+| Image | Image Denoising | [DnCNN](https://gitee.com/mindspore/models/tree/master/research/cv/DnCNN) | ✅ |   |   |
+| Image | Image Classification | [dnet_nas](https://gitee.com/mindspore/models/tree/master/research/cv/dnet_nas) | ✅ |   |   |
+| Image | Image Classification | [DRNet](https://gitee.com/mindspore/models/tree/master/research/cv/DRNet) | ✅ | ✅ |   |
+| Image | Image Super-Resolution | [EDSR](https://gitee.com/mindspore/models/tree/master/research/cv/EDSR) | ✅ |   |   |
+| Image | Object Detection | [EfficientDet_d0](https://gitee.com/mindspore/models/tree/master/research/cv/EfficientDet_d0) | ✅ |   |   |
+| Image | Image Classification | [efficientnet-b0](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b0) | ✅ |   |   |
+| Image | Image Classification | [efficientnet-b1](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b1) | ✅ |   |   |
+| Image | Image Classification | [efficientnet-b2](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b2) | ✅ | ✅ |   |
+| Image | Image Classification | [efficientnet-b3](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnet-b3) | ✅ | ✅ |   |
+| Image | Image Classification | [efficientnetv2](https://gitee.com/mindspore/models/tree/master/research/cv/efficientnetv2) | ✅ |   |   |
+| Image | Saliency Detection | [EGnet](https://gitee.com/mindspore/models/tree/master/research/cv/EGnet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [E-NET](https://gitee.com/mindspore/models/tree/master/research/cv/E-NET) | ✅ | ✅ |   |
+| Image | Image Super-Resolution | [esr_ea](https://gitee.com/mindspore/models/tree/master/research/cv/esr_ea) | ✅ | ✅ |   |
+| Image | Image Super-Resolution | [ESRGAN](https://gitee.com/mindspore/models/tree/master/research/cv/ESRGAN) | ✅ | ✅ |   |
+| Image | Image Classification | [FaceAttribute](https://gitee.com/mindspore/models/tree/master/research/cv/FaceAttribute) | ✅ | ✅ |   |
+| Image | Object Detection | [faceboxes](https://gitee.com/mindspore/models/tree/master/research/cv/faceboxes) | ✅ |   |   |
+| Image | Object Detection | [FaceDetection](https://gitee.com/mindspore/models/tree/master/research/cv/FaceDetection) | ✅ | ✅ |   |
+| Image | Face Recognition | [FaceNet](https://gitee.com/mindspore/models/tree/master/research/cv/FaceNet) | ✅ | ✅ |   |
+| Image | Image Classification | [FaceQualityAssessment](https://gitee.com/mindspore/models/tree/master/research/cv/FaceQualityAssessment) | ✅ | ✅ | ✅ |
+| Image | Object Detection | [FaceRecognition](https://gitee.com/mindspore/models/tree/master/research/cv/FaceRecognition) | ✅ | ✅ |   |
+| Image | Object Detection | [FaceRecognitionForTracking](https://gitee.com/mindspore/models/tree/master/research/cv/FaceRecognitionForTracking) | ✅ | ✅ | ✅ |
+| Image | Object Detection | [faster_rcnn_dcn](https://gitee.com/mindspore/models/tree/master/research/cv/faster_rcnn_dcn) | ✅ | ✅ |   |
+| Image | Image Matting | [FCANet](https://gitee.com/mindspore/models/tree/master/research/cv/FCANet) | ✅ |   |   |
+| Image | Image Classification | [FDA-BNN](https://gitee.com/mindspore/models/tree/master/research/cv/FDA-BNN) | ✅ | ✅ |   |
+| Image | Image Classification | [fishnet99](https://gitee.com/mindspore/models/tree/master/research/cv/fishnet99) | ✅ | ✅ |   |
+| Image | Optical Flow Estimation | [flownet2](https://gitee.com/mindspore/models/tree/master/research/cv/flownet2) | ✅ |   |   |
+| Image | Image Generation | [gan](https://gitee.com/mindspore/models/tree/master/research/cv/gan) | ✅ | ✅ |   |
+| Image | Image Classification | [GENet_Res50](https://gitee.com/mindspore/models/tree/master/research/cv/GENet_Res50) | ✅ |   |   |
+| Image | Image Classification | [ghostnet](https://gitee.com/mindspore/models/tree/master/research/cv/ghostnet) | ✅ |   |   |
+| Image | Image Classification | [ghostnet_d](https://gitee.com/mindspore/models/tree/master/research/cv/ghostnet_d) | ✅ | ✅ |   |
+| Image | Image Classification | [glore_res200](https://gitee.com/mindspore/models/tree/master/research/cv/glore_res) | ✅ | ✅ |   |
+| Image | Image Classification | [glore_res50](https://gitee.com/mindspore/models/tree/master/research/cv/glore_res) | ✅ | ✅ |   |
+| Image | Image Classification | [hardnet](https://gitee.com/mindspore/models/tree/master/research/cv/hardnet) | ✅ | ✅ |   |
+| Image | Edge Detection | [hed](https://gitee.com/mindspore/models/tree/master/research/cv/hed) | ✅ | ✅ |   |
+| Image | Image Generation | [HiFaceGAN](https://gitee.com/mindspore/models/tree/master/research/cv/HiFaceGAN) |   | ✅ |   |
+| Image | Image Classification | [HourNAS](https://gitee.com/mindspore/models/tree/master/research/cv/HourNAS) |   | ✅ |   |
+| Image | Image Classification | [HRNetW48_cls](https://gitee.com/mindspore/models/tree/master/research/cv/HRNetW48_cls) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [HRNetW48_seg](https://gitee.com/mindspore/models/tree/master/research/cv/HRNetW48_seg) | ✅ |   |   |
+| Image | Image Classification | [ibnnet](https://gitee.com/mindspore/models/tree/master/research/cv/ibnnet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [ICNet](https://gitee.com/mindspore/models/tree/master/research/cv/ICNet) | ✅ |   |   |
+| Image | Image Classification | [inception_resnet_v2](https://gitee.com/mindspore/models/tree/master/research/cv/inception_resnet_v2) | ✅ | ✅ |   |
+| Image | Image Classification | [Inceptionv2](https://gitee.com/mindspore/models/tree/master/research/cv/Inception-v2) | ✅ | ✅ |   |
+| Image | Image Matting | [IndexNet](https://gitee.com/mindspore/models/tree/master/research/cv/IndexNet) |   | ✅ |   |
+| Image | Image Generation | [IPT](https://gitee.com/mindspore/models/tree/master/research/cv/IPT) | ✅ |   |   |
+| Image | Image Super-Resolution | [IRN](https://gitee.com/mindspore/models/tree/master/research/cv/IRN) | ✅ | ✅ |   |
+| Image | Image Classification | [ISyNet](https://gitee.com/mindspore/models/tree/master/research/cv/ISyNet) | ✅ | ✅ |   |
+| Image | Image Classification | [ivpf](https://gitee.com/mindspore/models/tree/master/research/cv/ivpf) |   | ✅ |   |
+| Image | Image Denoising | [LearningToSeeInTheDark](https://gitee.com/mindspore/models/tree/master/research/cv/LearningToSeeInTheDark) | ✅ |   |   |
+| Image | Meta Learning | [LEO](https://gitee.com/mindspore/models/tree/master/research/cv/LEO) | ✅ | ✅ |   |
+| Image | Object Detection | [LightCNN](https://gitee.com/mindspore/models/tree/master/research/cv/LightCNN) | ✅ | ✅ | ✅ |
+| Image | Image Super-Resolution | [lite-hrnet](https://gitee.com/mindspore/models/tree/master/research/cv/lite-hrnet) |   | ✅ |   |
+| Image | Image Classification | [lresnet100e_ir](https://gitee.com/mindspore/models/tree/master/research/cv/lresnet100e_ir) |   | ✅ |   |
+| Image | Object Detection | [m2det](https://gitee.com/mindspore/models/tree/master/research/cv/m2det) |   | ✅ |   |
+| Image | Autoencoder | [mae](https://gitee.com/mindspore/models/tree/master/research/cv/mae) | ✅ | ✅ |   |
+| Image | Meta Learning | [MAML](https://gitee.com/mindspore/models/tree/master/research/cv/MAML) | ✅ | ✅ |   |
+| Image | Text Recognition | [ManiDP](https://gitee.com/mindspore/models/tree/master/research/cv/ManiDP) |   | ✅ |   |
+| Image | Face Recognition | [MaskedFaceRecognition](https://gitee.com/mindspore/models/tree/master/research/cv/MaskedFaceRecognition) | ✅ |   |   |
+| Image | Meta Learning | [meta-baseline](https://gitee.com/mindspore/models/tree/master/research/cv/meta-baseline) | ✅ | ✅ |   |
+| Image | Re-Identification | [MGN](https://gitee.com/mindspore/models/tree/master/research/cv/MGN) | ✅ | ✅ |   |
+| Image | Depth Estimation | [midas](https://gitee.com/mindspore/models/tree/master/research/cv/midas) | ✅ | ✅ |   |
+| Image | Image Denoising | [MIMO-UNet](https://gitee.com/mindspore/models/tree/master/research/cv/MIMO-UNet) |   | ✅ |   |
+| Image | Image Classification | [mnasnet](https://gitee.com/mindspore/models/tree/master/research/cv/mnasnet) | ✅ | ✅ |   |
+| Image | Image Classification | [mobilenetv3_large](https://gitee.com/mindspore/models/tree/master/research/cv/mobilenetv3_large) | ✅ |   | ✅ |
+| Image | Image Classification | [mobilenetV3_small_x1_0](https://gitee.com/mindspore/models/tree/master/research/cv/mobilenetV3_small_x1_0) | ✅ | ✅ | ✅ |
+| Image | Image Classification | [MultiTaskNet](https://gitee.com/mindspore/models/tree/master/research/cv/PAMTRI) | ✅ | ✅ |   |
+| Image | Re-Identification | [MVD](https://gitee.com/mindspore/models/tree/master/research/cv/MVD) | ✅ | ✅ |   |
+| Image | Object Detection | [nas-fpn](https://gitee.com/mindspore/models/tree/master/research/cv/nas-fpn) | ✅ |   |   |
+| Image | Image Denoising | [Neighbor2Neighbor](https://gitee.com/mindspore/models/tree/master/research/cv/Neighbor2Neighbor) | ✅ | ✅ |   |
+| Image | Image Classification | [NFNet](https://gitee.com/mindspore/models/tree/master/research/cv/NFNet) | ✅ | ✅ |   |
+| Image | Image Quality Assessment | [nima_vgg16](https://gitee.com/mindspore/models/tree/master/research/cv/nima_vgg16) |   | ✅ |   |
+| Image | Semantic Segmentation | [nnUNet](https://gitee.com/mindspore/models/tree/master/research/cv/nnUNet) | ✅ | ✅ |   |
+| Image | Image Classification | [ntsnet](https://gitee.com/mindspore/models/tree/master/research/cv/ntsnet) | ✅ | ✅ |   |
+| Image | Semantic Segmentation | [OCRNet](https://gitee.com/mindspore/models/tree/master/research/cv/OCRNet) | ✅ | ✅ |   |
+| Image | Re-Identification | [osnet](https://gitee.com/mindspore/models/tree/master/research/cv/osnet) | ✅ | ✅ |   |
+| Image | Saliency Detection | [PAGENet](https://gitee.com/mindspore/models/tree/master/research/cv/PAGENet) | ✅ | ✅ |   |
+| Image | Image Retrieval | [pcb](https://gitee.com/mindspore/models/tree/master/research/cv/pcb_rpp) |   | ✅ |   |
+| 鍥惧儚 | 鍥惧儚妫€绱� | [pcb](https://gitee.com/mindspore/models/tree/master/research/cv/pcb_rpp) | | 鉁� |   |
+| 鍥惧儚 | 鍥惧儚妫€绱� | [pcb_rpp](https://gitee.com/mindspore/models/tree/master/research/cv/pcb_rpp) | | 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [PDarts](https://gitee.com/mindspore/models/tree/master/research/cv/PDarts) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鐢熸垚 | [PGAN](https://gitee.com/mindspore/models/tree/master/research/cv/PGAN) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鐢熸垚 | [Pix2Pix](https://gitee.com/mindspore/models/tree/master/research/cv/Pix2Pix) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚瓒呭垎 | [Pix2PixHD](https://gitee.com/mindspore/models/tree/master/research/cv/Pix2PixHD) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [pnasnet](https://gitee.com/mindspore/models/tree/master/research/cv/pnasnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐐逛簯妯″瀷 | [pointnet](https://gitee.com/mindspore/models/tree/master/research/cv/pointnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐐逛簯妯″瀷 | [pointnet2](https://gitee.com/mindspore/models/tree/master/research/cv/pointnet2) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [PoseEstNet](https://gitee.com/mindspore/models/tree/master/research/cv/PAMTRI) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ProtoNet](https://gitee.com/mindspore/models/tree/master/research/cv/ProtoNet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [proxylessnas](https://gitee.com/mindspore/models/tree/master/research/cv/proxylessnas) |鉁厊 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [PSPNet](https://gitee.com/mindspore/models/tree/master/research/cv/PSPNet) |鉁厊   |   |
+| 鍥惧儚 | 鏄捐憲鎬ф娴� | [ras](https://gitee.com/mindspore/models/tree/master/research/cv/ras) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚瓒呭垎 | [RCAN](https://gitee.com/mindspore/models/tree/master/research/cv/RCAN) |鉁厊   |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [rcnn](https://gitee.com/mindspore/models/tree/master/research/cv/rcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚瓒呭垎 | [REDNet30](https://gitee.com/mindspore/models/tree/master/research/cv/REDNet30) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [RefineDet](https://gitee.com/mindspore/models/tree/master/research/cv/RefineDet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [RefineNet](https://gitee.com/mindspore/models/tree/master/research/cv/RefineNet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 閲嶆柊璇嗗埆 | [ReIDStrongBaseline](https://gitee.com/mindspore/models/tree/master/research/cv/ReIDStrongBaseline) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [relationnet](https://gitee.com/mindspore/models/tree/master/research/cv/relationnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [renas](https://gitee.com/mindspore/models/tree/master/research/cv/renas) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [repvgg](https://gitee.com/mindspore/models/tree/master/research/cv/repvgg) |鉁厊 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [res2net_deeplabv3](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_deeplabv3) |鉁厊   | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [res2net_faster_rcnn](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_faster_rcnn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [res2net_yolov3](https://gitee.com/mindspore/models/tree/master/research/cv/res2net_yolov3) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [res2net101](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [res2net152](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [res2net50](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ResNeSt50](https://gitee.com/mindspore/models/tree/master/research/cv/ResNeSt50) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnet50_adv_pruning](https://gitee.com/mindspore/models/tree/master/research/cv/resnet50_adv_pruning) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnet50_bam](https://gitee.com/mindspore/models/tree/master/research/cv/resnet50_bam) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ResNet50-Quadruplet](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ResNet50-Triplet](https://gitee.com/mindspore/models/tree/master/research/cv/metric_learn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ResnetV2_101](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ResnetV2_152](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ResnetV2_50](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnetv2_50_frn](https://gitee.com/mindspore/models/tree/master/research/cv/resnetv2_50_frn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [resnext152_64x4d](https://gitee.com/mindspore/models/tree/master/research/cv/resnext152_64x4d) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [retinaface_mobilenet0.25](https://gitee.com/mindspore/models/tree/master/research/cv/retinaface) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [retinanet_resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/retinanet_resnet101) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [retinanet_resnet152](https://gitee.com/mindspore/models/tree/master/research/cv/retinanet_resnet152) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [rfcn](https://gitee.com/mindspore/models/tree/master/research/cv/rfcn) | | 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [SE_ResNeXt50](https://gitee.com/mindspore/models/tree/master/research/cv/SE_ResNeXt50) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [senet_resnet101](https://gitee.com/mindspore/models/tree/master/research/cv/SE-Net) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [senet_resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/SE-Net) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [se-res2net50](https://gitee.com/mindspore/models/tree/master/research/cv/res2net) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [S-GhostNet](https://gitee.com/mindspore/models/tree/master/research/cv/S-GhostNet) |鉁厊   |   |
+| 鍥惧儚 | 濮挎€佷及璁� | [simple_baselines](https://gitee.com/mindspore/models/tree/master/research/cv/simple_baselines) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鐢熸垚 | [SinGAN](https://gitee.com/mindspore/models/tree/master/research/cv/SinGAN) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [single_path_nas](https://gitee.com/mindspore/models/tree/master/research/cv/single_path_nas) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [sknet](https://gitee.com/mindspore/models/tree/master/research/cv/sknet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [snn_mlp](https://gitee.com/mindspore/models/tree/master/research/cv/snn_mlp) | | 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [Spnas](https://gitee.com/mindspore/models/tree/master/research/cv/Spnas) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [SPPNet](https://gitee.com/mindspore/models/tree/master/research/cv/SPPNet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [squeezenet1_1](https://gitee.com/mindspore/models/tree/master/research/cv/squeezenet1_1) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚瓒呭垎 | [sr_ea](https://gitee.com/mindspore/models/tree/master/research/cv/sr_ea) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚瓒呭垎 | [SRGAN](https://gitee.com/mindspore/models/tree/master/research/cv/SRGAN) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ssc_resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/ssc_resnet50) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_ghostnet](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_ghostnet) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_inception_v2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_inception_v2) | | 鉁� | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_inceptionv2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_inceptionv2) |鉁厊   |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_mobilenetV2](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_mobilenetV2) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_mobilenetV2_FPNlite](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_mobilenetV2_FPNlite) |鉁厊 鉁� | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_resnet_34](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet_34) | | 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_resnet34](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet34) |鉁厊   | 鉁� |
+| 鍥惧儚 | 鐩爣妫€娴� | [ssd_resnet50](https://gitee.com/mindspore/models/tree/master/research/cv/ssd_resnet50) |鉁厊   |   |
+| 鍥惧儚 | 濮挎€佷及璁� | [StackedHourglass](https://gitee.com/mindspore/models/tree/master/research/cv/StackedHourglass) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鐢熸垚 | [StarGAN](https://gitee.com/mindspore/models/tree/master/research/cv/StarGAN) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鐢熸垚 | [STGAN](https://gitee.com/mindspore/models/tree/master/research/cv/STGAN) |鉁厊 鉁� |   |
+| 鍥惧儚 | 浜ら€氶娴� | [stgcn](https://gitee.com/mindspore/models/tree/master/research/cv/stgcn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [stpm](https://gitee.com/mindspore/models/tree/master/research/cv/stpm) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [swin_transformer](https://gitee.com/mindspore/models/tree/master/research/cv/swin_transformer) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鏃堕棿瀹氫綅 | [tall](https://gitee.com/mindspore/models/tree/master/research/cv/tall) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [TCN](https://gitee.com/mindspore/models/tree/master/research/cv/TCN) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鏂囨湰妫€娴� | [textfusenet](https://gitee.com/mindspore/models/tree/master/research/cv/textfusenet) |鉁厊   |   |
+| 鍥惧儚 | 浜ら€氶娴� | [tgcn](https://gitee.com/mindspore/models/tree/master/research/cv/tgcn) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [tinynet](https://gitee.com/mindspore/models/tree/master/research/cv/tinynet) | | 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [TNT](https://gitee.com/mindspore/models/tree/master/research/cv/TNT) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [u2net](https://gitee.com/mindspore/models/tree/master/research/cv/u2net) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鐢熸垚 | [U-GAT-IT](https://gitee.com/mindspore/models/tree/master/research/cv/U-GAT-IT) |鉁厊 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [UNet3+](https://gitee.com/mindspore/models/tree/master/research/cv/UNet3+) |鉁厊 鉁� |   |
+| 鍥惧儚 | 閲嶆柊璇嗗埆 | [VehicleNet](https://gitee.com/mindspore/models/tree/master/research/cv/VehicleNet) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [vgg19](https://gitee.com/mindspore/models/tree/master/research/cv/vgg19) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [ViG](https://gitee.com/mindspore/models/tree/master/research/cv/ViG) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [vit_cifar](https://gitee.com/mindspore/models/tree/master/research/cv/vit_base) |鉁厊 鉁� |   |
+| 鍥惧儚 | 璇箟鍒嗗壊 | [vnet](https://gitee.com/mindspore/models/tree/master/research/cv/vnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [wave_mlp](https://gitee.com/mindspore/models/tree/master/research/cv/wave_mlp) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚瓒呭垎 | [wdsr](https://gitee.com/mindspore/models/tree/master/research/cv/wdsr) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鍥惧儚鐢熸垚 | [wgan](https://gitee.com/mindspore/models/tree/master/research/cv/wgan) |鉁厊   |   |
+| 鍥惧儚 | 鍥惧儚鍒嗙被 | [wideresnet](https://gitee.com/mindspore/models/tree/master/research/cv/wideresnet) |鉁厊 鉁� |   |
+| 鍥惧儚 | 瀹炰緥鍒嗗壊 | [Yolact++](https://gitee.com/mindspore/models/tree/master/research/cv/Yolact++) |鉁厊   |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [yolov3_tiny](https://gitee.com/mindspore/models/tree/master/research/cv/yolov3_tiny) |鉁厊 鉁� |   |
+| 鍥惧儚 | 鐩爣妫€娴� | [yolox](https://gitee.com/mindspore/models/tree/master/research/cv/yolox) |鉁厊   |   |
+| 澶氭ā鎬� | 澶氭ā鎬� | [opt](https://gitee.com/mindspore/models/tree/master/research/mm/opt) |鉁厊 鉁� |   |
+| 澶氭ā鎬� | 澶氭ā鎬� | [TokenFusion](https://gitee.com/mindspore/models/tree/master/research/cv/TokenFusion) |鉁厊 鉁� |   |
+| 澶氭ā鎬� | 澶氭ā鎬� | [wukong](https://gitee.com/mindspore/models/tree/master/research/mm/wukong) |鉁厊   |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [autodis](https://gitee.com/mindspore/models/tree/master/research/recommend/autodis) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [DIEN](https://gitee.com/mindspore/models/tree/master/research/recommend/DIEN) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [dlrm](https://gitee.com/mindspore/models/tree/master/research/recommend/dlrm) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [EDCN](https://gitee.com/mindspore/models/tree/master/research/recommend/EDCN) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [Fat-DeepFFM](https://gitee.com/mindspore/models/tree/master/research/recommend/Fat-DeepFFM) |鉁厊 鉁� |   |
+| 鎺ㄨ崘 | 鐐瑰嚮鐜囬娴� | [mmoe](https://gitee.com/mindspore/models/tree/master/research/recommend/mmoe) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [albert](https://gitee.com/mindspore/models/tree/master/research/nlp/albert) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鎯呯华鍒嗙被 | [atae_lstm](https://gitee.com/mindspore/models/tree/master/research/nlp/atae_lstm) |鉁厊 鉁� |   |
+| 鏂囨湰 | 瀵硅瘽 | [dam](https://gitee.com/mindspore/models/tree/master/research/nlp/dam) |鉁厊   |   |
+| 鏂囨湰 | 璇█妯″瀷 | [gpt2](https://gitee.com/mindspore/models/tree/master/research/nlp/gpt2) |鉁厊   |   |
+| 鏂囨湰 | 鐭ヨ瘑鍥惧祵鍏� | [hake](https://gitee.com/mindspore/models/tree/master/research/nlp/hake) | | 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [ktnet](https://gitee.com/mindspore/models/tree/master/research/nlp/ktnet) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鍛藉悕瀹炰綋璇嗗埆 | [lstm_crf](https://gitee.com/mindspore/models/tree/master/research/nlp/lstm_crf) |鉁厊   |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [luke](https://gitee.com/mindspore/models/tree/master/research/nlp/luke) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鐭ヨ瘑鍥惧祵鍏� | [rotate](https://gitee.com/mindspore/models/tree/master/research/nlp/rotate) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鎯呯华鍒嗙被 | [senta](https://gitee.com/mindspore/models/tree/master/research/nlp/senta) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鏈哄櫒缈昏瘧 | [seq2seq](https://gitee.com/mindspore/models/tree/master/research/nlp/seq2seq) |鉁厊   |   |
+| 鏂囨湰 | 璇嶅祵鍏� | [skipgram](https://gitee.com/mindspore/models/tree/master/research/nlp/skipgram) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鏈哄櫒缈昏瘧 | [speech_transformer](https://gitee.com/mindspore/models/tree/master/research/nlp/speech_transformer) |鉁厊   |   |
+| 鏂囨湰 | 棰勮缁� | [ternarybert](https://gitee.com/mindspore/models/tree/master/research/nlp/ternarybert) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [tprr](https://gitee.com/mindspore/models/tree/master/research/nlp/tprr) |鉁厊   |   |
+| 鏂囨湰 | 鑷劧璇█鐞嗚В | [transformer_xl](https://gitee.com/mindspore/models/tree/master/research/nlp/transformer_xl) |鉁厊 鉁� |   |
+| 鏂囨湰 | 鐭ヨ瘑鍥惧祵鍏� | [transX](https://gitee.com/mindspore/models/tree/master/research/nlp/transX) | | 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [AttentionCluster](https://gitee.com/mindspore/models/tree/master/research/cv/AttentionCluster) |鉁厊 鉁� |   |
+| 瑙嗛 | 鍏朵粬 | [DYR](https://gitee.com/mindspore/models/tree/master/research/nlp/DYR) |鉁厊   |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [ecolite](https://gitee.com/mindspore/models/tree/master/research/cv/ecolite) |鉁厊   |   |
+| 瑙嗛 | 鐩爣杩借釜 | [fairmot](https://gitee.com/mindspore/models/tree/master/research/cv/fairmot) |鉁厊 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [I3D](https://gitee.com/mindspore/models/tree/master/research/cv/I3D) |鉁厊   |   |
+| 瑙嗛 | 鐩爣杩借釜 | [JDE](https://gitee.com/mindspore/models/tree/master/research/cv/JDE) | | 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗗壊 | [OSVOS](https://gitee.com/mindspore/models/tree/master/research/cv/OSVOS) | | 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [r2plus1d](https://gitee.com/mindspore/models/tree/master/research/cv/r2plus1d) |鉁厊 鉁� |   |
+| 瑙嗛 | 瑙嗛瓒呭垎 | [rbpn](https://gitee.com/mindspore/models/tree/master/research/cv/rbpn) |鉁厊   |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [resnet3d](https://gitee.com/mindspore/models/tree/master/research/cv/resnet3d) |鉁厊   |   |
+| 瑙嗛 | 鐩爣杩借釜 | [SiamFC](https://gitee.com/mindspore/models/tree/master/research/cv/SiamFC) |鉁厊   |   |
+| 瑙嗛 | 鐩爣杩借釜 | [siamRPN](https://gitee.com/mindspore/models/tree/master/research/cv/siamRPN) |鉁厊 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [slowfast](https://gitee.com/mindspore/models/tree/master/research/cv/slowfast) |鉁厊 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [stnet](https://gitee.com/mindspore/models/tree/master/research/cv/stnet) |鉁厊   |   |
+| 瑙嗛 | 鐩爣杩借釜 | [tracktor](https://gitee.com/mindspore/models/tree/master/research/cv/tracktor) | | 鉁� |   |
+| 瑙嗛 | 鐩爣杩借釜 | [tracktor++](https://gitee.com/mindspore/models/tree/master/research/cv/tracktor++) |鉁厊 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [trn](https://gitee.com/mindspore/models/tree/master/research/cv/trn) | | 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [tsm](https://gitee.com/mindspore/models/tree/master/research/cv/tsm) |鉁厊 鉁� |   |
+| 瑙嗛 | 瑙嗛鍒嗙被 | [tsn](https://gitee.com/mindspore/models/tree/master/research/cv/tsn) |鉁厊 鉁� |   |
+
 
- [Community](https://gitee.com/mindspore/models/tree/master/community)
 
diff --git a/official/cv/LearningToSeeInTheDark/README_CN.md b/official/cv/LearningToSeeInTheDark/README_CN.md
deleted file mode 100644
index 196715d0aabaa40f938bbed10fbf3ae8e2d0b0d3..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/README_CN.md
+++ /dev/null
@@ -1,249 +0,0 @@
-# 鐩綍
-
-<!-- TOC -->
-
-- [鐩綍](#鐩綍)
-    - [姒傝堪](#姒傝堪)
-    - [璁烘枃](#璁烘枃)
-- [妯″瀷鏋舵瀯](#妯″瀷鏋舵瀯)
-- [鏁版嵁闆哴(#鏁版嵁闆�)
-- [鐜瑕佹眰](#鐜瑕佹眰)
-- [蹇€熷叆闂╙(#蹇€熷叆闂�)
-- [鑴氭湰璇存槑](#鑴氭湰璇存槑)
-    - [鑴氭湰缁撴瀯涓庤鏄嶿(#鑴氭湰缁撴瀯涓庤鏄�)
-- [鑴氭湰鍙傛暟](#鑴氭湰鍙傛暟)
-- [璁粌杩囩▼](#璁粌杩囩▼)
-    - [鐢ㄦ硶](#鐢ㄦ硶)
-        - [Ascend澶勭悊鍣ㄧ幆澧冭繍琛宂(#Ascend澶勭悊鍣ㄧ幆澧冭繍琛�)
-    - [缁撴灉](#缁撴灉)
-- [璇勪及杩囩▼](#璇勪及杩囩▼)
-    - [鐢ㄦ硶](#鐢ㄦ硶-1)
-        - [Ascend澶勭悊鍣ㄧ幆澧冭繍琛宂(#Ascend澶勭悊鍣ㄧ幆澧冭繍琛�-1)
-    - [缁撴灉](#缁撴灉-1)
-- [鎺ㄧ悊杩囩▼](#鎺ㄧ悊杩囩▼)
-    - [瀵煎嚭MindIR](#瀵煎嚭MindIR)
-    - [鍦ˋcsend310鎵ц鎺ㄧ悊](#鍦ˋcsend310鎵ц鎺ㄧ悊)
-    - [缁撴灉](#缁撴灉)
-- [妯″瀷鎻忚堪](#妯″瀷鎻忚堪)
-    - [鎬ц兘](#鎬ц兘)
-        - [璇勪及鎬ц兘](#璇勪及鎬ц兘)
-- [闅忔満鎯呭喌璇存槑](#闅忔満鎯呭喌璇存槑)
-- [ModelZoo涓婚〉](#modelzoo涓婚〉)
-
-<!-- /TOC -->
-
-# Learning To See In The Dark
-
-## 姒傝堪
-
-Leraning To See In The dark 鏄湪2018骞存彁鍑虹殑锛屽熀浜庡叏鍗风Н绁炵粡缃戠粶锛團CN锛夌殑涓€涓綉缁滄ā鍨嬶紝鐢ㄤ簬鍥惧儚澶勭悊銆傜綉缁滅殑涓婚缁撴瀯涓篣-net锛屽皢浣庢洕鍏夊害鐨勫浘鍍忚緭鍏ョ綉缁滐紝缁忚繃澶勭悊鍚庤緭鍑哄緱鍒板搴旂殑楂樻洕鍏夊害鍥惧儚锛屽疄鐜颁簡鍥惧儚鐨勫浜拰鍘诲櫔澶勭悊銆�
-
-## 璁烘枃
-
-[1] Chen C, Chen Q, Xu J, et al. Learning to See in the Dark[C]// 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition. IEEE, 2018.
-
-# 妯″瀷鏋舵瀯
-
-缃戠粶涓讳綋涓篣net锛屽皢raw data杈撳叆鍚巔ack鎴愬洓涓猚hannel锛屽幓闄lacklevel骞朵箻浠atio鍚庤緭鍏ョ綉缁滀富浣�(Unet)锛岃緭鍑轰负RGB鍥惧儚銆�
-
-# 鏁版嵁闆�
-
-- 鏁版嵁闆嗗湴鍧€:
-
-    - [涓嬭浇Sony鏁版嵁闆哴(https://storage.googleapis.com/isl-datasets/SID/Sony.zip)
-
-- 鏁版嵁闆嗗寘鍚簡瀹ゅ唴鍜屽澶栧浘鍍忋€傚澶栧浘鍍忛€氬父鏄湪鏈堝厜鎴栬閬撶収鏄庢潯浠朵笅鎷嶆憚銆傚湪瀹ゅ鍦烘櫙涓嬶紝鐩告満鐨勪寒搴︿竴鑸湪0.2 lux 鍜�5 lux 涔嬮棿銆傚鍐呭浘鍍忛€氬父鏇存殫銆傚湪瀹ゅ唴鍦烘櫙涓殑鐩告満浜害涓€鑸湪0.03 lux 鍜�0.3 lux 涔嬮棿銆傝緭鍏ュ浘鍍忕殑鏇濆厜鏃堕棿璁剧疆涓�1/30鍜�1/10绉掋€傜浉搴旂殑鍙傝€冨浘鍍� (鐪熷疄鍥惧儚) 鐨勬洕鍏夋椂闂撮€氬父浼氬欢闀�100鍒�300鍊嶏細鍗�10鑷�30绉掋€�
-
-- 鍦ㄦ湰缃戠粶涓负渚夸簬璁粌锛岄鍏堝皢鏁版嵁闆嗙殑RAW鏍煎紡鏂囦欢杞崲涓轰簡鍚屽悕h5鏂囦欢锛岃浆鎹㈡柟娉曞涓嬶細
-
-```shell
-python preprocess.py --raw_path [RAW_PATH] --save_path [SAVE_PATH]
-```
-
-- 鏁版嵁闆嗗垎绫伙紙鏂囦欢鍚嶅紑澶达級锛�
-
-    - 0: 璁粌鏁版嵁闆�
-    - 1锛氭帹鐞嗘暟鎹泦
-    - 2锛氶獙璇佹暟鎹泦
-
-- 鏁版嵁闆嗙洰褰曠粨鏋勶細
-
-```text
-鈹斺攢dataset
-    鈹溾攢long                  # label
-    鈹斺攢short                 # input
-```
-
-# Environment Requirements
-
-- Hardware
-    - Prepare a hardware environment with Ascend processors.
-- Framework
-    - [MindSpore](https://www.mindspore.cn/install/en)
-- For more information, please check the following resources:
-    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/zh-CN/master/index.html)
-    - [MindSpore Python API](https://www.mindspore.cn/docs/zh-CN/master/index.html)
-
-# Quick Start
-
-After installing MindSpore from the official website, you can start training and evaluation as follows:
-
-- Running on Ascend
-
-```Shell
-# distributed training
-Usage: sh run_distribute_train.sh [RANK_TABLE_FILE] [DATASET_PATH] [PRETRAINED_CKPT_PATH] (optional)
-
-# standalone training
-Usage: sh run_standalone_train.sh [DATASET_PATH] [PRETRAINED_CKPT_PATH] (optional)
-
-# run the evaluation example
-Usage: sh run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-# Script Description
-
-## Script and Code Structure
-
-```text
-└──LearningToSeeInTheDark
-  ├── README.md
-  ├── scripts
-    ├── run_distribute_train.sh            # launch Ascend distributed training (8 devices)
-    ├── run_eval.sh                        # launch Ascend evaluation
-    └── run_standalone_train.sh            # launch Ascend standalone training (single device)
-  ├── src
-    ├── myutils.py                         # TrainOneStepWithLossScale & GradClip
-    └── unet_parts.py                      # definitions of the network backbone components
-  ├── eval.py                              # evaluate the network
-  └── train.py                             # train the network
-```
-
-# Script Parameters
-
-- Configure the hyperparameters.
-
-```Python
-"batch_size":8,                   # batch size of the input tensor
-"epoch_size":3000,                # number of training epochs
-"save_checkpoint":True,           # whether to save checkpoints
-"save_checkpoint_epochs":100,     # epoch interval between two checkpoints; by default the last checkpoint is saved after the final epoch
-"keep_checkpoint_max":100,        # keep only the last keep_checkpoint_max checkpoints
-"save_checkpoint_path":"./",      # checkpoint save path, relative to the execution path
-"warmup_epochs":500,              # number of warm-up epochs
-"lr":3e-4                         # base learning rate
-"lr_end":1e-6,                    # final learning rate
-```
-
-# 璁粌杩囩▼
-
-## 鐢ㄦ硶
-
-### Ascend澶勭悊鍣ㄧ幆澧冭繍琛�
-
-```Shell
-# 鍒嗗竷寮忚缁�
-鐢ㄦ硶锛歴h run_distribute_train.sh [RANK_TABLE_FILE] [DATASET_PATH] [PRETRAINED_CKPT_PATH]锛堝彲閫夛級
-
-# 鍗曟満璁粌
-鐢ㄦ硶锛歴h run_standalone_train.sh [DATASET_PATH] [PRETRAINED_CKPT_PATH]锛堝彲閫夛級
-
-```
-
-鍒嗗竷寮忚缁冮渶瑕佹彁鍓嶅垱寤篔SON鏍煎紡鐨凥CCL閰嶇疆鏂囦欢銆�
-
-鍏蜂綋鎿嶄綔锛屽弬瑙乕hccn_tools](https://gitee.com/mindspore/models/tree/master/utils/hccl_tools)涓殑璇存槑銆�
-
-璁粌缁撴灉淇濆瓨鍦ㄧず渚嬭矾寰勪腑锛屾枃浠跺す鍚嶇О浠モ€渢rain鈥濇垨鈥渢rain_parallel鈥濆紑澶淬€傛偍鍙湪姝よ矾寰勪笅鐨勬棩蹇椾腑鎵惧埌妫€鏌ョ偣鏂囦欢浠ュ強缁撴灉锛屽涓嬫墍绀恒€�
-
-## 缁撴灉
-
-```text
-# 鍒嗗竷寮忚缁冪粨鏋滐紙8P锛�
-epoch: 1 step: 4, loss is 0.22979942
-epoch: 2 step: 4, loss is 0.25466543
-epoch: 3 step: 4, loss is 0.2032796
-epoch: 4 step: 4, loss is 0.18603589
-epoch: 5 step: 4, loss is 0.19579497
-...
-```
-
-# 璇勪及杩囩▼
-
-## 鐢ㄦ硶
-
-### Ascend澶勭悊鍣ㄧ幆澧冭繍琛�
-
-```Shell
-# 璇勪及
-Usage: sh run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-```Shell
-# 璇勪及绀轰緥
-sh  run_eval.sh  /data/dataset/ImageNet/imagenet_original  Resnet152-140_5004.ckpt
-```
-
-## 缁撴灉
-
-璇勪及缁撴灉淇濆瓨鍦ㄧず渚嬭矾寰勪腑锛屾枃浠跺す鍚嶄负鈥渆val鈥濄€傛偍鍙湪姝よ矾寰勪笅鎵惧埌缁忚繃缃戠粶澶勭悊鐨勮緭鍑哄浘鍍忋€�
-
-# Inference Process
-
-## [Export MindIR](#contents)
-
-```shell
-python export.py --ckpt_file [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT]
-```
-
-The parameter ckpt_file is mandatory;
-`EXPORT_FORMAT` must be chosen from ["AIR", "MINDIR"].
-
-## Inference on Ascend 310
-
-Before running inference, the MindIR file must be exported with the `export.py` script. The following shows an example of running inference with the MindIR model.
-
-```shell
-# Ascend310 inference
-bash run_infer_310.sh [MINDIR_PATH] [DATASET_PATH] [DEVICE_ID]
-```
-
-- `MINDIR_PATH`: path of the MindIR file
-- `DATASET_PATH`: path of the inference dataset
-- `DEVICE_ID`: optional, defaults to 0.
-
-## Results
-
-Inference results are saved in the current execution path of the script; the output images can be viewed in the current folder.
-
-# Model Description
-
-## Performance
-
-### Evaluation Performance
-
-| Parameters | Ascend 910 |
-|---|---|
-| Model version | Learning To See In The Dark |
-| Resources | Ascend 910; CPU 2.60 GHz, 192 cores; memory 755 GB |
-| Upload date | 2021-06-21 |
-| MindSpore version | 1.2.0 |
-| Dataset | SID |
-| Training parameters | epoch=2500, steps per epoch=35, batch_size=8 |
-| Optimizer | Adam |
-| Loss function | L1 loss |
-| Output | high-exposure image |
-| Loss | 0.030 |
-| Speed | 606.12 ms/step (8 devices) |
-| Total time | 132 minutes |
-| Parameters (M) | 60.19 |
-| Checkpoint for fine-tuning | 462 MB (.ckpt file) |
-| Script | [Link](https://gitee.com/mindspore/models/tree/master/research/cv/LearningToSeeInTheDark) |
-
-# Description of Random Situation
-
-Random seeds are set in unet_parts.py and train_sony.py.
-
-# ModelZoo Homepage
-
-Please check the official [homepage](https://gitee.com/mindspore/models).
\ No newline at end of file
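
The preprocessing the deleted README describes (pack the Bayer RAW frame into four half-resolution channels, subtract the black level, amplify by the exposure ratio) is the step that is easiest to misread, so here is a minimal NumPy sketch of it. It is not the repository's training code: the constants (black level 512, white level 16383, ratio cap 300) and slice order are taken from `pack_raw` and `get_test_data` in `infer/convert/infer_data_preprocess.py` below, while the function name and the dummy input are purely illustrative.

```python
import numpy as np

def pack_and_amplify(raw, in_exposure, gt_exposure,
                     black_level=512, white_level=16383, max_ratio=300.0):
    """Pack an H x W Bayer RAW frame into 4 half-resolution channels
    and scale it by the exposure ratio, as described in the README above."""
    # Subtract the black level and normalize to [0, 1].
    im = np.maximum(raw.astype(np.float32) - black_level, 0) / (white_level - black_level)
    h, w = im.shape
    # Pack the 2x2 Bayer neighborhood into 4 channels (same slice order as pack_raw below).
    packed = np.stack((im[0:h:2, 0:w:2],
                       im[0:h:2, 1:w:2],
                       im[1:h:2, 1:w:2],
                       im[1:h:2, 0:w:2]), axis=0)           # 4 x H/2 x W/2
    # Amplify the short exposure towards the long (ground-truth) exposure, capped at max_ratio.
    ratio = min(gt_exposure / in_exposure, max_ratio)
    return np.minimum(packed * ratio, 1.0)[np.newaxis]       # 1 x 4 x H/2 x W/2

# Example: a 0.1 s input paired with a 10 s reference gives an amplification ratio of 100.
dummy = np.random.randint(512, 16383, size=(2848, 4256), dtype=np.uint16)
print(pack_and_amplify(dummy, in_exposure=0.1, gt_exposure=10.0).shape)  # (1, 4, 1424, 2128)
```

The resulting 1x4x1424x2128 tensor matches the input shape that the deleted `export.py` and `convert_om.sh` further below assume for the network.
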
diff --git a/official/cv/LearningToSeeInTheDark/ascend310_infer/inc/utils.h b/official/cv/LearningToSeeInTheDark/ascend310_infer/inc/utils.h
deleted file mode 100644
index f8ae1e5b473d869b77af8d725a280d7c7665527c..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/ascend310_infer/inc/utils.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- * 
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_INFERENCE_UTILS_H_
-#define MINDSPORE_INFERENCE_UTILS_H_
-
-#include <sys/stat.h>
-#include <dirent.h>
-#include <vector>
-#include <string>
-#include <memory>
-#include "include/api/types.h"
-
-std::vector<std::string> GetAllFiles(std::string_view dirName);
-DIR *OpenDir(std::string_view dirName);
-std::string RealPath(std::string_view path);
-mindspore::MSTensor ReadFileToTensor(const std::string &file);
-int WriteResult(const std::string& imageFile, const std::vector<mindspore::MSTensor> &outputs);
-std::vector<std::string> GetAllFiles(std::string dir_name);
-std::vector<std::vector<std::string>> GetAllInputData(std::string dir_name);
-
-#endif
diff --git a/official/cv/LearningToSeeInTheDark/ascend310_infer/src/CMakeLists.txt b/official/cv/LearningToSeeInTheDark/ascend310_infer/src/CMakeLists.txt
deleted file mode 100644
index 0397995b0e0b37c4fa7c39c93ebd41011d5bd936..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/ascend310_infer/src/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-cmake_minimum_required(VERSION 3.14.1)
-project(MindSporeCxxTestcase[CXX])
-add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -std=c++17 -Werror -Wall -fPIE -Wl,--allow-shlib-undefined")
-set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR}/)
-option(MINDSPORE_PATH "mindspore install path" "")
-include_directories(${MINDSPORE_PATH})
-include_directories(${MINDSPORE_PATH}/include)
-include_directories(${PROJECT_SRC_ROOT}/../)
-find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib)
-file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*)
-
-add_executable(main main.cc utils.cc)
-target_link_libraries(main ${MS_LIB} ${MD_LIB} gflags)
diff --git a/official/cv/LearningToSeeInTheDark/ascend310_infer/src/build.sh b/official/cv/LearningToSeeInTheDark/ascend310_infer/src/build.sh
deleted file mode 100644
index abcb999930ca5d62345b204d7fcfe4e097e8f0bb..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/ascend310_infer/src/build.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-cmake . -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
-make
diff --git a/official/cv/LearningToSeeInTheDark/ascend310_infer/src/main.cc b/official/cv/LearningToSeeInTheDark/ascend310_infer/src/main.cc
deleted file mode 100644
index 6a43ff772d8ac96bd0dd78baad06876a966e2d74..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/ascend310_infer/src/main.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <sys/time.h>
-#include <gflags/gflags.h>
-#include <dirent.h>
-#include <iostream>
-#include <string>
-#include <algorithm>
-#include <iosfwd>
-#include <vector>
-#include <fstream>
-#include <sstream>
-
-#include "include/api/types.h"
-#include "include/api/model.h"
-#include "include/api/serialization.h"
-#include "include/api/context.h"
-#include "include/minddata/dataset/include/vision_ascend.h"
-#include "include/minddata/dataset/include/execute.h"
-#include "include/minddata/dataset/include/transforms.h"
-#include "include/minddata/dataset/include/vision.h"
-#include "inc/utils.h"
-
-using mindspore::dataset::TensorTransform;
-using mindspore::Context;
-using mindspore::Serialization;
-using mindspore::Model;
-using mindspore::Status;
-using mindspore::ModelType;
-using mindspore::Graph;
-using mindspore::DataType;
-using mindspore::GraphCell;
-using mindspore::kSuccess;
-using mindspore::MSTensor;
-using mindspore::dataset::Execute;
-
-DEFINE_string(mindir_path, "", "mindir path");
-DEFINE_string(dataset_path, ".", "dataset path");
-DEFINE_int32(device_id, 0, "device id");
-
-int main(int argc, char **argv) {
-    gflags::ParseCommandLineFlags(&argc, &argv, true);
-    if (RealPath(FLAGS_mindir_path).empty()) {
-        std::cout << "Invalid mindir path" << std::endl;
-        return 1;
-    }
-
-    auto context = std::make_shared<Context>();
-    auto ascend310_info = std::make_shared<mindspore::Ascend310DeviceInfo>();
-    ascend310_info->SetDeviceID(FLAGS_device_id);
-    context->MutableDeviceInfo().push_back(ascend310_info);
-
-    mindspore::Graph graph;
-    Status ret = Serialization::Load(FLAGS_mindir_path, ModelType::kMindIR, &graph);
-    if (ret != kSuccess) {
-        std::cout << "ERROR: Graph failed." << std::endl;
-        return 1;
-    }
-    Model model;
-    ret = model.Build(GraphCell(graph), context);
-    if (ret != kSuccess) {
-        std::cout << "ERROR: Build failed." << std::endl;
-        return 1;
-    }
-
-    std::vector<MSTensor> modelInputs = model.GetInputs();
-
-    auto all_files = GetAllFiles(FLAGS_dataset_path);
-    if (all_files.empty()) {
-        std::cout << "ERROR: No input data." << std::endl;
-        return 1;
-    }
-
-    std::map<double, double> costTime_map;
-    size_t size = all_files.size();
-
-    for (size_t i = 0; i < size; ++i) {
-        struct timeval start = {0};
-        struct timeval end = {0};
-        double startTime_ms;
-        double endTime_ms;
-        std::vector<MSTensor> inputs;
-        std::vector<MSTensor> outputs;
-        std::cout << "Start predict input files:" << all_files[i]<< std::endl;
-
-        mindspore::MSTensor input = ReadFileToTensor(all_files[i]);
-        inputs.emplace_back(modelInputs[0].Name(), modelInputs[0].DataType(), modelInputs[0].Shape(),
-                            input.Data().get(), input.DataSize());
-
-        gettimeofday(&start, NULL);
-        ret = model.Predict(inputs, &outputs);
-        gettimeofday(&end, NULL);
-        if (ret != kSuccess) {
-            std::cout << "Predict failed" << std::endl;
-        }
-
-        startTime_ms = (1.0 * start.tv_sec * 1000000 + start.tv_usec) / 1000;
-        endTime_ms = (1.0 * end.tv_sec * 1000000 + end.tv_usec) / 1000;
-        costTime_map.insert(std::pair<double, double>(startTime_ms, endTime_ms));
-
-        WriteResult(all_files[i], outputs);
-    }
-    double average = 0.0;
-    int infer_cnt = 0;
-    for (auto iter = costTime_map.begin(); iter != costTime_map.end(); iter++) {
-        double diff = 0.0;
-        diff = iter->second - iter->first;
-        average += diff;
-        infer_cnt++;
-    }
-
-    average = average / infer_cnt;
-    std::stringstream timeCost;
-    timeCost << "NN inference cost average time: "<< average << "ms of infer_count " << infer_cnt << std::endl;
-    std::cout << "NN inference cost average time: "<< average << "ms of infer_count " << infer_cnt << std::endl;
-    std::string file_name = "./time_Result" + std::string("/test_perform_static.txt");
-    std::ofstream file_stream(file_name.c_str(), std::ios::trunc);
-    file_stream << timeCost.str();
-    file_stream.close();
-    costTime_map.clear();
-  return 0;
-}
diff --git a/official/cv/LearningToSeeInTheDark/ascend310_infer/src/utils.cc b/official/cv/LearningToSeeInTheDark/ascend310_infer/src/utils.cc
deleted file mode 100644
index e48bd641f05bc77de118e4a5a775c203f7f99510..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/ascend310_infer/src/utils.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "inc/utils.h"
-
-#include <fstream>
-#include <algorithm>
-#include <iostream>
-
-using mindspore::MSTensor;
-using mindspore::DataType;
-
-std::vector<std::string> GetAllFiles(std::string dir_name) {
-    struct dirent *filename;
-    DIR *dir = OpenDir(dir_name);
-    if (dir == nullptr) {
-        return {};
-    }
-    std::vector<std::string> res;
-    while ((filename = readdir(dir)) != nullptr) {
-        std::string dName = std::string(filename->d_name);
-        if (dName == "." || dName == ".." || filename->d_type != DT_REG) {
-            continue;
-        }
-        res.emplace_back(std::string(dir_name) + "/" + filename->d_name);
-    }
-    std::sort(res.begin(), res.end());
-    for (auto &f : res) {
-        std::cout << "image file: " << f << std::endl;
-    }
-    return res;
-}
-
-int WriteResult(const std::string& imageFile, const std::vector<MSTensor> &outputs) {
-    std::string homePath = "./result_Files";
-    for (size_t i = 0; i < outputs.size(); ++i) {
-        size_t outputSize;
-        std::shared_ptr<const void> netOutput = outputs[i].Data();
-        outputSize = outputs[i].DataSize();
-        int pos = imageFile.rfind('/');
-        std::string fileName(imageFile, pos + 1);
-        fileName.replace(fileName.find('.'), fileName.size() - fileName.find('.'), '_' + std::to_string(i) + ".bin");
-        std::string outFileName = homePath + "/" + fileName;
-        FILE *outputFile = fopen(outFileName.c_str(), "wb");
-        fwrite(netOutput.get(), outputSize, sizeof(char), outputFile);
-        fclose(outputFile);
-        outputFile = nullptr;
-    }
-    return 0;
-}
-
-mindspore::MSTensor ReadFileToTensor(const std::string &file) {
-  if (file.empty()) {
-    std::cout << "Pointer file is nullptr" << std::endl;
-    return mindspore::MSTensor();
-  }
-
-  std::ifstream ifs(file);
-  if (!ifs.good()) {
-    std::cout << "File: " << file << " is not exist" << std::endl;
-    return mindspore::MSTensor();
-  }
-
-  if (!ifs.is_open()) {
-    std::cout << "File: " << file << "open failed" << std::endl;
-    return mindspore::MSTensor();
-  }
-
-  ifs.seekg(0, std::ios::end);
-  size_t size = ifs.tellg();
-  mindspore::MSTensor buffer(file, mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, size);
-
-  ifs.seekg(0, std::ios::beg);
-  ifs.read(reinterpret_cast<char *>(buffer.MutableData()), size);
-  ifs.close();
-
-  return buffer;
-}
-
-
-DIR *OpenDir(std::string_view dir_name) {
-    if (dir_name.empty()) {
-        std::cout << " dir_name is null ! " << std::endl;
-        return nullptr;
-    }
-    std::string real_path = RealPath(dir_name);
-    struct stat s;
-    lstat(real_path.c_str(), &s);
-    if (!S_ISDIR(s.st_mode)) {
-        std::cout << "dir_name is not a valid directory !" << std::endl;
-        return nullptr;
-    }
-    DIR *dir = opendir(real_path.c_str());
-    if (dir == nullptr) {
-        std::cout << "Can not open dir " << dir_name << std::endl;
-        return nullptr;
-    }
-    std::cout << "Successfully opened the dir " << dir_name << std::endl;
-    return dir;
-}
-
-std::string RealPath(std::string_view path) {
-    char real_path_mem[PATH_MAX] = {0};
-    char *real_path_ret = nullptr;
-    real_path_ret = realpath(path.data(), real_path_mem);
-    if (real_path_ret == nullptr) {
-        std::cout << "File: " << path << " is not exist.";
-        return "";
-    }
-
-    std::string real_path(real_path_mem);
-    std::cout << path << " realpath is: " << real_path << std::endl;
-    return real_path;
-}
-
diff --git a/official/cv/LearningToSeeInTheDark/export.py b/official/cv/LearningToSeeInTheDark/export.py
deleted file mode 100644
index 7451fde1557c0785512c382d0ee0049b25a121bf..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/export.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-import argparse as arg
-import numpy as np
-import mindspore as ms
-from mindspore import context, Tensor, export, load_checkpoint
-import mindspore.nn as nn
-from src.unet_parts import DoubleConv, Down, Up, OutConv
-
-
-class UNet(nn.Cell):
-
-    def __init__(self, n_channels, n_classes):
-        super(UNet, self).__init__()
-        self.n_channels = n_channels
-        self.n_classes = n_classes
-        self.inc = DoubleConv(n_channels, 32)
-        self.down1 = Down(32, 64)
-        self.down2 = Down(64, 128)
-        self.down3 = Down(128, 256)
-        self.down4 = Down(256, 512)
-        self.up1 = Up(512, 256)
-        self.up2 = Up(256, 128)
-        self.up3 = Up(128, 64)
-        self.up4 = Up(64, 32)
-        self.outc = OutConv(32, n_classes)
-
-    def construct(self, x):
-
-        x1 = self.inc(x)
-        x2 = self.down1(x1)
-        x3 = self.down2(x2)
-        x4 = self.down3(x3)
-        x5 = self.down4(x4)
-        x = self.up1(x5, x4)
-        x = self.up2(x, x3)
-        x = self.up3(x, x2)
-        x = self.up4(x, x1)
-        logits = self.outc(x)
-
-        return logits
-
-
-if __name__ == '__main__':
-    parser = arg.ArgumentParser(description='SID export')
-    parser.add_argument('--device_target', type=str, choices=['Ascend', 'GPU', 'CPU'], default='Ascend',
-                        help='device where the code will be implemented')
-    parser.add_argument('--device_id', type=int, default=0, help='device id')
-    parser.add_argument('--file_format', type=str, choices=['AIR', 'MINDIR'], default='MINDIR',
-                        help='file format')
-    parser.add_argument('--checkpoint_path', required=True, default=None, help='ckpt file path')
-    args = parser.parse_args()
-    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
-    if args.device_target == 'Ascend':
-        context.set_context(device_id=args.device_id)
-
-    ckpt_dir = args.checkpoint_path
-    net = UNet(4, 12)
-    load_checkpoint(ckpt_dir, net=net)
-    net.set_train(False)
-
-    input_data = Tensor(np.zeros([1, 4, 1424, 2128]), ms.float32)
-    export(net, input_data, file_name='sid', file_format=args.file_format)
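
Based on the argument parser in the deleted `export.py` above, a typical invocation would look like `python export.py --checkpoint_path ./sid.ckpt --file_format MINDIR --device_id 0` (the checkpoint filename here is a placeholder). The script fixes the output name to `sid` and the dummy input shape to 1x4x1424x2128, so normally only the checkpoint path and, if needed, the format have to be supplied.
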
diff --git a/official/cv/LearningToSeeInTheDark/infer/convert/convert_om.sh b/official/cv/LearningToSeeInTheDark/infer/convert/convert_om.sh
deleted file mode 100644
index d34e0990990f6e265c6d1e0511d35390b81f91e5..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/convert/convert_om.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-air_path=$1
-om_path=$2
-
-export install_path=/usr/local/Ascend/
-
-export ASCEND_ATC_PATH=${install_path}/atc
-export LD_LIBRARY_PATH=${install_path}/atc/lib64:$LD_LIBRARY_PATH
-export PATH=/usr/local/python3.7.5/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
-export PYTHONPATH=${install_path}/atc/python/site-packages:${install_path}/latest/atc/python/site-packages/auto_tune.egg/auto_tune:${install_path}/atc/python/site-packages/schedule_search.egg
-export ASCEND_OPP_PATH=${install_path}/opp
-
-echo "Input AIR file path: ${air_path}"
-echo "Output OM file path: ${om_path}"
-
-atc --input_format=NCHW \
-    --framework=1 \
-    --model="${air_path}" \
-    --output="${om_path}" \
-    --log=debug \
-    --output_type=FP32 \
-    --soc_version=Ascend310 \
-    --input_shape="input:1,4,1424,2128"
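
For reference, the deleted conversion script above takes the exported AIR file and an output prefix as its two positional arguments, e.g. `bash convert_om.sh ./sid.air ../data/model/sid` (both paths are placeholders); ATC then writes the offline model under that prefix, which is where the `ltsitd.pipeline` file below expects to find `../data/model/sid.om`.
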
diff --git a/official/cv/LearningToSeeInTheDark/infer/convert/data_post.py b/official/cv/LearningToSeeInTheDark/infer/convert/data_post.py
deleted file mode 100644
index 612426aba1889304121dc66019b76473533ccbfc..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/convert/data_post.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""test"""
-import os
-import glob
-import argparse as arg
-import numpy as np
-from PIL import Image
-
-
-
-if __name__ == '__main__':
-
-    parser = arg.ArgumentParser(description='MxBase Infer data postprocess')
-    parser.add_argument('--data_url', required=False, default='../result/', help='Location of bin data')
-    parser.add_argument('--result_url', required=False, default='../result_png/', help='Location of result data')
-    args = parser.parse_args()
-
-    file_list = glob.glob(args.data_url + "*")
-    print(file_list)
-    for file in file_list:
-        data = np.fromfile(file, '<f4')
-        print(data.shape)
-        data = np.reshape(data, (1, 3, 2848, 4256))
-        data = np.transpose(np.squeeze(data, 0), (1, 2, 0))
-        data = np.minimum(np.maximum(data, 0), 1)
-        data = np.trunc(data * 255)
-        data = data.astype(np.int8)
-        image = Image.fromarray(data, 'RGB')
-        file_name = os.path.basename(file)
-        image.save(args.result_url+file_name[:-4] + '.png')
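
For reference, the deleted postprocessing script above would typically be run as `python data_post.py --data_url ../result/ --result_url ../result_png/` (its built-in defaults); it reads each little-endian float32 `.bin` result, reshapes it to 1x3x2848x4256, clamps it to [0, 1], and writes the corresponding PNG.
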
diff --git a/official/cv/LearningToSeeInTheDark/infer/convert/infer_data_preprocess.py b/official/cv/LearningToSeeInTheDark/infer/convert/infer_data_preprocess.py
deleted file mode 100644
index 25af83a29f0ea2bed48889110f3bbb91c54f2a7a..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/convert/infer_data_preprocess.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""test"""
-import os
-import glob
-import h5py
-import numpy as np
-
-
-def pack_raw(raw):
-    """ pack sony raw data into 4 channels """
-
-    im = np.maximum(raw - 512, 0) / (16383 - 512)  # subtract the black level
-    im = np.expand_dims(im, axis=2)
-    img_shape = im.shape
-    H = img_shape[0]
-    W = img_shape[1]
-
-    out = np.concatenate((im[0:H:2, 0:W:2, :],  # take rows/cols 0..H-1 / 0..W-1 with stride 2; same below
-                          im[0:H:2, 1:W:2, :],
-                          im[1:H:2, 1:W:2, :],
-                          im[1:H:2, 0:W:2, :]), axis=2)
-    return out
-
-
-def get_test_data(input_dir1, gt_dir1, test_ids1, result_dir1):
-    """ preprocess input data into .bin files """
-    for test_id in test_ids1:
-        in_files = glob.glob(input_dir1 + '%05d_00*.hdf5' % test_id)
-
-        gt_files = glob.glob(gt_dir1 + '%05d_00*.hdf5' % test_id)
-        gt_path = gt_files[0]
-        gt_fn = os.path.basename(gt_path)
-        gt_exposure = float(gt_fn[9: -6])
-
-        for in_path in in_files:  # reuse the same label for every input exposure
-
-            in_fn = os.path.basename(in_path)
-            in_exposure = float(in_fn[9: -6])
-            ratio = min(gt_exposure / in_exposure, 300.0)
-            ima = h5py.File(in_path, 'r')
-            in_rawed = ima.get('in')[:]
-            input_image = np.expand_dims(pack_raw(in_rawed), axis=0) * ratio
-            input_image = np.minimum(input_image, 1.0)
-            input_image = input_image.transpose([0, 3, 1, 2])
-            input_image = np.float32(input_image)
-            bin_name = os.path.join(result_dir1, in_fn[0: 9]) + '.bin'
-            print(bin_name)
-            input_image.tofile(bin_name)
-
-
-if __name__ == '__main__':
-
-    local_data_path = '../rawed_sony'
-    input_dir = os.path.join(local_data_path, 'short/')
-    gt_dir = os.path.join(local_data_path, 'long/')
-    result_dir = './bin'
-    test_fns = glob.glob(gt_dir + '1*.hdf5')
-    test_ids = [int(os.path.basename(test_fn)[0:5]) for test_fn in test_fns]
-    get_test_data(input_dir, gt_dir, test_ids, result_dir)
diff --git a/official/cv/LearningToSeeInTheDark/infer/data/config/ltsitd.pipeline b/official/cv/LearningToSeeInTheDark/infer/data/config/ltsitd.pipeline
deleted file mode 100644
index a085a17ad1e131a837291a351f94038a5a0d9655..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/data/config/ltsitd.pipeline
+++ /dev/null
@@ -1,36 +0,0 @@
-{
-    "im_learningtoseeinthedark": {
-        "stream_config": {
-            "deviceId": "0"
-        },
-        "appsrc0": {
-            "props": {
-                "blocksize": "409600"
-            },
-            "factory": "appsrc",
-            "next": "mxpi_tensorinfer0"
-        },
-         "mxpi_tensorinfer0": {
-            "props": {
-                "dataSource": "appsrc0",
-                "modelPath": "../data/model/sid.om"
-            },
-            "factory": "mxpi_tensorinfer",
-            "next": "mxpi_dataserialize0"
-        },
-        "mxpi_dataserialize0": {
-            "props": {
-                "outputDataKeys": "mxpi_tensorinfer0"
-            },
-            "factory": "mxpi_dataserialize",
-            "next": "appsink0"
-        },  
-        "appsink0": {
-            "props": {
-                "blocksize": "4096000"
-            },
-            "factory": "appsink"
-        }
-    }
-}
-
diff --git a/official/cv/LearningToSeeInTheDark/infer/docker_start_infer.sh b/official/cv/LearningToSeeInTheDark/infer/docker_start_infer.sh
deleted file mode 100644
index aa0d34c5d7c090b414d5b15b18637194ee1a2ec7..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/docker_start_infer.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-docker_image=$1
-share_dir=$2
-echo "$1"
-echo "$2"
-if [ -z "${docker_image}" ]; then
-    echo "please input docker_image"
-    exit 1
-fi
-
-if [ ! -d "${share_dir}" ]; then
-    echo "please input share directory that contains dataset, models and codes"
-    exit 1
-fi
-
-
-docker run -it \
-    --device=/dev/davinci0 \
-    --device=/dev/davinci_manager \
-    --device=/dev/devmm_svm \
-    --device=/dev/hisi_hdc \
-    --privileged \
-    -v //usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
-    -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
-    -v ${share_dir}:${share_dir} \
-    ${docker_image} \
-    /bin/bash
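
For reference, the deleted helper above is invoked with the inference Docker image and a shared directory as its two positional arguments, e.g. `bash docker_start_infer.sh mindspore_infer:latest /home/data` (both values are placeholders); the shared directory is expected to contain the dataset, the converted model, and the code, and is mounted into the container at the same path.
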
diff --git a/official/cv/LearningToSeeInTheDark/infer/mxbase/CMakeLists.txt b/official/cv/LearningToSeeInTheDark/infer/mxbase/CMakeLists.txt
deleted file mode 100644
index 0ae122d44ace4e232eced1a9f408caa566d7746c..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/mxbase/CMakeLists.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-cmake_minimum_required(VERSION 3.10.0)
-project(learningToSeeInTheDark)
-
-set(TARGET learningToSeeInTheDark)
-
-SET(CMAKE_BUILD_TYPE "Debug")
-SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g -ggdb")
-SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
-
-add_definitions(-DENABLE_DVPP_INTERFACE)
-add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
-add_definitions(-Dgoogle=mindxsdk_private)
-add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
-add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -pie)
-
-# Check environment variable
-if(NOT DEFINED ENV{ASCEND_HOME})
-    message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
-endif()
-if(NOT DEFINED ENV{ASCEND_VERSION})
-    message(WARNING "please define environment variable:ASCEND_VERSION")
-endif()
-if(NOT DEFINED ENV{ARCH_PATTERN})
-    message(WARNING "please define environment variable:ARCH_PATTERN")
-endif()
-
-set(ACL_INC_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/include)
-set(ACL_LIB_DIR $ENV{ASCEND_HOME}/$ENV{ASCEND_VERSION}/$ENV{ARCH_PATTERN}/acllib/lib64)
-
-set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
-set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
-set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
-set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
-set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
-
-if(DEFINED ENV{MXSDK_OPENSOURCE_DIR})
-    set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
-else()
-    set(OPENSOURCE_DIR ${MXBASE_ROOT_DIR}/opensource)
-endif()
-
-
-include_directories(${ACL_INC_DIR})
-include_directories(${OPENSOURCE_DIR}/include)
-include_directories(${OPENSOURCE_DIR}/include/opencv4)
-
-include_directories(${MXBASE_INC})
-include_directories(${MXBASE_POST_PROCESS_DIR})
-
-link_directories(${ACL_LIB_DIR})
-link_directories(${OPENSOURCE_DIR}/lib)
-link_directories(${MXBASE_LIB_DIR})
-link_directories(${MXBASE_POST_LIB_DIR})
-
-
-add_executable(${TARGET} ./src/main.cpp ./src/LearningToSeeInTheDark.cpp)
-
-target_link_libraries(${TARGET} glog cpprest mxbase opencv_world stdc++fs)
-
-install(TARGETS ${TARGET} RUNTIME DESTINATION ${PROJECT_SOURCE_DIR}/)
diff --git a/official/cv/LearningToSeeInTheDark/infer/mxbase/build.sh b/official/cv/LearningToSeeInTheDark/infer/mxbase/build.sh
deleted file mode 100644
index 6eba5fc38098c247e009b036874a3c9c17831eb8..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/mxbase/build.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# env
-
-export ASCEND_HOME=/usr/local/Ascend
-export ASCEND_VERSION=nnrt/latest
-export ARCH_PATTERN=.
-export LD_LIBRARY_PATH=${MX_SDK_HOME}/lib/modelpostprocessors:${LD_LIBRARY_PATH}
-mkdir -p build
-cd build || exit
-
-function make_plugin() {
-    if ! cmake ..;
-    then
-      echo "cmake failed."
-      return 1
-    fi
-
-    if ! (make);
-    then
-      echo "make failed."
-      return 1
-    fi
-
-    return 0
-}
-
-if make_plugin;
-then
-  echo "INFO: Build successfully."
-else
-  echo "ERROR: Build failed."
-fi
-
-cd - || exit
diff --git a/official/cv/LearningToSeeInTheDark/infer/mxbase/src/LearningToSeeInTheDark.cpp b/official/cv/LearningToSeeInTheDark/infer/mxbase/src/LearningToSeeInTheDark.cpp
deleted file mode 100644
index 0c7a760aa8c177e103608f99d967cf70b1fe4280..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/mxbase/src/LearningToSeeInTheDark.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LearningToSeeInTheDark.h"
-#include <unistd.h>
-#include <sys/stat.h>
-#include <fstream>
-#include <iostream>
-#include "MxBase/DeviceManager/DeviceManager.h"
-#include "MxBase/Log/Log.h"
-
-const uint32_t EACH_LABEL_LENGTH = 4;
-const uint32_t MAX_LENGTH = 12121088;
-
-APP_ERROR LearningToSeeInTheDark::Init(const InitParam &initParam) {
-    deviceId_ = initParam.deviceId;
-    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
-    if (ret != APP_ERR_OK) {
-        LogError << "Init devices failed, ret=" << ret << ".";
-        return ret;
-    }
-    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
-    if (ret != APP_ERR_OK) {
-        LogError << "Set context failed, ret=" << ret << ".";
-        return ret;
-    }
-    dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
-    ret = dvppWrapper_->Init();
-    if (ret != APP_ERR_OK) {
-        LogError << "DvppWrapper init failed, ret=" << ret << ".";
-        return ret;
-    }
-    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
-    ret = model_->Init(initParam.modelPath, modelDesc_);
-    if (ret != APP_ERR_OK) {
-        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
-        return ret;
-    }
-
-    return APP_ERR_OK;
-}
-
-APP_ERROR LearningToSeeInTheDark::DeInit() {
-    dvppWrapper_->DeInit();
-    model_->DeInit();
-    MxBase::DeviceManager::GetInstance()->DestroyDevices();
-    return APP_ERR_OK;
-}
-
-APP_ERROR LearningToSeeInTheDark::ReadTensorFromFile(const std::string &file, uint32_t *data, uint32_t size) {
-    if (data == NULL || size < MAX_LENGTH) {
-        LogError << "Input data is invalid.";
-        return APP_ERR_COMM_INVALID_POINTER;
-    }
-    std::ifstream infile;
-    // open label file
-    infile.open(file, std::ios_base::in | std::ios_base::binary);
-    // check label file validity
-    if (infile.fail()) {
-        LogError << "Failed to open label file: " << file << ".";
-        return APP_ERR_COMM_OPEN_FAIL;
-    }
-    infile.read(reinterpret_cast<char*>(data), sizeof(uint32_t)* MAX_LENGTH);
-    infile.close();
-    return APP_ERR_OK;
-}
-
-APP_ERROR LearningToSeeInTheDark::ReadInputTensor(const std::string &fileName,
-                                                  std::vector<MxBase::TensorBase> *inputs) {
-    uint32_t *data = new uint32_t[MAX_LENGTH]();
-    APP_ERROR ret = ReadTensorFromFile(fileName, data, MAX_LENGTH);
-    if (ret != APP_ERR_OK) {
-        LogError << "ReadTensorFromFile failed.";
-        delete[] data;
-        return ret;
-    }
-
-    const uint32_t dataSize = MAX_LENGTH * sizeof(uint32_t);
-    LogInfo << "Input tensor byte size: " << dataSize;
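-    // Copy the host buffer into device memory and wrap it as a (1, MAX_LENGTH) uint32 tensor.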
-    MxBase::MemoryData memoryDataDst(dataSize, MxBase::MemoryData::MEMORY_DEVICE, deviceId_);
-    MxBase::MemoryData memoryDataSrc(reinterpret_cast<void*>(data), dataSize, MxBase::MemoryData::MEMORY_HOST_MALLOC);
-    ret = MxBase::MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
-    if (ret != APP_ERR_OK) {
-        LogError << GetError(ret) << "Memory malloc and copy failed.";
-        delete[] data;
-        return ret;
-    }
-
-    std::vector<uint32_t> shape = { 1, MAX_LENGTH };
-    inputs->push_back(MxBase::TensorBase(memoryDataDst, false, shape, MxBase::TENSOR_DTYPE_UINT32));
-    delete[] data;
-    return APP_ERR_OK;
-}
-
-
-APP_ERROR LearningToSeeInTheDark::Inference(const std::vector<MxBase::TensorBase> &inputs,
-                                            std::vector<MxBase::TensorBase> &outputs) {
-    auto dtypes = model_->GetOutputDataType();
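-    // Allocate a device-side output tensor for each model output, using the shapes recorded in modelDesc_.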
-    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
-        std::vector<uint32_t> shape = {};
-        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
-             shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
-        }
-        MxBase::TensorBase tensor(shape, dtypes[i], MxBase::MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
-        APP_ERROR ret = MxBase::TensorBase::TensorBaseMalloc(tensor);
-        if (ret != APP_ERR_OK) {
-            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
-            return ret;
-        }
-        outputs.push_back(tensor);
-    }
-    MxBase::DynamicInfo dynamicInfo = {};
-    dynamicInfo.dynamicType = MxBase::DynamicType::STATIC_BATCH;
-    auto startTime = std::chrono::high_resolution_clock::now();
-    APP_ERROR ret = model_->ModelInference(inputs, outputs, dynamicInfo);
-    auto endTime = std::chrono::high_resolution_clock::now();
-    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
-    // save time
-    inferCostTimeMilliSec += costMs;
-    if (ret != APP_ERR_OK) {
-        LogError << "ModelInference failed, ret=" << ret << ".";
-        return ret;
-    }
-    LogInfo << "Inference success";
-    return APP_ERR_OK;
-}
-
-APP_ERROR  LearningToSeeInTheDark::WriteResult(const std::string &imageFile,
-                                               std::vector<MxBase::TensorBase> &outputs) {
-    LogInfo << "imageFile:" << imageFile;
-    std::string imgName = imageFile.substr(imageFile.find_last_of("/") + 1);
-    LogInfo << "imgName:" << imgName;
-    size_t dot = imgName.find_last_of(".");
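-    // Move each output tensor back to host memory and dump it to ../result/ as a raw float32 binary file.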
-    for (size_t i = 0; i < outputs.size(); ++i) {
-        APP_ERROR ret = outputs[i].ToHost();
-        if (ret != APP_ERR_OK) {
-            LogError << GetError(ret) << "ToHost failed.";
-            return ret;
-        }
-        void *netOutput = outputs[i].GetBuffer();
-        std::vector<uint32_t> outshape = outputs[i].GetShape();
-        // size_t outputSize = outputs[i].GetByteSize();
-        std::string outFileName = "../result/" + imgName.substr(0, dot) + "1.bin";
-        LogInfo << "outFileName:" << outFileName;
-        FILE *outputFile_ = fopen(outFileName.c_str(), "wb");
-        if (outputFile_ == nullptr) {
-            LogError << "Failed to open output file: " << outFileName << ".";
-            return APP_ERR_COMM_OPEN_FAIL;
-        }
-        fwrite(netOutput, sizeof(float), outshape[0] * outshape[1] * outshape[2] * outshape[3], outputFile_);
-        fclose(outputFile_);
-        outputFile_ = nullptr;
-    }
-    return APP_ERR_OK;
-}
-
-APP_ERROR LearningToSeeInTheDark::Process(const std::string &fileName) {
-    std::vector<MxBase::TensorBase> inputs = {};
-    std::vector<MxBase::TensorBase> outputs = {};
-    APP_ERROR ret = ReadInputTensor(fileName, &inputs);
-    if (ret != APP_ERR_OK) {
-        LogError << "Read input failed, ret=" << ret << ".";
-        return ret;
-    }
-
-    // Inference() already accumulates the model inference time in inferCostTimeMilliSec.
-    ret = Inference(inputs, outputs);
-    if (ret != APP_ERR_OK) {
-        LogError << "Inference failed, ret=" << ret << ".";
-        return ret;
-    }
-
-    ret = WriteResult(fileName, outputs);
-    if (ret != APP_ERR_OK) {
-        LogError << "Save infer results into file failed. ret = " << ret << ".";
-        return ret;
-    }
-
-    return APP_ERR_OK;
-}
diff --git a/official/cv/LearningToSeeInTheDark/infer/mxbase/src/LearningToSeeInTheDark.h b/official/cv/LearningToSeeInTheDark/infer/mxbase/src/LearningToSeeInTheDark.h
deleted file mode 100644
index e6d0c493fa2d75566732842f6eaaef0ea1b413c3..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/mxbase/src/LearningToSeeInTheDark.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MXBASE_LEARNINGTOSEEINTHEDARK_H
-#define MXBASE_LEARNINGTOSEEINTHEDARK_H
-
-#include <memory>
-#include <utility>
-#include <vector>
-#include <string>
-#include <map>
-#include <opencv2/opencv.hpp>
-#include "MxBase/DvppWrapper/DvppWrapper.h"
-#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
-#include "MxBase/Tensor/TensorContext/TensorContext.h"
-
-struct InitParam {
-    uint32_t deviceId;
-    std::string modelPath;
-};
-
-class LearningToSeeInTheDark {
- public:
-    APP_ERROR Init(const InitParam &initParam);
-    APP_ERROR DeInit();
-    APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> &outputs);
-    APP_ERROR Process(const std::string &fileName);
-    // get infer time
-    double GetInferCostMilliSec() const { return inferCostTimeMilliSec; }
- protected:
-    APP_ERROR ReadTensorFromFile(const std::string &file, uint32_t *data, uint32_t size);
-    APP_ERROR ReadInputTensor(const std::string &fileName, std::vector<MxBase::TensorBase> *inputs);
-    APP_ERROR WriteResult(const std::string &imageFile, std::vector<MxBase::TensorBase> &outputs);
- private:
-    std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
-    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
-    MxBase::ModelDesc modelDesc_;
-    std::vector<std::string> labelMap_ = {};
-    uint32_t deviceId_ = 0;
-    // infer time
-    double inferCostTimeMilliSec = 0.0;
-};
-
-#endif
diff --git a/official/cv/LearningToSeeInTheDark/infer/mxbase/src/main.cpp b/official/cv/LearningToSeeInTheDark/infer/mxbase/src/main.cpp
deleted file mode 100644
index fc2cb4bdba3159a32daea1ab15f7c5babd6891f8..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/mxbase/src/main.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2021. Huawei Technologies Co., Ltd. All rights reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <dirent.h>
-#include "MxBase/Log/Log.h"
-#include "LearningToSeeInTheDark.h"
-
-namespace {
-    const uint32_t BATCH_SIZE = 8;
-    const uint32_t EACH_LABEL_LENGTH = 4;
-    const uint32_t MAX_LENGTH = 12121088;
-}  // namespace
-
-APP_ERROR ReadFilesFromPath(const std::string &path, std::vector<std::string> *files) {
-    DIR *dirPtr = opendir(path.c_str());
-    if (dirPtr == nullptr) {
-        LogError << "Opendir failed. dir: " << path << ".";
-        return APP_ERR_INTERNAL_ERROR;
-    }
-    dirent *direntPtr = nullptr;
-    while ((direntPtr = readdir(dirPtr)) != nullptr) {
-        std::string fileName = direntPtr->d_name;
-        if (fileName == "." || fileName == "..") {
-            continue;
-        }
-
-        files->emplace_back(path + "/" + fileName);
-    }
-    LogInfo << "Opendir ok. dir: " << path << ".";
-    closedir(dirPtr);
-    // sort ascending order
-    std::sort(files->begin(), files->end());
-    LogInfo << "The size of files is " << files->size() << ".";
-    return APP_ERR_OK;
-}
-
-int main(int argc, char* argv[]) {
-    if (argc <= 1) {
-        LogWarn << "Please input image path, such as '../../data/images'.";
-        return APP_ERR_OK;
-    }
-
-    InitParam initParam = {};
-    initParam.deviceId = 0;
-    initParam.modelPath = "../../data/model/sid.om";
-    auto learningtoseeinthedark = std::make_shared<LearningToSeeInTheDark>();
-    APP_ERROR ret = learningtoseeinthedark->Init(initParam);
-    if (ret != APP_ERR_OK) {
-        LogError << "LearningToSeeInTheDark init failed, ret=" << ret << ".";
-        return ret;
-    }
-
-    std::string inferPath = argv[1];
-    std::vector<std::string> files;
-    ret = ReadFilesFromPath(inferPath, &files);
-    if (ret != APP_ERR_OK) {
-        LogError << "Read files from path failed, ret=" << ret << ".";
-        return ret;
-    }
-
-    auto startTime = std::chrono::high_resolution_clock::now();
-    for (uint32_t i = 0; i < files.size(); i++) {
-        ret = learningtoseeinthedark->Process(files[i]);
-        if (ret != APP_ERR_OK) {
-            LogError << "LearningToSeeInTheDark process failed, ret=" << ret << ".";
-            learningtoseeinthedark->DeInit();
-            return ret;
-        }
-    }
-    auto endTime = std::chrono::high_resolution_clock::now();
-
-    learningtoseeinthedark->DeInit();
-    double costMilliSecs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
-    double fps = 1000.0*files.size() / learningtoseeinthedark->GetInferCostMilliSec();
-    LogInfo << "[Process Delay] cost: " << costMilliSecs << " ms\tfps: " << fps << " imgs/sec";
-    return APP_ERR_OK;
-}
diff --git a/official/cv/LearningToSeeInTheDark/infer/sdk/main.py b/official/cv/LearningToSeeInTheDark/infer/sdk/main.py
deleted file mode 100644
index a30622205cc1162c4a92520ae4e2de414999fa02..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/sdk/main.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# coding=utf-8
-
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-import os
-import sys
-import time
-import MxpiDataType_pb2 as MxpiDataType
-import numpy as np
-
-from PIL import Image
-from StreamManagerApi import StreamManagerApi, InProtobufVector, \
-    MxProtobufIn, StringVector
-
-def send_source_data(appsrc_id, tensor, stream_name, stream_manager):
-    """
-    Construct the stream input and send it to the stream specified by stream_name.
-    Args:
-        appsrc_id: the appsrc element index used by SendProtobuf
-        tensor: the input tensor (a numpy array)
-        stream_name: the stream name
-        stream_manager: the StreamManagerApi instance
-    Returns:
-        bool: True if the data was sent successfully, False otherwise
-    """
-    tensor_package_list = MxpiDataType.MxpiTensorPackageList()
-    tensor_package = tensor_package_list.tensorPackageVec.add()
-    array_bytes = tensor.tobytes()
-    tensor_vec = tensor_package.tensorVec.add()
-    tensor_vec.deviceId = 0
-    tensor_vec.memType = 0
-    for i in tensor.shape:
-        tensor_vec.tensorShape.append(i)
-    tensor_vec.dataStr = array_bytes
-    tensor_vec.tensorDataSize = len(array_bytes)
-    key = "appsrc{}".format(appsrc_id).encode('utf-8')
-    protobuf_vec = InProtobufVector()
-    protobuf = MxProtobufIn()
-    protobuf.key = key
-    protobuf.type = b'MxTools.MxpiTensorPackageList'
-    protobuf.protobuf = tensor_package_list.SerializeToString()
-    protobuf_vec.push_back(protobuf)
-
-    ret = stream_manager.SendProtobuf(stream_name, appsrc_id, protobuf_vec)
-    if ret < 0:
-        print("Failed to send data to stream.")
-        return False
-    return True
-
-
-def run():
-    """
-    read pipeline and do infer
-    """
-    # init stream manager
-    stream_manager_api = StreamManagerApi()
-    ret = stream_manager_api.InitManager()
-    if ret != 0:
-        print("Failed to init Stream manager, ret=%s" % str(ret))
-        return
-
-    # create streams by pipeline config file
-    with open("../data/config/ltsitd.pipeline", 'rb') as f:
-        pipelineStr = f.read()
-    ret = stream_manager_api.CreateMultipleStreams(pipelineStr)
-
-    if ret != 0:
-        print("Failed to create Stream, ret=%s" % str(ret))
-        return
-
-    stream_name = b'im_learningtoseeinthedark'
-
-    # Construct the input of the stream
-    # data_input = MxDataInput()
-    infer_total_time = 0
-    dir_name = sys.argv[1]
-    res_dir_name = sys.argv[2]
-    file_list = os.listdir(dir_name)
-    if not os.path.exists(res_dir_name):
-        os.makedirs(res_dir_name)
-
-    for file_name in file_list:
-        file_path = os.path.join(dir_name, file_name)
-        tensor = np.fromfile(file_path, dtype=np.float32)
-
-        #input shape
-        tensor = np.resize(tensor, (1, 4, 1424, 2128))
-        if not send_source_data(0, tensor, stream_name, stream_manager_api):
-            return
-
-        # Obtain the inference result by specifying streamName and uniqueId.
-        key_vec = StringVector()
-        key_vec.push_back(b'mxpi_tensorinfer0')
-        start_time = time.time()
-        infer_result = stream_manager_api.GetProtobuf(stream_name, 0, key_vec)
-        infer_total_time += time.time() - start_time
-
-        print("infer_result size: %d" % infer_result.size())
-
-        if infer_result.size() == 0:
-            print("infer_result is empty")
-            return
-        if infer_result[0].errorCode != 0:
-            print("GetProtobuf error. errorCode=%d" % (infer_result[0].errorCode))
-            return
-
-        result = MxpiDataType.MxpiTensorPackageList()
-        result.ParseFromString(infer_result[0].messageBuf)
-        res = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype='<f4')
-
-        print("inference result:")
-        print(res)
-
-        #output shape
-        res = res.reshape((1, 3, 2848, 4256))
-
-        #postprocess
-        res = np.minimum(np.maximum(res, 0), 1)
-        res = np.trunc(res[0] * 255)
-        res = res.astype(np.int8)
-        res = res.transpose([1, 2, 0])
-
-        #save as image
-        im = Image.fromarray(res, 'RGB')
-        save_path = os.path.join(res_dir_name, file_name)
-        im.save(save_path + '.png')
-
-    #print the total time of inference
-    print("The total time of inference is {} s".format(infer_total_time))
-
-    # destroy streams
-    stream_manager_api.DestroyAllStreams()
-
-
-if __name__ == '__main__':
-    run()
diff --git a/official/cv/LearningToSeeInTheDark/infer/sdk/run.sh b/official/cv/LearningToSeeInTheDark/infer/sdk/run.sh
deleted file mode 100644
index d468152a739512611ab5d07483f2edd3e5f995f1..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/infer/sdk/run.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-image_path=$1
-result_dir=$2
-
-set -e
-
-# Simple log helper functions
-info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
-warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
-
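-# point GStreamer to the MindX SDK plugin scanner and plugin directories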
-export GST_PLUGIN_SCANNER=${MX_SDK_HOME}/opensource/libexec/gstreamer-1.0/gst-plugin-scanner
-export GST_PLUGIN_PATH=${MX_SDK_HOME}/opensource/lib/gstreamer-1.0:${MX_SDK_HOME}/lib/plugins
-
-# add the MindX SDK python directory to PYTHONPATH so that StreamManagerApi.py can be imported
-export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
-
-python3.7 main.py $image_path $result_dir
-exit 0
diff --git a/official/cv/LearningToSeeInTheDark/modelarts/train_modelarts.py b/official/cv/LearningToSeeInTheDark/modelarts/train_modelarts.py
deleted file mode 100644
index 82829f8ccc6529a435bfbc8ae0e46c55ba171e95..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/modelarts/train_modelarts.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""train"""
-from __future__ import division
-import os
-import glob
-import argparse as arg
-import ast
-import h5py
-import numpy as np
-import mindspore as ms
-from mindspore import context, Model, Tensor, export
-from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
-from mindspore.nn.loss import L1Loss
-from mindspore.nn.dynamic_lr import piecewise_constant_lr as pc_lr
-from mindspore.nn.dynamic_lr import warmup_lr
-import mindspore.dataset as ds
-from mindspore.communication.management import init
-import mindspore.nn as nn
-from mindspore.context import ParallelMode
-from mindspore.train.loss_scale_manager import DynamicLossScaleManager
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-import moxing as mox
-from src.unet_parts import DoubleConv, Down, Up, OutConv
-from src.myutils import GNMTTrainOneStepWithLossScaleCell, WithLossCell
-
-
-class UNet(nn.Cell):
-    """ Unet """
-    def __init__(self, n_channels, n_classes):
-        super(UNet, self).__init__()
-        self.n_channels = n_channels
-        self.n_classes = n_classes
-        self.inc = DoubleConv(n_channels, 32)
-        self.down1 = Down(32, 64)
-        self.down2 = Down(64, 128)
-        self.down3 = Down(128, 256)
-        self.down4 = Down(256, 512)
-        self.up1 = Up(512, 256)
-        self.up2 = Up(256, 128)
-        self.up3 = Up(128, 64)
-        self.up4 = Up(64, 32)
-        self.outc = OutConv(32, n_classes)
-
-    def construct(self, x):
-        """ Unet construct """
-        x1 = self.inc(x)
-        x2 = self.down1(x1)
-        x3 = self.down2(x2)
-        x4 = self.down3(x3)
-        x5 = self.down4(x4)
-        x = self.up1(x5, x4)
-        x = self.up2(x, x3)
-        x = self.up3(x, x2)
-        x = self.up4(x, x1)
-        logits = self.outc(x)
-        return logits
-
-
-def pack_raw(raw):
-    """ pack sony raw data into 4 channels """
-    im = np.maximum(raw - 512, 0) / (16383 - 512)  # subtract the black level and normalize to [0, 1]
-
-    im = np.expand_dims(im, axis=2)
-    img_shape = im.shape
-    H = img_shape[0]
-    W = img_shape[1]
-
-    out = np.concatenate((im[0:H:2, 0:W:2, :],
-                          im[0:H:2, 1:W:2, :],
-                          im[1:H:2, 1:W:2, :],
-                          im[1:H:2, 0:W:2, :]), axis=2)
-    return out
-
-
-def get_dataset(input_dir1, gt_dir1, train_ids1, num_shards=None, shard_id=None, distribute=False):
-    """ get mindspore dataset from raw data """
-    input_final_data = []
-    gt_final_data = []
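-    # pair every short-exposure input with its long-exposure ground truth and amplify the input by the exposure ratio (capped at 300)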
-    for train_id in train_ids1:
-        in_files = glob.glob(input_dir1 + '%05d_00*.hdf5' % train_id)
-
-        gt_files = glob.glob(gt_dir1 + '%05d_00*.hdf5' % train_id)
-        gt_path = gt_files[0]
-        gt_fn = os.path.basename(gt_path)
-        gt_exposure = float(gt_fn[9: -6])
-        gt = h5py.File(gt_path, 'r')
-        gt_rawed = gt.get('gt')[:]
-        gt_image = np.expand_dims(np.float32(gt_rawed / 65535.0), axis=0)
-        gt_image = gt_image.transpose([0, 3, 1, 2])
-
-        for in_path in in_files:
-            gt_final_data.append(gt_image[0])
-
-            in_fn = os.path.basename(in_path)
-            in_exposure = float(in_fn[9: -6])
-            ratio = min(gt_exposure / in_exposure, 300)
-            im = h5py.File(in_path, 'r')
-            in_rawed = im.get('in')[:]
-            input_image = np.expand_dims(pack_raw(in_rawed), axis=0) * ratio
-            input_image = np.float32(input_image)
-            input_image = input_image.transpose([0, 3, 1, 2])
-            input_final_data.append(input_image[0])
-    data = (input_final_data, gt_final_data)
-    if distribute:
-        datasets = ds.NumpySlicesDataset(data, ['input', 'label'], shuffle=True,
-                                         num_shards=num_shards, shard_id=shard_id)
-    else:
-        datasets = ds.NumpySlicesDataset(data, ['input', 'label'], shuffle=False)
-    return datasets
-
-
-def dynamic_lr(steps_per_epoch, warmup_epochs):
-    """ piecewise-constant learning rate schedule, with milestones shifted by the warmup epochs """
-    milestone = [(1200 + warmup_epochs) * steps_per_epoch,
-                 (1300 + warmup_epochs) * steps_per_epoch,
-                 (1700 + warmup_epochs) * steps_per_epoch,
-                 (2500 + warmup_epochs) * steps_per_epoch]
-    learning_rates = [3e-4, 1e-5, 3e-6, 1e-6]
-    lrs = pc_lr(milestone, learning_rates)
-    return lrs
-
-
-def random_crop_and_flip(image, label):
-    """ random crop and flip """
-    ps = 512
-    # random crop
-    h = image.shape[1]
-    w = image.shape[2]
-    xx = np.random.randint(0, h - ps)
-    yy = np.random.randint(0, w - ps)
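-    # the ground-truth image has twice the spatial resolution of the packed input, so its crop window is scaled by 2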
-    image = image[:, xx:xx + ps, yy:yy + ps]
-    label = label[:, xx * 2:xx * 2 + ps * 2, yy * 2:yy * 2 + ps * 2]
-    # random flip
-    if np.random.randint(2) == 1:  # random flip
-        image = np.flip(image, axis=1)
-        label = np.flip(label, axis=1)
-    if np.random.randint(2) == 1:
-        image = np.flip(image, axis=2)
-        label = np.flip(label, axis=2)
-    if np.random.randint(2) == 1:  # random transpose
-        image = np.transpose(image, (0, 2, 1))
-        label = np.transpose(label, (0, 2, 1))
-    image = np.minimum(image, 1.0)
-
-    return image, label
-
-
-def model_export(arguments):
-    output_dir = arguments.local_output_dir
-    ckpt_file = glob.glob(output_dir + '/' + '*.ckpt')[0]
-    print(ckpt_file)
-    network = UNet(4, 12)
-    load_checkpoint(ckpt_file, net=network)
-    network.set_train(False)
-
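-    # export with a dummy input matching the packed raw input shape (1, 4, 1424, 2128)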
-    input_data = Tensor(np.zeros([1, 4, 1424, 2128]), ms.float32)
-    export_file = os.path.join(output_dir, arguments.export_name)
-    export(network, input_data, file_name=export_file, file_format=arguments.export_model_format)
-    return 0
-
-
-if __name__ == "__main__":
-
-    parser = arg.ArgumentParser(description='MindSpore SID Example')
-    parser.add_argument('--device_target', default='Ascend',
-                        help='device on which the code will run')
-    parser.add_argument('--data_url', required=True, default=None, help='Location of data')
-    parser.add_argument('--train_url', required=True, default=None, help='OBS path for training outputs')
-    parser.add_argument('--pre_trained', required=False, default=None, help='Ckpt file path')
-    parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='If run distributed')
-    parser.add_argument('--batch_size', type=int, default=8)
-    parser.add_argument('--total_epochs', type=int, default=3000)
-    parser.add_argument('--warmup_epochs', type=int, default=500)
-    parser.add_argument('--local_data_dir', type=str, default="/cache")
-    parser.add_argument('--local_output_dir', type=str, default="/cache/train_output/")
-    parser.add_argument('--export_model_format', type=str, choices=['AIR', 'MINDIR'], default='AIR')
-    parser.add_argument('--export_name', type=str, default="sid")
-    args = parser.parse_args()
-
-    local_data_path = args.local_data_dir
-    train_output_path = args.local_output_dir
-
-    mox.file.copy_parallel(args.data_url, local_data_path)
-    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~file copy success~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
-    if args.run_distribute:
-        device_num = int(os.getenv('RANK_SIZE'))
-        device_id = int(os.getenv('DEVICE_ID'))
-        context.set_context(device_id=device_id)
-        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
-                                          gradients_mean=True)
-        init()
-    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
-
-    input_dir = os.path.join(local_data_path, 'short/')
-    gt_dir = os.path.join(local_data_path, 'long/')
-
-    train_fns = glob.glob(gt_dir + '0*.hdf5')
-    train_ids = [int(os.path.basename(train_fn)[0:5]) for train_fn in train_fns]
-
-    net = UNet(4, 12)
-    net_loss = L1Loss()
-    net = WithLossCell(net, net_loss)
-
-    if args.run_distribute:
-        dataset = get_dataset(input_dir, gt_dir, train_ids,
-                              num_shards=device_num, shard_id=device_id, distribute=True)
-    else:
-        dataset = get_dataset(input_dir, gt_dir, train_ids)
-    transform_list = [random_crop_and_flip]
-    dataset = dataset.map(transform_list, input_columns=['input', 'label'], output_columns=['input', 'label'])
-    dataset = dataset.batch(batch_size=args.batch_size, drop_remainder=True)
-    batches_per_epoch = dataset.get_dataset_size()
-
-    lr_warm = warmup_lr(learning_rate=3e-4, total_step=args.warmup_epochs * batches_per_epoch,
-                        step_per_epoch=batches_per_epoch, warmup_epoch=args.warmup_epochs)
-    lr = dynamic_lr(batches_per_epoch, args.warmup_epochs)
-    lr = lr_warm + lr[args.warmup_epochs:]
-    if len(lr) > args.total_epochs:
-        lr = lr[:args.total_epochs]
-    net_opt = nn.Adam(net.trainable_params(), lr)
-    scale_manager = DynamicLossScaleManager()
-    net = GNMTTrainOneStepWithLossScaleCell(net, net_opt, scale_manager.get_update_cell())
-
-    ckpt_dir = args.pre_trained
-    if ckpt_dir is not None:
-        param_dict = load_checkpoint(ckpt_dir)
-        load_param_into_net(net, param_dict)
-    model = Model(net)
-
-    loss_cb = LossMonitor()
-    time_cb = TimeMonitor(data_size=4)
-    config_ck = CheckpointConfig(save_checkpoint_steps=args.total_epochs * batches_per_epoch, keep_checkpoint_max=100)
-    if not os.path.exists(args.local_output_dir):
-        os.mkdir(args.local_output_dir)
-
-    ckpoint_cb = ModelCheckpoint(prefix='sony_trained_net', directory=args.local_output_dir, config=config_ck)
-    callbacks_list = [ckpoint_cb, loss_cb, time_cb]
-    model.train(epoch=args.total_epochs, train_dataset=dataset,
-                callbacks=callbacks_list,
-                dataset_sink_mode=True)
-    model_export(args)
-
-    mox.file.copy_parallel(train_output_path, args.train_url)
diff --git a/official/cv/LearningToSeeInTheDark/postprocess.py b/official/cv/LearningToSeeInTheDark/postprocess.py
deleted file mode 100644
index fcf8fdc8ac5e1dcd19f02b9b06080e6ffccd93ea..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/postprocess.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-""" generate image from 310 out """
-import os
-import glob
-import numpy as np
-from PIL import Image
-
-file_dir = './scripts/result_Files/'
-file_list = glob.glob(file_dir + '*.bin')
-rgb_path = './picture/'
-
-if __name__ == '__main__':
-    for file_path in file_list:
-        file_name = os.path.basename(file_path)[0: 6]
-        output = np.fromfile(file_path, dtype=np.float32)
-        output = output.reshape((1, 3, 2848, 4256))
-        output = np.minimum(np.maximum(output, 0), 1)
-        output = np.trunc(output[0] * 255)
-        output = output.astype(np.int8)
-        output = output.transpose([1, 2, 0])
-        im = Image.fromarray(output, 'RGB')
-        im.save(rgb_path + file_name + '.png')
diff --git a/official/cv/LearningToSeeInTheDark/preprocess.py b/official/cv/LearningToSeeInTheDark/preprocess.py
deleted file mode 100644
index 0cf1733dc04158e7db230970ce35e37476616d3e..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/preprocess.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""preprocess the dataset into h5 files"""
-import os
-import glob
-import argparse as arg
-import rawpy
-import h5py
-import numpy as np
-
-
-if __name__ == '__main__':
-    parser = arg.ArgumentParser(description='data preprocess')
-    parser.add_argument('--raw_path', type=str, help='raw data path')
-    parser.add_argument('--save_path', type=str, help='save data path')
-    args = parser.parse_args()
-
-    raw_in_file_dir = os.path.join(args.raw_path, 'short/')
-    raw_gt_file_dir = os.path.join(args.raw_path, 'long/')
-    save_path_in = os.path.join(args.save_path, 'short/')
-    save_path_gt = os.path.join(args.save_path, 'long/')
-
-    raw_file_paths = glob.glob(raw_in_file_dir + '*.ARW')
-    for file_path in raw_file_paths:
-        raw = rawpy.imread(file_path)
-        im = raw.raw_image_visible.astype(np.float32)
-        with h5py.File(save_path_in + os.path.basename(file_path)[0:-4] + '.hdf5', 'w') as f:
-            f.create_dataset('in', data=im)
-
-    raw_file_paths = glob.glob(raw_gt_file_dir + '*.ARW')
-    for file_path in raw_file_paths:
-        raw = rawpy.imread(file_path)
-        im1 = raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
-        with h5py.File(save_path_gt + os.path.basename(file_path)[0:-4] + '.hdf5', 'w') as f:
-            f.create_dataset('gt', data=im1)
diff --git a/official/cv/LearningToSeeInTheDark/scripts/docker_start.sh b/official/cv/LearningToSeeInTheDark/scripts/docker_start.sh
deleted file mode 100644
index 7c1c17d85abfb5fdc2ae45ef6c8bde811541a93b..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/scripts/docker_start.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-docker_image=$1
-data_dir=$2
-model_dir=$3
-
-docker run -it --ipc=host \
-               --device=/dev/davinci0 \
-               --device=/dev/davinci1 \
-               --device=/dev/davinci2 \
-               --device=/dev/davinci3 \
-               --device=/dev/davinci4 \
-               --device=/dev/davinci5 \
-               --device=/dev/davinci6 \
-               --device=/dev/davinci7 \
-               --device=/dev/davinci_manager \
-               --device=/dev/devmm_svm --device=/dev/hisi_hdc \
-               -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
-               -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons/ \
-               -v ${model_dir}:${model_dir} \
-               -v ${data_dir}:${data_dir}  \
-               -v ~/ascend/log/npu/conf/slog/slog.conf:/var/log/npu/conf/slog/slog.conf \
-               -v ~/ascend/log/npu/slog/:/var/log/npu/slog -v ~/ascend/log/npu/profiling/:/var/log/npu/profiling \
-               -v ~/ascend/log/npu/dump/:/var/log/npu/dump -v ~/ascend/log/npu/:/usr/slog ${docker_image} \
-               /bin/bash
diff --git a/official/cv/LearningToSeeInTheDark/scripts/run_distribute_train.sh b/official/cv/LearningToSeeInTheDark/scripts/run_distribute_train.sh
deleted file mode 100644
index 911e0dc621e341e90dc1798a52fabc463418e46b..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/scripts/run_distribute_train.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-echo "=============================================================================================================="
-echo "Please run the script as: "
-echo "bash run_distribute_train.sh RANK_TABLE_FILE DATA_PATH PRETRAINED_CKPT_PATH(optional)"
-echo "For example: bash run_distribute_train.sh hccl_8p_01234567_127.0.0.1.json /path/dataset"
-echo "It is better to use the absolute path."
-echo "=============================================================================================================="
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $1)
-PATH2=$(get_real_path $2)
-
-if [ $# == 3 ]
-then 
-    PATH3=$(get_real_path $3)
-fi
-
-if [ ! -f $PATH1 ]
-then 
-    echo "error: RANK_TABLE_FILE=$PATH1 is not a file"
-exit 1
-fi 
-
-if [ ! -d $PATH2 ]
-then 
-    echo "error: DATA_PATH=$PATH2 is not a directory"
-exit 1
-fi 
-
-if [ $# == 3 ] && [ ! -f $PATH3 ]
-then
-    echo "error: PRETRAINED_CKPT_PATH=$PATH3 is not a file"
-exit 1
-fi
-
-ulimit -u unlimited
-export DEVICE_NUM=8
-export RANK_SIZE=8
-export RANK_TABLE_FILE=$PATH1
-export MINDSPORE_HCCL_CONFIG_PATH=$PATH1
-
-DATA_PATH=$2
-export DATA_PATH=${DATA_PATH}
-
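-# create an isolated working directory for each device and launch one training process per rank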
-for((i=0;i<${RANK_SIZE};i++))
-do
-    rm -rf device$i
-    mkdir device$i
-    cp ../*.py ./device$i
-    cp *.sh ./device$i
-    cp -r ../src ./device$i
-    cd ./device$i
-    export DEVICE_ID=$i
-    export RANK_ID=$((i))
-    echo "start training for device $i"
-    env > env$i.log
-
-    if [ $# == 2 ]
-    then
-        python train_sony.py  --run_distribute=True  --data_url=$PATH2 &> train.log &
-    fi
-    
-    if [ $# == 3 ]
-    then
-        python train_sony.py --run_distribute=True  --data_url=$PATH2 --pre_trained=$PATH3 &> train.log &
-    fi
-
-    cd ../
-done
diff --git a/official/cv/LearningToSeeInTheDark/scripts/run_eval.sh b/official/cv/LearningToSeeInTheDark/scripts/run_eval.sh
deleted file mode 100644
index b767b37b2e83330ccaffe3bbedcddfaf45c1a3ad..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/scripts/run_eval.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-echo "=============================================================================================================="
-echo "Please run the script as: "
-echo "bash run_eval.sh DATA_PATH CHECKPOINT_PATH "
-echo "For example: bash run_eval.sh /path/dataset /path/checkpoint.ckpt"
-echo "It is better to use the absolute path."
-echo "=============================================================================================================="
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $1)
-PATH2=$(get_real_path $2)
-
-if [ ! -d $PATH1 ]
-then 
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-exit 1
-fi 
-
-if [ ! -f $PATH2 ]
-then 
-    echo "error: CHECKPOINT_PATH=$PATH2 is not a file"
-exit 1
-fi 
-
-ulimit -u unlimited
-export DEVICE_NUM=1
-export DEVICE_ID=6
-export RANK_SIZE=$DEVICE_NUM
-export RANK_ID=0
-
-if [ -d "eval" ];
-then
-    rm -rf ./eval
-fi
-mkdir ./eval
-cp ../*.py ./eval
-cp *.sh ./eval
-cp -r ../src ./eval
-cd ./eval 
-env > env.log
-echo "start evaluation for device $DEVICE_ID"
-python test_sony.py --data_url=$PATH1 --checkpoint_path=$PATH2 &> eval.log &
-cd ..
\ No newline at end of file
diff --git a/official/cv/LearningToSeeInTheDark/scripts/run_infer_310.sh b/official/cv/LearningToSeeInTheDark/scripts/run_infer_310.sh
deleted file mode 100644
index 7a9a75b4ca318fa3c4c482e965c07c5859bc0f9c..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/scripts/run_infer_310.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [[ $# -lt 2 || $# -gt 3 ]]; then
-    echo "Usage: sh run_infer_310.sh [MINDIR_PATH] [DATA_PATH] [DEVICE_ID]
-    DEVICE_ID is optional; it can also be set through the environment variable device_id, otherwise it defaults to 0"
-exit 1
-fi
-
-get_real_path(){
-    if [ "${1:0:1}" == "/" ]; then
-        echo "$1"
-    else
-        echo "$(realpath -m $PWD/$1)"
-    fi
-}
-model=$(get_real_path $1)
-data_path=$(get_real_path $2)
-
-device_id=0
-if [ $# == 3 ]; then    
-    device_id=$3
-fi
-
-echo "mindir name: "$model
-echo "dataset path: "$data_path
-echo "device id: "$device_id
-
-export ASCEND_HOME=/usr/local/Ascend/
-if [ -d ${ASCEND_HOME}/ascend-toolkit ]; then
-    export PATH=$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/atc/bin:$PATH
-    export LD_LIBRARY_PATH=/usr/local/lib:$ASCEND_HOME/ascend-toolkit/latest/atc/lib64:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
-    export TBE_IMPL_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe
-    export PYTHONPATH=${TBE_IMPL_PATH}:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/python/site-packages:$PYTHONPATH
-    export ASCEND_OPP_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp
-else
-    export ASCEND_HOME=/usr/local/Ascend/latest/
-    export PATH=$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH
-    export LD_LIBRARY_PATH=/usr/local/lib:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
-    export PYTHONPATH=$ASCEND_HOME/atc/python/site-packages:$PYTHONPATH
-    export ASCEND_OPP_PATH=$ASCEND_HOME/opp
-fi
-
-function compile_app()
-{
-    cd ../ascend310_infer/src/ || exit
-    if [ -f "Makefile" ]; then
-        make clean
-    fi
-    sh build.sh &> build.log    
-}
-
-function infer()
-{
-    cd - || exit
-    if [ -d result_Files ]; then
-        rm -rf ./result_Files
-    fi
-    if [ -d time_Result ]; then
-        rm -rf ./time_Result
-    fi
-    mkdir result_Files
-    mkdir time_Result
-    ../ascend310_infer/src/main --mindir_path=$model --dataset_path=$data_path --device_id=$device_id  &> infer.log
-}
-
-
-compile_app
-if [ $? -ne 0 ]; then
-    echo "compile app code failed"
-    exit 1
-fi
-infer
-if [ $? -ne 0 ]; then
-    echo "execute inference failed"
-    exit 1
-fi
diff --git a/official/cv/LearningToSeeInTheDark/scripts/run_standalone_train.sh b/official/cv/LearningToSeeInTheDark/scripts/run_standalone_train.sh
deleted file mode 100644
index 41227682e45ad293e2a90a145cd796a433f89ac5..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/scripts/run_standalone_train.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-echo "=============================================================================================================="
-echo "Please run the script as: "
-echo "bash run_standalone_train.sh DATA_PATH PRETRAINED_CKPT_PATH(optional)"
-echo "For example: bash run_standalone_train.sh /path/dataset"
-echo "It is better to use the absolute path."
-echo "=============================================================================================================="
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $1)
-if [ $# == 2 ]
-then
-    PATH2=$(get_real_path $2)
-fi
-
-if [ ! -d $PATH1 ]
-then 
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-exit 1
-fi
-
-if [ $# == 2 ] && [ ! -f $PATH2 ]
-then
-    echo "error: PRETRAINED_CKPT_PATH=$PATH2 is not a file"
-exit 1
-fi
-
-ulimit -u unlimited
-export DEVICE_NUM=1
-export DEVICE_ID=6
-export RANK_SIZE=$DEVICE_NUM
-export RANK_ID=0
-
-if [ -d "train" ];
-then
-    rm -rf ./train
-fi
-mkdir ./train
-cp ../*.py ./train
-cp *.sh ./train
-cp -r ../src ./train
-cd ./train 
-echo "start training for device $DEVICE_ID"
-env > env.log
-if [ $# == 1 ]
-then
-    python train_sony.py  --run_distribute=False  --data_url=$PATH1 &> train.log &
-fi
-
-if [ $# == 2 ]
-then
-    python train_sony.py  --run_distribute=False  --data_url=$PATH1 --pre_trained=$PATH2 &> train.log &
-fi
-cd ..
-
-
diff --git a/official/cv/LearningToSeeInTheDark/src/configs.py b/official/cv/LearningToSeeInTheDark/src/configs.py
deleted file mode 100644
index 69a8b0856f16b35e68d5c072dde479f512083499..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/src/configs.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-network config setting, will be used in train.py and eval.py
-"""
-from easydict import EasyDict as ed
-
-config = ed({
-    "batch_size": 8,
-    "total_epochs": 3000,
-    "warmup_epochs": 500,
-    "train_output_dir": "/cache/train_output",
-})
diff --git a/official/cv/LearningToSeeInTheDark/src/myutils.py b/official/cv/LearningToSeeInTheDark/src/myutils.py
deleted file mode 100644
index 428e7ae5819f94320ed5570f967fd2b9c3a1a8ea..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/src/myutils.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Train one step with loss scale"""
-from mindspore import nn
-from mindspore.ops import operations as P
-from mindspore.ops import functional as F
-from mindspore.ops import composite as C
-from mindspore.common.tensor import Tensor
-from mindspore.common.parameter import Parameter
-from mindspore.common import dtype as mstype
-from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
-from mindspore.context import ParallelMode
-from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_gradients_mean
-
-
-class WithLossCell(nn.Cell):
-    """
-    Wrap the network with loss function to compute loss.
-    Args:
-        backbone (Cell): The target network to wrap.
-        loss_fn (Cell): The loss function used to compute loss.
-    """
-    def __init__(self, backbone, loss_fn):
-        super(WithLossCell, self).__init__(auto_prefix=False)
-        self._backbone = backbone
-        self._loss_fn = loss_fn
-
-    def construct(self, x, label):
-        """ construct of loss cell  """
-        logits = self._backbone(x)
-        return self._loss_fn(logits, label)
-
-    @property
-    def backbone_network(self):
-        """
-        Get the backbone network.
-        Returns:
-            Cell, return backbone network.
-        """
-        return self._backbone
-
-
-GRADIENT_CLIP_TYPE = 1
-GRADIENT_CLIP_VALUE = 5
-clip_grad = C.MultitypeFuncGraph("clip_grad")
-
-
-class ClipGradients(nn.Cell):
-    """
-    Clip gradients.
-    Returns:
-        List, a list of clipped_grad tuples.
-    """
-    def __init__(self):
-        super(ClipGradients, self).__init__()
-        self.clip_by_norm = nn.ClipByNorm()
-        self.cast = P.Cast()
-        self.dtype = P.DType()
-
-    def construct(self, grads, clip_type, clip_value):
-        """
-        Construct gradient clip network.
-        Args:
-            grads (list): List of gradient tuples.
-            clip_type (Tensor): The way to clip, 'value' or 'norm'.
-            clip_value (Tensor): Specifies how much to clip.
-        Returns:
-            List, a list of clipped_grad tuples.
-        """
-        if clip_type not in (0, 1):
-            return grads
-        new_grads = ()
-        for grad in grads:
-            dt = self.dtype(grad)
-            if clip_type == 0:
-                t = C.clip_by_value(grad, self.cast(F.tuple_to_array((-clip_value,)), dt),
-                                    self.cast(F.tuple_to_array((clip_value,)), dt))
-            else:
-                t = self.clip_by_norm(grad, self.cast(F.tuple_to_array((clip_value,)), dt))
-            new_grads = new_grads + (t,)
-        return new_grads
-
-
-@clip_grad.register("Number", "Number", "Tensor")
-def _clip_grad(clip_type, clip_value, grad):
-    """
-    Clip gradients.
-    Inputs:
-        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
-        clip_value (float): Specifies how much to clip.
-        grad (tuple[Tensor]): Gradients.
-    Outputs:
-        tuple[Tensor], clipped gradients.
-    """
-    if clip_type not in [0, 1]:
-        return grad
-    dt = F.dtype(grad)
-    if clip_type == 0:
-        new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt),
-                                   F.cast(F.tuple_to_array((clip_value,)), dt))
-    else:
-        new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
-    return new_grad
-
-
-grad_scale = C.MultitypeFuncGraph("grad_scale")
-
-reciprocal = P.Reciprocal()
-
-
-@grad_scale.register("Tensor", "Tensor")
-def tensor_grad_scale(scale, grad):
-    """ grad scale """
-    return grad * F.cast(reciprocal(scale), F.dtype(grad))
-
-
-_grad_overflow = C.MultitypeFuncGraph("_grad_overflow")
-
-grad_overflow = P.FloatStatus()
-
-
-@_grad_overflow.register("Tensor")
-def _tensor_grad_overflow(grad):
-    return grad_overflow(grad)
-
-
-class GNMTTrainOneStepWithLossScaleCell(nn.Cell):
-    """
-    Encapsulation class of GNMT network training.
-    Append an optimizer to the training network after that the construct
-    function can be called to create the backward graph.
-    Args:
-        network: Cell. The training network. Note that loss function should have
-            been added.
-        optimizer: Optimizer. Optimizer for updating the weights.
-    Returns:
-        Tuple[Tensor, Tensor, Tensor], loss, overflow, sen.
-    """
-    def __init__(self, network, optimizer, scale_update_cell=None):
-        super(GNMTTrainOneStepWithLossScaleCell, self).__init__(auto_prefix=False)
-        self.network = network
-        self.network.set_grad()
-        self.network.add_flags(defer_inline=True)
-        self.weights = optimizer.parameters
-        self.optimizer = optimizer
-        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
-        self.reducer_flag = False
-        self.all_reduce = P.AllReduce()
-        self.parallel_mode = _get_parallel_mode()
-        if self.parallel_mode not in ParallelMode.MODE_LIST:
-            raise ValueError("Parallel mode does not support: ", self.parallel_mode)
-        if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
-            self.reducer_flag = True
-        self.grad_reducer = None
-        if self.reducer_flag:
-            mean = _get_gradients_mean()
-            degree = _get_device_num()
-            self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree)
-        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
-        self.clip_gradients = ClipGradients()
-        self.cast = P.Cast()
-        self.alloc_status = P.NPUAllocFloatStatus()
-        self.get_status = P.NPUGetFloatStatus()
-        self.clear_before_grad = P.NPUClearFloatStatus()
-        self.reduce_sum = P.ReduceSum(keep_dims=False)
-        self.base = Tensor(1, mstype.float32)
-        self.less_equal = P.LessEqual()
-        self.hyper_map = C.HyperMap()
-        self.loss_scale = None
-        self.loss_scaling_manager = scale_update_cell
-
-        if scale_update_cell:
-            self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32))
-        self.add_flags(has_effect=True)
-        self.loss_scalar = P.ScalarSummary()
-
-    def construct(self, inputs, labels, sens=None):
-        """
-            network processing
-            overflow testing
-        """
-        weights = self.weights
-        loss = self.network(inputs, labels)
-
-        # Alloc status.
-        init = self.alloc_status()
-
-        # Clear overflow buffer.
-        self.clear_before_grad(init)
-        if sens is None:
-            scaling_sens = self.loss_scale
-        else:
-            scaling_sens = sens
-        grads = self.grad(self.network, weights)(inputs, labels, self.cast(scaling_sens, mstype.float32))
-        grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads)
-        grads = self.clip_gradients(grads, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE)
-
-        if self.reducer_flag:
-            # Apply grad reducer on grads.
-            grads = self.grad_reducer(grads)
-
-        self.get_status(init)
-        flag_sum = self.reduce_sum(init, (0,))
-
-        if self.is_distributed:
-            # Sum overflow flag over devices.
-            flag_reduce = self.all_reduce(flag_sum)
-            cond = self.less_equal(self.base, flag_reduce)
-        else:
-            cond = self.less_equal(self.base, flag_sum)
-
-        overflow = cond
-
-        if sens is None:
-            overflow = self.loss_scaling_manager(self.loss_scale, cond)
-        if not overflow:
-            self.optimizer(grads)
-        self.loss_scalar("loss", loss)
-        return (loss, cond, scaling_sens)
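The following is an illustrative sketch, not part of the deleted files, of how this loss-scale cell is wired up in `train_sony.py` further below: the bare network is wrapped in a loss cell, an optimizer is attached, and the dynamic loss-scale update cell drives the overflow handling. The tiny `nn.Dense` network, the input shapes, and the use of the standard `mindspore.nn.WithLossCell` (the training script uses its own `WithLossCell` from `src.myutils`) are assumptions made only for the example; the float-status operators used by the cell require an Ascend device.

```python
# Sketch only: wiring GNMTTrainOneStepWithLossScaleCell, assuming the deleted
# src/myutils.py is importable and an Ascend device is available.
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, dtype as mstype
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from src.myutils import GNMTTrainOneStepWithLossScaleCell  # defined above

net = nn.Dense(8, 8)                       # placeholder network for the sketch
loss_net = nn.WithLossCell(net, nn.L1Loss())
optimizer = nn.Adam(loss_net.trainable_params(), learning_rate=1e-4)
scale_manager = DynamicLossScaleManager()
train_net = GNMTTrainOneStepWithLossScaleCell(loss_net, optimizer,
                                              scale_manager.get_update_cell())

inputs = Tensor(np.random.rand(2, 8), mstype.float32)
labels = Tensor(np.random.rand(2, 8), mstype.float32)
loss, overflow, sens = train_net(inputs, labels)  # one scaled training step
```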
diff --git a/official/cv/LearningToSeeInTheDark/src/unet_parts.py b/official/cv/LearningToSeeInTheDark/src/unet_parts.py
deleted file mode 100644
index 98222051e28a37247b798b96c9df9180649f7d6c..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/src/unet_parts.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Unet Components"""
-import mindspore.nn as nn
-import mindspore.ops.operations as F
-from mindspore.ops import Maximum
-from mindspore.ops import DepthToSpace as dts
-from mindspore.common.initializer import TruncatedNormal
-from mindspore.common.initializer import XavierUniform
-import mindspore as ms
-ms.set_seed(1212)
-
-
-class LRelu(nn.Cell):
-    """ activation function """
-    def __init__(self):
-        super(LRelu, self).__init__()
-        self.max = Maximum()
-
-    def construct(self, x):
-        """ construct of lrelu activation """
-        return self.max(x * 0.2, x)
-
-
-class DoubleConv(nn.Cell):
-    """conv2d for two times with lrelu activation"""
-    def __init__(self, in_channels, out_channels, mid_channels=None):
-        super(DoubleConv, self).__init__()
-        if not mid_channels:
-            mid_channels = out_channels
-        self.kernel_init = XavierUniform()
-        self.double_conv = nn.SequentialCell(
-            [nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, pad_mode="same",
-                       weight_init=self.kernel_init), LRelu(),
-             nn.Conv2d(mid_channels, out_channels, kernel_size=3, stride=1, pad_mode="same",
-                       weight_init=self.kernel_init), LRelu()])
-
-    def construct(self, x):
-        """ construct of double conv2d """
-        return self.double_conv(x)
-
-
-class Down(nn.Cell):
-    """Downscaling with maxpool then double conv"""
-
-    def __init__(self, in_channels, out_channels):
-        super(Down, self).__init__()
-        self.maxpool_conv = nn.SequentialCell(
-            [nn.MaxPool2d(kernel_size=2, stride=2, pad_mode="same"),
-             DoubleConv(in_channels, out_channels)]
-        )
-
-    def construct(self, x):
-        """ construct of down cell """
-        return self.maxpool_conv(x)
-
-
-class Up(nn.Cell):
-    """Upscaling then double conv"""
-
-    def __init__(self, in_channels, out_channels):
-        super(Up, self).__init__()
-        self.concat = F.Concat(axis=1)
-        self.kernel_init = TruncatedNormal(0.02)
-        self.conv = DoubleConv(in_channels, out_channels)
-        self.up = nn.Conv2dTranspose(in_channels, in_channels // 2, kernel_size=2, stride=2,
-                                     pad_mode='same', weight_init=self.kernel_init)
-
-    def construct(self, x1, x2):
-        """ construct of up cell """
-        x1 = self.up(x1)
-        x = self.concat((x1, x2))
-        return self.conv(x)
-
-
-class OutConv(nn.Cell):
-    """trans data into RGB channels"""
-    def __init__(self, in_channels, out_channels):
-        super(OutConv, self).__init__()
-        self.kernel_init = XavierUniform()
-        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, pad_mode='same', weight_init=self.kernel_init)
-        self.DtS = dts(block_size=2)
-
-    def construct(self, x):
-        """ construct of last conv """
-        x = self.conv(x)
-        x = self.DtS(x)
-        return x
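A quick shape check may help here: `OutConv` first projects the features to 12 channels, and `DepthToSpace(block_size=2)` then rearranges them into a 3-channel image at twice the spatial resolution, i.e. (N, 12, H, W) becomes (N, 3, 2H, 2W). The snippet below is an illustrative sketch only; the input sizes are made up and MindSpore is assumed to be installed.

```python
# Sketch: verify the channel/resolution mapping performed by OutConv.
import numpy as np
from mindspore import Tensor, dtype as mstype

out_conv = OutConv(in_channels=32, out_channels=12)          # defined above
features = Tensor(np.random.rand(1, 32, 8, 8), mstype.float32)
rgb = out_conv(features)
print(rgb.shape)  # expected: (1, 3, 16, 16)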
diff --git a/official/cv/LearningToSeeInTheDark/test_sony.py b/official/cv/LearningToSeeInTheDark/test_sony.py
deleted file mode 100644
index 292aae9f892d63e5f8f45e0aadb8b987d6d13644..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/test_sony.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""test"""
-from __future__ import division
-import argparse as arg
-import os
-import glob
-from PIL import Image
-import h5py
-import numpy as np
-import mindspore.nn as nn
-from mindspore import context, Tensor, dtype
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from src.unet_parts import DoubleConv, Down, Up, OutConv
-
-
-class UNet(nn.Cell):
-    """ Unet """
-
-    def __init__(self, n_channels, n_classes):
-        super(UNet, self).__init__()
-        self.n_channels = n_channels
-        self.n_classes = n_classes
-        self.inc = DoubleConv(n_channels, 32)
-        self.down1 = Down(32, 64)
-        self.down2 = Down(64, 128)
-        self.down3 = Down(128, 256)
-        self.down4 = Down(256, 512)
-        self.up1 = Up(512, 256)
-        self.up2 = Up(256, 128)
-        self.up3 = Up(128, 64)
-        self.up4 = Up(64, 32)
-        self.outc = OutConv(32, n_classes)
-
-    def construct(self, x):
-        """Unet construct"""
-
-        x1 = self.inc(x)
-        x2 = self.down1(x1)
-        x3 = self.down2(x2)
-        x4 = self.down3(x3)
-        x5 = self.down4(x4)
-        x = self.up1(x5, x4)
-        x = self.up2(x, x3)
-        x = self.up3(x, x2)
-        x = self.up4(x, x1)
-        logits = self.outc(x)
-
-        return logits
-
-
-def pack_raw(raw):
-    """ pack sony raw data into 4 channels """
-
-    im = np.maximum(raw - 512, 0) / (16383 - 512)  # subtract the black level
-    im = np.expand_dims(im, axis=2)
-    img_shape = im.shape
-    H = img_shape[0]
-    W = img_shape[1]
-
-    out = np.concatenate((im[0:H:2, 0:W:2, :],
-                          im[0:H:2, 1:W:2, :],
-                          im[1:H:2, 1:W:2, :],
-                          im[1:H:2, 0:W:2, :]), axis=2)
-    return out
-
-
-def get_test_data(input_dir1, gt_dir1, test_ids1):
-    """ trans input raw data into arrays then pack into a list """
-
-    final_test_inputs = []
-    for test_id in test_ids1:
-        in_files = glob.glob(input_dir1 + '%05d_00*.hdf5' % test_id)
-
-        gt_files = glob.glob(gt_dir1 + '%05d_00*.hdf5' % test_id)
-        gt_path = gt_files[0]
-        gt_fn = os.path.basename(gt_path)
-        gt_exposure = float(gt_fn[9: -6])
-
-        for in_path in in_files:
-
-            in_fn = os.path.basename(in_path)
-            in_exposure = float(in_fn[9: -6])
-            ratio = min(gt_exposure / in_exposure, 300.0)
-            ima = h5py.File(in_path, 'r')
-            in_rawed = ima.get('in')[:]
-            input_image = np.expand_dims(pack_raw(in_rawed), axis=0) * ratio
-            input_image = np.minimum(input_image, 1.0)
-            input_image = input_image.transpose([0, 3, 1, 2])
-            input_image = np.float32(input_image)
-            final_test_inputs.append(input_image)
-    return final_test_inputs
-
-
-if __name__ == '__main__':
-    parser = arg.ArgumentParser(description='MindSpore SID Eval')
-    parser.add_argument('--device_target', default='Ascend',
-                        help='device where the code will be implemented')
-    parser.add_argument('--data_url', required=True, default=None, help='Location of data')
-    parser.add_argument('--checkpoint_path', required=True, default=None, help='ckpt file path')
-    args = parser.parse_args()
-    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
-    local_data_path = args.data_url
-    input_dir = os.path.join(local_data_path, 'short/')
-    gt_dir = os.path.join(local_data_path, 'long/')
-    test_fns = glob.glob(gt_dir + '1*.hdf5')
-    test_ids = [int(os.path.basename(test_fn)[0:5]) for test_fn in test_fns]
-    ckpt_dir = args.checkpoint_path
-    param_dict = load_checkpoint(ckpt_dir)
-    net = UNet(4, 12)
-    load_param_into_net(net, param_dict)
-
-    in_ims = get_test_data(input_dir, gt_dir, test_ids)
-    i = 0
-    for in_im in in_ims:
-        output = net(Tensor(in_im, dtype.float32))
-        output = output.asnumpy()
-        output = np.minimum(np.maximum(output, 0), 1)
-        output = np.trunc(output[0] * 255)
-        output = output.astype(np.uint8)  # uint8: values are in [0, 255]; int8 would overflow
-        output = output.transpose([1, 2, 0])
-        image_out = Image.fromarray(output, 'RGB')
-        image_out.save('output_%d.png' % i)
-        i += 1
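To make the packing step above concrete, `pack_raw` turns an (H, W) Bayer raw frame into an (H/2, W/2, 4) array, subtracting the 512 black level and normalising by the 16383 white level. The snippet below is a minimal sketch with a made-up toy array, assuming only NumPy.

```python
# Sketch: shape and value range of pack_raw (defined above) on a toy raw frame.
import numpy as np

raw = np.random.randint(512, 16383, size=(8, 8)).astype(np.float32)
packed = pack_raw(raw)
print(packed.shape)                              # (4, 4, 4)
print(packed.min() >= 0.0, packed.max() <= 1.0)  # values normalised to [0, 1]
```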
diff --git a/official/cv/LearningToSeeInTheDark/train_sony.py b/official/cv/LearningToSeeInTheDark/train_sony.py
deleted file mode 100644
index b5affbfdd9b4bca59db46bf3b29c96e162abfc71..0000000000000000000000000000000000000000
--- a/official/cv/LearningToSeeInTheDark/train_sony.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""train"""
-from __future__ import division
-import os
-import glob
-import argparse as arg
-import ast
-import h5py
-import numpy as np
-from mindspore import context, Model
-from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
-from mindspore.nn.loss import L1Loss
-from mindspore.nn.dynamic_lr import piecewise_constant_lr as pc_lr
-from mindspore.nn.dynamic_lr import warmup_lr
-import mindspore.dataset as ds
-from mindspore.communication.management import init
-import mindspore.nn as nn
-from mindspore.context import ParallelMode
-from mindspore.train.loss_scale_manager import DynamicLossScaleManager
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from src.unet_parts import DoubleConv, Down, Up, OutConv
-from src.myutils import GNMTTrainOneStepWithLossScaleCell, WithLossCell
-from src.configs import config
-
-
-class UNet(nn.Cell):
-    """ Unet """
-    def __init__(self, n_channels, n_classes):
-        super(UNet, self).__init__()
-        self.n_channels = n_channels
-        self.n_classes = n_classes
-        self.inc = DoubleConv(n_channels, 32)
-        self.down1 = Down(32, 64)
-        self.down2 = Down(64, 128)
-        self.down3 = Down(128, 256)
-        self.down4 = Down(256, 512)
-        self.up1 = Up(512, 256)
-        self.up2 = Up(256, 128)
-        self.up3 = Up(128, 64)
-        self.up4 = Up(64, 32)
-        self.outc = OutConv(32, n_classes)
-
-    def construct(self, x):
-        """ Unet construct """
-        x1 = self.inc(x)
-        x2 = self.down1(x1)
-        x3 = self.down2(x2)
-        x4 = self.down3(x3)
-        x5 = self.down4(x4)
-        x = self.up1(x5, x4)
-        x = self.up2(x, x3)
-        x = self.up3(x, x2)
-        x = self.up4(x, x1)
-        logits = self.outc(x)
-        return logits
-
-
-def pack_raw(raw):
-    """ pack sony raw data into 4 channels """
-    im = np.maximum(raw - 512, 0) / (16383 - 512)  # subtract the black level
-
-    im = np.expand_dims(im, axis=2)
-    img_shape = im.shape
-    H = img_shape[0]
-    W = img_shape[1]
-
-    out = np.concatenate((im[0:H:2, 0:W:2, :],
-                          im[0:H:2, 1:W:2, :],
-                          im[1:H:2, 1:W:2, :],
-                          im[1:H:2, 0:W:2, :]), axis=2)
-    return out
-
-
-def get_dataset(input_dir1, gt_dir1, train_ids1, num_shards=None, shard_id=None, distribute=False):
-    """ get mindspore dataset from raw data """
-    input_final_data = []
-    gt_final_data = []
-    for train_id in train_ids1:
-        in_files = glob.glob(input_dir1 + '%05d_00*.hdf5' % train_id)
-
-        gt_files = glob.glob(gt_dir1 + '%05d_00*.hdf5' % train_id)
-        gt_path = gt_files[0]
-        gt_fn = os.path.basename(gt_path)
-        gt_exposure = float(gt_fn[9: -6])
-        gt = h5py.File(gt_path, 'r')
-        gt_rawed = gt.get('gt')[:]
-        gt_image = np.expand_dims(np.float32(gt_rawed / 65535.0), axis=0)
-        gt_image = gt_image.transpose([0, 3, 1, 2])
-
-        for in_path in in_files:
-            gt_final_data.append(gt_image[0])
-
-            in_fn = os.path.basename(in_path)
-            in_exposure = float(in_fn[9: -6])
-            ratio = min(gt_exposure / in_exposure, 300)
-            im = h5py.File(in_path, 'r')
-            in_rawed = im.get('in')[:]
-            input_image = np.expand_dims(pack_raw(in_rawed), axis=0) * ratio
-            input_image = np.float32(input_image)
-            input_image = input_image.transpose([0, 3, 1, 2])
-            input_final_data.append(input_image[0])
-    data = (input_final_data, gt_final_data)
-    if distribute:
-        datasets = ds.NumpySlicesDataset(data, ['input', 'label'], shuffle=True,
-                                         num_shards=num_shards, shard_id=shard_id)
-    else:
-        datasets = ds.NumpySlicesDataset(data, ['input', 'label'], shuffle=True)
-    return datasets
-
-
-def dynamic_lr(steps_per_epoch, warmup_epochs):   # if warmup is used, shift milestones by warmup_epochs
-    """ learning rate schedule with warmup """
-    milestone = [(1200 + warmup_epochs) * steps_per_epoch,
-                 (1300 + warmup_epochs) * steps_per_epoch,
-                 (1700 + warmup_epochs) * steps_per_epoch,
-                 (2500 + warmup_epochs) * steps_per_epoch]
-    learning_rates = [3e-4, 1e-5, 3e-6, 1e-6]
-    lrs = pc_lr(milestone, learning_rates)
-    return lrs
-
-
-def random_crop_and_flip(image, label):
-    """ random crop and flip """
-    ps = 512
-    # random crop
-    h = image.shape[1]
-    w = image.shape[2]
-    xx = np.random.randint(0, h - ps)
-    yy = np.random.randint(0, w - ps)
-    image = image[:, xx:xx + ps, yy:yy + ps]
-    label = label[:, xx * 2:xx * 2 + ps * 2, yy * 2:yy * 2 + ps * 2]
-    # random flip
-    if np.random.randint(2) == 1:  # random flip
-        image = np.flip(image, axis=1)
-        label = np.flip(label, axis=1)
-    if np.random.randint(2) == 1:
-        image = np.flip(image, axis=2)
-        label = np.flip(label, axis=2)
-    if np.random.randint(2) == 1:  # random transpose
-        image = np.transpose(image, (0, 2, 1))
-        label = np.transpose(label, (0, 2, 1))
-    image = np.minimum(image, 1.0)
-
-    return image, label
-
-
-if __name__ == "__main__":
-
-    parser = arg.ArgumentParser(description='MindSpore SID Example')
-    parser.add_argument('--device_target', default='Ascend',
-                        help='device where the code will be implemented')
-    parser.add_argument('--data_url', required=True, default=None, help='Location of data')
-    parser.add_argument('--pre_trained', required=False, default=None, help='Ckpt file path')
-    parser.add_argument('--run_distribute', type=ast.literal_eval, required=False, default=None,
-                        help='If run distributed')
-    args = parser.parse_args()
-    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
-
-    if args.run_distribute:
-        device_num = int(os.getenv('RANK_SIZE'))
-        device_id = int(os.getenv('DEVICE_ID'))
-        context.set_context(device_id=device_id)
-        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
-                                          gradients_mean=True)
-        init()
-
-    local_data_path = args.data_url
-
-    input_dir = os.path.join(local_data_path, 'short/')
-    gt_dir = os.path.join(local_data_path, 'long/')
-
-    train_fns = glob.glob(gt_dir + '0*.hdf5')
-    train_ids = [int(os.path.basename(train_fn)[0:5]) for train_fn in train_fns]
-
-    net = UNet(4, 12)
-    net_loss = L1Loss()
-    net = WithLossCell(net, net_loss)
-    if args.run_distribute:
-        dataset = get_dataset(input_dir, gt_dir, train_ids,
-                              num_shards=device_num, shard_id=device_id, distribute=True)
-    else:
-        dataset = get_dataset(input_dir, gt_dir, train_ids)
-    transform_list = [random_crop_and_flip]
-    dataset = dataset.map(transform_list, input_columns=['input', 'label'], output_columns=['input', 'label'])
-    dataset = dataset.batch(batch_size=config.batch_size, drop_remainder=True)
-    batches_per_epoch = dataset.get_dataset_size()
-
-    lr_warm = warmup_lr(learning_rate=3e-4, total_step=config.warmup_epochs * batches_per_epoch,
-                        step_per_epoch=batches_per_epoch, warmup_epoch=config.warmup_epochs)
-    lr = dynamic_lr(batches_per_epoch, config.warmup_epochs)
-    lr = lr_warm + lr[config.warmup_epochs:]
-    net_opt = nn.Adam(net.trainable_params(), lr)
-    scale_manager = DynamicLossScaleManager()
-    net = GNMTTrainOneStepWithLossScaleCell(net, net_opt, scale_manager.get_update_cell())
-
-    ckpt_dir = args.pre_trained
-    if ckpt_dir is not None:
-        param_dict = load_checkpoint(ckpt_dir)
-        load_param_into_net(net, param_dict)
-    model = Model(net)
-
-    loss_cb = LossMonitor()
-    time_cb = TimeMonitor(data_size=4)
-    config_ck = CheckpointConfig(save_checkpoint_steps=100 * batches_per_epoch, keep_checkpoint_max=100)
-    ckpoint_cb = ModelCheckpoint(prefix='sony_trained_net', directory=config.train_output_dir, config=config_ck)
-    callbacks_list = [ckpoint_cb, loss_cb, time_cb]
-    model.train(epoch=config.total_epochs, train_dataset=dataset,
-                callbacks=callbacks_list,
-                dataset_sink_mode=True)
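For reference, the learning-rate schedule assembled in the training script above prepends a linear warmup to the piecewise-constant schedule returned by `dynamic_lr`. The sketch below shows the same assembly in isolation; the `steps_per_epoch` and `warmup_epochs` values are made-up examples (the real values come from `src/configs.py` and the dataset size), and the final splice mirrors the one used in the script.

```python
# Sketch: warmup + piecewise-constant learning-rate schedule.
from mindspore.nn.dynamic_lr import warmup_lr

steps_per_epoch = 100          # example value
warmup_epochs = 10             # example value
lr_warm = warmup_lr(learning_rate=3e-4,
                    total_step=warmup_epochs * steps_per_epoch,
                    step_per_epoch=steps_per_epoch,
                    warmup_epoch=warmup_epochs)
lr = dynamic_lr(steps_per_epoch, warmup_epochs)   # defined above
lr = lr_warm + lr[warmup_epochs:]                 # mirrors the splice in the script
print(len(lr), lr[0], lr[-1])                     # schedule length and endpoints
```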
diff --git a/official/cv/resnet_thor/README.md b/official/cv/resnet_thor/README.md
deleted file mode 100644
index 73b0b2d454558fd1799d66746f18ab217fe063fc..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/README.md
+++ /dev/null
@@ -1,301 +0,0 @@
-# ResNet-50-THOR Example
-
-- [ResNet-50-THOR Example](#resnet-50-thor-example)
-    - [Description](#description)
-    - [Model Architecture](#model-architecture)
-    - [Dataset](#dataset)
-    - [Features](#features)
-    - [Environment Requirements](#environment-requirements)
-    - [Quick Start](#quick-start)
-    - [Script Description](#script-description)
-        - [Script Code Structure](#script-code-structure)
-        - [Script Parameters](#script-parameters)
-        - [Training Process](#training-process)
-            - [Ascend 910](#ascend-910)
-            - [GPU](#gpu)
-        - [Evaluation Process](#evaluation-process)
-            - [Ascend 910](#ascend-910-1)
-            - [GPU](#gpu-1)
-    - [Model Description](#model-description)
-        - [Evaluation Performance](#evaluation-performance)
-        - [Inference Performance](#inference-performance)
-    - [Description of Random Situation](#description-of-random-situation)
-    - [ModelZoo HomePage](#modelzoo-homepage)
-
-## Description
-
-This is an example of training ResNet-50 V1.5 on the ImageNet2012 dataset with the second-order optimizer THOR. THOR is a novel approximate second-order optimization method in MindSpore. With fewer iterations, THOR can finish ResNet-50 V1.5 training in 72 minutes to a top-1 accuracy of 75.9% using 8 Ascend 910 chips, which is much faster than SGD with Momentum.
-
-## Model Architecture
-
-The overall network architecture of ResNet-50 is shown below: [link](https://arxiv.org/pdf/1512.03385.pdf)
-
-## Dataset
-
-Dataset used: ImageNet2012
-
-- Dataset size: 224*224 color images in 1000 classes
-    - Train: 1,281,167 images
-    - Test: 50,000 images
-
-- Data format: JPEG
-    - Note: Data will be processed in dataset.py
-
-- Download the dataset ImageNet2012
-
-> Unzip the ImageNet2012 dataset to any path you want and the folder structure should include train and eval dataset as follows:
-
-```shell
-    ├── ilsvrc                  # train dataset
-    └── ilsvrc_eval             # infer dataset
-```
-
-## Features
-
-Classical first-order optimization algorithms such as SGD are cheap to compute per step, but they converge slowly and require many iterations. Second-order optimization algorithms use the second-order derivatives of the target function to accelerate convergence: they converge to the optimum faster and require fewer iterations. However, second-order methods are rarely used in deep neural network training because of their high computation cost. Their main cost lies in inverting the second-order information matrix (Hessian matrix, Fisher information matrix, etc.), whose time complexity is about $O(n^3)$. Building on the existing natural gradient algorithm, we developed the second-order optimizer THOR in MindSpore by approximating and trimming the Fisher information matrix to reduce the computational complexity of the matrix inversion. With eight Ascend 910 chips, THOR can complete ResNet50-v1.5 training on ImageNet in 72 minutes.
-
-## Environment Requirements
-
-- Hardware (Ascend/GPU)
-    - Prepare hardware environment with Ascend or GPU processor.
-
-- Framework
-    - [MindSpore](https://www.mindspore.cn/install/en)
-- For more information, please check the resources below:
-    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/en/master/index.html)
-    - [MindSpore Python API](https://www.mindspore.cn/docs/en/master/index.html)
-
-## Quick Start
-
-After installing MindSpore via the official website, you can start training and evaluation as follows:
-
-- Running on Ascend
-
-```shell
-# run distributed training example
-bash run_distribute_train.sh [RANK_TABLE_FILE] [DATASET_PATH] [DEVICE_NUM]
-
-# run evaluation example
-bash run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-> For distributed training, a hccl configuration file with JSON format needs to be created in advance. About the configuration file, you can refer to the [HCCL_TOOL](https://gitee.com/mindspore/models/tree/master/utils/hccl_tools).
-
-- Running on GPU
-
-```shell
-# run distributed training example
-bash run_distribute_train_gpu.sh [DATASET_PATH] [DEVICE_NUM]
-
-# run evaluation example
-bash run_eval_gpu.sh [DATASET_PATH] [CHECKPOINT_PATH]
- ```
-
-## Script Description
-
-### Script Code Structure
-
-```shell
-└── resnet_thor
-    ├── README.md                                 # descriptions about resnet_thor
-    ├── scripts
-    │    ├── run_distribute_train.sh              # launch distributed training for Ascend
-    │    ├── run_eval.sh                          # launch inference for Ascend
-    │    ├── run_distribute_train_gpu.sh          # launch distributed training for GPU
-    │    └── run_eval_gpu.sh                      # launch inference for GPU
-    ├── src
-    │    ├── crossentropy.py                      # CrossEntropy loss function
-    │    ├── config.py                            # parameter configuration
-    │    ├── dataset_helper.py                    # dataset helper for minddata dataset
-    │    ├── grad_reducer_thor.py                 # grad reducer for thor
-    │    ├── model_thor.py                        # model for train
-    │    ├── resnet_thor.py                       # resnet50_thor backbone
-    │    ├── thor.py                              # thor optimizer
-    │    ├── thor_layer.py                        # thor layer
-    │    └── dataset.py                           # data preprocessing
-    ├── eval.py                                   # infer script
-    ├── train.py                                  # train script
-    ├── export.py                                 # export checkpoint file into air file
-    └── mindspore_hub_conf.py                     # config file for mindspore hub repository
-```
-
-### Script Parameters
-
-Parameters for both training and inference can be set in config.py.
-
-- Parameters for Ascend 910
-
-```shell
-"class_num": 1001,                # dataset class number
-"batch_size": 32,                 # batch size of input tensor(only supports 32)
-"loss_scale": 128,                # loss scale
-"momentum": 0.9,                  # momentum of THOR optimizer
-"weight_decay": 5e-4,             # weight decay
-"epoch_size": 45,                 # only valid for taining, which is always 1 for inference
-"save_checkpoint": True,          # whether save checkpoint or not
-"save_checkpoint_epochs": 1,      # the epoch interval between two checkpoints. By default, the checkpoint will be saved every epoch
-"keep_checkpoint_max": 15,        # only keep the last keep_checkpoint_max checkpoint
-"save_checkpoint_path": "./",     # path to save checkpoint relative to the executed path
-"use_label_smooth": True,         # label smooth
-"label_smooth_factor": 0.1,       # label smooth factor
-"lr_init": 0.045,                 # learning rate init value
-"lr_decay": 6,                    # learning rate decay rate value
-"lr_end_epoch": 70,               # learning rate end epoch value
-"damping_init": 0.03,             # damping init value for Fisher information matrix
-"damping_decay": 0.87,            # damping decay rate
-"frequency": 834,                 # the step interval to update second-order information matrix(should be divisor of the steps of per epoch)
-```
-
-- Parameters for GPU
-
-```shell
-"class_num": 1001,                # dataset class number
-"batch_size": 32,                 # batch size of input tensor
-"loss_scale": 128,                # loss scale
-"momentum": 0.9,                  # momentum of THOR optimizer
-"weight_decay": 5e-4,             # weight decay
-"epoch_size": 40,                 # only valid for taining, which is always 1 for inference
-"save_checkpoint": True,          # whether save checkpoint or not
-"save_checkpoint_epochs": 1,      # the epoch interval between two checkpoints. By default, the checkpoint will be saved every epoch
-"keep_checkpoint_max": 15,        # only keep the last keep_checkpoint_max checkpoint
-"save_checkpoint_path": "./",     # path to save checkpoint relative to the executed path
-"use_label_smooth": True,         # label smooth
-"label_smooth_factor": 0.1,       # label smooth factor
-"lr_init": 0.05672,               # learning rate init value
-"lr_decay": 4.9687,               # learning rate decay rate value
-"lr_end_epoch": 50,               # learning rate end epoch value
-"damping_init": 0.02345,          # damping init value for Fisher information matrix
-"damping_decay": 0.5467,          # damping decay rate
-"frequency": 834,                 # the step interval to update second-order information matrix(should be divisor of the steps of per epoch)
-```
-
-> Due to operator limitations, the batch size currently only supports 32 on Ascend, and the update frequency of the second-order information matrix must be set to a divisor of the number of steps per epoch (for example, 834 is a divisor of 5004; see the sketch after this README). In short, our algorithm is not very flexible in setting these parameters because of framework and operator constraints, but we will address this in future versions.
-
-### Training Process
-
-#### Ascend 910
-
-```shell
-  bash run_distribute_train.sh [RANK_TABLE_FILE] [DATASET_PATH] [DEVICE_NUM]
-```
-
-We need three parameters for this script.
-
-- `RANK_TABLE_FILE`: the path of rank_table.json.
-- `DATASET_PATH`: the path of the training dataset.
-- `DEVICE_NUM`: the number of devices for distributed training.
-
-Training results will be stored in the current path, in a folder whose name begins with "train_parallel". There you can find checkpoint files together with results like the following in the log.
-
-```shell
-...
-epoch: 1 step: 5004, loss is 4.4182425
-epoch: 2 step: 5004, loss is 3.740064
-epoch: 3 step: 5004, loss is 4.0546017
-epoch: 4 step: 5004, loss is 3.7598825
-epoch: 5 step: 5004, loss is 3.3744206
-......
-epoch: 40 step: 5004, loss is 1.6907625
-epoch: 41 step: 5004, loss is 1.8217756
-epoch: 42 step: 5004, loss is 1.6453942
-...
-```
-
-#### GPU
-
-```shell
-bash run_distribute_train_gpu.sh [DATASET_PATH] [DEVICE_NUM]
-```
-
-Training results will be stored in the current path, in a folder whose name begins with "train_parallel". There you can find checkpoint files together with results like the following in the log.
-
-```shell
-...
-epoch: 1 step: 5004, loss is 4.2546034
-epoch: 2 step: 5004, loss is 4.0819564
-epoch: 3 step: 5004, loss is 3.7005644
-epoch: 4 step: 5004, loss is 3.2668946
-epoch: 5 step: 5004, loss is 3.023509
-......
-epoch: 36 step: 5004, loss is 1.645802
-...
-```
-
-### Evaluation Process
-
-Before running the command below, please check the checkpoint path used for evaluation. Please set the checkpoint path to be the absolute full path, e.g., "username/resnet_thor/train_parallel0/resnet-42_5004.ckpt".
-
-#### Ascend 910
-
-```shell
-  bash run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-We need two parameters for this script.
-
-- `DATASET_PATH`: the path of the evaluation dataset.
-- `CHECKPOINT_PATH`: the absolute path of the checkpoint file.
-
-> The checkpoint can be produced during the training process.
-
-Inference results will be stored in the example path, in a folder named "eval". There you can find results like the following in the log.
-
-```shell
-  result: {'top_5_accuracy': 0.9295574583866837, 'top_1_accuracy': 0.761443661971831} ckpt=train_parallel0/resnet-42_5004.ckpt
-```
-
-#### GPU
-
-```shell
-  bash run_eval_gpu.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-Inference results will be stored in the example path, in a folder named "eval". There you can find results like the following in the log.
-
-```shell
-  result: {'top_5_accuracy': 0.9287972151088348, 'top_1_accuracy': 0.7597031049935979} ckpt=train_parallel/resnet-36_5004.ckpt
-```
-
-## Model Description
-
-### Evaluation Performance
-
-| Parameters                 | Ascend 910                                                   |   GPU |
-| -------------------------- | --------------------------------------        |---------------------------------- |
-| Model Version              | ResNet50-v1.5                                 |ResNet50-v1.5|
-| Resource                   | Ascend 910; CPU 2.60GHz, 192cores; Memory 755G; OS Euler2.8  | GPU (Tesla V100 SXM2); CPU 2.1GHz, 24 cores; Memory 128G |
-| uploaded Date              | 07/05/2021 (month/day/year)                   |   07/05/2021 (month/day/year) |
-| MindSpore Version          | 1.3.0                                   | 1.3.0 |
-| Dataset                    | ImageNet2012                                   | ImageNet2012|
-| Training Parameters        | epoch=45, steps per epoch=5004, batch_size = 32             |epoch=40, steps per epoch=5004, batch_size = 32  |
-| Optimizer                  | THOR                                                         |THOR|
-| Loss Function              | Softmax Cross Entropy                                       |Softmax Cross Entropy           |
-| outputs                    | probability                                                 |  probability          |
-| Loss                       |1.6453942                                                    | 1.645802 |
-| Speed                      |  20.4ms/step (8pcs)                   | 76ms/step (8pcs) |
-| Total time(acc to 75.9%)   | 72 mins                          | 229 mins|
-| Parameters (M)             | 25.5                                                         | 25.5 |
-| Checkpoint for Fine tuning | 491M (.ckpt file)                                         |380M (.ckpt file)     |
-| Scripts                    | [Link](https://gitee.com/mindspore/models/tree/master/official/cv/resnet_thor) |[Link](https://gitee.com/mindspore/models/tree/master/official/cv/resnet_thor) |
-
-### Inference Performance
-
-| Parameters          | Ascend 910                  | GPU                         |
-| ------------------- | --------------------------- | --------------------------- |
-| Model Version       | ResNet50-v1.5               | ResNet50-v1.5               |
-| Resource            | Ascend 910; OS Euler2.8                  | GPU                         |
-| Uploaded Date       | 07/05/2021 (month/day/year) | 07/05/2021(month/day/year)  |
-| MindSpore Version   | 1.3.0                       | 1.3.0                       |
-| Dataset             | ImageNet2012                | ImageNet2012                |
-| batch_size          | 32                          | 32                          |
-| outputs             | probability                 | probability                 |
-| Accuracy            | 76.14%                      | 75.97%                      |
-| Model for inference | 98M (.air file)             |                             |
-
-## Description of Random Situation
-
-In dataset.py, we set the seed inside the "create_dataset" function. We also use a random seed in train.py.
-
-## ModelZoo HomePage
-
- Please check the official [homepage](https://gitee.com/mindspore/models).  
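As the note in the README above says, the THOR `frequency` parameter has to divide the number of steps per epoch. The snippet below is a small hypothetical helper, not part of the repository scripts, for checking candidate values; the step count assumes batch size 32 on 8 devices over ImageNet2012's 1,281,167 training images, which gives the 5004 steps per epoch quoted in the logs.

```python
# Sketch: list valid `frequency` values (divisors of the steps per epoch).
steps_per_epoch = 1281167 // (32 * 8)   # 5004 steps with drop_remainder
divisors = [d for d in range(1, steps_per_epoch + 1) if steps_per_epoch % d == 0]
print(steps_per_epoch)        # 5004
print(834 in divisors)        # True, the value used in config.py
print(divisors[-5:])          # the largest valid frequency values
```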
diff --git a/official/cv/resnet_thor/README_CN.md b/official/cv/resnet_thor/README_CN.md
deleted file mode 100644
index 602342a75faad074ee6abe8e1a1a07fa42e5abfd..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/README_CN.md
+++ /dev/null
@@ -1,307 +0,0 @@
-# ResNet-50-THOR Example
-
-<!-- TOC -->
-
-- [ResNet-50-THOR Example](#resnet-50-thor-example)
-    - [Overview](#overview)
-    - [Model Architecture](#model-architecture)
-    - [Dataset](#dataset)
-    - [Features](#features)
-    - [Environment Requirements](#environment-requirements)
-    - [Quick Start](#quick-start)
-    - [Script Description](#script-description)
-        - [Script Code Structure](#script-code-structure)
-        - [Script Parameters](#script-parameters)
-        - [Training Process](#training-process)
-            - [Ascend 910](#ascend-910)
-            - [GPU](#gpu)
-        - [Inference Process](#inference-process)
-            - [Ascend 910](#ascend-910-1)
-            - [GPU](#gpu-1)
-    - [Model Description](#model-description)
-        - [Training Performance](#training-performance)
-        - [Inference Performance](#inference-performance)
-    - [Description of Random Situation](#description-of-random-situation)
-    - [ModelZoo Homepage](#modelzoo-homepage)
-
-<!-- /TOC -->
-
-## Overview
-
-This example shows how to train the ResNet-50 V1.5 network on the ImageNet2012 dataset with the second-order optimizer THOR. THOR is a new approximate second-order optimization method in MindSpore that needs fewer iterations. Using 8 Ascend 910 chips, THOR reaches a top-1 accuracy of 75.9% and finishes ResNet-50 V1.5 training within 72 minutes, much faster than the SGD+Momentum algorithm.
-
-## Model Architecture
-
-The overall network architecture of ResNet-50 is shown here: [link](https://arxiv.org/pdf/1512.03385.pdf)
-
-## Dataset
-
-Dataset used: ImageNet2012
-
-- Dataset size: 224*224 color images in 1000 classes
-    - Training set: 1,281,167 images
-    - Test set: 50,000 images
-
-- Data format: JPEG
-    - Note: Data is processed in dataset.py.
-
-- Download the ImageNet2012 dataset.
-
-> Unzip the ImageNet2012 dataset to any path; the directory structure should contain the training dataset and the validation dataset, as shown below:
-
-```shell
-    ├── ilsvrc                  # training dataset
-    └── ilsvrc_eval             # validation dataset
-```
-
-## Features
-
-Classical first-order optimization algorithms such as SGD are cheap to compute per step, but they converge slowly and need many iterations. Second-order optimization algorithms use the second-order derivatives of the target function to accelerate convergence; they converge faster and need fewer iterations. However, second-order methods are rarely used in deep neural network training because of their high computation cost. Their main cost lies in inverting the second-order information matrix (Hessian matrix, Fisher information matrix, etc.), with a time complexity of about $O(n^3)$. Building on the existing natural gradient algorithm, the MindSpore-based second-order optimizer THOR reduces the computational complexity of the matrix inversion by approximating and trimming the Fisher information matrix. With 8 Ascend 910 chips, THOR can complete ResNet50-v1.5 training on ImageNet within 72 minutes.
-
-## Environment Requirements
-
-- Hardware: Ascend or GPU processors
-    - Prepare the hardware environment with Ascend or GPU processors.
-
-- Framework
-    - [MindSpore](https://www.mindspore.cn/install)
-- For more details, please refer to the following resources:
-    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/zh-CN/master/index.html)
-    - [MindSpore Python API](https://www.mindspore.cn/docs/zh-CN/master/index.html)
-
-## Quick Start
-
-After installing MindSpore via the official website, you can start training and validation as follows:
-
-- Running in an Ascend processor environment
-
-```shell
-# distributed training example
-bash run_distribute_train.sh [RANK_TABLE_FILE] [DATASET_PATH] [DEVICE_NUM]
-
-# inference example
-bash run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-> For distributed training, an HCCL configuration file in JSON format needs to be created in advance. For the configuration file, refer to [HCCL_TOOL](https://gitee.com/mindspore/models/tree/master/utils/hccl_tools).
-
-- Running in a GPU processor environment
-
-```shell
-# distributed training example
-bash run_distribute_train_gpu.sh [DATASET_PATH] [DEVICE_NUM]
-
-# inference example
-bash run_eval_gpu.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-## Script Description
-
-### Script Code Structure
-
-```shell
-└── resnet_thor
-    ├── README.md                                  # description of resnet_thor
-    ├── scripts
-    │    ├── run_distribute_train.sh               # launch Ascend distributed training
-    │    ├── run_eval.sh                           # launch Ascend inference
-    │    ├── run_distribute_train_gpu.sh           # launch GPU distributed training
-    │    └── run_eval_gpu.sh                       # launch GPU inference
-    ├── src
-    │    ├── crossentropy.py                       # cross-entropy loss function
-    │    ├── config.py                             # parameter configuration
-    │    ├── dataset_helper.py                     # dataset helper for minddata
-    │    ├── grad_reducer_thor.py                  # gradient reducer for THOR
-    │    ├── model_thor.py                         # model script
-    │    ├── resnet_thor.py                        # resnet50 model
-    │    ├── thor.py                               # THOR optimizer
-    │    ├── thor_layer.py                         # THOR layer
-    │    └── dataset.py                            # data preprocessing
-    ├── eval.py                                    # inference script
-    ├── train.py                                   # training script
-    ├── export.py                                  # export checkpoint file into MINDIR/AIR file
-    └── mindspore_hub_conf.py                      # config file for the MindSpore Hub repository
-```
-
-### Script Parameters
-
-Both training and inference parameters can be configured in config.py.
-
-- Parameters for Ascend 910
-
-```shell
-"class_num": 1001,                # number of dataset classes
-"batch_size": 32,                 # batch size of the input tensor (only 32 is supported)
-"loss_scale": 128,                # loss scale
-"momentum": 0.9,                  # momentum of the THOR optimizer
-"weight_decay": 5e-4,             # weight decay
-"epoch_size": 45,                 # only valid for training; fixed to 1 for inference
-"save_checkpoint": True,          # whether to save checkpoints
-"save_checkpoint_epochs": 1,      # epoch interval between two checkpoints; by default a checkpoint is saved every epoch
-"keep_checkpoint_max": 15,        # only keep the last keep_checkpoint_max checkpoints
-"save_checkpoint_path": "./",     # path to save checkpoints
-"use_label_smooth": True,         # whether to use label smoothing
-"label_smooth_factor": 0.1,       # label smoothing factor
-"lr_init": 0.045,                 # initial learning rate
-"lr_decay": 6,                    # learning rate decay value
-"lr_end_epoch": 70,               # learning rate end epoch value
-"damping_init": 0.03,             # initial damping value for the Fisher information matrix
-"damping_decay": 0.87,            # damping decay rate
-"frequency": 834,                 # step interval for updating the second-order information matrix (should be a divisor of the steps per epoch)
-```
-
-- Parameters for GPU
-
-```shell
-"class_num": 1001,                # number of dataset classes
-"batch_size": 32,                 # batch size of the input tensor
-"loss_scale": 128,                # loss scale
-"momentum": 0.9,                  # momentum of the THOR optimizer
-"weight_decay": 5e-4,             # weight decay
-"epoch_size": 40,                 # only valid for training; fixed to 1 for inference
-"save_checkpoint": True,          # whether to save checkpoints
-"save_checkpoint_epochs": 1,      # epoch interval between two checkpoints; by default a checkpoint is saved every epoch
-"keep_checkpoint_max": 15,        # only keep the last keep_checkpoint_max checkpoints
-"save_checkpoint_path": "./",     # path to save checkpoints
-"use_label_smooth": True,         # whether to use label smoothing
-"label_smooth_factor": 0.1,       # label smoothing factor
-"lr_init": 0.05672,               # initial learning rate
-"lr_decay": 4.9687,               # learning rate decay value
-"lr_end_epoch": 50,               # learning rate end epoch value
-"damping_init": 0.02345,          # initial damping value for the Fisher information matrix
-"damping_decay": 0.5467,          # damping decay rate
-"frequency": 834,                 # step interval for updating the second-order information matrix (should be a divisor of the steps per epoch)
-```
-
-> Due to operator limitations, the batch size currently only supports 32 on Ascend. The update frequency of the second-order information matrix must be set to a divisor of the number of steps per epoch (for example, 834 is a divisor of 5004). In short, because of framework and operator constraints our algorithm is not very flexible in setting these parameters, but this will be addressed in future versions.
-
-### Training Process
-
-#### Ascend 910
-
-```shell
-  bash run_distribute_train.sh [RANK_TABLE_FILE] [DATASET_PATH] [DEVICE_NUM]
-```
-
-This script requires three parameters:
-
-- `RANK_TABLE_FILE`: path of the rank_table.json file
-- `DATASET_PATH`: path of the training dataset
-- `DEVICE_NUM`: number of devices for distributed training
-
-Training results are saved in the current path, in a folder whose name begins with "train_parallel". You can find checkpoint files and results like the following in the log.
-
-```shell
-...
-epoch: 1 step: 5004, loss is 4.4182425
-epoch: 2 step: 5004, loss is 3.740064
-epoch: 3 step: 5004, loss is 4.0546017
-epoch: 4 step: 5004, loss is 3.7598825
-epoch: 5 step: 5004, loss is 3.3744206
-......
-epoch: 40 step: 5004, loss is 1.6907625
-epoch: 41 step: 5004, loss is 1.8217756
-epoch: 42 step: 5004, loss is 1.6453942
-...
-```
-
-#### GPU
-
-```shell
-bash run_distribute_train_gpu.sh [DATASET_PATH] [DEVICE_NUM]
-```
-
-Training results are saved in the current path, in a folder whose name begins with "train_parallel". You can find checkpoint files and results like the following in the log.
-
-```shell
-...
-epoch: 1 step: 5004, loss is 4.2546034
-epoch: 2 step: 5004, loss is 4.0819564
-epoch: 3 step: 5004, loss is 3.7005644
-epoch: 4 step: 5004, loss is 3.2668946
-epoch: 5 step: 5004, loss is 3.023509
-......
-epoch: 36 step: 5004, loss is 1.645802
-...
-```
-
-### Inference Process
-
-Before running the commands below, please check the checkpoint path used for inference. Set the checkpoint path to an absolute path, such as `username/resnet_thor/train_parallel0/resnet-42_5004.ckpt`.
-
-#### Ascend 910
-
-```shell
-  bash run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-This script requires two parameters:
-
-- `DATASET_PATH`: path of the validation dataset.
-- `CHECKPOINT_PATH`: absolute path of the checkpoint file.
-
-> The checkpoint can be produced during training.
-
-Inference results are saved in the example path, in a folder named `eval`. You can find results like the following in the log.
-
-```shell
-  result: {'top_5_accuracy': 0.9295574583866837, 'top_1_accuracy': 0.761443661971831} ckpt=train_parallel0/resnet-42_5004.ckpt
-```
-
-#### GPU
-
-```shell
-  bash run_eval_gpu.sh [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-Inference results are saved in the example path, in a folder named `eval`. You can find results like the following in the log.
-
-```shell
-  result: {'top_5_accuracy': 0.9287972151088348, 'top_1_accuracy': 0.7597031049935979} ckpt=train_parallel/resnet-36_5004.ckpt
-```
-
-## Model Description
-
-### Training Performance
-
-| Parameters | Ascend 910 | GPU |
-| -------------------------- | -------------------------------------- | ---------------------------------- |
-| Model Version | ResNet50-v1.5 | ResNet50-v1.5 |
-| Resource | Ascend 910; CPU 2.60GHz, 192 cores; Memory 755G; OS Euler2.8 | GPU (Tesla V100 SXM2); CPU 2.1GHz, 24 cores; Memory 128G |
-| Uploaded Date | 2021-07-05 | 2021-07-05 |
-| MindSpore Version | 1.3.0 | 1.3.0 |
-| Dataset | ImageNet2012 | ImageNet2012 |
-| Training Parameters | epoch=45, steps per epoch=5004, batch_size = 32 | epoch=40, steps per epoch=5004, batch_size = 32 |
-| Optimizer | THOR | THOR |
-| Loss Function | Softmax Cross Entropy | Softmax Cross Entropy |
-| Outputs | probability | probability |
-| Loss | 1.6453942 | 1.645802 |
-| Speed | 20.4 ms/step (8pcs) | 76 ms/step (8pcs) |
-| Total time (to 75.9%) | 72 minutes | 229 minutes |
-| Parameters (M) | 25.5 | 25.5 |
-| Checkpoint | 491M (.ckpt file) | 380M (.ckpt file) |
-| Scripts | [Link](https://gitee.com/mindspore/models/tree/master/official/cv/resnet_thor) | [Link](https://gitee.com/mindspore/models/tree/master/official/cv/resnet_thor) |
-
-### Inference Performance
-
-| Parameters          | Ascend 910                  | GPU                         |
-| ------------------- | --------------------------- | --------------------------- |
-| Model Version       | ResNet50-v1.5               | ResNet50-v1.5               |
-| Resource            | Ascend 910; OS Euler2.8     | GPU                         |
-| Uploaded Date       | 2021-07-05                  | 2021-07-05                  |
-| MindSpore Version   | 1.3.0                       | 1.3.0                       |
-| Dataset             | ImageNet2012                | ImageNet2012                |
-| Batch size          | 32                          | 32                          |
-| Outputs             | probability                 | probability                 |
-| Accuracy            | 76.14%                      | 75.97%                      |
-| Model for inference | 98M (.air file)             |                             |
-
-## Description of Random Situation
-
-In dataset.py, we set the seed inside the "create_dataset" function. We also use a random seed in train.py.
-
-## ModelZoo Homepage
-
-Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/official/cv/resnet_thor/eval.py b/official/cv/resnet_thor/eval.py
deleted file mode 100644
index 56b564da93888a8b2f735a8433a0330e7781e10c..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/eval.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""train resnet."""
-import os
-import argparse
-from mindspore import context
-from mindspore.common import set_seed
-from mindspore.train.model import Model
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from src.crossentropy import CrossEntropy
-from src.config import config
-from src.dataset import create_dataset
-from src.resnet import resnet50 as resnet
-
-parser = argparse.ArgumentParser(description='Image classification')
-parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
-parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
-parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
-args_opt = parser.parse_args()
-
-set_seed(1)
-
-if __name__ == '__main__':
-    target = args_opt.device_target
-
-    # init context
-    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
-    if target != "GPU":
-        device_id = int(os.getenv('DEVICE_ID'))
-        context.set_context(device_id=device_id)
-
-    # create dataset
-    dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size,
-                             target=target)
-
-    # define net
-    net = resnet(class_num=config.class_num)
-    net.add_flags_recursive(thor=False)
-
-    # load checkpoint
-    param_dict = load_checkpoint(args_opt.checkpoint_path)
-    keys = list(param_dict.keys())
-    for key in keys:
-        if "damping" in key:
-            param_dict.pop(key)
-    load_param_into_net(net, param_dict)
-    net.set_train(False)
-
-    # define loss, model
-    if not config.use_label_smooth:
-        config.label_smooth_factor = 0.0
-    loss = CrossEntropy(smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
-
-    # define model
-    model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})
-
-    # eval model
-    res = model.eval(dataset)
-    print("result:", res, "ckpt=", args_opt.checkpoint_path)
diff --git a/official/cv/resnet_thor/export.py b/official/cv/resnet_thor/export.py
deleted file mode 100644
index 1bf0facca4fb3a2c59715c8202f113a37e64d9c8..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/export.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""export"""
-import argparse
-import numpy as np
-
-from mindspore import context, Tensor, load_checkpoint, load_param_into_net, export
-from src.resnet import resnet50 as resnet
-from src.config import config
-
-parser = argparse.ArgumentParser(description='checkpoint export')
-parser.add_argument("--device_id", type=int, default=0, help="Device id")
-parser.add_argument("--batch_size", type=int, default=1, help="batch size")
-parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.")
-parser.add_argument('--width', type=int, default=224, help='input width')
-parser.add_argument('--height', type=int, default=224, help='input height')
-parser.add_argument("--file_name", type=str, default="resnet_thor", help="output file name.")
-parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="MINDIR", help="file format")
-parser.add_argument("--device_target", type=str, default="Ascend",
-                    choices=["Ascend", "GPU", "CPU"], help="device target (default: Ascend)")
-args = parser.parse_args()
-
-context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, device_id=args.device_id)
-
-if __name__ == '__main__':
-
-    context.set_context(mode=context.GRAPH_MODE, save_graphs=False)
-
-    # define net
-    net = resnet(class_num=config.class_num)
-    net.add_flags_recursive(thor=False)
-
-    # load checkpoint
-    param_dict = load_checkpoint(args.ckpt_file)
-    keys = list(param_dict.keys())
-    for key in keys:
-        if "damping" in key:
-            param_dict.pop(key)
-    load_param_into_net(net, param_dict)
-
-    inputs = np.random.uniform(0.0, 1.0, size=[args.batch_size, 3, args.height, args.width]).astype(np.float32)
-    export(net, Tensor(inputs), file_name=args.file_name, file_format=args.file_format)
diff --git a/official/cv/resnet_thor/mindspore_hub_conf.py b/official/cv/resnet_thor/mindspore_hub_conf.py
deleted file mode 100644
index 9b705551f8f5ceeb7fa9ab4885fc9b1c36b9f714..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/mindspore_hub_conf.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""hub config."""
-from src.resnet import resnet50
-
-def create_network(name, *args, **kwargs):
-    if name == 'resnet50_thor':
-        return resnet50(*args, **kwargs)
-    raise NotImplementedError(f"{name} is not implemented in the repo")
diff --git a/official/cv/resnet_thor/requirements.txt b/official/cv/resnet_thor/requirements.txt
deleted file mode 100644
index 6bad10388ecb1eefd890a797d833976a5e631541..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-numpy
-scipy
diff --git a/official/cv/resnet_thor/scripts/run_distribute_train.sh b/official/cv/resnet_thor/scripts/run_distribute_train.sh
deleted file mode 100644
index 534a6006c9be43de10f33e36b228d6bb2b16f2c9..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/scripts/run_distribute_train.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 3 ]
-then
-    echo "Usage: sh run_distribute_train.sh [RANK_TABLE_FILE] [DATASET_PATH] [DEVICE_NUM]"
-exit 1
-fi
-
-if [ ! -f $1 ]
-then
-    echo "error: RANK_TABLE_FILE=$1 is not a file"
-exit 1
-fi
-
-if [ ! -d $2 ]
-then
-    echo "error: DATASET_PATH=$2 is not a directory"
-exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $1)
-PATH2=$(get_real_path $2)
-
-ulimit -u unlimited
-export DEVICE_NUM=$3
-export RANK_SIZE=$3
-export RANK_TABLE_FILE=$PATH1
-
-for((i=0; i<${DEVICE_NUM}; i++))
-do
-    export DEVICE_ID=$i
-    export RANK_ID=$i
-    rm -rf ./train_parallel$i
-    mkdir ./train_parallel$i
-    cp ../*.py ./train_parallel$i
-    cp -r ../src ./train_parallel$i
-    cd ./train_parallel$i || exit
-    echo "start training for rank $RANK_ID, device $DEVICE_ID"
-
-    env > env.log
-    python train.py --run_distribute=True --device_num=$DEVICE_NUM --dataset_path=$PATH2 > log 2>&1 &
-    cd ..
-done
diff --git a/official/cv/resnet_thor/scripts/run_distribute_train_gpu.sh b/official/cv/resnet_thor/scripts/run_distribute_train_gpu.sh
deleted file mode 100644
index b7e63456fef750e2cfd9de9ae4eb1e04c7148239..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/scripts/run_distribute_train_gpu.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 2 ]
-then
-    echo "Usage: sh run_distribute_train_gpu.sh [DATASET_PATH] [DEVICE_NUM]"
-    exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $1)
-
-ulimit -u unlimited
-export DEVICE_NUM=$2
-export RANK_SIZE=$2
-
-rm -rf ./train_parallel
-mkdir ./train_parallel
-cp ../*.py ./train_parallel
-cp *.sh ./train_parallel
-cp -r ../src ./train_parallel
-cd ./train_parallel || exit
-
-mpirun --allow-run-as-root -n $RANK_SIZE --output-filename log_output --merge-stderr-to-stdout \
-  python train.py --run_distribute=True \
-    --device_num=$DEVICE_NUM --device_target="GPU" --dataset_path=$PATH1 &> log &
-
diff --git a/official/cv/resnet_thor/scripts/run_eval.sh b/official/cv/resnet_thor/scripts/run_eval.sh
deleted file mode 100644
index e6a0174a2a582ea83b33f6c6aa1e30661cee4f76..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/scripts/run_eval.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 2 ]
-then
-    echo "Usage: bash run_eval.sh [DATASET_PATH] [CHECKPOINT_PATH]"
-    exit 1
-fi
-
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $1)
-PATH2=$(get_real_path $2)
-
-
-if [ ! -d $PATH1 ]
-then
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-    exit 1
-fi
-
-if [ ! -f $PATH2 ]
-then
-    echo "error: CHECKPOINT_PATH=$PATH2 is not a file"
-    exit 1
-fi
-
-ulimit -u unlimited
-export DEVICE_NUM=1
-export DEVICE_ID=0
-export RANK_SIZE=$DEVICE_NUM
-export RANK_ID=0
-
-if [ -d "eval" ];
-then
-    rm -rf ./eval
-fi
-mkdir ./eval
-cp ../*.py ./eval
-cp *.sh ./eval
-cp -r ../src ./eval
-cd ./eval || exit
-env > env.log
-echo "start evaluation for device $DEVICE_ID"
-python eval.py --dataset_path=$PATH1 --checkpoint_path=$PATH2 &> log &
-cd ..
diff --git a/official/cv/resnet_thor/scripts/run_eval_gpu.sh b/official/cv/resnet_thor/scripts/run_eval_gpu.sh
deleted file mode 100644
index c9be5dc8665b84c499717e4c3fec8e83d0d28f00..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/scripts/run_eval_gpu.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 2 ]
-then
-    echo "Usage: bash run_eval_gpu.sh [DATASET_PATH] [CHECKPOINT_PATH]"
-    exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $1)
-PATH2=$(get_real_path $2)
-
-
-if [ ! -d $PATH1 ]
-then
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-    exit 1
-fi
-
-if [ ! -f $PATH2 ]
-then
-    echo "error: CHECKPOINT_PATH=$PATH2 is not a file"
-    exit 1
-fi
-
-ulimit -u unlimited
-export DEVICE_NUM=1
-export DEVICE_ID=0
-export RANK_SIZE=$DEVICE_NUM
-export RANK_ID=0
-
-if [ -d "eval" ];
-then
-    rm -rf ./eval
-fi
-mkdir ./eval
-cp ../*.py ./eval
-cp *.sh ./eval
-cp -r ../src ./eval
-cd ./eval || exit
-env > env.log
-echo "start evaluation for device $DEVICE_ID"
-python eval.py --dataset_path=$PATH1 --checkpoint_path=$PATH2 --device_target="GPU" &> log &
-cd ..
diff --git a/official/cv/resnet_thor/src/config.py b/official/cv/resnet_thor/src/config.py
deleted file mode 100644
index 50af1186823d6a35bd4c9a87f35619a2e60daa43..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/src/config.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-Network configuration settings used in train.py and eval.py.
-"""
-from easydict import EasyDict as ed
-
-# config for resnet50, imagenet2012, Ascend 910
-config = ed({
-    "class_num": 1001,
-    "batch_size": 32,
-    "loss_scale": 128,
-    "momentum": 0.9,
-    "weight_decay": 5e-4,
-    "epoch_size": 45,
-    "pretrain_epoch_size": 0,
-    "save_checkpoint": True,
-    "save_checkpoint_epochs": 2,
-    "keep_checkpoint_max": 15,
-    "save_checkpoint_path": "./",
-    "use_label_smooth": True,
-    "label_smooth_factor": 0.1,
-    "lr_init": 0.05803,
-    "lr_decay": 4.04839,
-    "lr_end_epoch": 53,
-    "damping_init": 0.02714,
-    "damping_decay": 0.50036,
-    "frequency": 834,
-    "use_dynamic_frequency": False,
-    "first_stage_steps": 835,
-})
-
-# config for resnet50, imagenet2012, GPU
-config_gpu = ed({
-    "class_num": 1001,
-    "batch_size": 32,
-    "loss_scale": 128,
-    "momentum": 0.9,
-    "weight_decay": 5e-4,
-    "epoch_size": 40,
-    "save_checkpoint": True,
-    "save_checkpoint_epochs": 1,
-    "keep_checkpoint_max": 15,
-    "save_checkpoint_path": "./",
-    "use_label_smooth": True,
-    "label_smooth_factor": 0.1,
-    "lr_init": 0.05672,
-    "lr_decay": 4.9687,
-    "lr_end_epoch": 50,
-    "damping_init": 0.02345,
-    "damping_decay": 0.5467,
-    "frequency": 834,
-    "use_dynamic_frequency": False,
-    "first_stage_steps": 835,
-})
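
For reference, a minimal sketch (not part of the removed file) of how EasyDict-based hyperparameter blocks like the ones above are typically consumed; only the `easydict` package is assumed, and the keys shown are taken from the Ascend config.

```python
# Sketch only: attribute-style access to an EasyDict config block.
from easydict import EasyDict as ed

config = ed({
    "class_num": 1001,
    "batch_size": 32,
    "lr_init": 0.05803,
})

# Attribute access and dict access are interchangeable on an EasyDict.
assert config.batch_size == config["batch_size"] == 32
print(f"training a {config.class_num}-way classifier, lr_init={config.lr_init}")
```
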
diff --git a/official/cv/resnet_thor/src/crossentropy.py b/official/cv/resnet_thor/src/crossentropy.py
deleted file mode 100644
index 2a5a10f728f46b77ca33c444b47f3b18ce703c84..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/src/crossentropy.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""CrossEntropy"""
-import mindspore.nn as nn
-from mindspore import Tensor
-from mindspore.common import dtype as mstype
-from mindspore.ops import functional as F
-from mindspore.ops import operations as P
-
-
-class Loss(nn.Cell):
-    """
-    Base class for other losses.
-    """
-    def __init__(self, reduction='mean'):
-        super(Loss, self).__init__()
-        if reduction is None:
-            reduction = 'none'
-
-        if reduction not in ('mean', 'sum', 'none'):
-            raise ValueError(f"reduction method for {reduction.lower()} is not supported")
-
-        self.average = True
-        self.reduce = True
-        if reduction == 'sum':
-            self.average = False
-        if reduction == 'none':
-            self.reduce = False
-
-        self.reduce_mean = P.ReduceMean()
-        self.reduce_sum = P.ReduceSum()
-        self.mul = P.Mul()
-        self.cast = P.Cast()
-
-    def get_axis(self, x):
-        shape = F.shape(x)
-        length = F.tuple_len(shape)
-        perm = F.make_range(0, length)
-        return perm
-
-    def get_loss(self, x, weights=1.0):
-        """
-        Computes the weighted loss
-        Args:
-            weights: Optional `Tensor` whose rank is either 0, or the same rank as inputs, and must be broadcastable to
-                inputs (i.e., all dimensions must be either `1`, or the same as the corresponding inputs dimension).
-        """
-        input_dtype = x.dtype
-        x = self.cast(x, mstype.float32)
-        weights = self.cast(weights, mstype.float32)
-        x = self.mul(weights, x)
-        if self.reduce and self.average:
-            x = self.reduce_mean(x, self.get_axis(x))
-        if self.reduce and not self.average:
-            x = self.reduce_sum(x, self.get_axis(x))
-        x = self.cast(x, input_dtype)
-        return x
-
-    def construct(self, base, target):
-        raise NotImplementedError
-
-
-class CrossEntropy(Loss):
-    """CrossEntropy"""
-    def __init__(self, smooth_factor=0., num_classes=1000):
-        super(CrossEntropy, self).__init__()
-        self.onehot = P.OneHot()
-        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
-        self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)
-        self.ce = nn.SoftmaxCrossEntropyWithLogits()
-        self.mean = P.ReduceMean(False)
-
-    def construct(self, logit, label):
-        one_hot_label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
-        loss = self.ce(logit, one_hot_label)
-        loss = self.mean(loss, 0)
-        return loss
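
The on_value/off_value arithmetic used by the removed CrossEntropy cell can be illustrated with plain NumPy; this is only a sketch of the label-smoothing scheme, not the MindSpore implementation itself.

```python
# Sketch: with smooth_factor s and num_classes K, the true class gets 1 - s
# and every other class gets s / (K - 1), so the target still sums to 1.
import numpy as np

def smoothed_one_hot(label, num_classes, smooth_factor=0.1):
    on_value = 1.0 - smooth_factor
    off_value = smooth_factor / (num_classes - 1)
    one_hot = np.full(num_classes, off_value, dtype=np.float32)
    one_hot[label] = on_value
    return one_hot

target = smoothed_one_hot(label=2, num_classes=5, smooth_factor=0.1)
print(target)        # [0.025 0.025 0.9 0.025 0.025]
print(target.sum())  # 1.0
```
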
diff --git a/official/cv/resnet_thor/src/dataset.py b/official/cv/resnet_thor/src/dataset.py
deleted file mode 100644
index 63dd12a90dd178587534e7ccfba45c2050a46447..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/src/dataset.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2020-2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-create train or eval dataset.
-"""
-import os
-import mindspore.common.dtype as mstype
-import mindspore.dataset as ds
-import mindspore.dataset.vision as C
-import mindspore.dataset.transforms as C2
-from mindspore.communication.management import init, get_rank, get_group_size
-
-
-def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
-    """
-    create a train or eval imagenet2012 dataset for resnet50
-
-    Args:
-        dataset_path(string): path of the dataset.
-        do_train(bool): whether the dataset is used for training or evaluation.
-        repeat_num(int): number of times to repeat the dataset. Default: 1
-        batch_size(int): batch size of the dataset. Default: 32
-        target(str): device target. Default: Ascend
-    Returns:
-        dataset
-    """
-
-    if target == "Ascend":
-        device_num, rank_id = _get_rank_info()
-        num_parallels = 8
-    else:
-        init()
-        rank_id = get_rank()
-        device_num = get_group_size()
-        num_parallels = 4
-
-    if device_num == 1:
-        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallels, shuffle=True)
-    else:
-        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallels, shuffle=True,
-                                         num_shards=device_num, shard_id=rank_id)
-
-    image_size = 224
-    # Computed from random subset of ImageNet training images
-    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
-    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
-
-    # define map operations
-    if do_train:
-        trans = [
-            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            C.RandomHorizontalFlip(prob=0.5),
-            C.Normalize(mean=mean, std=std),
-            C.HWC2CHW()
-        ]
-    else:
-        trans = [
-            C.Decode(),
-            C.Resize(256),
-            C.CenterCrop(image_size),
-            C.Normalize(mean=mean, std=std),
-            C.HWC2CHW()
-        ]
-
-    type_cast_op = C2.TypeCast(mstype.int32)
-
-    data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=num_parallels)
-    data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallels)
-
-    # apply batch operations
-    data_set = data_set.batch(batch_size, drop_remainder=True)
-
-    # apply dataset repeat operation
-    data_set = data_set.repeat(repeat_num)
-
-    return data_set
-
-
-def _get_rank_info():
-    """
-    get rank size and rank id
-    """
-    rank_size = int(os.environ.get("RANK_SIZE", 1))
-
-    if rank_size > 1:
-        rank_size = get_group_size()
-        rank_id = get_rank()
-    else:
-        rank_size = 1
-        rank_id = 0
-
-    return rank_size, rank_id
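
A NumPy-only sketch of what the Normalize and HWC2CHW steps in the removed pipeline compute; the array names below are illustrative, not part of the original module.

```python
# Sketch: per-channel normalization followed by HWC -> CHW layout change,
# mirroring C.Normalize(mean, std) and C.HWC2CHW() above.
import numpy as np

mean = np.array([0.485 * 255, 0.456 * 255, 0.406 * 255], dtype=np.float32)
std = np.array([0.229 * 255, 0.224 * 255, 0.225 * 255], dtype=np.float32)

image_hwc = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
normalized = (image_hwc - mean) / std      # broadcasts over the channel axis
image_chw = normalized.transpose(2, 0, 1)  # HWC -> CHW

print(image_chw.shape)  # (3, 224, 224)
```
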
diff --git a/official/cv/resnet_thor/src/resnet.py b/official/cv/resnet_thor/src/resnet.py
deleted file mode 100644
index c160f48d981d8de89b888579ae29a9ddec1c65d1..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/src/resnet.py
+++ /dev/null
@@ -1,573 +0,0 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""ResNet."""
-import math
-import numpy as np
-from scipy.stats import truncnorm
-import mindspore.nn as nn
-import mindspore.common.dtype as mstype
-from mindspore.ops import operations as P
-from mindspore.ops import functional as F
-from mindspore.common.tensor import Tensor
-
-
-def _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
-    fan_in = in_channel * kernel_size * kernel_size
-    scale = 1.0
-    scale /= max(1., fan_in)
-    stddev = (scale ** 0.5) / .87962566103423978
-    mu, sigma = 0, stddev
-    weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)
-    weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))
-    return Tensor(weight, dtype=mstype.float32)
-
-
-def _weight_variable(shape, factor=0.01):
-    init_value = np.random.randn(*shape).astype(np.float32) * factor
-    return Tensor(init_value)
-
-
-def calculate_gain(nonlinearity, param=None):
-    """calculate_gain"""
-    linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
-    res = 0
-    if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
-        res = 1
-    elif nonlinearity == 'tanh':
-        res = 5.0 / 3
-    elif nonlinearity == 'relu':
-        res = math.sqrt(2.0)
-    elif nonlinearity == 'leaky_relu':
-        if param is None:
-            negative_slope = 0.01
-        elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
-            # True/False are instances of int, hence check above
-            negative_slope = param
-        else:
-            raise ValueError("negative_slope {} not a valid number".format(param))
-        res = math.sqrt(2.0 / (1 + negative_slope ** 2))
-    else:
-        raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
-    return res
-
-
-def _calculate_fan_in_and_fan_out(tensor):
-    """_calculate_fan_in_and_fan_out"""
-    dimensions = len(tensor)
-    if dimensions < 2:
-        raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
-    if dimensions == 2:  # Linear
-        fan_in = tensor[1]
-        fan_out = tensor[0]
-    else:
-        num_input_fmaps = tensor[1]
-        num_output_fmaps = tensor[0]
-        receptive_field_size = 1
-        if dimensions > 2:
-            receptive_field_size = tensor[2] * tensor[3]
-        fan_in = num_input_fmaps * receptive_field_size
-        fan_out = num_output_fmaps * receptive_field_size
-    return fan_in, fan_out
-
-
-def _calculate_correct_fan(tensor, mode):
-    mode = mode.lower()
-    valid_modes = ['fan_in', 'fan_out']
-    if mode not in valid_modes:
-        raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
-    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
-    return fan_in if mode == 'fan_in' else fan_out
-
-
-def kaiming_normal(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'):
-    fan = _calculate_correct_fan(inputs_shape, mode)
-    gain = calculate_gain(nonlinearity, a)
-    std = gain / math.sqrt(fan)
-    return np.random.normal(0, std, size=inputs_shape).astype(np.float32)
-
-
-def kaiming_uniform(inputs_shape, a=0., mode='fan_in', nonlinearity='leaky_relu'):
-    fan = _calculate_correct_fan(inputs_shape, mode)
-    gain = calculate_gain(nonlinearity, a)
-    std = gain / math.sqrt(fan)
-    bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
-    return np.random.uniform(-bound, bound, size=inputs_shape).astype(np.float32)
-
-
-def _conv3x3(in_channel, out_channel, stride=1, use_se=False, res_base=False):
-    if use_se:
-        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=3)
-    else:
-        weight_shape = (out_channel, in_channel, 3, 3)
-        weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
-    if res_base:
-        return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,
-                         padding=1, pad_mode='pad', weight_init=weight)
-    return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,
-                     padding=0, pad_mode='same', weight_init=weight)
-
-
-def _conv1x1(in_channel, out_channel, stride=1, use_se=False, res_base=False):
-    if use_se:
-        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=1)
-    else:
-        weight_shape = (out_channel, in_channel, 1, 1)
-        weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
-    if res_base:
-        return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,
-                         padding=0, pad_mode='pad', weight_init=weight)
-    return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,
-                     padding=0, pad_mode='same', weight_init=weight)
-
-
-def _conv7x7(in_channel, out_channel, stride=1, use_se=False, res_base=False):
-    if use_se:
-        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=7)
-    else:
-        weight_shape = (out_channel, in_channel, 7, 7)
-        weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
-    if res_base:
-        return nn.Conv2d(in_channel, out_channel,
-                         kernel_size=7, stride=stride, padding=3, pad_mode='pad', weight_init=weight)
-    return nn.Conv2d(in_channel, out_channel,
-                     kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=weight)
-
-
-def _bn(channel, res_base=False):
-    if res_base:
-        return nn.BatchNorm2d(channel, eps=1e-5, momentum=0.1,
-                              gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
-    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
-                          gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
-
-
-def _bn_last(channel):
-    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
-                          gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1)
-
-
-def _fc(in_channel, out_channel, use_se=False):
-    if use_se:
-        weight = np.random.normal(loc=0, scale=0.01, size=out_channel * in_channel)
-        weight = Tensor(np.reshape(weight, (out_channel, in_channel)), dtype=mstype.float32)
-    else:
-        weight_shape = (out_channel, in_channel)
-        weight = Tensor(kaiming_uniform(weight_shape, a=math.sqrt(5)))
-    return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
-
-
-class ResidualBlock(nn.Cell):
-    """
-    ResNet V1 residual block definition.
-
-    Args:
-        in_channel (int): Input channel.
-        out_channel (int): Output channel.
-        stride (int): Stride size for the first convolutional layer. Default: 1.
-        use_se (bool): Enable SE-ResNet50 net. Default: False.
-        se_block(bool): Use se block in SE-ResNet50 net. Default: False.
-
-    Returns:
-        Tensor, output tensor.
-
-    Examples:
-        >>> ResidualBlock(3, 256, stride=2)
-    """
-    expansion = 4
-
-    def __init__(self,
-                 in_channel,
-                 out_channel,
-                 stride=1,
-                 use_se=False, se_block=False):
-        super(ResidualBlock, self).__init__()
-        self.stride = stride
-        self.use_se = use_se
-        self.se_block = se_block
-        channel = out_channel // self.expansion
-        self.conv1 = _conv1x1(in_channel, channel, stride=1, use_se=self.use_se)
-        self.bn1 = _bn(channel)
-        if self.use_se and self.stride != 1:
-            self.e2 = nn.SequentialCell([_conv3x3(channel, channel, stride=1, use_se=True), _bn(channel),
-                                         nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')])
-        else:
-            self.conv2 = _conv3x3(channel, channel, stride=stride, use_se=self.use_se)
-            self.bn2 = _bn(channel)
-
-        self.conv3 = _conv1x1(channel, out_channel, stride=1, use_se=self.use_se)
-        self.bn3 = _bn_last(out_channel)
-        if self.se_block:
-            self.se_global_pool = P.ReduceMean(keep_dims=False)
-            self.se_dense_0 = _fc(out_channel, int(out_channel / 4), use_se=self.use_se)
-            self.se_dense_1 = _fc(int(out_channel / 4), out_channel, use_se=self.use_se)
-            self.se_sigmoid = nn.Sigmoid()
-            self.se_mul = P.Mul()
-        self.relu = nn.ReLU()
-
-        self.down_sample = False
-
-        if stride != 1 or in_channel != out_channel:
-            self.down_sample = True
-        self.down_sample_layer = None
-
-        if self.down_sample:
-            if self.use_se:
-                if stride == 1:
-                    self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel,
-                                                                         stride, use_se=self.use_se), _bn(out_channel)])
-                else:
-                    self.down_sample_layer = nn.SequentialCell([nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'),
-                                                                _conv1x1(in_channel, out_channel, 1,
-                                                                         use_se=self.use_se), _bn(out_channel)])
-            else:
-                self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
-                                                                     use_se=self.use_se), _bn(out_channel)])
-
-    def construct(self, x):
-        identity = x
-
-        out = self.conv1(x)
-        out = self.bn1(out)
-        out = self.relu(out)
-        if self.use_se and self.stride != 1:
-            out = self.e2(out)
-        else:
-            out = self.conv2(out)
-            out = self.bn2(out)
-            out = self.relu(out)
-        out = self.conv3(out)
-        out = self.bn3(out)
-        if self.se_block:
-            out_se = out
-            out = self.se_global_pool(out, (2, 3))
-            out = self.se_dense_0(out)
-            out = self.relu(out)
-            out = self.se_dense_1(out)
-            out = self.se_sigmoid(out)
-            out = F.reshape(out, F.shape(out) + (1, 1))
-            out = self.se_mul(out, out_se)
-
-        if self.down_sample:
-            identity = self.down_sample_layer(identity)
-
-        out = out + identity
-        out = self.relu(out)
-
-        return out
-
-
-class ResidualBlockBase(nn.Cell):
-    """
-    ResNet V1 residual block definition.
-
-    Args:
-        in_channel (int): Input channel.
-        out_channel (int): Output channel.
-        stride (int): Stride size for the first convolutional layer. Default: 1.
-        use_se (bool): Enable SE-ResNet50 net. Default: False.
-        se_block(bool): Use se block in SE-ResNet50 net. Default: False.
-        res_base (bool): Enable parameter setting of resnet18. Default: True.
-
-    Returns:
-        Tensor, output tensor.
-
-    Examples:
-        >>> ResidualBlockBase(3, 256, stride=2)
-    """
-
-    def __init__(self,
-                 in_channel,
-                 out_channel,
-                 stride=1,
-                 use_se=False,
-                 se_block=False,
-                 res_base=True):
-        super(ResidualBlockBase, self).__init__()
-        self.res_base = res_base
-        self.conv1 = _conv3x3(in_channel, out_channel, stride=stride, res_base=self.res_base)
-        self.bn1d = _bn(out_channel)
-        self.conv2 = _conv3x3(out_channel, out_channel, stride=1, res_base=self.res_base)
-        self.bn2d = _bn(out_channel)
-        self.relu = nn.ReLU()
-
-        self.down_sample = False
-        if stride != 1 or in_channel != out_channel:
-            self.down_sample = True
-
-        self.down_sample_layer = None
-        if self.down_sample:
-            self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
-                                                                 use_se=use_se, res_base=self.res_base),
-                                                        _bn(out_channel, res_base)])
-
-    def construct(self, x):
-        identity = x
-
-        out = self.conv1(x)
-        out = self.bn1d(out)
-        out = self.relu(out)
-
-        out = self.conv2(out)
-        out = self.bn2d(out)
-
-        if self.down_sample:
-            identity = self.down_sample_layer(identity)
-
-        out = out + identity
-        out = self.relu(out)
-
-        return out
-
-
-class ResNet(nn.Cell):
-    """
-    ResNet architecture.
-
-    Args:
-        block (Cell): Block for network.
-        layer_nums (list): Numbers of block in different layers.
-        in_channels (list): Input channel in each layer.
-        out_channels (list): Output channel in each layer.
-        strides (list):  Stride size in each layer.
-        num_classes (int): The number of classes that the training images are belonging to.
-        use_se (bool): Enable SE-ResNet50 net. Default: False.
-        se_block(bool): Use se block in SE-ResNet50 net in layer 3 and layer 4. Default: False.
-        res_base (bool): Enable parameter setting of resnet18. Default: False.
-
-    Returns:
-        Tensor, output tensor.
-
-    Examples:
-        >>> ResNet(ResidualBlock,
-        >>>        [3, 4, 6, 3],
-        >>>        [64, 256, 512, 1024],
-        >>>        [256, 512, 1024, 2048],
-        >>>        [1, 2, 2, 2],
-        >>>        10)
-    """
-
-    def __init__(self,
-                 block,
-                 layer_nums,
-                 in_channels,
-                 out_channels,
-                 strides,
-                 num_classes,
-                 use_se=False,
-                 res_base=False):
-        super(ResNet, self).__init__()
-
-        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
-            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
-        self.use_se = use_se
-        self.res_base = res_base
-        self.se_block = False
-        if self.use_se:
-            self.se_block = True
-
-        if self.use_se:
-            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)
-            self.bn1_0 = _bn(32)
-            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)
-            self.bn1_1 = _bn(32)
-            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)
-        else:
-            self.conv1 = _conv7x7(3, 64, stride=2, res_base=self.res_base)
-        self.bn1 = _bn(64, self.res_base)
-        self.relu = P.ReLU()
-
-        if self.res_base:
-            self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1)))
-            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
-        else:
-            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
-
-        self.layer1 = self._make_layer(block,
-                                       layer_nums[0],
-                                       in_channel=in_channels[0],
-                                       out_channel=out_channels[0],
-                                       stride=strides[0],
-                                       use_se=self.use_se)
-        self.layer2 = self._make_layer(block,
-                                       layer_nums[1],
-                                       in_channel=in_channels[1],
-                                       out_channel=out_channels[1],
-                                       stride=strides[1],
-                                       use_se=self.use_se)
-        self.layer3 = self._make_layer(block,
-                                       layer_nums[2],
-                                       in_channel=in_channels[2],
-                                       out_channel=out_channels[2],
-                                       stride=strides[2],
-                                       use_se=self.use_se,
-                                       se_block=self.se_block)
-        self.layer4 = self._make_layer(block,
-                                       layer_nums[3],
-                                       in_channel=in_channels[3],
-                                       out_channel=out_channels[3],
-                                       stride=strides[3],
-                                       use_se=self.use_se,
-                                       se_block=self.se_block)
-
-        self.mean = P.ReduceMean(keep_dims=True)
-        self.flatten = nn.Flatten()
-        self.end_point = _fc(out_channels[3], num_classes, use_se=self.use_se)
-
-    def _make_layer(self, block, layer_num, in_channel, out_channel, stride, use_se=False, se_block=False):
-        """
-        Make stage network of ResNet.
-
-        Args:
-            block (Cell): Resnet block.
-            layer_num (int): Layer number.
-            in_channel (int): Input channel.
-            out_channel (int): Output channel.
-            stride (int): Stride size for the first convolutional layer.
-            se_block(bool): Use se block in SE-ResNet50 net. Default: False.
-        Returns:
-            SequentialCell, the output layer.
-
-        Examples:
-            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)
-        """
-        layers = []
-
-        resnet_block = block(in_channel, out_channel, stride=stride, use_se=use_se)
-        layers.append(resnet_block)
-        if se_block:
-            for _ in range(1, layer_num - 1):
-                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)
-                layers.append(resnet_block)
-            resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se, se_block=se_block)
-            layers.append(resnet_block)
-        else:
-            for _ in range(1, layer_num):
-                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)
-                layers.append(resnet_block)
-        return nn.SequentialCell(layers)
-
-    def construct(self, x):
-        if self.use_se:
-            x = self.conv1_0(x)
-            x = self.bn1_0(x)
-            x = self.relu(x)
-            x = self.conv1_1(x)
-            x = self.bn1_1(x)
-            x = self.relu(x)
-            x = self.conv1_2(x)
-        else:
-            x = self.conv1(x)
-        x = self.bn1(x)
-        x = self.relu(x)
-        if self.res_base:
-            x = self.pad(x)
-        c1 = self.maxpool(x)
-
-        c2 = self.layer1(c1)
-        c3 = self.layer2(c2)
-        c4 = self.layer3(c3)
-        c5 = self.layer4(c4)
-
-        out = self.mean(c5, (2, 3))
-        out = self.flatten(out)
-        out = self.end_point(out)
-
-        return out
-
-
-def resnet18(class_num=10):
-    """
-    Get ResNet18 neural network.
-
-    Args:
-        class_num (int): Class number.
-
-    Returns:
-        Cell, cell instance of ResNet18 neural network.
-
-    Examples:
-        >>> net = resnet18(10)
-    """
-    return ResNet(ResidualBlockBase,
-                  [2, 2, 2, 2],
-                  [64, 64, 128, 256],
-                  [64, 128, 256, 512],
-                  [1, 2, 2, 2],
-                  class_num,
-                  res_base=True)
-
-
-def resnet50(class_num=10):
-    """
-    Get ResNet50 neural network.
-
-    Args:
-        class_num (int): Class number.
-
-    Returns:
-        Cell, cell instance of ResNet50 neural network.
-
-    Examples:
-        >>> net = resnet50(10)
-    """
-    return ResNet(ResidualBlock,
-                  [3, 4, 6, 3],
-                  [64, 256, 512, 1024],
-                  [256, 512, 1024, 2048],
-                  [1, 2, 2, 2],
-                  class_num)
-
-
-def se_resnet50(class_num=1001):
-    """
-    Get SE-ResNet50 neural network.
-
-    Args:
-        class_num (int): Class number.
-
-    Returns:
-        Cell, cell instance of SE-ResNet50 neural network.
-
-    Examples:
-        >>> net = se_resnet50(1001)
-    """
-    return ResNet(ResidualBlock,
-                  [3, 4, 6, 3],
-                  [64, 256, 512, 1024],
-                  [256, 512, 1024, 2048],
-                  [1, 2, 2, 2],
-                  class_num,
-                  use_se=True)
-
-
-def resnet101(class_num=1001):
-    """
-    Get ResNet101 neural network.
-
-    Args:
-        class_num (int): Class number.
-
-    Returns:
-        Cell, cell instance of ResNet101 neural network.
-
-    Examples:
-        >>> net = resnet101(1001)
-    """
-    return ResNet(ResidualBlock,
-                  [3, 4, 23, 3],
-                  [64, 256, 512, 1024],
-                  [256, 512, 1024, 2048],
-                  [1, 2, 2, 2],
-                  class_num)
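
A usage sketch for the removed network factories above, assuming a working MindSpore install and that the file is importable as `src/resnet.py` from the model root; the dummy input shape matches the 224x224 ImageNet setting used elsewhere in the model.

```python
# Sketch only: build ResNet-50 and run a single dummy batch through it.
import numpy as np
from mindspore import Tensor

from src.resnet import resnet50

net = resnet50(class_num=1001)
dummy = Tensor(np.random.randn(1, 3, 224, 224).astype(np.float32))
logits = net(dummy)
print(logits.shape)  # expected: (1, 1001)
```
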
diff --git a/official/cv/resnet_thor/train.py b/official/cv/resnet_thor/train.py
deleted file mode 100644
index 6ddbd7e38f8afe4258170893c22fffa63d52b68b..0000000000000000000000000000000000000000
--- a/official/cv/resnet_thor/train.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""train resnet."""
-import os
-import argparse
-import numpy as np
-
-from mindspore import context
-from mindspore import Tensor
-from mindspore.common import set_seed
-from mindspore.context import ParallelMode
-from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, LossMonitor
-from mindspore.train.loss_scale_manager import FixedLossScaleManager
-from mindspore.communication.management import init, get_rank, get_group_size
-from mindspore.parallel import set_algo_parameters
-from mindspore.train.train_thor import ConvertModelUtils
-from mindspore.nn.optim import thor
-from mindspore.train.model import Model
-
-from src.resnet import resnet50 as resnet
-from src.dataset import create_dataset
-from src.crossentropy import CrossEntropy as CrossEntropySmooth
-
-parser = argparse.ArgumentParser(description='Image classification')
-parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')
-parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
-parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
-parser.add_argument('--device_num', type=int, default=1, help='Device num')
-args_opt = parser.parse_args()
-
-if args_opt.device_target == "Ascend":
-    from src.config import config
-else:
-    from src.config import config_gpu as config
-
-set_seed(1)
-
-
-def filter_checkpoint_parameter_by_list(origin_dict, param_filter):
-    """remove useless parameters according to filter_list"""
-    for key in list(origin_dict.keys()):
-        for name in param_filter:
-            if name in key:
-                print("Delete parameter from checkpoint: ", key)
-                del origin_dict[key]
-                break
-
-
-def apply_eval(eval_param):
-    eval_model = eval_param["model"]
-    eval_ds = eval_param["dataset"]
-    metrics_name = eval_param["metrics_name"]
-    res = eval_model.eval(eval_ds)
-    return res[metrics_name]
-
-
-def get_thor_lr(global_step, lr_init, decay, total_epochs, steps_per_epoch, decay_epochs=100):
-    """get_model_lr"""
-    lr_each_step = []
-    total_steps = steps_per_epoch * total_epochs
-    for i in range(total_steps):
-        epoch = (i + 1) / steps_per_epoch
-        base = (1.0 - float(epoch) / total_epochs) ** decay
-        lr_local = lr_init * base
-        if epoch >= decay_epochs:
-            lr_local = lr_local * 0.5
-        if epoch >= decay_epochs + 1:
-            lr_local = lr_local * 0.5
-        lr_each_step.append(lr_local)
-    current_step = global_step
-    lr_each_step = np.array(lr_each_step).astype(np.float32)
-    learning_rate = lr_each_step[current_step:]
-    return learning_rate
-
-
-def get_thor_damping(global_step, damping_init, decay_rate, total_epochs, steps_per_epoch):
-    """get_model_damping"""
-    damping_each_step = []
-    total_steps = steps_per_epoch * total_epochs
-    for step in range(total_steps):
-        epoch = (step + 1) / steps_per_epoch
-        damping_here = damping_init * (decay_rate ** (epoch / 10))
-        damping_each_step.append(damping_here)
-    current_step = global_step
-    damping_each_step = np.array(damping_each_step).astype(np.float32)
-    damping_now = damping_each_step[current_step:]
-    return damping_now
-
-
-if __name__ == '__main__':
-    target = args_opt.device_target
-    ckpt_save_dir = config.save_checkpoint_path
-
-    # init context
-    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
-
-    if args_opt.run_distribute:
-        if target == "Ascend":
-            device_id = int(os.getenv('DEVICE_ID'))
-            context.set_context(device_id=device_id)
-            context.set_auto_parallel_context(device_num=args_opt.device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
-                                              gradients_mean=True)
-            set_algo_parameters(elementwise_op_strategy_follow=True)
-            context.set_auto_parallel_context(all_reduce_fusion_config=[85, 160])
-            init()
-        # GPU target
-        else:
-            init()
-            context.set_auto_parallel_context(device_num=get_group_size(), parallel_mode=ParallelMode.DATA_PARALLEL,
-                                              gradients_mean=True)
-            context.set_auto_parallel_context(all_reduce_fusion_config=[85, 160])
-        ckpt_save_dir = config.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"
-
-    # create dataset
-    dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True, repeat_num=1,
-                             batch_size=config.batch_size, target=target)
-    step_size = dataset.get_dataset_size()
-
-    # define net
-    net = resnet(class_num=config.class_num)
-
-    # init lr
-    lr = get_thor_lr(0, config.lr_init, config.lr_decay, config.lr_end_epoch, step_size, decay_epochs=39)
-    lr = Tensor(lr)
-
-    # define loss
-    if not config.use_label_smooth:
-        config.label_smooth_factor = 0.0
-    loss = CrossEntropySmooth(smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
-    loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
-    metrics = {"acc"}
-    damping = get_thor_damping(0, config.damping_init, config.damping_decay, 70, step_size)
-    split_indices = [26, 53]
-    opt = thor(net, lr, Tensor(damping), config.momentum, config.weight_decay, config.loss_scale,
-               config.batch_size, split_indices=split_indices, frequency=config.frequency)
-    model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=metrics,
-                  amp_level="O2", keep_batchnorm_fp32=False)
-
-    model = ConvertModelUtils().convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=opt,
-                                                      loss_scale_manager=loss_scale, metrics={'acc'},
-                                                      amp_level="O2", keep_batchnorm_fp32=False)
-
-    # define callbacks
-    time_cb = TimeMonitor(data_size=step_size)
-    loss_cb = LossMonitor()
-    cb = [time_cb, loss_cb]
-    if config.save_checkpoint:
-        config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
-                                     keep_checkpoint_max=config.keep_checkpoint_max)
-        ckpt_cb = ModelCheckpoint(prefix="resnet", directory=ckpt_save_dir, config=config_ck)
-        cb += [ckpt_cb]
-
-    # train model
-    dataset_sink_mode = True
-    model.train(config.epoch_size, dataset, callbacks=cb,
-                sink_size=dataset.get_dataset_size(), dataset_sink_mode=dataset_sink_mode)
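
A standalone NumPy sketch of the learning-rate and damping schedules that the removed train.py builds for the THOR optimizer, so the curves can be inspected without MindSpore; the steps-per-epoch value below is an illustrative assumption, while the other numbers come from the Ascend config shown earlier.

```python
# Sketch: polynomial-decay LR with two late halvings, plus exponential damping
# decay, matching the formulas in get_thor_lr / get_thor_damping above.
import numpy as np

def thor_lr(lr_init, decay, total_epochs, steps_per_epoch, decay_epochs=39):
    lr = []
    for i in range(steps_per_epoch * total_epochs):
        epoch = (i + 1) / steps_per_epoch
        value = lr_init * (1.0 - epoch / total_epochs) ** decay
        if epoch >= decay_epochs:
            value *= 0.5
        if epoch >= decay_epochs + 1:
            value *= 0.5
        lr.append(value)
    return np.array(lr, dtype=np.float32)

def thor_damping(damping_init, decay_rate, total_epochs, steps_per_epoch):
    damping = []
    for step in range(steps_per_epoch * total_epochs):
        epoch = (step + 1) / steps_per_epoch
        damping.append(damping_init * decay_rate ** (epoch / 10))
    return np.array(damping, dtype=np.float32)

steps_per_epoch = 5004  # illustrative assumption, not from the removed code
lr = thor_lr(0.05803, 4.04839, 53, steps_per_epoch)
damping = thor_damping(0.02714, 0.50036, 70, steps_per_epoch)
print(lr[0], lr[-1], damping[0], damping[-1])
```
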
diff --git a/official/nlp/prophetnet/README.md b/official/nlp/prophetnet/README.md
deleted file mode 100644
index 7fa77f02b6aeb66ffcc861cc9da08155250d93e4..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Contents
-
-ProphetNet is under development and will be released soon.
diff --git a/official/nlp/prophetnet/__init__.py b/official/nlp/prophetnet/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/official/nlp/prophetnet/apply_bpe_encoding.py b/official/nlp/prophetnet/apply_bpe_encoding.py
deleted file mode 100644
index 24341a62ace89e1762eb61a5947003e3934e5afc..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/apply_bpe_encoding.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Apply bpe script."""
-import os
-import argparse
-from multiprocessing import Pool, cpu_count
-
-from src.utils import Dictionary
-from src.utils import bpe_encode
-
-parser = argparse.ArgumentParser(description='Apply BPE.')
-parser.add_argument("--codes", type=str, default="", required=True,
-                    help="bpe codes path.")
-parser.add_argument("--src_folder", type=str, default="", required=True,
-                    help="raw corpus folder.")
-parser.add_argument("--output_folder", type=str, default="", required=True,
-                    help="encoded corpus output path.")
-parser.add_argument("--prefix", type=str, default="", required=False,
-                    help="Prefix of text file.")
-parser.add_argument("--vocab_path", type=str, default="", required=True,
-                    help="Generated vocabulary output path.")
-parser.add_argument("--threshold", type=int, default=None, required=False,
-                    help="Filter out words that frequency is lower than threshold.")
-parser.add_argument("--processes", type=int, default=2, required=False,
-                    help="Number of processes to use.")
-
-if __name__ == '__main__':
-    args, _ = parser.parse_known_args()
-
-    if not (args.codes and args.src_folder and args.output_folder):
-        raise ValueError("Please enter required params.")
-
-    source_folder = args.src_folder
-    output_folder = args.output_folder
-    codes = args.codes
-
-    if not os.path.exists(codes):
-        raise FileNotFoundError("`--codes` does not exist.")
-    if not os.path.exists(source_folder) or not os.path.isdir(source_folder):
-        raise ValueError("`--src_folder` must be an existing directory.")
-    if not os.path.exists(output_folder) or not os.path.isdir(output_folder):
-        raise ValueError("`--output_folder` must be an existing directory.")
-    if not isinstance(args.prefix, str) or len(args.prefix) > 128:
-        raise ValueError("`--prefix` must be a string no longer than 128 characters.")
-    if not isinstance(args.processes, int):
-        raise TypeError("`--processes` must be an integer.")
-
-    available_dict = []
-    args_groups = []
-    for file in os.listdir(source_folder):
-        if args.prefix and not file.startswith(args.prefix):
-            continue
-        if file.endswith(".txt"):
-            output_path = os.path.join(output_folder, file.replace(".txt", "_bpe.txt"))
-            dict_path = os.path.join(output_folder, file.replace(".txt", ".dict"))
-            available_dict.append(dict_path)
-            args_groups.append((codes, os.path.join(source_folder, file),
-                                output_path, dict_path))
-
-    kernel_size = 1 if args.processes <= 0 else args.processes
-    kernel_size = min(kernel_size, cpu_count())
-    pool = Pool(kernel_size)
-    for arg in args_groups:
-        pool.apply_async(bpe_encode, args=arg)
-    pool.close()
-    pool.join()
-
-    vocab = Dictionary.load_from_text(available_dict)
-    if args.threshold is not None:
-        vocab = vocab.shrink(args.threshold)
-    vocab.persistence(args.vocab_path)
-    print(f" | Vocabulary Size: {len(vocab)}")
diff --git a/official/nlp/prophetnet/config/__init__.py b/official/nlp/prophetnet/config/__init__.py
deleted file mode 100644
index d5c6589ee795545dd72383664a03374a817acc4c..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/config/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""MASS model configuration."""
-from .config import TransformerConfig
-
-__all__ = [
-    "TransformerConfig"
-]
diff --git a/official/nlp/prophetnet/config/config.py b/official/nlp/prophetnet/config/config.py
deleted file mode 100644
index 259ba4444436e591135a15fc39eaa0f302b17f56..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/config/config.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Configuration class for Transformer."""
-import os
-import json
-import copy
-from typing import List
-
-import mindspore.common.dtype as mstype
-
-
-def _is_dataset_file(file: str):
-    return "tfrecord" in file.lower() or "mindrecord" in file.lower()
-
-
-def _get_files_from_dir(folder: str):
-    _files = []
-    for file in os.listdir(folder):
-        if _is_dataset_file(file):
-            _files.append(os.path.join(folder, file))
-    return _files
-
-
-def get_source_list(folder: str) -> List:
-    """
-    Get file list from a folder.
-
-    Returns:
-        list, file list.
-    """
-    _list = []
-    if not folder:
-        return _list
-
-    if os.path.isdir(folder):
-        _list = _get_files_from_dir(folder)
-    else:
-        if _is_dataset_file(folder):
-            _list.append(folder)
-    return _list
-
-
-PARAM_NODES = {"dataset_config",
-               "model_config",
-               "loss_scale_config",
-               "learn_rate_config",
-               "checkpoint_options"}
-
-
-class TransformerConfig:
-    """
-    Configuration for `Transformer`.
-
-    Args:
-        random_seed (int): Random seed.
-        batch_size (int): Batch size of input dataset.
-        epochs (int): Epoch number.
-        dataset_sink_mode (bool): Whether enable dataset sink mode.
-        dataset_sink_step (int): Dataset sink step.
-        lr_scheduler (str): Learning rate scheduler to use; only "ISR" is supported for now.
-        lr (float): Initial learning rate.
-        min_lr (float): Minimum learning rate.
-        decay_start_step (int): Step to decay.
-        warmup_steps (int): Warm up steps.
-        dataset_schema (str): Path of dataset schema file.
-        pre_train_dataset (str): Path of pre-training dataset file or folder.
-        fine_tune_dataset (str): Path of fine-tune dataset file or folder.
-        test_dataset (str): Path of test dataset file or folder.
-        valid_dataset (str): Path of validation dataset file or folder.
-        ckpt_path (str): Checkpoints save path.
-        save_ckpt_steps (int): Interval of saving ckpt.
-        ckpt_prefix (str): Prefix of ckpt file.
-        keep_ckpt_max (int): Max ckpt files number.
-        seq_length (int): Length of input sequence. Default: 128.
-        vocab_size (int): The shape of each embedding vector. Default: 46192.
-        hidden_size (int): Size of embedding, attention, dim. Default: 512.
-        num_hidden_layers (int): Number of hidden layers in the encoder and decoder. Default: 6.
-        ngram (int): Number of tokens to predict ahead. Default: 2.
-        accumulation_steps (int): Number of steps to accumulate gradients before the next optimizer update. Default: 1.
-        num_attention_heads (int): Number of attention heads in the Transformer encoder/decoder
-            cell. Default: 8.
-        intermediate_size (int): Size of intermediate layer in the Transformer
-            encoder/decoder cell. Default: 4096.
-        hidden_act (str): Activation function used in the Transformer encoder/decoder
-            cell. Default: "relu".
-        loss_scale_mode (str): Loss scale mode. Default: "dynamic".
-        init_loss_scale (int): Initialized loss scale.
-        loss_scale_factor (int): Loss scale factor.
-        scale_window (int): Window size of loss scale.
-        beam_width (int): Beam width for beam search in inference. Default: 5.
-        length_penalty_weight (float): Penalty for sentence length. Default: 1.0.
-        label_smoothing (float): Label smoothing setting. Default: 0.1.
-        input_mask_from_dataset (bool): Specifies whether to use the input mask loaded from the
-            dataset. Default: True.
-        save_graphs (bool): Whether to save computation graphs; set to True if MindInsight
-            is needed.
-        dtype (mstype): Data type of the input. Default: mstype.float32.
-        max_decode_length (int): Max decode length for inferring. Default: 64.
-        hidden_dropout_prob (float): The dropout probability for hidden outputs. Default: 0.1.
-        attention_dropout_prob (float): The dropout probability for
-            Multi-head Self-Attention. Default: 0.1.
-        max_position_embeddings (int): Maximum length of sequences used in this
-            model. Default: 64.
-        initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.
-    """
-
-    def __init__(self,
-                 random_seed=74,
-                 batch_size=64, epochs=1,
-                 dataset_sink_mode=True, dataset_sink_step=1,
-                 lr_scheduler="", optimizer="adam",
-                 lr=1e-4, min_lr=1e-6,
-                 decay_steps=10000, poly_lr_scheduler_power=1,
-                 decay_start_step=-1, warmup_steps=2000,
-                 pre_train_dataset: str = None,
-                 fine_tune_dataset: str = None,
-                 test_dataset: str = None,
-                 valid_dataset: str = None,
-                 ckpt_path: str = None,
-                 save_ckpt_steps=2000,
-                 ckpt_prefix="CKPT",
-                 existed_ckpt="",
-                 keep_ckpt_max=20,
-                 seq_length=128,
-                 vocab_size=46192,
-                 hidden_size=512,
-                 num_hidden_layers=6,
-                 ngram=2,
-                 accumulation_steps=1,
-                 disable_ngram_loss=False,
-                 num_attention_heads=8,
-                 intermediate_size=4096,
-                 hidden_act="relu",
-                 hidden_dropout_prob=0.1,
-                 attention_dropout_prob=0.1,
-                 max_position_embeddings=64,
-                 initializer_range=0.02,
-                 loss_scale_mode="dynamic",
-                 init_loss_scale=2 ** 10,
-                 loss_scale_factor=2, scale_window=2000,
-                 beam_width=5,
-                 length_penalty_weight=1.0,
-                 label_smoothing=0.1,
-                 input_mask_from_dataset=True,
-                 save_graphs=False,
-                 dtype=mstype.float32,
-                 max_decode_length=64):
-
-        self.save_graphs = save_graphs
-        self.random_seed = random_seed
-        self.pre_train_dataset = get_source_list(pre_train_dataset)  # type: List[str]
-        self.fine_tune_dataset = get_source_list(fine_tune_dataset)  # type: List[str]
-        self.valid_dataset = get_source_list(valid_dataset)  # type: List[str]
-        self.test_dataset = get_source_list(test_dataset)  # type: List[str]
-
-        if not isinstance(epochs, int) or epochs < 0:
-            raise ValueError("`epochs` must be a non-negative integer.")
-
-        self.epochs = epochs
-        self.dataset_sink_mode = dataset_sink_mode
-        self.dataset_sink_step = dataset_sink_step
-
-        self.ckpt_path = ckpt_path
-        self.keep_ckpt_max = keep_ckpt_max
-        self.save_ckpt_steps = save_ckpt_steps
-        self.ckpt_prefix = ckpt_prefix
-        self.existed_ckpt = existed_ckpt
-
-        self.batch_size = batch_size
-        self.seq_length = seq_length
-        self.vocab_size = vocab_size
-        self.hidden_size = hidden_size
-        self.num_hidden_layers = num_hidden_layers
-        self.ngram = ngram
-        self.accumulation_steps = accumulation_steps
-        self.disable_ngram_loss = disable_ngram_loss
-        self.num_attention_heads = num_attention_heads
-        self.hidden_act = hidden_act
-        self.intermediate_size = intermediate_size
-        self.hidden_dropout_prob = hidden_dropout_prob
-        self.attention_dropout_prob = attention_dropout_prob
-        self.max_position_embeddings = max_position_embeddings
-        self.initializer_range = initializer_range
-        self.label_smoothing = label_smoothing
-
-        self.beam_width = beam_width
-        self.length_penalty_weight = length_penalty_weight
-        self.max_decode_length = max_decode_length
-        self.input_mask_from_dataset = input_mask_from_dataset
-        self.compute_type = mstype.float32
-        self.dtype = dtype
-
-        self.loss_scale_mode = loss_scale_mode
-        self.scale_window = scale_window
-        self.loss_scale_factor = loss_scale_factor
-        self.init_loss_scale = init_loss_scale
-
-        self.optimizer = optimizer
-        self.lr = lr
-        self.lr_scheduler = lr_scheduler
-        self.min_lr = min_lr
-        self.poly_lr_scheduler_power = poly_lr_scheduler_power
-        self.decay_steps = decay_steps
-        self.decay_start_step = decay_start_step
-        self.warmup_steps = warmup_steps
-
-        self.train_url = ""
-
-    @classmethod
-    def from_dict(cls, json_object: dict):
-        """Constructs a `TransformerConfig` from a Python dictionary of parameters."""
-        _params = {}
-        for node in PARAM_NODES:
-            for key in json_object[node]:
-                _params[key] = json_object[node][key]
-        return cls(**_params)
-
-    @classmethod
-    def from_json_file(cls, json_file):
-        """Constructs a `TransformerConfig` from a json file of parameters."""
-        with open(json_file, "r") as reader:
-            return cls.from_dict(json.load(reader))
-
-    def to_dict(self):
-        """Serializes this instance to a Python dictionary."""
-        output = copy.deepcopy(self.__dict__)
-        return output
-
-    def to_json_string(self):
-        """Serializes this instance to a JSON string."""
-        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
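A minimal sketch of how `from_dict` flattens the sectioned JSON configs below into constructor
kwargs. It assumes PARAM_NODES names the five top-level sections used by those files
(dataset_config, model_config, loss_scale_config, learn_rate_config, checkpoint_options);
the real constant is defined earlier in config.py and is not shown in this hunk.

    import json

    # Assumed section names; config.py defines the actual PARAM_NODES constant.
    PARAM_NODES = ("dataset_config", "model_config", "loss_scale_config",
                   "learn_rate_config", "checkpoint_options")

    def flatten_config(json_object: dict) -> dict:
        """Collect every key of every section into one flat kwargs dict."""
        params = {}
        for node in PARAM_NODES:
            params.update(json_object[node])
        return params

    nested = {
        "dataset_config": {"epochs": 5, "batch_size": 1},
        "model_config": {"hidden_size": 512, "num_hidden_layers": 3},
        "loss_scale_config": {"init_loss_scale": 1},
        "learn_rate_config": {"lr": 1e-4},
        "checkpoint_options": {"ckpt_prefix": "ckpt"},
    }
    # The flat dict is what TransformerConfig(**flatten_config(nested)) would receive.
    print(json.dumps(flatten_config(nested), indent=2))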
diff --git a/official/nlp/prophetnet/config/finetune.json b/official/nlp/prophetnet/config/finetune.json
deleted file mode 100644
index d2191536121b296c4203569cb6486064d56f1953..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/config/finetune.json
+++ /dev/null
@@ -1,59 +0,0 @@
-{
-    "dataset_config": {
-      "epochs": 5,
-      "batch_size": 1,
-      "pre_train_dataset": "",
-      "fine_tune_dataset": "../cnndm_data_prophetnet/dataset_hugging_face_tokenized/train",
-      "test_dataset": "",
-      "valid_dataset": "",
-      "dataset_sink_mode": false,
-      "dataset_sink_step": 100
-    },
-    "model_config": {
-      "random_seed": 1,
-      "save_graphs": false,
-      "seq_length": 512,
-      "vocab_size": 30522,
-      "hidden_size": 512,
-      "num_hidden_layers": 3,
-      "ngram": 2,
-      "accumulation_steps": 1,
-      "disable_ngram_loss": false,
-      "num_attention_heads": 8,
-      "intermediate_size": 2048,
-      "hidden_act": "gelu",
-      "hidden_dropout_prob": 0.1,
-      "attention_dropout_prob": 0.1,
-      "max_position_embeddings": 512,
-      "initializer_range": 0.02,
-      "label_smoothing": 0.1,
-      "beam_width": 5,
-      "length_penalty_weight": 1.0,
-      "max_decode_length": 64,
-      "input_mask_from_dataset": true
-    },
-    "loss_scale_config": {
-      "loss_scale_mode":"static",
-      "init_loss_scale": 1,
-      "loss_scale_factor": 2,
-      "scale_window": 200
-    },
-    "learn_rate_config": {
-      "optimizer": "adam",
-      "lr": 1e-4,
-      "lr_scheduler": "isr",
-      "poly_lr_scheduler_power": 0.5,
-      "decay_steps": 10000,
-      "decay_start_step": 1000,
-      "warmup_steps": 1000,
-      "min_lr": 1e-7
-    },
-    "checkpoint_options": {
-      "existed_ckpt": "",
-      "save_ckpt_steps": 20000,
-      "keep_ckpt_max": 50,
-      "ckpt_prefix": "ckpt",
-      "ckpt_path": "checkpoints"
-    }
-  }
-  
\ No newline at end of file
diff --git a/official/nlp/prophetnet/config/pretrain.json b/official/nlp/prophetnet/config/pretrain.json
deleted file mode 100644
index c73e65473011239059f421d939de34599d407d51..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/config/pretrain.json
+++ /dev/null
@@ -1,58 +0,0 @@
-{
-    "dataset_config": {
-      "epochs": 2,
-      "batch_size": 1,
-      "pre_train_dataset": "../news_crawl/dataset/tf_small_pretrain",
-      "fine_tune_dataset": "",
-      "test_dataset": "",
-      "valid_dataset": "",
-      "dataset_sink_mode": false,
-      "dataset_sink_step": 100
-    },
-    "model_config": {
-      "random_seed": 100,
-      "save_graphs": false,
-      "seq_length": 128,
-      "vocab_size": 44000,
-      "hidden_size": 768,
-      "num_hidden_layers": 3,
-      "ngram": 2,
-      "disable_ngram_loss": false,
-      "num_attention_heads": 12,
-      "intermediate_size": 3072,
-      "hidden_act": "relu",
-      "hidden_dropout_prob": 0.1,
-      "attention_dropout_prob": 0.1,
-      "max_position_embeddings": 64,
-      "initializer_range": 0.02,
-      "label_smoothing": 0.1,
-      "beam_width": 4,
-      "length_penalty_weight": 1.0,
-      "max_decode_length": 64,
-      "input_mask_from_dataset": true
-    },
-    "loss_scale_config": {
-      "loss_scale_mode":"static",
-      "init_loss_scale": 32,
-      "loss_scale_factor": 2,
-      "scale_window": 200
-    },
-    "learn_rate_config": {
-      "optimizer": "adam",
-      "lr": 1e-4,
-      "lr_scheduler": "poly",
-      "poly_lr_scheduler_power": 0.5,
-      "decay_steps": 10000,
-      "decay_start_step": 12000,
-      "warmup_steps": 4000,
-      "min_lr": 1e-6
-    },
-    "checkpoint_options": {
-      "existed_ckpt": "/home/yanglinfeng/ProphetNet/training_result/checkpoints/ckpt_1_0.ckpt",
-      "save_ckpt_steps": 10,
-      "keep_ckpt_max": 50,
-      "ckpt_prefix": "ckpt",
-      "ckpt_path": "checkpoints"
-    }
-  }
-  
\ No newline at end of file
diff --git a/official/nlp/prophetnet/config/test.json b/official/nlp/prophetnet/config/test.json
deleted file mode 100644
index 775a6ab26e437824b125f9306cc99270c12583f5..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/config/test.json
+++ /dev/null
@@ -1,57 +0,0 @@
-{
-  "dataset_config": {
-    "epochs": 2,
-    "batch_size": 1,
-    "pre_train_dataset": "",
-    "fine_tune_dataset": "",
-    "test_dataset": "../cnndm_data_prophetnet/dataset_hugging_face_tokenized",
-    "valid_dataset": "",
-    "dataset_sink_mode": false,
-    "dataset_sink_step": 100
-  },
-  "model_config": {
-    "random_seed": 100,
-    "save_graphs": false,
-    "seq_length": 512,
-    "vocab_size": 30522,
-    "hidden_size": 512,
-    "num_hidden_layers": 3,
-    "ngram": 2,
-    "disable_ngram_loss": false,
-    "num_attention_heads": 8,
-    "intermediate_size": 2048,
-    "hidden_act": "gelu",
-    "hidden_dropout_prob": 0.1,
-    "attention_dropout_prob": 0.1,
-    "max_position_embeddings": 512,
-    "initializer_range": 0.02,
-    "label_smoothing": 0.1,
-    "beam_width": 5,
-    "length_penalty_weight": 1.2,
-    "max_decode_length": 110,
-    "input_mask_from_dataset": true
-  },
-  "loss_scale_config": {
-    "loss_scale_mode":"static",
-    "init_loss_scale": 32,
-    "loss_scale_factor": 2,
-    "scale_window": 200
-  },
-  "learn_rate_config": {
-    "optimizer": "adam",
-    "lr": 1e-4,
-    "lr_scheduler": "poly",
-    "poly_lr_scheduler_power": 0.5,
-    "decay_steps": 10000,
-    "decay_start_step": 12000,
-    "warmup_steps": 4000,
-    "min_lr": 1e-6
-  },
-  "checkpoint_options": {
-    "existed_ckpt": "../training_weight/ckpt-1_20000.ckpt",
-    "save_ckpt_steps": 500,
-    "keep_ckpt_max": 50,
-    "ckpt_prefix": "ckpt",
-    "ckpt_path": "checkpoints"
-  }
-}
diff --git a/official/nlp/prophetnet/eval.py b/official/nlp/prophetnet/eval.py
deleted file mode 100644
index 4b55ac0be9b7b3170898ba5f3e8deeacc23172fd..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/eval.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Evaluation api."""
-import os
-import argparse
-import pickle
-
-from mindspore.common import dtype as mstype
-from mindspore import context
-
-from config import TransformerConfig
-from src.transformer import infer, infer_ppl
-from src.utils import Dictionary
-from src.utils import get_score
-
-parser = argparse.ArgumentParser(description='ProphetNet evaluation.')
-parser.add_argument("--config", type=str, required=True,
-                    help="Model config json file path.")
-parser.add_argument("--vocab", type=str, required=True,
-                    help="Vocabulary to use.")
-parser.add_argument("--output", type=str, required=True,
-                    help="Result file path.")
-parser.add_argument("--metric", type=str, default='rouge',
-                    help='Set eval method.')
-parser.add_argument("--platform", type=str, required=True,
-                    help="model working platform.")
-
-
-def get_config(config):
-    config = TransformerConfig.from_json_file(config)
-    config.compute_type = mstype.float32
-    config.dtype = mstype.float32
-    return config
-
-
-if __name__ == '__main__':
-    args, _ = parser.parse_known_args()
-    if args.vocab.endswith("bin"):
-        vocab = Dictionary.load_from_persisted_dict(args.vocab)
-    else:
-        vocab = Dictionary.load_from_text([args.vocab])
-    _config = get_config(args.config)
-
-    device_id = os.getenv('DEVICE_ID', None)
-    if device_id is None:
-        device_id = 0
-    device_id = int(device_id)
-    context.set_context(
-        #mode=context.GRAPH_MODE,
-        mode=context.PYNATIVE_MODE,
-        device_target=args.platform,
-        reserve_class_name_in_scope=False,
-        device_id=device_id)
-
-    if args.metric == 'rouge':
-        result = infer(_config)
-    else:
-        result = infer_ppl(_config)
-
-    with open(args.output, "wb") as f:
-        pickle.dump(result, f, 1)
-
-    # get score by given metric
-    score = get_score(result, vocab, metric=args.metric)
-    print(score)
diff --git a/official/nlp/prophetnet/gigaword.py b/official/nlp/prophetnet/gigaword.py
deleted file mode 100644
index f473ddd5ce9f3ee4efdb718934442a3fd2703ffa..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/gigaword.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Generate Gigaword dataset."""
-import os
-import argparse
-
-from src.dataset import BiLingualDataLoader
-from src.language_model import NoiseChannelLanguageModel
-from src.utils import Dictionary
-
-parser = argparse.ArgumentParser(description='Create Gigaword fine-tune Dataset.')
-parser.add_argument("--train_src", type=str, default="", required=False,
-                    help="train dataset source file path.")
-parser.add_argument("--train_ref", type=str, default="", required=False,
-                    help="train dataset reference file path.")
-parser.add_argument("--test_src", type=str, default="", required=False,
-                    help="test dataset source file path.")
-parser.add_argument("--test_ref", type=str, default="", required=False,
-                    help="test dataset reference file path.")
-parser.add_argument("--noise_prob", type=float, default=0., required=False,
-                    help="add noise prob.")
-parser.add_argument("--existed_vocab", type=str, default="", required=False,
-                    help="existed vocab path.")
-parser.add_argument("--max_len", type=int, default=64, required=False,
-                    help="max length of sentences.")
-parser.add_argument("--output_folder", type=str, default="", required=True,
-                    help="dataset output path.")
-parser.add_argument("--format", type=str, default="tfrecord", required=False,
-                    help="dataset format.")
-
-if __name__ == '__main__':
-    args, _ = parser.parse_known_args()
-
-    vocab = Dictionary.load_from_persisted_dict(args.existed_vocab)
-
-    if args.train_src and args.train_ref:
-        train = BiLingualDataLoader(
-            src_filepath=args.train_src,
-            tgt_filepath=args.train_ref,
-            src_dict=vocab, tgt_dict=vocab,
-            src_lang="en", tgt_lang="en",
-            language_model=NoiseChannelLanguageModel(add_noise_prob=args.noise_prob),
-            max_sen_len=args.max_len
-        )
-        if "tf" in args.format.lower():
-            train.write_to_tfrecord(
-                path=os.path.join(args.output_folder, "gigaword_train_dataset.tfrecord")
-            )
-        else:
-            train.write_to_mindrecord(
-                path=os.path.join(args.output_folder, "gigaword_train_dataset.mindrecord")
-            )
-
-    if args.test_src and args.test_ref:
-        test = BiLingualDataLoader(
-            src_filepath=args.test_src,
-            tgt_filepath=args.test_ref,
-            src_dict=vocab, tgt_dict=vocab,
-            src_lang="en", tgt_lang="en",
-            language_model=NoiseChannelLanguageModel(add_noise_prob=0),
-            max_sen_len=args.max_len
-        )
-        if "tf" in args.format.lower():
-            test.write_to_tfrecord(
-                path=os.path.join(args.output_folder, "gigaword_test_dataset.tfrecord")
-            )
-        else:
-            test.write_to_mindrecord(
-                path=os.path.join(args.output_folder, "gigaword_test_dataset.mindrecord")
-            )
-
-    print(f" | Vocabulary size: {vocab.size}.")
diff --git a/official/nlp/prophetnet/instructions b/official/nlp/prophetnet/instructions
deleted file mode 100644
index a877263a751019b8ab648aa5cdeea4002f042c16..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/instructions
+++ /dev/null
@@ -1,209 +0,0 @@
-python tokenize_corpus.py --corpus_folder /{path}/corpus --output_folder /{path}/tokenized_corpus --tokenizer nltk --pool_size 16
-cd tokenized_corpus/
-
-# build bpe codes
-cat *.txt | subword-nmt learn-bpe -s 46000 -o all.bpe.codes
-
-# build bpe dict
-"subword-nmt get-vocab -i tokenized.txt -o vocab_en.dict.bin"
-
-# apply bpe encoding
-python apply_bpe_encoding.py --codes ~/Mindspore/mindspore/model_zoo/official/nlp/mass/tokenized_corpus/all.bpe.codes \
-    --src_folder ~/Mindspore/mindspore/model_zoo/official/nlp/mass/tokenized_corpus/ \
-    --output_folder ~/Mindspore/mindspore/model_zoo/official/nlp/mass/tokenized_corpus/bpe \
-    --vocab_path ~/Mindspore/mindspore/model_zoo/official/nlp/mass/tokenized_corpus/vocab_en.dict.bin \
-    --processes 32
-
-# build dataset news crawl
-python news_crawl.py --src_folder ./news_crawl \
-    --dict_folder ./news_crawl \
-    --existed_vocab ./tokenized_corpus/vocab_en.dict.bin \
-    --mask_ratio 0.5 \
-    --output_folder ./news_crawl/dataset/tf_small_pretrain \
-    --max_len 128 \
-    --processes 32 \
-    --ngram 2
-
-# build dataset cnndm
-python cnn_dm.py --test_src ./cnndm_data_prophetnet/prophetnet_tokenized/test.src.txt  --test_ref ./cnndm_data_prophetnet/prophetnet_tokenized/test.tgt.txt  --existed_vocab ./cnndm_data_prophetnet/cnndm_torch_prophetnet_30522.bin  --noise_prob 0.0  --output_folder ./cnndm_data_prophetnet/dataset_hugging_face_tokenized/ --max_len 512
-
-
-# train
-bash run_gpu.sh --task t --device_num 1 --device_id 3 --config ./config/config.json
-
-# inference
-bash run_gpu.sh --task i \
-                --device_num 1 \
-                --device_id 3 \
-                --config ./config/test.json \
-                --output output \
-                --metric rouge \
-                --vocab ./cnndm_data_prophetnet/cnndm_torch_prophetnet_30522.bin
-    
-# pytorch model structure
-NgramTransformerProphetModel(
-  (encoder): TransformerEncoder(
-    (embed_tokens): Embedding(30522, 512, padding_idx=0)
-    (embed_positions): LearnedPositionalEmbedding(513, 512, padding_idx=0)
-    (layers): ModuleList(
-      (0): TransformerEncoderLayer(
-        (self_attn): MultiheadAttention(
-          (k_proj): Linear(in_features=512, out_features=512, bias=True)
-          (v_proj): Linear(in_features=512, out_features=512, bias=True)
-          (q_proj): Linear(in_features=512, out_features=512, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (self_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (fc1): Linear(in_features=512, out_features=2048, bias=True)
-        (fc2): Linear(in_features=2048, out_features=512, bias=True)
-        (final_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-      )
-      (1): TransformerEncoderLayer(
-        (self_attn): MultiheadAttention(
-          (k_proj): Linear(in_features=512, out_features=512, bias=True)
-          (v_proj): Linear(in_features=512, out_features=512, bias=True)
-          (q_proj): Linear(in_features=512, out_features=512, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (self_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (fc1): Linear(in_features=512, out_features=2048, bias=True)
-        (fc2): Linear(in_features=2048, out_features=512, bias=True)
-        (final_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-      )
-      (2): TransformerEncoderLayer(
-        (self_attn): MultiheadAttention(
-          (k_proj): Linear(in_features=512, out_features=512, bias=True)
-          (v_proj): Linear(in_features=512, out_features=512, bias=True)
-          (q_proj): Linear(in_features=512, out_features=512, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (self_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (fc1): Linear(in_features=512, out_features=2048, bias=True)
-        (fc2): Linear(in_features=2048, out_features=512, bias=True)
-        (final_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-      )
-    )
-    (emb_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-  )
-  (decoder): NgramTransformerDecoder(
-    (embed_tokens): Embedding(30522, 512, padding_idx=0)
-    (embed_positions): LearnedPositionalEmbedding(514, 512, padding_idx=0)
-    (ngram_input_embed): Embedding(2, 512)
-    (layers): ModuleList(
-      (0): NgramTransformerDecoderLayer(
-        (ngram_self_attn): NgramMultiheadAttention(
-          (relative_linear): Linear(in_features=512, out_features=256, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (self_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (encoder_attn): MultiheadAttention(
-          (k_proj): Linear(in_features=512, out_features=512, bias=True)
-          (v_proj): Linear(in_features=512, out_features=512, bias=True)
-          (q_proj): Linear(in_features=512, out_features=512, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (encoder_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (fc1): Linear(in_features=512, out_features=2048, bias=True)
-        (fc2): Linear(in_features=2048, out_features=512, bias=True)
-        (final_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-      )
-      (1): NgramTransformerDecoderLayer(
-        (ngram_self_attn): NgramMultiheadAttention(
-          (relative_linear): Linear(in_features=512, out_features=256, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (self_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (encoder_attn): MultiheadAttention(
-          (k_proj): Linear(in_features=512, out_features=512, bias=True)
-          (v_proj): Linear(in_features=512, out_features=512, bias=True)
-          (q_proj): Linear(in_features=512, out_features=512, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (encoder_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (fc1): Linear(in_features=512, out_features=2048, bias=True)
-        (fc2): Linear(in_features=2048, out_features=512, bias=True)
-        (final_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-      )
-      (2): NgramTransformerDecoderLayer(
-        (ngram_self_attn): NgramMultiheadAttention(
-          (relative_linear): Linear(in_features=512, out_features=256, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (self_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (encoder_attn): MultiheadAttention(
-          (k_proj): Linear(in_features=512, out_features=512, bias=True)
-          (v_proj): Linear(in_features=512, out_features=512, bias=True)
-          (q_proj): Linear(in_features=512, out_features=512, bias=True)
-          (out_proj): Linear(in_features=512, out_features=512, bias=True)
-        )
-        (encoder_attn_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-        (fc1): Linear(in_features=512, out_features=2048, bias=True)
-        (fc2): Linear(in_features=2048, out_features=512, bias=True)
-        (final_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-      )
-    )
-    (emb_layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
-  )
-)
-
-data example:
-src_tokens
-tensor([[ 1996, 11555, 18172,  7042,  2055,  1037, 18147,  5913,  3756,  6982,
-          1999,  1996,  4120,  1012,  2007,  1996,  4022,  2000,  2022,  3621,
-          2062,  4795,  1010,  2021,  2074,  2004, 26102,  1010,  1996,  7726,
-          3212,  2038,  2042, 27696,  1996,  6745,  2804,  2000,  2049,  4170,
-          1011,  1037,  8235,  4408, 28653,  2630,  6982,  1012, 11216,  1997,
-          1996, 27143,  1011,  2550, 21905,  2442,  2031,  2245,  2008,  1996,
-         13576,  8703,  2052,  2191,  1996,  7477, 12586,  1999,  2007,  1996,
-          2784,  5380,  1997,  1996,  2152, 11915,  1012, 17186,  2091,  2005,
-          2678,  1012,  3239,  1011,  9105,  1024,  7726,  3212,  9058,  2020,
-          4760,  2125,  2037,  4408, 28653, 12622,  2006,  2110,  2547,  1012,
-         18783,  1024,  7726,  3212,  3738,  3233,  2006,  2327,  1997,  1996,
-          8254,  2050,  1021,  6982,  2328, 27143,  1012,  2021,  2009,  1005,
-          1055,  2524,  2000,  2903,  2008,  1996,  4099,  2180,  1005,  1056,
-          2156,  2023,  2028,  2746,  2007,  1996,  6120,  2437,  2009,  3233,
-          2041,  2066,  1037, 14699,  7639,  2114,  1996,  2300,  1005,  1055,
-          3302,  1012,  1996,  3212,  2001,  4760,  2125,  1996,  3239,  1011,
-          9105,  4325,  1010,  2029,  2003,  2105,  1996,  2946,  1997,  1037,
-         15437,  1010,  2006,  4238,  2110,  2547,  7483,  1012,  3212,  4584,
-          1010,  2738,  4603,  2135,  5102,  1999,  5810,  2601, 11408,  4102,
-          2000,  2037, 28190,  2911,  1010,  3427,  2004,  1996,  8254,  2050,
-          1011,  1021,  1010,  6055,  2007,  3424,  1011,  2911, 10815,  1010,
-          2001,  3390,  2012, 24112,  2099, 17532,  1010,  2379,  1996,  6143,
-         11195,  1997,  7570, 10867, 17040,  1012,  2048,  2047,  7726,  1011,
-          2328,  1043, 16102,  4313,  4942,  2015,  1998,  2048, 13671, 25215,
-         11890, 27528,  2102,  2020,  2036,  5359,  2000,  1996,  3212,  1012,
-          8235,  2630,  1024,  4238,  1005,  1055,  4397,  3390,  1043, 16102,
-          4313,  6982,  5829,  1999,  2392,  1997,  1037,  4049,  1999,  1996,
-          2670,  3417,  1997, 24112,  2099, 17532,  1999,  1996,  4723,  6084,
-          1012, 19194,  1024,  1996, 12622,  3233,  2041,  2066,  1037, 14699,
-          1011,  7639,  2114,  1996,  3302,  1997,  1996,  2712,  1012,  3212,
-          2708,  4373,  5902,  5292, 28065, 14511,  4430,  2360, 13380,  2072,
-          2001,  9339,  2006,  7726,  2547,  2004,  3038,  2008,  1996,  3842,
-          2442, 10295,  1996,  1005, 14751,  2974,  1998,  2327,  1011,  3694,
-          4128,  2000,  4047,  2049,  6645,  1012,  1005,  1043, 16102,  4313,
-          2465, 12622,  2064,  2543, 10815,  1998, 18544,  2012,  1996,  2168,
-          2051,  1010,  1998,  2064,  5452,  1999,  1996,  4723,  6084,  1005,
-          1055,  8467,  5380,  1012,  4238,  2038,  4912,  2000, 12200,  2049,
-          2250,  3639,  1998,  3987,  9859,  1010,  3038,  2151,  2825,  2925,
-          4491,  2006,  2009,  2052,  2272,  2013,  1996,  2250,  1998,  2712,
-          1012,  1996,  2406,  2085,  4447,  2000,  2022,  1005,  2969,  7182,
-          1005,  1999,  3408,  1997, 17731,  3941,  2000,  3113,  2049,  2510,
-          3791,  1012, 14430,  1024,  1996,  7726,  6982,  1005,  1055,  2453,
-          2022,  2062,  9252,  2084,  1996, 11555,  1005, 21864, 15952,  3756,
-          6982,  1010, 15885,  1010,  2021,  2027,  2024,  8053, 14224, 11401,
-          1012,   102]], device='cuda:0')
-prev_output_tokens
-tensor([[  102,  7726,  2110,  2547,  3662,  8333,  1997,  1996,  2047,  3719,
-          1011,  1037,  8254,  2050,  1021,  6982,  1010,  2048,  1043, 16102,
-          4313,  4942,  2015,  1998,  1037,  3940,  1997, 25215, 11890, 27528,
-          2102,  1012,     2,  3212,  4584,  2360,  2008,  1996,  4170,  2442,
-         10295,  1005,  1996, 14751,  2974,  1005,  2000,  4047,  2049,  6645,
-          1012]], device='cuda:0')
-target_tokens:
-tensor([[ 7726,  2110,  2547,  3662,  8333,  1997,  1996,  2047,  3719,  1011,
-          1037,  8254,  2050,  1021,  6982,  1010,  2048,  1043, 16102,  4313,
-          4942,  2015,  1998,  1037,  3940,  1997, 25215, 11890, 27528,  2102,
-          1012,     2,  3212,  4584,  2360,  2008,  1996,  4170,  2442, 10295,
-          1005,  1996, 14751,  2974,  1005,  2000,  4047,  2049,  6645,  1012,
-           102]], device='cuda:0')
\ No newline at end of file
diff --git a/official/nlp/prophetnet/news_crawl.py b/official/nlp/prophetnet/news_crawl.py
deleted file mode 100644
index ff563ac157305a605ecbec8f413b3d09ce569037..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/news_crawl.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Generate News Crawl corpus dataset."""
-import argparse
-
-from src.utils import Dictionary
-from src.utils.preprocess import create_pre_training_dataset
-
-parser = argparse.ArgumentParser(description='Create News Crawl Pre-Training Dataset.')
-parser.add_argument("--src_folder", type=str, default="", required=True,
-                    help="Raw corpus folder.")
-parser.add_argument("--existed_vocab", type=str, default="", required=True,
-                    help="Existed vocab path.")
-parser.add_argument("--mask_ratio", type=float, default=0.4, required=True,
-                    help="Mask ratio.")
-parser.add_argument("--output_folder", type=str, default="", required=True,
-                    help="Dataset output path.")
-parser.add_argument("--max_len", type=int, default=32, required=False,
-                    help="Max length of sentences.")
-parser.add_argument("--ngram", type=int, default=3, required=True,
-                    help="Number of tokens to predict ahead.")
-parser.add_argument("--suffix", type=str, default="", required=False,
-                    help="Add suffix to output file.")
-parser.add_argument("--processes", type=int, default=2, required=False,
-                    help="Size of processes pool.")
-
-if __name__ == '__main__':
-    args, _ = parser.parse_known_args()
-    if not (args.src_folder and args.output_folder):
-        raise ValueError("Please enter required params.")
-
-    if not args.existed_vocab:
-        raise ValueError("`--existed_vocab` is required.")
-
-    vocab = Dictionary.load_from_persisted_dict(args.existed_vocab)
-
-    create_pre_training_dataset(
-        folder_path=args.src_folder,
-        output_folder_path=args.output_folder,
-        vocabulary=vocab,
-        prefix="news.20", suffix=args.suffix,
-        mask_ratio=args.mask_ratio,
-        ngram=args.ngram,
-        min_sen_len=10,
-        max_sen_len=args.max_len,
-        dataset_type="tfrecord",
-        cores=args.processes
-    )
-    print(f" | Vocabulary size: {vocab.size}.")
diff --git a/official/nlp/prophetnet/requirements.txt b/official/nlp/prophetnet/requirements.txt
deleted file mode 100644
index e70fb6f693e4ad2b494618e2a70c3e15c791cf14..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-nltk
-jieba
-numpy
-rouge
diff --git a/official/nlp/prophetnet/scripts/__init__.py b/official/nlp/prophetnet/scripts/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/official/nlp/prophetnet/scripts/learn_subword.sh b/official/nlp/prophetnet/scripts/learn_subword.sh
deleted file mode 100644
index 05dd516880bf73bffb48fcbf5219eaef9c627d87..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/scripts/learn_subword.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-src_folder_path=$1  # source text folder path.
-
-cd $src_folder_path || exit
-cat *.txt | subword-nmt learn-bpe -s 46000 -o all.bpe.codes
diff --git a/official/nlp/prophetnet/scripts/run_ascend.sh b/official/nlp/prophetnet/scripts/run_ascend.sh
deleted file mode 100644
index 16f6bb3f1f6d8b7a635b09840ce214f244578d95..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/scripts/run_ascend.sh
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-export DEVICE_ID=0
-export RANK_ID=0
-export RANK_SIZE=1
-
-options=`getopt -u -o ht:n:i:j:c:o:v:m: -l help,task:,device_num:,device_id:,hccl_json:,config:,output:,vocab:,metric: -- "$@"`
-eval set -- "$options"
-echo $options
-
-echo_help()
-{
-  echo "Usage:"
-  echo "bash train.sh [-h] [-t t|i] [-n N] [-i N] [-j FILE] [-c FILE] [-o FILE] [-v FILE]"
-  echo "options:"
-  echo "        -h --help                show usage"
-  echo "        -t --task                select task, 't' for training and 'i' for inference"
-  echo "        -n --device_num          training with N devices"
-  echo "        -i --device_id           training with device i"
-  echo "        -j --hccl_json           set the rank table file"
-  echo "        -c --config              set the configuration file"
-  echo "        -o --output              set the output file of inference"
-  echo "        -v --vocab               set the vocabulary"
-  echo "        -m --metric              set the metric"
-}
-
-set_hccl_json()
-{
-  while [ -n "$1" ]
-  do
-    if [[ "$1" == "-j" || "$1"  == "--hccl_json" ]]
-    then
-      export RANK_TABLE_FILE=$2
-      break
-    fi
-    shift
-  done
-}
-set_device_id()
-{
-  while [ -n "$1" ]
-  do
-    if [[ "$1" == "-i" || "$1" == "--device_id" ]]
-    then
-      if [[ $2 -ge 0 && $2 -le 7 ]]
-      then
-        export DEVICE_ID=$2
-      fi
-      break
-    fi
-    shift
-  done
-}
-
-while [ -n "$1" ]
-do
-  case "$1" in
-  -h|--help)
-      echo_help
-      shift
-      ;;
-  -t|--task)
-    echo "task:"
-    if [ "$2" == "t" ]
-    then
-      task=train
-    elif [ "$2" == "i" ]
-    then
-      task=infer
-    fi
-    shift 2
-    ;;
-  -n|--device_num)
-    echo "device_num"
-    if [ $2 -eq 1 ]
-    then
-      set_device_id $options
-    elif [ $2 -gt 1 ]
-    then
-        export HCCL_FLAG=1
-        export DEPLOY_MODE=0
-
-        export RANK_SIZE=$2
-        set_hccl_json $options
-    fi
-    shift 2
-    ;;
-  -i|--device_id)
-    echo "set device id"
-    export DEVICE_ID=$2
-    shift 2
-    ;;
-  -c|--config)
-    echo "config";
-    configurations=$2
-    shift 2
-    ;;
-  -o|--output)
-    echo "output";
-    output=$2
-    shift 2
-    ;;
-  -v|--vocab)
-    echo "vocab";
-    vocab=$2
-    shift 2
-    ;;
-  -m|--metric)
-    echo "metric";
-    metric=$2
-    shift 2
-    ;;
-  --)
-    shift
-    break
-    ;;
-  *)
-    shift
-    ;;
-esac
-done
-
-file_path=$(cd "$(dirname $0)" || exit; pwd)
-for((i=0; i < $RANK_SIZE; i++))
-do
-  if [ $RANK_SIZE -gt 1 ]
-  then
-    echo $RANK_SIZE
-    export RANK_ID=$i
-    export DEVICE_ID=$i
-  fi
-  echo "Working on device $i"
-
-  cd $file_path || exit
-  cd ../ || exit
-
-  rm -rf ./${task}_prophetnet_$DEVICE_ID
-  mkdir ./${task}_prophetnet_$DEVICE_ID
-
-  cp train_gradient_accumulation.py ./${task}_prophetnet_$DEVICE_ID
-  cp train.py ./${task}_prophetnet_$DEVICE_ID
-  cp eval.py ./${task}_prophetnet_$DEVICE_ID
-  cp -r src ./${task}_prophetnet_$DEVICE_ID
-  cp -r config ./${task}_prophetnet_$DEVICE_ID
-  cp $configurations ./${task}_prophetnet_$DEVICE_ID
-
-  if [ $vocab ]
-  then
-    cp $vocab ./${task}_prophetnet_$DEVICE_ID
-  fi
-
-  cd ./${task}_prophetnet_$DEVICE_ID || exit
-  env > log.log
-  echo $task
-  if [ "$task" == "train" ]
-  then
-    #python train.py --config ${configurations##*/} --platform Ascend >>log.log 2>&1 &
-    python train.py --config ${configurations##*/} --platform Ascend
-  elif [ "$task" == "infer" ]
-  then
-    #python eval.py --config ${configurations##*/} --output ${output} --vocab ${vocab##*/} --metric ${metric} --platform Ascend >>log_infer.log 2>&1 &
-    python eval.py --config ${configurations##*/} --output ${output} --vocab ${vocab##*/} --metric ${metric} --platform Ascend 
-  fi
-  cd ../
-done
diff --git a/official/nlp/prophetnet/scripts/run_gpu.sh b/official/nlp/prophetnet/scripts/run_gpu.sh
deleted file mode 100644
index 0698647a41d7e80431ceca759102dd0058eba187..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/scripts/run_gpu.sh
+++ /dev/null
@@ -1,162 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-export DEVICE_ID=0
-export RANK_ID=0
-export RANK_SIZE=1
-
-options=`getopt -u -o ht:n:i:c:o:v:m: -l help,task:,device_num:,device_id:,config:,output:,vocab:,metric: -- "$@"`
-eval set -- "$options"
-echo $options
-
-echo_help()
-{
-  echo "Usage:"
-  echo "bash train.sh [-h] [-t t|i] [-n N] [-i N] [-j FILE] [-c FILE] [-o FILE] [-v FILE]"
-  echo "options:"
-  echo "        -h --help                show usage"
-  echo "        -t --task                select task, 't' for training and 'i' for inference"
-  echo "        -n --device_num          training with N devices"
-  echo "        -i --device_id           training with device i"
-  echo "        -c --config              set the configuration file"
-  echo "        -o --output              set the output file of inference"
-  echo "        -v --vocab               set the vocabulary"
-  echo "        -m --metric              set the metric"
-}
-
-set_device_id()
-{
-  while [ -n "$1" ]
-  do
-    if [[ "$1" == "-i" || "$1" == "--device_id" ]]
-    then
-      if [[ $2 -ge 0 && $2 -le 7 ]]
-      then
-        export DEVICE_ID=$2
-      fi
-      break
-    fi
-    shift
-  done
-}
-
-while [ -n "$1" ]
-do
-  case "$1" in
-  -h|--help)
-      echo_help
-      shift
-      ;;
-  -t|--task)
-    echo "task:"
-    if [ "$2" == "t" ]
-    then
-      task=train
-    elif [ "$2" == "i" ]
-    then
-      task=infer
-    fi
-    shift 2
-    ;;
-  -n|--device_num)
-    echo "device_num"
-    if [ $2 -eq 1 ]
-    then
-      set_device_id $options
-    elif [ $2 -gt 1 ]
-    then
-        export RANK_SIZE=$2
-    fi
-    shift 2
-    ;;
-  -i|--device_id)
-    echo "set device id"
-    export DEVICE_ID=$2
-    shift 2
-    ;;
-  -c|--config)
-    echo "config";
-    configurations=$2
-    shift 2
-    ;;
-  -o|--output)
-    echo "output";
-    output=$2
-    shift 2
-    ;;
-  -v|--vocab)
-    echo "vocab";
-    vocab=$2
-    shift 2
-    ;;
-  -m|--metric)
-    echo "metric";
-    metric=$2
-    shift 2
-    ;;
-  --)
-    shift
-    break
-    ;;
-  *)
-    shift
-    ;;
-esac
-done
-
-file_path=$(cd "$(dirname $0)" || exit; pwd)
-if [ $RANK_SIZE -gt 1 ]
-then
-  echo "Working on $RANK_SIZE device"
-fi
-echo "Working on file ${task}_prophetnet_$DEVICE_ID"
-
-cd $file_path || exit
-cd ../ || exit
-
-rm -rf ./${task}_prophetnet_$DEVICE_ID
-mkdir ./${task}_prophetnet_$DEVICE_ID
-
-cp train_gradient_accumulation.py ./${task}_prophetnet_$DEVICE_ID
-cp train.py ./${task}_prophetnet_$DEVICE_ID
-cp eval.py ./${task}_prophetnet_$DEVICE_ID
-cp -r src ./${task}_prophetnet_$DEVICE_ID
-cp -r config ./${task}_prophetnet_$DEVICE_ID
-cp $configurations ./${task}_prophetnet_$DEVICE_ID
-
-if [ $vocab ]
-then
-  cp $vocab ./${task}_prophetnet_$DEVICE_ID
-fi
-
-cd ./${task}_prophetnet_$DEVICE_ID || exit
-env > log.log
-echo $task
-if [ "$task" == "train" ]
-then
-  if [ $RANK_SIZE -gt 1 ]
-    then
-      mpirun -n $RANK_SIZE --output-filename log_output --merge-stderr-to-stdout python train.py --config ${configurations##*/} --platform GPU >>log.log 2>&1 &
-    fi
-  #python train.py --config ${configurations##*/} --platform GPU >>log.log 2>&1 &
-  python train.py --config ${configurations##*/} --platform GPU
-elif [ "$task" == "infer" ]
-then
-  #python eval.py --config ${configurations##*/} --output ${output} --vocab ${vocab##*/} --metric ${metric} --platform GPU >>log_infer.log 2>&1 &
-  python eval.py --config ${configurations##*/} --output ${output} --vocab ${vocab##*/} --metric ${metric} --platform GPU 
-fi
-cd ../
-
diff --git a/official/nlp/prophetnet/src/__init__.py b/official/nlp/prophetnet/src/__init__.py
deleted file mode 100644
index 7e943365a0082e1eab891c1f6f22ddb0d5133324..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/__init__.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Source of mass model."""
-from .dataset import load_dataset
-from .dataset import bi_data_loader
-from .dataset import mono_data_loader
-from .transformer import TransformerDecoder
-from .transformer import TransformerEncoder
-from .transformer import Transformer
-from .transformer import TransformerNetworkWithLoss
-from .transformer import LabelSmoothedCrossEntropyCriterion
-from .transformer import TransformerTrainOneStepWithLossScaleCell
-from .transformer import TransformerTraining
-from .transformer import infer
-from .language_model import LooseMaskedLanguageModel
-from .language_model import MaskedLanguageModel
-from .language_model import NoiseChannelLanguageModel
-
-__all__ = [
-    "load_dataset",
-    "bi_data_loader",
-    "mono_data_loader",
-    "Transformer",
-    "infer",
-    "TransformerTraining",
-    "TransformerNetworkWithLoss",
-    "TransformerTrainOneStepWithLossScaleCell",
-    "LabelSmoothedCrossEntropyCriterion",
-    "LooseMaskedLanguageModel",
-    "MaskedLanguageModel",
-    "NoiseChannelLanguageModel"
-]
diff --git a/official/nlp/prophetnet/src/dataset/__init__.py b/official/nlp/prophetnet/src/dataset/__init__.py
deleted file mode 100644
index b93504d9223aad076661d85ee86b09b5687f77d3..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/dataset/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Dataset module."""
-from .bi_data_loader import BiLingualDataLoader
-from .mono_data_loader import MonoLingualDataLoader
-from .load_dataset import load_dataset
-
-__all__ = [
-    "load_dataset",
-    "BiLingualDataLoader",
-    "MonoLingualDataLoader"
-]
diff --git a/official/nlp/prophetnet/src/dataset/load_dataset.py b/official/nlp/prophetnet/src/dataset/load_dataset.py
deleted file mode 100644
index 84aaf94a760c3eeba081f4ac54b3499c4ba1f6cd..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/dataset/load_dataset.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2020-2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Dataset loader to feed into model."""
-import mindspore.common.dtype as mstype
-import mindspore.dataset as ds
-import mindspore.dataset.transforms as deC
-
-
-def _load_dataset(input_files, batch_size, epoch_count=1,
-                  sink_mode=False, sink_step=1, rank_size=1, rank_id=0, shuffle=True):
-    """
-    Load dataset according to passed in params.
-
-    Args:
-        input_files (list): Data files.
-        batch_size (int): Batch size.
-        epoch_count (int): Epoch count.
-        sink_mode (bool): Whether enable sink mode.
-        sink_step (int): Step to sink.
-        rank_size (int): Rank size.
-        rank_id (int): Rank id.
-        shuffle (bool): Whether shuffle dataset.
-
-    Returns:
-        Dataset, dataset instance.
-    """
-    if not input_files:
-        raise FileNotFoundError("Require at least one dataset.")
-
-    if not isinstance(sink_mode, bool):
-        raise ValueError("`sink` must be type of bool.")
-
-    for datafile in input_files:
-        print(f" | Loading {datafile}.")
-
-    data_set = ds.TFRecordDataset(
-        input_files,
-        columns_list=[
-            "src", "src_padding",
-            "prev_opt", "prev_padding",
-            "target", "tgt_padding"
-        ],
-        shuffle=shuffle, num_shards=rank_size, shard_id=rank_id,
-        shard_equal_rows=True, num_parallel_workers=8)
-
-    ori_dataset_size = data_set.get_dataset_size()
-    print(f" | Dataset size: {ori_dataset_size}.")
-    repeat_count = epoch_count
-
-    type_cast_op = deC.TypeCast(mstype.int32)
-    data_set = data_set.map(input_columns="src", operations=type_cast_op)
-    data_set = data_set.map(input_columns="src_padding", operations=type_cast_op)
-    data_set = data_set.map(input_columns="prev_opt", operations=type_cast_op)
-    data_set = data_set.map(input_columns="prev_padding", operations=type_cast_op)
-    data_set = data_set.map(input_columns="target", operations=type_cast_op)
-    data_set = data_set.map(input_columns="tgt_padding", operations=type_cast_op)
-
-    data_set = data_set.rename(
-        input_columns=["src",
-                       "src_padding",
-                       "prev_opt",
-                       "prev_padding",
-                       "target",
-                       "tgt_padding"],
-        output_columns=["source_eos_ids",
-                        "source_eos_mask",
-                        "target_sos_ids",
-                        "target_sos_mask",
-                        "target_eos_ids",
-                        "target_eos_mask"]
-    )
-
-    data_set = data_set.batch(batch_size, drop_remainder=True)
-    data_set = data_set.repeat(repeat_count)
-
-    data_set.channel_name = 'transformer'
-    return data_set
-
-
-def load_dataset(data_files: list, batch_size: int, epoch_count: int,
-                 sink_mode: bool, sink_step: int = 1, rank_size: int = 1, rank_id: int = 0, shuffle=True):
-    """
-    Load dataset.
-
-    Args:
-        data_files (list): Data files.
-        batch_size (int): Batch size.
-        epoch_count (int): Epoch count.
-        sink_mode (bool): Whether enable sink mode.
-        sink_step (int): Step to sink.
-        rank_size (int): Rank size.
-        rank_id (int): Rank id.
-        shuffle (bool): Whether shuffle dataset.
-
-    Returns:
-        Dataset, dataset instance.
-    """
-    return _load_dataset(data_files, batch_size, epoch_count, sink_mode,
-                         sink_step, rank_size, rank_id, shuffle=shuffle)
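A minimal usage sketch for load_dataset (runnable only with MindSpore installed and a real
TFRecord file); the file path and batch size are placeholders, and the column names follow the
rename done in _load_dataset above.

    from src.dataset import load_dataset  # exported by src/dataset/__init__.py

    data_set = load_dataset(
        data_files=["./news_crawl/dataset/tf_small_pretrain/news.20.tfrecord"],  # placeholder path
        batch_size=64,
        epoch_count=1,
        sink_mode=False,
    )
    for batch in data_set.create_dict_iterator(num_epochs=1):
        # Each batch exposes the renamed source/target id and mask columns.
        print(batch["source_eos_ids"].shape, batch["target_eos_ids"].shape)
        break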
diff --git a/official/nlp/prophetnet/src/dataset/schema.py b/official/nlp/prophetnet/src/dataset/schema.py
deleted file mode 100644
index 5e7ebff544c884fb6af7041c12f6020ad803f0f2..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/dataset/schema.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Define schema of mindrecord."""
-
-SCHEMA = {
-    "src": {"type": "int64", "shape": [-1]},
-    "src_padding": {"type": "int64", "shape": [-1]},
-    "prev_opt": {"type": "int64", "shape": [-1]},
-    "prev_padding": {"type": "int64", "shape": [-1]},
-    "target": {"type": "int64", "shape": [-1]},
-    "tgt_padding": {"type": "int64", "shape": [-1]},
-}
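A short sketch of how one record matching this schema could be written with MindSpore's
FileWriter (the data loaders' write_to_mindrecord does the real work); the file name and
token ids here are illustrative.

    import numpy as np
    from mindspore.mindrecord import FileWriter

    from src.dataset.schema import SCHEMA

    writer = FileWriter(file_name="sample.mindrecord", shard_num=1)
    writer.add_schema(SCHEMA, "prophetnet sample schema")
    record = {
        "src": np.array([11, 12, 13, 2], dtype=np.int64),
        "src_padding": np.array([1, 1, 1, 1], dtype=np.int64),
        "prev_opt": np.array([2, 11, 12], dtype=np.int64),
        "prev_padding": np.array([1, 1, 1], dtype=np.int64),
        "target": np.array([11, 12, 2], dtype=np.int64),
        "tgt_padding": np.array([1, 1, 1], dtype=np.int64),
    }
    writer.write_raw_data([record])
    writer.commit()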
diff --git a/official/nlp/prophetnet/src/language_model/__init__.py b/official/nlp/prophetnet/src/language_model/__init__.py
deleted file mode 100644
index 95211d00d26e9448e792c1de0ce86002184b0cbc..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/language_model/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Language model."""
-from .noise_channel_language_model import NoiseChannelLanguageModel
-from .masked_language_model import MaskedLanguageModel
-from .loose_masked_language_model import LooseMaskedLanguageModel
-from .mass_language_model import MassLanguageModel
-from .prophetnet_language_model import ProphetNetLanguageModel, NgramNoiseChannelLanguageModel
-
-__all__ = [
-    "LooseMaskedLanguageModel",
-    "MassLanguageModel",
-    "MaskedLanguageModel",
-    "NoiseChannelLanguageModel",
-    "ProphetNetLanguageModel",
-    "NgramNoiseChannelLanguageModel"
-]
diff --git a/official/nlp/prophetnet/src/language_model/base.py b/official/nlp/prophetnet/src/language_model/base.py
deleted file mode 100644
index dd58989f9e8455bbba3754540b44b77ec426e7e7..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/language_model/base.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Base language model."""
-
-
-class LanguageModel:
-    """Define base language model."""
-
-    def __init__(self):
-        pass
-
-    def emit(self, **kwargs):
-        raise NotImplementedError
diff --git a/official/nlp/prophetnet/src/language_model/loose_masked_language_model.py b/official/nlp/prophetnet/src/language_model/loose_masked_language_model.py
deleted file mode 100644
index e6b97a5754f48e728fa67f83a8ca94baf6da623b..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/language_model/loose_masked_language_model.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Modified masked language model."""
-import numpy as np
-
-from src.utils import Dictionary
-from .base import LanguageModel
-
-
-class LooseMaskedLanguageModel(LanguageModel):
-    """
-    Modified mask operation on sentence.
-
-    If k is assigned, a fragment of length k is masked.
-    Otherwise, the fragment length is derived from mask_ratio.
-
-    Args:
-        k (int): Length of fragment.
-        mask_ratio (float): Mask ratio.
-    """
-
-    def __init__(self, k: int = None, mask_ratio=0.5,
-                 mask_all_prob=None):
-        super(LooseMaskedLanguageModel, self).__init__()
-        self.mask_ratio = mask_ratio
-        self._k = k
-        self._threshold = mask_all_prob
-
-    def emit(self, sentence: np.ndarray, vocabulary: Dictionary):
-        """
-        Mask mono source sentence.
-
-        A sample used to train the model is processed with the following steps:
-
-        encoder input (source): [x1, x2, x3, x4, x5, x6, x7, x8, </eos>]
-        masked encoder input:   [x1, x2, x3,  _,  _,  _, x7, x8, </eos>]
-        decoder input:          [  -, x3, x4, x5]
-                                  |   |   |   |
-                                  V   V   V   V
-        decoder output:         [x3, x4, x5, x6]
-
-        Notes:
-            By convention, the source sentence starts without <BOS>
-            but ends with <EOS>.
-
-        Args:
-            vocabulary (Dictionary): Vocabulary.
-            sentence (np.ndarray): Raw sentence instance.
-
-        Returns:
-            dict, an example.
-        """
-        # If v=0, then u must equal to 0. [u, v)
-        u, v = self._get_masked_interval(sentence.shape[0],
-                                         self._k, self._threshold)
-
-        encoder_input = sentence.copy()
-        right_shifted_sentence = np.concatenate(([vocabulary.bos_index], sentence[:-1]))
-        if u == 0:
-            _len = v - u if v - u != 0 else sentence.shape[0]
-            decoder_input = right_shifted_sentence[:_len]
-            decoder_input[0] = vocabulary.mask_index
-            decoder_output = sentence[:_len].copy()
-        else:
-            decoder_input = right_shifted_sentence[u - 1:v]
-            decoder_input[0] = vocabulary.mask_index
-            decoder_output = sentence[u - 1:v].copy()
-
-        if v == 0:
-            decoder_input[:] = vocabulary.mask_index
-        else:
-            encoder_input[np.arange(start=u, stop=v)] = vocabulary.mask_index
-
-        if u != v and u > 1:
-            padding = np.array([vocabulary.padding_index] * (u - 1), dtype=np.int32)
-            decoder_input = np.concatenate((padding, decoder_input))
-            decoder_output = np.concatenate((padding, decoder_output))
-
-        if decoder_input.shape[0] != decoder_output.shape[0]:
-            raise ValueError("Decoder input and output sequence lengths must be equal.")
-
-        return {
-            "sentence_length": sentence.shape[0],
-            "tgt_sen_length": decoder_output.shape[0],
-            "encoder_input": encoder_input,  # end with </eos>
-            "decoder_input": decoder_input,
-            "decoder_output": decoder_output  # end with </eos>
-        }
-
-    def _get_masked_interval(self, length, fix_length=None,
-                             threshold_to_mask_all=None):
-        """
-        Generate the interval [start, end) to mask, according to length and mask_ratio.
-
-        Args:
-            length (int): Sequence length.
-            fix_length (int): If given, fixed length of the masked fragment.
-            threshold_to_mask_all (float): Probability of masking the whole sequence.
-
-        Returns:
-            Tuple[int, int], [start position, end position].
-        """
-        # The interval length cannot be larger than the sequence length.
-        # It belongs to [0, length].
-        if fix_length is not None:
-            interval_length = min(length, fix_length)
-        else:
-            interval_length = min(length, round(self.mask_ratio * length))
-
-        _magic = np.random.random()
-        if threshold_to_mask_all is not None and _magic <= threshold_to_mask_all:
-            return 0, length
-
-        # If no tokens are to be masked, return (0, 0).
-        if interval_length == 0:
-            return 0, 0
-        # Otherwise, return the start and end positions of the interval.
-        start_pos = np.random.randint(low=0, high=length - interval_length + 1)
-        return start_pos, start_pos + interval_length
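
The deleted `LooseMaskedLanguageModel` selects one contiguous span `[u, v)` to mask and builds right-shifted decoder inputs around it. The following standalone sketch (plain NumPy, illustrative only, not part of the repository) reproduces the interval-selection rule so the interplay of `mask_ratio`, `k` and `mask_all_prob` is easy to inspect:

```python
import numpy as np


def get_masked_interval(length, mask_ratio=0.5, fix_length=None, threshold_to_mask_all=None):
    """Return the [start, end) span to mask, or (0, 0) when nothing is masked."""
    if fix_length is not None:
        interval_length = min(length, fix_length)
    else:
        interval_length = min(length, round(mask_ratio * length))
    # With probability `threshold_to_mask_all`, mask the whole sentence.
    if threshold_to_mask_all is not None and np.random.random() <= threshold_to_mask_all:
        return 0, length
    if interval_length == 0:
        return 0, 0
    start = np.random.randint(low=0, high=length - interval_length + 1)
    return start, start + interval_length


np.random.seed(0)
print(get_masked_interval(9))                             # random ~half-length span of a 9-token sentence
print(get_masked_interval(9, fix_length=3))               # fixed 3-token span
print(get_masked_interval(9, threshold_to_mask_all=1.0))  # (0, 9): whole sentence masked
```
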
diff --git a/official/nlp/prophetnet/src/language_model/masked_language_model.py b/official/nlp/prophetnet/src/language_model/masked_language_model.py
deleted file mode 100644
index 52aed8d53ed7b0a0eae8a67d7231364bbf913a00..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/language_model/masked_language_model.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Masked language model."""
-import numpy as np
-
-from .base import LanguageModel
-
-
-class MaskedLanguageModel(LanguageModel):
-    """
-    Apply the mask operation to a sentence.
-
-    If k is assigned, a fragment of length k is masked.
-    Otherwise, the fragment length is determined by mask_ratio.
-
-    Args:
-        k (int): Length of fragment.
-        mask_ratio (float): Mask ratio.
-    """
-
-    def __init__(self, k: int = None, mask_ratio=0.5,
-                 mask_all_prob=None):
-        super(MaskedLanguageModel, self).__init__()
-        self.mask_ratio = mask_ratio
-        self._k = k
-        self._threshold = mask_all_prob
-
-    def emit(self, sentence: np.ndarray, vocabulary):
-        """
-        Mask mono source sentence.
-
-        A sample used to train the model is processed with the following steps:
-
-        encoder input (source): [x1, x2, x3, x4, x5, x6, x7, x8, </eos>]
-        masked encoder input:   [x1, x2,  _,  _,  _, x6, x7, x8, </eos>]
-        decoder input:          [  _, x3, x4]
-                                  |   |   |
-                                  V   V   V
-        decoder output:         [ x3, x4, x5]
-
-        Notes:
-            A simple rule is made that the source sentence starts without <BOS>
-            but ends with <EOS>.
-
-        Args:
-            vocabulary (Dictionary): Vocabulary.
-            sentence (np.ndarray): Raw sentence instance.
-
-        Returns:
-            dict, an example.
-        """
-        encoder_input = sentence.copy()
-        seq_len = encoder_input.shape[0]
-
-        # If v == 0, then u must equal 0. The masked interval is [u, v).
-        u, v = self._get_masked_interval(len(encoder_input),
-                                         self._k, self._threshold)
-
-        if u == 0:
-            _len = v - u if v - u != 0 else seq_len
-            decoder_input = np.array([vocabulary.mask_index] * _len, dtype=np.int32)
-            decoder_input[1:] = encoder_input[:_len - 1].copy()
-        else:
-            decoder_input = np.array([vocabulary.mask_index] * (v - u), dtype=np.int32)
-            decoder_input[1:] = encoder_input[u:v - 1].copy()
-
-        if v == 0:
-            decoder_output = encoder_input.copy()
-            encoder_input[:] = vocabulary.mask_index
-        else:
-            decoder_output = encoder_input[u:v].copy()
-            encoder_input[np.arange(start=u, stop=v)] = vocabulary.mask_index
-
-        if u != v and u > 0:
-            padding = np.array([vocabulary.padding_index] * u, dtype=np.int32)
-            decoder_input = np.concatenate((padding, decoder_input))
-            decoder_output = np.concatenate((padding, decoder_output))
-
-        assert decoder_input.shape[0] == decoder_output.shape[0], "seq len must equal."
-
-        return {
-            "sentence_length": seq_len,
-            "tgt_sen_length": decoder_output.shape[0],
-            "encoder_input": encoder_input,  # end with </eos>
-            "decoder_input": decoder_input,
-            "decoder_output": decoder_output  # end with </eos>
-        }
-
-    def _get_masked_interval(self, length, fix_length=None,
-                             threshold_to_mask_all=None):
-        """
-        Generate the interval [start, end) to mask, according to length and mask_ratio.
-
-        Args:
-            length (int): Sequence length.
-            fix_length (int): If given, fixed length of the masked fragment.
-            threshold_to_mask_all (float): Probability of masking the whole sequence.
-
-        Returns:
-            Tuple[int, int], [start position, end position].
-        """
-        # The interval length cannot be larger than the sequence length.
-        # It belongs to [0, length].
-        if fix_length is not None:
-            interval_length = min(length, fix_length)
-        else:
-            interval_length = min(length, round(self.mask_ratio * length))
-
-        _magic = np.random.random()
-        if threshold_to_mask_all is not None and _magic <= threshold_to_mask_all:
-            return 0, length
-
-        # If no tokens are to be masked, return (0, 0).
-        if interval_length == 0:
-            return 0, 0
-        # Otherwise, return the start and end positions of the interval.
-        start_pos = np.random.randint(low=0, high=length - interval_length + 1)
-        return start_pos, start_pos + interval_length
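
For readers tracing what `MaskedLanguageModel.emit` produces, here is a NumPy walk-through of one fixed span `[u, v)`. It mirrors the logic of the deleted file; the MASK/PAD indices are placeholders chosen for illustration rather than values from the real `Dictionary`:

```python
import numpy as np

MASK, PAD = 99, 0
sentence = np.array([11, 12, 13, 14, 15, 16, 17, 18, 2], dtype=np.int32)  # ends with </eos> (=2)
u, v = 2, 5                                    # mask tokens x3..x5

encoder_input = sentence.copy()
decoder_input = np.array([MASK] * (v - u), dtype=np.int32)
decoder_input[1:] = encoder_input[u:v - 1]     # right-shifted fragment, first slot is MASK
decoder_output = encoder_input[u:v].copy()     # the fragment the decoder must predict
encoder_input[u:v] = MASK                      # hide the fragment on the encoder side

padding = np.array([PAD] * u, dtype=np.int32)  # left-pad so decoder positions line up
decoder_input = np.concatenate((padding, decoder_input))
decoder_output = np.concatenate((padding, decoder_output))

print(encoder_input)    # [11 12 99 99 99 16 17 18  2]
print(decoder_input)    # [ 0  0 99 13 14]
print(decoder_output)   # [ 0  0 13 14 15]
```
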
diff --git a/official/nlp/prophetnet/src/language_model/noise_channel_language_model.py b/official/nlp/prophetnet/src/language_model/noise_channel_language_model.py
deleted file mode 100644
index 0a02b36bc86cae64213b523c6d51abf85aab91c5..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/language_model/noise_channel_language_model.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Noise channel language model."""
-import numpy as np
-
-from .base import LanguageModel
-
-
-class NoiseChannelLanguageModel(LanguageModel):
-    """Do mask on bilingual data."""
-
-    def __init__(self, add_noise_prob: float = 0.1):
-        super(NoiseChannelLanguageModel, self).__init__()
-        self._noisy_prob = add_noise_prob
-
-    def emit(self, sentence: np.ndarray, target: np.ndarray,
-             mask_symbol_idx: int,
-             bos_symbol_idx: int):
-        """
-        Add noise to sentence randomly.
-
-        For example, given a sentence pair:
-        source sentence:    [x1, x2, x3, x4, x5, x6, </eos>]
-        target sentence:    [y1, y2, y3, y4, </eos>]
-
-        After random masking, the data looks like:
-        encoder input (source): [x1, x2,  _, x4, x5,  _, </eos>]
-        decoder input:          [<bos>,  y1,  y2,  y3,  y4]
-                                   |    |    |    |    |
-                                   V    V    V    V    V
-        decoder output:         [ y1,  y2,  y3,  y4, </eos>]
-
-        Args:
-            sentence (np.ndarray): Raw sentence.
-            target (np.ndarray): Target output (prediction).
-            mask_symbol_idx (int): Index of MASK symbol.
-            bos_symbol_idx (int): Index of bos symbol.
-
-        Returns:
-            dict, an example.
-        """
-        encoder_input = sentence.copy()
-        tgt_seq_len = target.shape[0]
-        if self._noisy_prob > 0:
-            for i, _ in enumerate(encoder_input):
-                _prob = np.random.random()
-                if _prob < self._noisy_prob:
-                    encoder_input[i] = mask_symbol_idx
-
-        decoder_input = np.empty(shape=tgt_seq_len, dtype=np.int64)
-        decoder_input[1:] = target[:-1]
-        decoder_input[0] = bos_symbol_idx
-
-        return {
-            "sentence_length": encoder_input.shape[0],
-            "tgt_sen_length": tgt_seq_len,
-            "encoder_input": encoder_input,  # end with </eos>
-            "decoder_input": decoder_input,  # start with <bos>
-            "decoder_output": target  # end with </eos>
-        }
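
`NoiseChannelLanguageModel.emit` only masks encoder tokens at random and right-shifts the target to form the decoder input. A minimal sketch with toy token ids and placeholder MASK/BOS indices (illustrative, not repository code):

```python
import numpy as np

MASK, BOS = 99, 1
np.random.seed(0)

source = np.array([11, 12, 13, 14, 2], dtype=np.int64)   # ends with </eos> (=2)
target = np.array([21, 22, 23, 2], dtype=np.int64)
noisy_prob = 0.3

encoder_input = source.copy()
for i in range(encoder_input.shape[0]):
    if np.random.random() < noisy_prob:                  # randomly replace a token with MASK
        encoder_input[i] = MASK

decoder_input = np.empty(target.shape[0], dtype=np.int64)
decoder_input[0] = BOS                                    # decoder starts from <bos>
decoder_input[1:] = target[:-1]                           # right-shift of the target

print(encoder_input)   # source with some positions masked
print(decoder_input)   # [ 1 21 22 23]
print(target)          # [21 22 23  2]  <- decoder output
```
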
diff --git a/official/nlp/prophetnet/src/transformer/__init__.py b/official/nlp/prophetnet/src/transformer/__init__.py
deleted file mode 100644
index 7416ed6d61871764d2175a3e9e4d4c52f2ca2567..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/transformer/__init__.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Transformer model module."""
-from .transformer import Transformer
-from .encoder import TransformerEncoder
-from .decoder import TransformerDecoder
-from .beam_search import BeamSearchDecoder
-from .transformer_for_train import TransformerTraining, LabelSmoothedCrossEntropyCriterion, \
-    TransformerNetworkWithLoss, TransformerTrainOneStepWithLossScaleCell, \
-    TransformerTrainAccumulateStepsWithLossScaleCell
-from .infer_mass import infer, infer_ppl
-
-__all__ = [
-    "infer",
-    "infer_ppl",
-    "TransformerTraining",
-    "LabelSmoothedCrossEntropyCriterion",
-    "TransformerTrainOneStepWithLossScaleCell",
-    "TransformerTrainAccumulateStepsWithLossScaleCell",
-    "TransformerNetworkWithLoss",
-    "Transformer",
-    "TransformerEncoder",
-    "TransformerDecoder",
-    "BeamSearchDecoder"
-]
diff --git a/official/nlp/prophetnet/src/transformer/embedding.py b/official/nlp/prophetnet/src/transformer/embedding.py
deleted file mode 100644
index 0b878e455bbfac0268650de3123ac97326e7e43a..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/transformer/embedding.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Embedding."""
-import numpy as np
-import mindspore.common.dtype as mstype
-from mindspore import nn
-from mindspore.ops import operations as P
-from mindspore.common.tensor import Tensor
-from mindspore.common.parameter import Parameter
-
-
-class EmbeddingLookup(nn.Cell):
-    """Embeddings lookup table with a fixed dictionary and size."""
-
-    def __init__(self,
-                 vocab_size,
-                 embed_dim,
-                 use_one_hot_embeddings=False):
-        """
-        Embeddings lookup table with a fixed dictionary and size.
-
-        Args:
-            vocab_size (int): Size of the dictionary of embeddings.
-            embed_dim (int): The size of word embedding.
-            use_one_hot_embeddings (bool): Whether to use one-hot embeddings. Default: False.
-        """
-        super(EmbeddingLookup, self).__init__()
-        self.embedding_dim = embed_dim
-        self.vocab_size = vocab_size
-        self.use_one_hot_embeddings = use_one_hot_embeddings
-
-        init_weight = np.random.normal(0, embed_dim ** -0.5, size=[vocab_size, embed_dim]).astype(np.float32)
-        # Index 0 is the padding index, so initialize its row to zeros.
-        init_weight[0, :] = 0
-        self.embedding_table = Parameter(Tensor(init_weight))
-        self.expand = P.ExpandDims()
-        self.gather = P.Gather()
-        self.one_hot = P.OneHot()
-        self.on_value = Tensor(1.0, mstype.float32)
-        self.off_value = Tensor(0.0, mstype.float32)
-        self.array_mul = P.MatMul()
-        self.reshape = P.Reshape()
-        self.get_shape = P.Shape()
-
-    def construct(self, input_ids):
-        """
-        Construct network.
-
-        Args:
-            input_ids (Tensor): A batch of sentences with shape (N, T).
-
-        Returns:
-            Tuple[Tensor, Tensor], word embeddings with shape (N, T, D)
-            and the embedding table with shape (V, D).
-        """
-        _shape = self.get_shape(input_ids)  # (N, T).
-        _batch_size = _shape[0]
-        _max_len = _shape[1]
-
-        flat_ids = self.reshape(input_ids, (_batch_size * _max_len,))
-        if self.use_one_hot_embeddings:
-            one_hot_ids = self.one_hot(flat_ids, self.vocab_size, self.on_value, self.off_value)
-            output_for_reshape = self.array_mul(
-                one_hot_ids, self.embedding_table)
-        else:
-            output_for_reshape = self.gather(self.embedding_table, flat_ids, 0)
-
-        output = self.reshape(output_for_reshape, (_batch_size, _max_len, self.embedding_dim))
-        return output, self.embedding_table
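
`EmbeddingLookup.construct` has two equivalent branches: a plain gather, and a one-hot matmul used when `use_one_hot_embeddings=True`. A small NumPy check of that equivalence (illustrative vocabulary size; same padding-row convention as the deleted file):

```python
import numpy as np

vocab_size, embed_dim = 6, 4
table = np.random.normal(0, embed_dim ** -0.5, size=(vocab_size, embed_dim)).astype(np.float32)
table[0, :] = 0                                  # index 0 is padding, kept at zero

input_ids = np.array([[1, 3, 0], [2, 5, 4]])     # (N, T)
flat_ids = input_ids.reshape(-1)                 # (N * T,)

gathered = table[flat_ids]                       # gather branch
one_hot = np.eye(vocab_size, dtype=np.float32)[flat_ids]
matmul = one_hot @ table                         # one-hot matmul branch

assert np.allclose(gathered, matmul)
print(gathered.reshape(2, 3, embed_dim).shape)   # (2, 3, 4) == (N, T, D)
```
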
diff --git a/official/nlp/prophetnet/src/transformer/grad_clip.py b/official/nlp/prophetnet/src/transformer/grad_clip.py
deleted file mode 100644
index 03eae5da3d7b959281aa0cc07b1e626743972e3a..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/transformer/grad_clip.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Gradient clip."""
-import mindspore.nn as nn
-from mindspore.ops import operations as P
-from mindspore.ops import functional as F
-from mindspore.ops import composite as C
-
-GRADIENT_CLIP_TYPE = 1
-GRADIENT_CLIP_VALUE = 8
-
-
-class ClipGradients(nn.Cell):
-    """
-    Clip gradients.
-
-    Returns:
-        List, a list of clipped_grad tuples.
-    """
-
-    def __init__(self):
-        super(ClipGradients, self).__init__()
-        self.clip_by_norm = nn.ClipByNorm()
-        self.cast = P.Cast()
-        self.dtype = P.DType()
-
-    def construct(self,
-                  grads,
-                  clip_type,
-                  clip_value):
-        """
-        Construct gradient clip network.
-
-        Args:
-            grads (tuple): Tuple of gradients.
-            clip_type (int): The way to clip: 0 for value clipping, 1 for norm clipping.
-            clip_value (float): Clipping threshold.
-
-        Returns:
-            List, a list of clipped_grad tuples.
-        """
-        if clip_type != 0 and clip_type != 1:  # pylint: disable=R1714
-            return grads
-
-        new_grads = ()
-        for grad in grads:
-            dt = self.dtype(grad)
-            if clip_type == 0:
-                t = C.clip_by_value(grad, self.cast(F.tuple_to_array((-clip_value,)), dt),
-                                    self.cast(F.tuple_to_array((clip_value,)), dt))
-            else:
-                t = self.clip_by_norm(grad, self.cast(F.tuple_to_array((clip_value,)), dt))
-            new_grads = new_grads + (t,)
-
-        return new_grads
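
`ClipGradients` supports two modes selected by `clip_type`: element-wise value clipping and L2-norm clipping. A NumPy illustration of what each mode does to a single gradient (the threshold mirrors `GRADIENT_CLIP_VALUE` above; this is a sketch, not MindSpore code):

```python
import numpy as np

CLIP_VALUE = 8.0
grad = np.array([3.0, -12.0, 6.0], dtype=np.float32)

# clip_type == 0: clamp each element into [-CLIP_VALUE, CLIP_VALUE].
clip_by_value = np.clip(grad, -CLIP_VALUE, CLIP_VALUE)

# clip_type == 1: rescale so the L2 norm is at most CLIP_VALUE.
norm = np.linalg.norm(grad)
clip_by_norm = grad if norm <= CLIP_VALUE else grad * (CLIP_VALUE / norm)

print(clip_by_value)                                # [  3.  -8.   6.]
print(clip_by_norm, np.linalg.norm(clip_by_norm))   # rescaled so the norm equals 8
```
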
diff --git a/official/nlp/prophetnet/src/transformer/infer_mass.py b/official/nlp/prophetnet/src/transformer/infer_mass.py
deleted file mode 100644
index 5d6dbd69a1058a55c3cd7f8a958606bf4d19f058..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/transformer/infer_mass.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Infer api."""
-import time
-
-import mindspore.nn as nn
-import mindspore.common.dtype as mstype
-from mindspore.ops import operations as P
-from mindspore.common.tensor import Tensor
-from mindspore.train.model import Model
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-
-from mindspore import context
-
-from src.dataset import load_dataset
-from .transformer_for_infer import TransformerInferModel
-from .transformer_for_train import TransformerTraining
-from ..utils.load_weights import load_infer_weights
-
-context.set_context(
-    mode=context.GRAPH_MODE,
-    #mode=context.PYNATIVE_MODE,
-    save_graphs=False,
-    device_target="GPU",
-    reserve_class_name_in_scope=False)
-
-
-class TransformerInferCell(nn.Cell):
-    """
-    Encapsulation class of transformer network infer.
-
-    Args:
-        network (nn.Cell): Transformer model.
-
-    Returns:
-        Tuple[Tensor, Tensor], predicted_ids and predicted_probs.
-    """
-
-    def __init__(self, network):
-        super(TransformerInferCell, self).__init__(auto_prefix=False)
-        self.network = network
-
-    def construct(self,
-                  source_ids,
-                  source_mask):
-        """Defines the computation performed."""
-
-        predicted_ids, predicted_probs = self.network(source_ids,
-                                                      source_mask)
-
-        return predicted_ids, predicted_probs
-
-
-def transformer_infer(config, dataset):
-    """
-    Run infer with Transformer.
-
-    Args:
-        config (TransformerConfig): Config.
-        dataset (Dataset): Dataset.
-
-    Returns:
-        List[Dict], prediction, each example has 4 keys, "source",
-        "target", "prediction" and "prediction_prob".
-    """
-    tfm_model = TransformerInferModel(config=config, use_one_hot_embeddings=False)
-    tfm_model.init_parameters_data()
-
-    params = tfm_model.trainable_params()
-    weights = load_infer_weights(config)
-
-    for param in params:
-        value = param.data
-        name = param.name
-        if name not in weights:
-            raise ValueError(f"{name} is not found in weights.")
-
-        with open("weight_after_deal.txt", "a+") as f:
-            f.write(name + "\n")
-
-        if not isinstance(value, Tensor):
-            raise TypeError(f"Type of {name} is not Tensor.")
-
-        # The membership check above guarantees the weight exists.
-        print(name, value.asnumpy().shape)
-        param.set_data(Tensor(weights[name], mstype.float32))
-
-    print(" | Load weights successfully.")
-    tfm_infer = TransformerInferCell(tfm_model)
-    model = Model(tfm_infer)
-
-    predictions = []
-    probs = []
-    source_sentences = []
-    target_sentences = []
-    for batch in dataset.create_dict_iterator():
-        source_sentences.append(batch["source_eos_ids"])
-        target_sentences.append(batch["target_eos_ids"])
-
-        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
-        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)
-
-        start_time = time.time()
-        predicted_ids, entire_probs = model.predict(source_ids, source_mask)
-        print(f" | Batch size: {config.batch_size}, "
-              f"Time cost: {time.time() - start_time}.")
-
-        predictions.append(predicted_ids.asnumpy())
-        probs.append(entire_probs.asnumpy())
-
-    output = []
-    for inputs, ref, batch_out, batch_probs in zip(source_sentences,
-                                                   target_sentences,
-                                                   predictions,
-                                                   probs):
-        for i in range(config.batch_size):
-            if batch_out.ndim == 3:
-                batch_out = batch_out[:, 0]
-
-            example = {
-                "source": inputs[i].asnumpy().tolist(),
-                "target": ref[i].asnumpy().tolist(),
-                "prediction": batch_out[i].tolist(),
-                "prediction_prob": batch_probs[i].tolist()
-            }
-            output.append(example)
-
-    return output
-
-
-def infer(config):
-    """
-    Transformer infer api.
-
-    Args:
-        config (TransformerConfig): Config.
-
-    Returns:
-        list, result with
-    """
-    eval_dataset = load_dataset(data_files=config.test_dataset,
-                                batch_size=config.batch_size,
-                                epoch_count=1,
-                                sink_mode=config.dataset_sink_mode,
-                                shuffle=False) if config.test_dataset else None
-    prediction = transformer_infer(config, eval_dataset)
-    return prediction
-
-
-class TransformerInferPPLCell(nn.Cell):
-    """
-    Encapsulation class of transformer network infer for PPL.
-
-    Args:
-        config(TransformerConfig): Config.
-
-    Returns:
-        Tuple[Tensor, Tensor], predicted log prob and label lengths.
-    """
-    def __init__(self, config):
-        super(TransformerInferPPLCell, self).__init__()
-        self.transformer = TransformerTraining(config, is_training=False, use_one_hot_embeddings=False)
-        self.batch_size = config.batch_size
-        self.vocab_size = config.vocab_size
-        self.one_hot = P.OneHot()
-        self.on_value = Tensor(float(1), mstype.float32)
-        self.off_value = Tensor(float(0), mstype.float32)
-        self.reduce_sum = P.ReduceSum()
-        self.reshape = P.Reshape()
-        self.cast = P.Cast()
-        self.flat_shape = (config.batch_size * config.seq_length,)
-        self.batch_shape = (config.batch_size, config.seq_length)
-        self.last_idx = (-1,)
-
-    def construct(self,
-                  source_ids,
-                  source_mask,
-                  target_ids,
-                  target_mask,
-                  label_ids,
-                  label_mask):
-        """Defines the computation performed."""
-
-        predicted_log_probs = self.transformer(source_ids, source_mask, target_ids, target_mask)
-        label_ids = self.reshape(label_ids, self.flat_shape)
-        label_mask = self.cast(label_mask, mstype.float32)
-        one_hot_labels = self.one_hot(label_ids, self.vocab_size, self.on_value, self.off_value)
-
-        label_log_probs = self.reduce_sum(predicted_log_probs * one_hot_labels, self.last_idx)
-        label_log_probs = self.reshape(label_log_probs, self.batch_shape)
-        log_probs = label_log_probs * label_mask
-        lengths = self.reduce_sum(label_mask, self.last_idx)
-
-        return log_probs, lengths
-
-
-def transformer_infer_ppl(config, dataset):
-    """
-    Run infer with Transformer for PPL.
-
-    Args:
-        config (TransformerConfig): Config.
-        dataset (Dataset): Dataset.
-
-    Returns:
-        List[Dict], prediction, each example has 4 keys, "source",
-        "target", "log_prob" and "length".
-    """
-    tfm_infer = TransformerInferPPLCell(config=config)
-    tfm_infer.init_parameters_data()
-
-    parameter_dict = load_checkpoint(config.existed_ckpt)
-    load_param_into_net(tfm_infer, parameter_dict)
-
-    model = Model(tfm_infer)
-
-    log_probs = []
-    lengths = []
-    source_sentences = []
-    target_sentences = []
-    for batch in dataset.create_dict_iterator():
-        source_sentences.append(batch["source_eos_ids"])
-        target_sentences.append(batch["target_eos_ids"])
-
-        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
-        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)
-        target_ids = Tensor(batch["target_sos_ids"], mstype.int32)
-        target_mask = Tensor(batch["target_sos_mask"], mstype.int32)
-        label_ids = Tensor(batch["target_eos_ids"], mstype.int32)
-        label_mask = Tensor(batch["target_eos_mask"], mstype.int32)
-
-        start_time = time.time()
-        log_prob, length = model.predict(source_ids, source_mask, target_ids, target_mask, label_ids, label_mask)
-        print(f" | Batch size: {config.batch_size}, "
-              f"Time cost: {time.time() - start_time}.")
-
-        log_probs.append(log_prob.asnumpy())
-        lengths.append(length.asnumpy())
-
-    output = []
-    for inputs, ref, log_prob, length in zip(source_sentences,
-                                             target_sentences,
-                                             log_probs,
-                                             lengths):
-        for i in range(config.batch_size):
-            example = {
-                "source": inputs[i].tolist(),
-                "target": ref[i].tolist(),
-                "log_prob": log_prob[i].tolist(),
-                "length": length[i]
-            }
-            output.append(example)
-
-    return output
-
-
-def infer_ppl(config):
-    """
-    Transformer infer PPL api.
-
-    Args:
-        config (TransformerConfig): Config.
-
-    Returns:
-        list, inference results.
-    """
-    eval_dataset = load_dataset(data_files=config.test_dataset,
-                                batch_size=config.batch_size,
-                                epoch_count=1,
-                                sink_mode=config.dataset_sink_mode,
-                                shuffle=False) if config.test_dataset else None
-    prediction = transformer_infer_ppl(config, eval_dataset)
-    return prediction
diff --git a/official/nlp/prophetnet/src/transformer/positional_embedding.py b/official/nlp/prophetnet/src/transformer/positional_embedding.py
deleted file mode 100644
index 3d89429898d3503c59fa8c6beb0793643623dbc1..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/transformer/positional_embedding.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Positional Embedding."""
-import numpy as np
-from mindspore import nn
-from mindspore import Tensor
-import mindspore.common.dtype as mstype
-from mindspore.ops import operations as P
-
-
-def position_encoding(length, depth,
-                      min_timescale=1,
-                      max_timescale=1e4):
-    """
-    Create Tensor of sinusoids of different frequencies.
-
-    Args:
-        length (int): Length of the Tensor to create, i.e. Number of steps.
-        depth (int): Dimensions of embedding.
-        min_timescale (float): Minimum time scale.
-        max_timescale (float): Maximum time scale.
-
-    Returns:
-        np.ndarray, position encoding table of shape (T, D).
-    """
-    depth = depth // 2
-    positions = np.arange(length, dtype=np.float32)
-    log_timescale_increment = (np.log(max_timescale / min_timescale) / (depth - 1))
-    inv_timescales = min_timescale * np.exp(
-        np.arange(depth, dtype=np.float32) * -log_timescale_increment)
-    scaled_time = np.expand_dims(positions, 1) * np.expand_dims(inv_timescales, 0)
-    # Instead of interleaving SIN and COS, it is equivalent to apply SIN to the
-    # first half of the dimensions and COS to the second half, since both halves
-    # share the same positions.
-    x = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
-    return x
-
-
-class PositionalEmbedding(nn.Cell):
-    """
-    Add positional info to word embeddings.
-
-    Args:
-        embedding_size (int): Size of word embedding.
-        max_position_embeddings (int): Maximum step in this model.
-
-    Returns:
-        Tensor, shape of (N, T, D).
-    """
-
-    def __init__(self,
-                 embedding_size,
-                 max_position_embeddings=512):
-        super(PositionalEmbedding, self).__init__()
-        self.add = P.Add()
-        self.expand_dims = P.ExpandDims()
-        self.position_embedding_table = Tensor(
-            position_encoding(max_position_embeddings, embedding_size),
-            mstype.float32
-        )
-        self.gather = P.Gather()
-        self.get_shape = P.Shape()
-
-    def construct(self, word_embeddings):
-        input_shape = self.get_shape(word_embeddings)
-        input_len = input_shape[1]
-        position_embeddings = self.position_embedding_table[0:input_len:1, ::]
-        position_embeddings = self.expand_dims(position_embeddings, 0)
-        output = self.add(word_embeddings, position_embeddings)
-        return output
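
The `position_encoding` helper builds a `(length, depth)` sinusoidal table: the first `depth // 2` columns are sines and the rest cosines over geometrically spaced timescales. A self-contained NumPy reimplementation for quick inspection (same math as above, written independently):

```python
import numpy as np


def position_encoding(length, depth, min_timescale=1, max_timescale=1e4):
    half = depth // 2
    positions = np.arange(length, dtype=np.float32)
    log_inc = np.log(max_timescale / min_timescale) / (half - 1)
    inv_timescales = min_timescale * np.exp(np.arange(half, dtype=np.float32) * -log_inc)
    scaled_time = positions[:, None] * inv_timescales[None, :]
    # First half: sin terms; second half: cos terms.
    return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)


table = position_encoding(length=512, depth=768)
print(table.shape)                   # (512, 768) -> sliced to the input length, broadcast over batch
print(table[0, :3], table[0, -3:])   # position 0: sin terms are 0, cos terms are 1
```
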
diff --git a/official/nlp/prophetnet/src/transformer/residual_conn.py b/official/nlp/prophetnet/src/transformer/residual_conn.py
deleted file mode 100644
index 5cf887f89455d83a03909cd13fce64d32ff2361a..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/transformer/residual_conn.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Residual block."""
-import mindspore.nn as nn
-from mindspore.ops import operations as P
-
-
-class ResidualConnection(nn.Cell):
-    """
-    Add residual to output.
-
-    Args:
-        dropout_prob (float): Dropout rate.
-
-    Returns:
-        Tensor, with same shape of hidden_tensor.
-    """
-
-    def __init__(self, dropout_prob=0.1):
-        super(ResidualConnection, self).__init__()
-        self.add = P.Add()
-        self.dropout = nn.Dropout(1 - dropout_prob)
-
-    def construct(self, hidden_tensor, residual):
-        """
-        Construct network.
-
-        Args:
-            hidden_tensor (Tensor): Hidden tensor.
-            residual (Tensor): Input tensor.
-
-        Returns:
-            Tensor, which has the same shape with hidden_tensor and residual.
-        """
-        output = self.dropout(hidden_tensor)
-        output = self.add(output, residual)
-        return output
diff --git a/official/nlp/prophetnet/src/utils/__init__.py b/official/nlp/prophetnet/src/utils/__init__.py
deleted file mode 100644
index efb9f6f4b6a581936b69f9ffdd3dc4304dcc5b3f..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/utils/__init__.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Utils for mass model."""
-from .dictionary import Dictionary
-from .ppl_score import ngram_ppl
-from .lr_scheduler import square_root_schedule
-from .loss_monitor import LossCallBack
-from .byte_pair_encoding import bpe_encode
-from .initializer import zero_weight, one_weight, normal_weight, weight_variable
-from .rouge_score import rouge
-from .eval_score import get_score
-
-__all__ = [
-    "Dictionary",
-    "rouge",
-    "bpe_encode",
-    "ngram_ppl",
-    "square_root_schedule",
-    "LossCallBack",
-    "one_weight",
-    "zero_weight",
-    "normal_weight",
-    "weight_variable",
-    "get_score"
-]
diff --git a/official/nlp/prophetnet/src/utils/byte_pair_encoding.py b/official/nlp/prophetnet/src/utils/byte_pair_encoding.py
deleted file mode 100644
index d18b09e37fce855908e26824df9619ff24fe0f2b..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/utils/byte_pair_encoding.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""BPE."""
-import os
-import subprocess
-
-ENCODER = "subword-nmt apply-bpe -c"
-LEARN_DICT = "subword-nmt get-vocab -i"
-
-
-def bpe_encode(codes_path, src_path, output_path, dict_path):
-    """
-    Apply BPE encoding and build a vocabulary.
-
-    Args:
-        codes_path (str): BPE codes file.
-        src_path (str): Source text file path.
-        output_path (str): Output path.
-        dict_path (str): Dict path.
-    """
-    if not (os.path.isabs(codes_path)
-            and os.path.isabs(src_path)
-            and os.path.isabs(output_path)
-            and os.path.isabs(dict_path)):
-        raise ValueError("Absolute path is required.")
-
-    if not (os.path.exists(os.path.dirname(codes_path))
-            and os.path.exists(os.path.dirname(src_path))
-            and os.path.exists(os.path.dirname(output_path))
-            and os.path.exists(os.path.dirname(dict_path))):
-        raise FileNotFoundError("Dir not found.")
-
-    # Encoding.
-    print(" | Applying BPE encoding.")
-    commands = ENCODER.split() + [codes_path] + ["-i"] + [src_path] + ["-o"] + [output_path]
-    subprocess.call(commands)
-    print(" | Fetching vocabulary from single file.")
-    # Learn vocab.
-    commands = LEARN_DICT.split() + [output_path] + ["-o"] + [dict_path]
-    subprocess.call(commands)
diff --git a/official/nlp/prophetnet/src/utils/eval_score.py b/official/nlp/prophetnet/src/utils/eval_score.py
deleted file mode 100644
index f624b3d43b6d50614f6f8e8a181ccf0f111e7a60..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/utils/eval_score.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Get score by given metric."""
-from .ppl_score import ngram_ppl
-from .rouge_score import rouge
-
-
-def get_ppl_score(result):
-    """
-    Calculate Perplexity(PPL) score.
-
-    Args:
-        result (List[Dict]): Prediction, where each example has 4 keys: "source",
-            "target", "log_prob" and "length".
-
-    Returns:
-        Float, ppl score.
-    """
-    log_probs = []
-    total_length = 0
-
-    for sample in result:
-        log_prob = sample['log_prob']
-        length = sample['length']
-        log_probs.extend(log_prob)
-        total_length += length
-
-        print(f" | log_prob:{log_prob}")
-        print(f" | length:{length}")
-
-    ppl = ngram_ppl(log_probs, total_length, log_softmax=True)
-    print(f" | final PPL={ppl}.")
-    return ppl
-
-
-def get_rouge_score(result, vocab):
-    """
-    Calculate ROUGE score.
-
-    Args:
-        result (List[Dict]): Prediction, where each example has 4 keys: "source",
-            "target", "prediction" and "prediction_prob".
-        vocab (Dictionary): Vocabulary instance.
-
-    Returns:
-        Dict, average ROUGE scores.
-    """
-
-    predictions = []
-    targets = []
-    for sample in result:
-        predictions.append(' '.join([vocab[t] for t in sample['prediction']]))
-        targets.append(' '.join([vocab[t] for t in sample['target']]))
-        print(f" | source: {' '.join([vocab[t] for t in sample['source']])}")
-        print(f" | target: {targets[-1]}")
-
-    return rouge(predictions, targets)
-
-
-def get_score(result, vocab=None, metric='rouge'):
-    """
-    Get eval score.
-
-    Args:
-        result (List[Dict]): Prediction.
-        vocab (Dictionary): Vocabulary instance.
-        metric (str): Metric to use, 'rouge' or 'ppl'. Default: 'rouge'.
-
-    Returns:
-        Score computed by the selected metric.
-    """
-    score = None
-    if metric == 'rouge':
-        score = get_rouge_score(result, vocab)
-    elif metric == 'ppl':
-        score = get_ppl_score(result)
-    else:
-        print(" | Metric not in (rouge, ppl).")
-
-    return score
diff --git a/official/nlp/prophetnet/src/utils/initializer.py b/official/nlp/prophetnet/src/utils/initializer.py
deleted file mode 100644
index d1b5ba92ba82cf598f94a253c3caaf9cec15ba4b..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/utils/initializer.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Initializer."""
-import math
-import numpy as np
-
-from mindspore import Tensor
-
-
-def _compute_fans(shape):
-    """
-    Computes the number of input and output units for a weight shape.
-
-    Args:
-        shape (tuple): Integer shape tuple.
-
-    Returns:
-        tuple, integer scalars (fan_in, fan_out).
-    """
-    if not shape:
-        fan_in = fan_out = 1
-    elif len(shape) == 1:
-        fan_in = fan_out = shape[0]
-    elif len(shape) == 2:
-        fan_in = shape[0]
-        fan_out = shape[1]
-    else:
-        # Assuming convolution kernels (2D, 3D, or more).
-        # kernel shape: (..., input_depth, depth)
-        receptive_field_size = 1
-        for dim in shape[:-2]:
-            receptive_field_size *= dim
-        fan_in = shape[-2] * receptive_field_size
-        fan_out = shape[-1] * receptive_field_size
-    return int(fan_in), int(fan_out)
-
-
-def weight_variable(shape):
-    """
-    Generate weight var.
-
-    Args:
-        shape (tuple): Shape.
-
-    Returns:
-        Tensor, var.
-    """
-    scale_shape = shape
-    fan_in, fan_out = _compute_fans(scale_shape)
-    scale = 1.0 / max(1., (fan_in + fan_out) / 2.)
-    limit = math.sqrt(3.0 * scale)
-    values = np.random.uniform(-limit, limit, shape).astype(np.float32)
-    return Tensor(values)
-
-
-def one_weight(shape):
-    """
-    Generate weight with ones.
-
-    Args:
-        shape (tuple): Shape.
-
-    Returns:
-        Tensor, var.
-    """
-    ones = np.ones(shape).astype(np.float32)
-    return Tensor(ones)
-
-
-def zero_weight(shape):
-    """
-    Generate weight with zeros.
-
-    Args:
-        shape (tuple): Shape.
-
-    Returns:
-        Tensor, var.
-    """
-    zeros = np.zeros(shape).astype(np.float32)
-    return Tensor(zeros)
-
-
-def normal_weight(shape, num_units):
-    """
-    Generate weight with normal dist.
-
-    Args:
-        shape (tuple): Shape.
-        num_units (int): Dimension.
-
-    Returns:
-        Tensor, var.
-    """
-    norm = np.random.normal(0.0, num_units ** -0.5, shape).astype(np.float32)
-    return Tensor(norm)
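
`weight_variable` draws from a uniform distribution whose limit works out, for 2-D shapes, to the Glorot/Xavier bound `sqrt(6 / (fan_in + fan_out))`. A short NumPy check of that identity (shapes are illustrative):

```python
import math
import numpy as np


def glorot_limit(shape):
    fan_in, fan_out = shape[0], shape[1]              # 2-D case of _compute_fans
    scale = 1.0 / max(1.0, (fan_in + fan_out) / 2.0)
    return math.sqrt(3.0 * scale)                     # limit used by weight_variable


shape = (512, 1024)
limit = glorot_limit(shape)
print(limit, math.sqrt(6.0 / sum(shape)))             # both 0.0625: same bound
values = np.random.uniform(-limit, limit, shape).astype(np.float32)
print(values.min() >= -limit and values.max() <= limit)
```
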
diff --git a/official/nlp/prophetnet/src/utils/loss_monitor.py b/official/nlp/prophetnet/src/utils/loss_monitor.py
deleted file mode 100644
index 1c3795887d12e3edea16208d867075490f58e1ec..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/utils/loss_monitor.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Loss monitor."""
-import time
-from mindspore.train.callback import Callback
-from config import TransformerConfig
-
-
-class LossCallBack(Callback):
-    """
-    Monitor the loss in training.
-
-    If the loss is NAN or INF, terminate training.
-
-    Note:
-        If per_print_times is 0, the loss is not printed.
-
-    Args:
-        per_print_times (int): Print the loss every `per_print_times` steps. Default: 1.
-    """
-    time_stamp_init = False
-    time_stamp_first = 0
-
-    def __init__(self, config: TransformerConfig, per_print_times: int = 1):
-        super(LossCallBack, self).__init__()
-        if not isinstance(per_print_times, int) or per_print_times < 0:
-            raise ValueError("per_print_times must be an int and >= 0.")
-        self.config = config
-        self._per_print_times = per_print_times
-
-        if not self.time_stamp_init:
-            self.time_stamp_first = self._get_ms_timestamp()
-            self.time_stamp_init = True
-
-    def step_end(self, run_context):
-        cb_params = run_context.original_args()
-        file_name = "./loss.log"
-        with open(file_name, "a+") as f:
-            time_stamp_current = self._get_ms_timestamp()
-            is_accu_step = cb_params.net_outputs[3]
-            accu_length = cb_params.net_outputs[4]
-            # Only update at non-accumulation steps
-            if not is_accu_step:
-                f.write("time: {}, epoch: {}, step: {}, outputs are {},{},{}.\n".format(
-                    time_stamp_current - self.time_stamp_first,
-                    cb_params.cur_epoch_num,
-                    cb_params.cur_step_num // accu_length,
-                    str(cb_params.net_outputs[0].asnumpy()),
-                    str(cb_params.net_outputs[1].asnumpy()),
-                    str(cb_params.net_outputs[2].asnumpy())
-                ))
-
-    @staticmethod
-    def _get_ms_timestamp():
-        t = time.time()
-        return int(round(t * 1000))
diff --git a/official/nlp/prophetnet/src/utils/lr_scheduler.py b/official/nlp/prophetnet/src/utils/lr_scheduler.py
deleted file mode 100644
index 879c88ee531a35410073829f24c5bb514748dec8..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/utils/lr_scheduler.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Learning scheduler."""
-from math import ceil
-import numpy as np
-
-import mindspore.common.dtype as mstype
-import mindspore.nn.learning_rate_schedule as lr_schedules
-from mindspore import Tensor
-from mindspore.ops import operations as P
-
-
-def square_root_schedule(lr, update_num, decay_start_step,
-                         warmup_steps=2000,
-                         min_lr=1e-7):
-    """
-    Decay the LR based on ISR (inverse square root).
-
-    During warm-up::
-        lrs = np.linspace(0, lr, warmup_steps)
-
-    After warm-up:
-        decay_factor = lr * sqrt(warmup_steps)
-        lr = decay_factor / sqrt(step) if step >= decay_start_step else lr
-
-    Args:
-        lr (float): Init learning rate.
-        update_num (int): Total steps.
-        decay_start_step (int): Decay begins after `decay_start_step` steps.
-        warmup_steps (int): Warm up steps.
-        min_lr (float): Min learning rate.
-
-    Returns:
-        np.ndarray, learning rate array.
-    """
-    warmup_end_lr = lr
-    warmup_init_lr = 1e-7 if warmup_steps > 0 else warmup_end_lr
-
-    # If warmup_init_lr > lr, then lr_step is negative.
-    # Otherwise, it's positive.
-    lr_step = (warmup_end_lr - warmup_init_lr) / warmup_steps
-    decay_factor = lr * warmup_steps ** 0.5
-
-    lrs = np.empty(shape=update_num, dtype=np.float32)
-    _start_step = 0
-    if 0 < warmup_steps < update_num:
-        lrs[:warmup_steps] = np.linspace(warmup_init_lr, warmup_end_lr, warmup_steps)
-        _start_step = warmup_steps
-
-    for step in range(_start_step, update_num):
-        if step < warmup_steps:
-            _lr = warmup_init_lr + step * lr_step
-        elif step < decay_start_step:
-            _lr = lr
-        else:
-            _lr = decay_factor * step ** -0.5
-            if _lr < min_lr:
-                _lr = min_lr
-        lrs[step] = _lr
-
-    return lrs
-
-
-def polynomial_decay_scheduler(lr, min_lr, decay_steps, total_update_num, warmup_steps=1000, power=1.0):
-    """
-    Implementation of a polynomial decay learning rate scheduler, which cycles by default.
-
-    Args:
-        lr (float): Initial learning rate.
-        warmup_steps (int): Warmup steps.
-        decay_steps (int): Decay steps.
-        total_update_num (int): Total update steps.
-        min_lr (float): Min learning rate.
-        power (float): Power factor.
-
-    Returns:
-        np.ndarray, learning rate of each step.
-    """
-    lrs = np.zeros(shape=total_update_num, dtype=np.float32)
-
-    if decay_steps <= 0:
-        raise ValueError("`decay_steps` must be larger than 0.")
-
-    _start_step = 0
-    if 0 < warmup_steps < total_update_num:
-        warmup_end_lr = lr
-        warmup_init_lr = 0 if warmup_steps > 0 else warmup_end_lr
-        lrs[:warmup_steps] = np.linspace(warmup_init_lr, warmup_end_lr, warmup_steps)
-        _start_step = warmup_steps
-
-    for step in range(_start_step, total_update_num):
-        _step = step - _start_step
-        # Cycle: extend the decay horizon to the next multiple of `decay_steps`.
-        ratio = max(1, ceil(_step / decay_steps))
-        _decay_steps = decay_steps * ratio
-        lrs[step] = (lr - min_lr) * pow(1 - _step / _decay_steps, power) + min_lr
-
-    return lrs
-
-
-class BertLearningRate(lr_schedules.LearningRateSchedule):
-    """
-    Implementation of a warmup-polydecay learning rate scheduler.
-
-    Args:
-        learning_rate (float): The initial value of learning rate.
-        end_learning_rate (float): The end value of learning rate.
-        warmup_steps (int): The warm up steps of learning rate.
-        decay_steps (int): A value used to calculate decayed learning rate.
-        power (float): A value used to calculate decayed learning rate.
-
-    Returns:
-        Tensor. The learning rate value for the current step.
-    """
-    def __init__(self, learning_rate, end_learning_rate, warmup_steps, decay_steps, power):
-        super(BertLearningRate, self).__init__()
-        self.warmup_lr = lr_schedules.WarmUpLR(learning_rate, warmup_steps)
-        self.decay_lr = lr_schedules.PolynomialDecayLR(learning_rate, end_learning_rate, decay_steps, power)
-        self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32))
-
-        self.greater = P.Greater()
-        self.one = Tensor(np.array([1.0]).astype(np.float32))
-        self.cast = P.Cast()
-
-    def construct(self, global_step):
-        is_warmup = self.cast(self.greater(self.warmup_steps, global_step), mstype.float32)
-        warmup_lr = self.warmup_lr(global_step)
-        decay_lr = self.decay_lr(global_step)
-        lr = (self.one - is_warmup) * decay_lr + is_warmup * warmup_lr
-        return lr
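
`square_root_schedule` warms up linearly to `lr`, holds it until `decay_start_step`, then decays with `lr * sqrt(warmup_steps) / sqrt(step)`, floored at `min_lr`. A compact NumPy sketch of the same schedule, useful for eyeballing a few values (hyperparameters below are illustrative):

```python
import numpy as np

lr, warmup_steps, decay_start_step, total, min_lr = 1e-3, 2000, 4000, 10000, 1e-7
decay_factor = lr * warmup_steps ** 0.5

lrs = np.empty(total, dtype=np.float32)
lrs[:warmup_steps] = np.linspace(1e-7, lr, warmup_steps)          # linear warm-up
for step in range(warmup_steps, total):
    lrs[step] = lr if step < decay_start_step else max(min_lr, decay_factor * step ** -0.5)

print(lrs[0], lrs[warmup_steps - 1])   # warm-up endpoints: 1e-07 ... 1e-03
print(lrs[3000], lrs[9000])            # flat region vs. decayed value (~4.7e-4)
```
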
diff --git a/official/nlp/prophetnet/src/utils/ppl_score.py b/official/nlp/prophetnet/src/utils/ppl_score.py
deleted file mode 100644
index 4a9139ced0368a4def4bf1e39a55e289e6759f03..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/utils/ppl_score.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Calculate Perplexity score under N-gram language model."""
-from typing import Union
-
-import numpy as np
-
-def ngram_ppl(prob: Union[np.ndarray, list], length: int, log_softmax=False, index: float = np.e):
-    """
-    Calculate Perplexity(PPL) score under N-gram language model.
-
-    If `prob` contains raw probabilities, each value should lie in (0, 1].
-    If `prob` contains log probabilities, set `log_softmax=True`.
-
-    The value of N depends on the model.
-
-    Args:
-        prob (Union[list, np.ndarray]): Prediction probabilities
-            (or log probabilities) of the sentence.
-        length (int): Number of tokens used for normalization.
-        log_softmax (bool): Whether `prob` contains log probabilities.
-        index (float): Base of the log probabilities.
-
-    Returns:
-        float, ppl score.
-    """
-    if not length:
-        return np.inf
-    if not isinstance(prob, (np.ndarray, list)):
-        raise TypeError("`prob` must be type of list or np.ndarray.")
-    if not isinstance(prob, np.ndarray):
-        prob = np.array(prob)
-    if prob.shape[0] == 0:
-        raise ValueError("`prob` length must greater than 0.")
-
-    print(f'length:{length}, log_prob:{prob}')
-
-    if log_softmax:
-        prob = np.sum(prob) / length
-        ppl = 1. / np.power(index, prob)
-        print(f'avg log prob:{prob}')
-    else:
-        p = 1.
-        for i in range(prob.shape[0]):
-            p *= (1. / prob[i])
-        ppl = pow(p, 1 / length)
-
-    print(f'ppl val:{ppl}')
-    return ppl
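
In the `log_softmax=True` branch, `ngram_ppl` reduces to `PPL = index ** (-average log prob)`, i.e. `exp(-mean log prob)` when the log probabilities are natural logs. A tiny numeric check under that assumption:

```python
import numpy as np

log_probs = np.array([-0.2, -1.0, -0.5, -0.3])    # per-token log probabilities (base e)
length = len(log_probs)

avg_log_prob = np.sum(log_probs) / length
ppl = 1.0 / np.power(np.e, avg_log_prob)          # same as np.exp(-avg_log_prob)

print(ppl, np.exp(-avg_log_prob))                 # both == 1.6487...
```
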
diff --git a/official/nlp/prophetnet/src/utils/rouge_score.py b/official/nlp/prophetnet/src/utils/rouge_score.py
deleted file mode 100644
index 29c1f7d1d0245a6626091e3722cc3c661b26feb1..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/src/utils/rouge_score.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Calculate ROUGE score."""
-from typing import List
-from rouge import Rouge
-
-H_PATH = "summaries.txt"
-R_PATH = "references.txt"
-
-
-def rouge(hypothesis: List[str], target: List[str]):
-    """
-    Calculate ROUGE score.
-
-    Args:
-        hypothesis (List[str]): Inference result.
-        target (List[str]): Reference.
-
-    Returns:
-        Dict, average ROUGE-1/2/L scores (also printed and written to files).
-    """
-
-    def cut(s):
-        idx = s.find("[SEP]")
-        if idx != -1:
-            s = s[:idx]
-        return s
-
-    if not hypothesis or not target:
-        raise ValueError("`hypothesis` and `target` cannot be empty.")
-
-    edited_hyp = []
-    edited_ref = []
-    for h, r in zip(hypothesis, target):
-        h = "[BOS]" + h[5:]
-        h = cut(h).replace("[BOS]", "").strip()
-        r = cut(r).replace("[SEP]", "").strip()
-        edited_hyp.append(h + "\n")
-        edited_ref.append(r + "\n")
-
-    _rouge = Rouge()
-    scores = _rouge.get_scores(edited_hyp, edited_ref, avg=True)
-    print(" | ROUGE Score:")
-    print(f" | RG-1(F): {scores['rouge-1']['f'] * 100:8.2f}")
-    print(f" | RG-2(F): {scores['rouge-2']['f'] * 100:8.2f}")
-    print(f" | RG-L(F): {scores['rouge-l']['f'] * 100:8.2f}")
-
-    with open(H_PATH, "w") as f:
-        f.writelines(edited_hyp)
-
-    with open(R_PATH, "w") as f:
-        f.writelines(edited_ref)
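A minimal usage sketch of the `rouge` helper above (illustrative strings; the first five characters of each hypothesis are assumed to be a special token that the helper replaces with `[BOS]`):

```python
# Hypothetical decoded outputs and references.
hypothesis = ["[CLS] the cat sat on the mat [SEP] trailing tokens"]
target = ["the cat sat on the mat [SEP]"]

# Prints RG-1 / RG-2 / RG-L F-scores and writes summaries.txt / references.txt.
rouge(hypothesis, target)
```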
diff --git a/official/nlp/prophetnet/tokenize_corpus.py b/official/nlp/prophetnet/tokenize_corpus.py
deleted file mode 100644
index 4717cfdd12bfda89aae736c20f9b6fb13a9ce14e..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/tokenize_corpus.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Tokenizer."""
-import os
-import argparse
-from typing import Callable
-from multiprocessing import Pool
-
-parser = argparse.ArgumentParser(description='Corpus tokenizer; text files must end with `.txt`.')
-parser.add_argument("--corpus_folder", type=str, default="", required=True,
-                    help="Corpus folder path; if multiple folders are provided, separate them with ','.")
-parser.add_argument("--output_folder", type=str, default="", required=True,
-                    help="Output folder path.")
-parser.add_argument("--tokenizer", type=str, default="nltk", required=False,
-                    help="Tokenizer to be used, nltk or jieba; if nltk is not fully installed, "
-                         "jieba is used instead.")
-parser.add_argument("--pool_size", type=int, default=2, required=False,
-                    help="Processes pool size.")
-
-TOKENIZER = Callable
-
-
-def create_tokenized_sentences(file_path, tokenized_file):
-    """
-    Create tokenized sentences.
-
-    Args:
-        file_path (str): Text file.
-        tokenized_file (str): Output file.
-    """
-    global TOKENIZER
-
-    print(f" | Processing {file_path}.")
-    tokenized_sen = []
-    with open(file_path, "r") as file:
-        for sen in file:
-            tokens = TOKENIZER(sen)
-            tokens = [t for t in tokens if t != " "]
-            if len(tokens) > 175:
-                continue
-            tokenized_sen.append(" ".join(tokens) + "\n")
-
-    with open(tokenized_file, "w") as file:
-        file.writelines(tokenized_sen)
-    print(f" | Wrote to {tokenized_file}.")
-
-
-def tokenize():
-    """Tokenizer."""
-    global TOKENIZER
-
-    args, _ = parser.parse_known_args()
-    src_folder = args.corpus_folder.split(",")
-
-    try:
-        from nltk.tokenize import word_tokenize
-
-        TOKENIZER = word_tokenize
-    except (ImportError, ModuleNotFoundError, LookupError):
-        try:
-            import jieba
-        except Exception as e:
-            raise e
-
-        print(" | NLTK not found, using jieba instead.")
-        TOKENIZER = jieba.cut
-
-    if args.tokenizer == "jieba":
-        import jieba
-        TOKENIZER = jieba.cut
-
-    pool = Pool(args.pool_size)
-    for folder in src_folder:
-        for file in os.listdir(folder):
-            if not file.endswith(".txt"):
-                continue
-            file_path = os.path.join(folder, file)
-            out_path = os.path.join(args.output_folder, file.replace(".txt", "_tokenized.txt"))
-            pool.apply_async(create_tokenized_sentences, (file_path, out_path,))
-    pool.close()
-    pool.join()
-
-
-if __name__ == '__main__':
-    tokenize()
diff --git a/official/nlp/prophetnet/weights_average.py b/official/nlp/prophetnet/weights_average.py
deleted file mode 100644
index 911181ba458703d2d5ff808f29a541b7f9c9f99e..0000000000000000000000000000000000000000
--- a/official/nlp/prophetnet/weights_average.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Weight average."""
-import os
-import argparse
-import numpy as np
-from mindspore.train.serialization import load_checkpoint
-
-parser = argparse.ArgumentParser(description='transformer')
-parser.add_argument("--input_files", type=str, default=None, required=False,
-                    help="Multi ckpt files path.")
-parser.add_argument("--input_folder", type=str, default=None, required=False,
-                    help="Ckpt files folder.")
-parser.add_argument("--output_file", type=str, default=None, required=True,
-                    help="Output model file path.")
-
-
-def average_me_models(ckpt_list):
-    """
-    Average multi ckpt params.
-
-    Args:
-        ckpt_list (list): Ckpt paths.
-
-    Returns:
-        dict, params dict.
-    """
-    avg_model = {}
-    # load all checkpoint
-    for ckpt in ckpt_list:
-        if not ckpt.endswith(".ckpt"):
-            continue
-        if not os.path.exists(ckpt):
-            raise FileNotFoundError(f"Checkpoint file {ckpt} does not exist.")
-
-        print(f" | Loading ckpt from {ckpt}.")
-        ms_ckpt = load_checkpoint(ckpt)
-        for param_name in ms_ckpt:
-            if param_name not in avg_model:
-                avg_model[param_name] = []
-            avg_model[param_name].append(ms_ckpt[param_name].data.asnumpy())
-
-    for name in avg_model:
-        avg_model[name] = sum(avg_model[name]) / float(len(avg_model[name]))
-
-    return avg_model
-
-
-def main():
-    """Entry point."""
-    args, _ = parser.parse_known_args()
-
-    if not args.input_files and not args.input_folder:
-        raise ValueError("At least one of `--input_files` or `--input_folder` must be provided.")
-
-    ckpt_list = []
-    if args.input_files:
-        ckpt_list.extend(args.input_files.split(","))
-
-    if args.input_folder and os.path.exists(args.input_folder) and os.path.isdir(args.input_folder):
-        for file in os.listdir(args.input_folder):
-            ckpt_list.append(os.path.join(args.input_folder, file))
-
-    avg_weights = average_me_models(ckpt_list)
-    np.savez(args.output_file, **avg_weights)
-
-
-if __name__ == '__main__':
-    main()
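A minimal sketch of averaging checkpoints programmatically (paths are illustrative; `average_me_models` above is assumed to be importable):

```python
import numpy as np

# Average two checkpoints and store the result as a NumPy .npz archive,
# mirroring what main() does for --input_files "a.ckpt,b.ckpt" --output_file avg_model.
ckpt_list = ["./ckpt/tacotron2-1.ckpt", "./ckpt/tacotron2-2.ckpt"]
avg_weights = average_me_models(ckpt_list)
np.savez("avg_model", **avg_weights)
```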
diff --git a/research/audio/tacotron2/README.md b/research/audio/tacotron2/README.md
deleted file mode 100644
index 0461b19dd2e9e498d397b8330276f1b27ba86285..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/README.md
+++ /dev/null
@@ -1,311 +0,0 @@
-# Contents
-
-- [Tacotron2 Description](#tacotron2-description)
-- [Model Architecture](#model-architecture)
-- [Dataset](#dataset)
-- [Environment Requirements](#environment-requirements)
-- [Quick Start](#quick-start)
-- [Script Description](#script-description)
-    - [Script and Sample Code](#script-and-sample-code)
-    - [Script Parameters](#script-parameters)
-    - [Training Process](#training-process)
-    - [Inference Process](#inference-process)
-- [Model Description](#model-description)
-    - [Performance](#performance)
-        - [Training Performance](#training-performance)
-        - [Inference Performance](#inference-performance)
-- [Description of Random Situation](#description-of-random-situation)
-- [ModelZoo Homepage](#modelzoo-homepage)
-
-# [Tacotron2 Description](#contents)
-
-Tacotron2 is a TTS model that consists of two phases: in the first phase, a sequence-to-sequence network predicts a mel spectrogram from the text sequence;
-in the second phase, a WaveNet vocoder converts the mel spectrogram into a waveform. Training and evaluation of the Tacotron2 model are supported on the Ascend platform.
-
-[Paper](https://arxiv.org/abs/1712.05884): Jonathan, et al. Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions.
-
-# [Model Architecture](#contents)
-
-Tacotron2 is essentially a sequence-to-sequence model with an encoder and a decoder. The encoder consists of three convolutional layers and one BiLSTM layer, while the decoder uses two LSTM layers to decode the next state. Location-aware attention connects the encoder and the decoder. The decoded state is fed into a postnet of five convolutional layers to predict the mel spectrogram, and the predicted mel spectrogram features are finally fed into the WaveNet vocoder to synthesize the speech signal.
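The two-phase flow can be summarized by the following schematic sketch (pseudo-interfaces only, not the actual `src/` API):

```python
def synthesize(text, tacotron2, wavenet_vocoder, text_to_sequence):
    """Schematic of the two-phase Tacotron2 pipeline."""
    sequence = text_to_sequence(text)   # encode characters to ids
    mel = tacotron2(sequence)           # phase 1: encoder + attention + decoder + postnet
    waveform = wavenet_vocoder(mel)     # phase 2: vocoder turns the mel spectrogram into audio
    return waveform
```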
-
-# [Dataset](#contents)
-
-In the following sections, we will introduce how to run the scripts using the related dataset below.
-
-Dataset used: [The LJ Speech Dataset](<https://keithito.com/LJ-Speech-Dataset>)
-
-- Dataset size: 2.6G
-- Data format: audio clips (13100) and transcription
-
-- The dataset structure is as follows:
-
-    ```text
-    .
-    └── LJSpeech-1.1
-        ├─ wavs                  // audio clips files
-        └─ metadata.csv          // transcripts
-    ```
-
-# [Environment Requirements](#contents)
-
-- Hardware (Ascend)
-    - Prepare hardware environment with Ascend processor.
-- Framework
-    - [MindSpore](https://www.mindspore.cn/install/en)
-- For more information, please check the resources below:
-    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/en/master/index.html)
-    - [MindSpore Python API](https://www.mindspore.cn/docs/en/master/index.html)
-
-# [Quick Start](#contents)
-
-After installing MindSpore via the official website, you can start training and evaluation as follows:
-
-- running on Ascend
-
-  ```python
-  # install python3 package
-  pip install -r requirements.txt
-  # generate hdf5 file from dataset
-  python generate_hdf5.py --data_path /path/to/LJSpeech-1.1
-  ```
-
-  ```shell
-  cd scripts
-  # run standalone training
-  bash run_standalone_train.sh [DATASET_PATH] [DEVICE_ID] [DATANAME]
-  # example: bash run_standalone_train.sh ../. 0 ljspeech
-
-  # run distributed training
-  bash run_distributed_train.sh [DATASET_PATH] [RANK_TABLE_PATH] [DATANAME] [RANK_SIZE] [DEVICE_BEGIN]
-  # example: bash run_distributed_train.sh ../. ../hccl_8p_01234567_127.0.0.1.json ljspeech 8 0
-
-  # run evaluation
-  bash run_eval.sh [OUTPUT_PATH] [DATANAME] [MODEL_CKPT] [DEVICE_ID]
-  # example: bash run_eval.sh output ljspeech /path/to/model.ckpt 0
-  ```
-
-  For distributed training, a hccl configuration file with JSON format needs to be created in advance.
-
-  Please follow the instructions in the link below:
-
-  <https://gitee.com/mindspore/models/tree/master/utils/hccl_tools>.
-
-- ModelArts (if you want to run on ModelArts, please check the official documentation of [ModelArts](https://support.huaweicloud.com/modelarts/); you can then start training as follows)
-
-    - Standalone training example on ModelArts
-
-      ```python
-      # run standalone training example
-
-      # (1) Add "config_path='/path_to_code/[DATASET_NAME]_config.yaml'" on the website UI interface.
-      # (2) Perform a or b.
-      #       a. Set "enable_modelarts=True" on [DATASET_NAME]_config.yaml file.
-      #          Set "dataset_path='/cache/data/[DATASET_NAME]'" on [DATASET_NAME]_config.yaml file.
-      #          Set "data_name='[DATASET_NAME]'" on [DATASET_NAME]_config.yaml file.
-      #          (optional) Set other parameters you need in the [DATASET_NAME]_config.yaml file.
-      #       b. Add "enable_modelarts=True" on the website UI interface.
-      #          Add "dataset_path='/cache/data/[DATASET_NAME]'" on the website UI interface.
-      #          Add "data_name='[DATASET_NAME]'" on the website UI interface.
-      #          (optional) Add other parameters on the website UI interface.
-      # (3) Upload a zip dataset to the S3 bucket. (You could also upload the original dataset, but it may be very slow.)
-      # (4) Set the code directory to "/path/to/tacotron2" on the website UI interface.
-      # (5) Set the startup file to "train.py" on the website UI interface.
-      # (6) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
-      # (7) Create your job.
-      ```
-
-    - Distributed Training example on Modelarts
-
-      ```python
-      # run distributed training example
-
-      # (1) Add "config_path='/path_to_code/[DATASET_NAME]_config.yaml'" on the website UI interface.
-      # (2) Perform a or b.
-      #       a. Set "enable_modelarts=True" on [DATASET_NAME]_config.yaml file.
-      #          Set "run_distribute=True" on [DATASET_NAME]_config.yaml file.
-      #          Set "dataset_path='/cache/data/[DATASET_NAME]'" on [DATASET_NAME]_config.yaml file.
-      #          Set "data_name='[DATASET_NAME]'" on [DATASET_NAME]_config.yaml file.
-      #          (optional) Set other parameters you need in the [DATASET_NAME]_config.yaml file.
-      #       b. Add "enable_modelarts=True" on the website UI interface.
-      #          Add "run_distribute=True" on the website UI interface.
-      #          Add "dataset_path='/cache/data/[DATASET_NAME]'" on the website UI interface.
-      #          Add "data_name='[DATASET_NAME]'" on the website UI interface.
-      #          (optional) Add other parameters on the website UI interface.
-      # (3) Upload a zip dataset to the S3 bucket. (You could also upload the original dataset, but it may be very slow.)
-      # (4) Set the code directory to "/path/to/tacotron2" on the website UI interface.
-      # (5) Set the startup file to "train.py" on the website UI interface.
-      # (6) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
-      # (7) Create your job.
-      ```
-
-    - Eval on ModelArts
-
-      ```python
-      # run eval example
-
-      # (1) Add "config_path='/path_to_code/[DATASET_NAME]_config.yaml'" on the website UI interface.
-      # (2) Perform a or b.
-      #       a. Set "enable_modelarts=True" on [DATASET_NAME]_config.yaml file.
-      #          Set "data_name='[DATASET_NAME]'" on [DATASET_NAME]_config.yaml file.
-      #          Set "model_ckpt='/cache/checkpoint_path/model.ckpt'" on [DATASET_NAME]_config.yaml file.
-      #          Set "text='text to synthesize'" on [DATASET_NAME]_config.yaml file.
-      #          Set "checkpoint_url='s3://dir_to_trained_ckpt/'" on [DATASET_NAME]_config.yaml file.
-      #          (optional) Set other parameters you need in the [DATASET_NAME]_config.yaml file.
-      #       b. Add "enable_modelarts=True" on the website UI interface.
-      #          Add "data_name='[DATASET_NAME]'" on the website UI interface.
-      #          Add "model_ckpt=/cache/checkpoint_path/model.ckpt" on the website UI interface.
-      #          Add "text='text to synthesize'" on the website UI interface.
-      #          Add "checkpoint_url='s3://dir_to_trained_ckpt/'" on the website UI interface.
-      #          (optional) Add other parameters on the website UI interface.
-      # (3) Upload or copy your pretrained model to S3 bucket.
-      # (4) Upload a zip dataset to the S3 bucket. (You could also upload the original dataset, but it may be very slow.)
-      # (5) Set the code directory to "/path/to/tacotron2" on the website UI interface.
-      # (6) Set the startup file to "eval.py" on the website UI interface.
-      # (7) Set the "Dataset path" and "Output file path" and "Job log path" to your path on the website UI interface.
-      # (8) Create your job.
-      ```
-
-# [Script Description](#contents)
-
-## [Script and Sample Code](#contents)
-
-```path
-
-tacotron2/
-├── eval.py                             // evaluation entry
-├── generate_hdf5.py                    // generate hdf5 file from dataset
-├── ljspeech_config.yaml
-├── model_utils
-│   ├── config.py                       // Parse arguments
-│   ├── device_adapter.py               // Device adapter for ModelArts
-│   ├── __init__.py                     // init file
-│   ├── local_adapter.py                // Local adapter
-│   └── moxing_adapter.py               // Moxing adapter for ModelArts
-├── README.md                           // descriptions about Tacotron2
-├── requirements.txt                    // required packages
-├── scripts
-│   ├── run_distribute_train.sh         // launch distributed training
-│   ├── run_eval.sh                     // launch evaluation
-│   └── run_standalone_train.sh         // launch standalone training
-├── src
-│   ├── callback.py                     // callbacks to monitor the training
-│   ├── dataset.py                      // define dataset and sampler
-│   ├── hparams.py                      // Tacotron2 configs
-│   ├── rnn_cells.py                    // rnn cell implementations
-│   ├── rnns.py                         // lstm implementations with length mask
-│   ├── tacotron2.py                    // Tacotron2 networks
-│   ├── text
-│   │   ├── cleaners.py                 // clean text sequence
-│   │   ├── cmudict.py                  // define cmudict
-│   │   ├── __init__.py                 // process text sequence
-│   │   ├── numbers.py                  // normalize numbers
-│   │   └── symbols.py                  // symbols for encoding
-│   └── utils
-│       ├── audio.py                    // extract audio features
-│       └── convert.py                  // normalize mel spectrogram by mean/var
-└── train.py                            // training entry
-
-```
-
-## [Script Parameters](#contents)
-
-Parameters for both training and evaluation can be set in [DATASET]_config.yaml
-
-- config for LJSpeech-1.1
-
-  ```python
-  'pretrain_ckpt': '/path/to/model.ckpt'# use pretrained ckpt at training phase
-  'model_ckpt': '/path/to/model.ckpt'   # use pretrained ckpt at inference phase
-  'lr': 0.002                           # initial learning rate
-  'batch_size': 16                      # training batch size
-  'epoch_num': 2000                     # total training epochs
-  'warmup_epochs': 30                   # warmup lr epochs
-  'save_ckpt_dir': './ckpt'             # specify ckpt saving dir
-  'keep_checkpoint_max': 10             # only keep the last keep_checkpoint_max checkpoint
-
-  'text': 'text to synthesize'          # specify text to synthesize at inference
-  'dataset_path': '/dir/to/hdf5'        # specify dir to hdf5 file
-  'data_name': 'ljspeech'               # specify dataset name
-  'audioname': 'text2speech'            # specify filename for generated audio
-  'run_distribute': False               # whether distributed training
-  'device_id': 0                        # specify which device to use
-  ```
-
-### [Training Process](#content)
-
-- Running on Ascend
-
-    - To start training on a single device, run the shell script:
-
-        ```bash
-        cd scripts
-        bash run_standalone_train.sh [DATASET_PATH] [DEVICE_ID] [DATANAME]
-        ```
-
-    - To run distributed training of Tacotron2 on multiple devices, execute the following command in `scripts/`:
-
-        ```bash
-        cd scripts
-        bash run_distributed_train.sh [DATASET_PATH] [RANK_TABLE_PATH] [DATANAME] [RANK_SIZE] [DEVICE_BEGIN]
-        ```
-
-    Note: `DATASET_PATH` is the directory that contains the hdf5 file.
-
-### [Inference Process](#content)
-
-- Running on Ascend
-
-    - To run evaluation of Tacotron2, use the command below:
-
-        ```bash
-        cd scripts
-        bash run_eval.sh [OUTPUT_PATH] [DATANAME] [MODEL_CKPT] [DEVICE_ID]
-        ```
-
-    Note: `OUTPUT_PATH` is the directory where evaluation outputs are saved.
-
-# [Model Description](#contents)
-
-## [Performance](#contents)
-
-### Training Performance
-
-| Parameters                 | Tacotron2                                                      |
-| -------------------------- | ---------------------------------------------------------------|
-| Resource                   | Ascend 910; OS Euler2.8              |
-| uploaded Date              | 10/25/2021 (month/day/year)                                    |
-| MindSpore Version          | 1.3.0                                                          |
-| Dataset                    | LJSpeech-1.1                                                 |
-| Training Parameters        | 8p, epoch=2000, batch_size=16  |
-| Optimizer                  | Adam                                                           |
-| Loss Function              | BinaryCrossEntropy, MSE                                |
-| outputs                    | mel spectrogram                                                     |
-| Loss                       | 0.33                                                        |
-| Speed                      | 1264 ms/step                                                   |
-| Total time                 | 8p: 72h 19m 41s                                                |
-| Checkpoint                 | 328.9M (.ckpt file)                                              |
-| Scripts                    | [Tacotron2 script](https://gitee.com/mindspore/models/tree/master/research/audio/tacotron2) |
-
-### Inference Performance
-
-| Parameters                 | Tacotron2                                                       |
-| -------------------------- | ----------------------------------------------------------------|
-| Resource                   | Ascend 910; OS Euler2.8                   |
-| uploaded Date              | 10/25/2021 (month/day/year)                                 |
-| MindSpore Version          | 1.3.0                                                           |
-| Dataset                    | LJSpeech-1.1                         |
-| batch_size                 | 1                                                               |
-| outputs                    | mel spectrogram                       |
-| Speed                      | 1p: 125s to synthesize a 6s mel spectrogram                      |
-
-## [Description of Random Situation](#contents)
-
-There is only one random situation.
-
-- Initialization of some model weights.
-
-Some seeds have already been set in train.py to avoid the randomness of weight initialization.
-
-# [ModelZoo Homepage](#contents)
-
-Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/research/audio/tacotron2/eval.py b/research/audio/tacotron2/eval.py
deleted file mode 100644
index e8c0559c8136513542686b4b90a9131790d751f8..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/eval.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""evaluate."""
-
-import time
-import os
-from os.path import join
-
-import matplotlib
-import matplotlib.pylab as plt
-import numpy as np
-
-import mindspore
-from mindspore import context
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from mindspore import Tensor
-
-from src.utils.audio import save_wav, inv_melspectrogram
-from src.tacotron2 import Tacotron2
-from src.hparams import hparams as hps
-from src.text import text_to_sequence
-
-from model_utils.config import config
-from model_utils.moxing_adapter import moxing_wrapper
-from model_utils.device_adapter import get_device_id, get_device_num
-
-matplotlib.use('Agg')
-
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=config.device_target)
-
-
-def load_model(ckpt_pth):
-    '''
-    load model
-    '''
-    net = Tacotron2()
-    param_dict = load_checkpoint(ckpt_pth)
-
-    load_param_into_net(net, param_dict)
-    net.set_train(False)
-    net.decoder.prenet.dropout.set_train(True)
-
-    return net.to_float(mindspore.float32)
-
-
-def infer(text, net):
-    '''
-    inference
-    '''
-    sequence = text_to_sequence(text, hps.text_cleaners)
-    sequence = Tensor(sequence, mindspore.int32).view(1, -1)
-    text_mask = Tensor(np.zeros(sequence.shape).astype('bool'))
-
-    mel_outputs, mel_outputs_postnet, _, alignments = net.inference(
-        sequence, text_mask)
-
-    return (mel_outputs, mel_outputs_postnet, alignments)
-
-
-def plot_data(data, figsize=(16, 4)):
-    '''
-    plot alignments
-    '''
-    _, axes = plt.subplots(1, len(data), figsize=figsize)
-    for i in range(len(data)):
-        axes[i].imshow(data[i], aspect='auto', origin='lower')
-
-
-
-def plot(output, dir_pth, filename):
-    '''
-    plot alignments
-    '''
-    mel_outputs, mel_outputs_postnet, alignments = output
-    plot_data((mel_outputs.asnumpy()[0],
-               mel_outputs_postnet.asnumpy()[0],
-               alignments.asnumpy()[0].T))
-    plt.savefig(join(dir_pth, filename + '.png'))
-
-
-def audio(output, dir_pth, filename):
-    '''
-    save waveform
-    '''
-    mel_outputs, _, _ = output
-
-    wav = inv_melspectrogram(mel_outputs.asnumpy()[0])
-    np.save(join(dir_pth, filename + '-wave.npy'), wav, allow_pickle=False)
-    save_wav(wav, join(dir_pth, filename + '.wav'))
-
-
-def save_mel(output, dir_pth, filename):
-    '''
-    save mel spectrogram
-    '''
-    mel_outputs, _, _ = output
-    np.save(
-        join(
-            dir_pth,
-            filename +
-            '-feats.npy'),
-        mel_outputs.asnumpy()[0].T,
-        allow_pickle=False)
-
-def modelarts_pre_process():
-    '''modelarts pre process function.'''
-    def unzip(zip_file, save_dir):
-        import zipfile
-        s_time = time.time()
-        if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):
-            zip_isexist = zipfile.is_zipfile(zip_file)
-            if zip_isexist:
-                fz = zipfile.ZipFile(zip_file, 'r')
-                data_num = len(fz.namelist())
-                print("Extract Start...")
-                print("unzip file num: {}".format(data_num))
-                data_print = int(data_num / 100) if data_num > 100 else 1
-                i = 0
-                for file in fz.namelist():
-                    if i % data_print == 0:
-                        print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
-                    i += 1
-                    fz.extract(file, save_dir)
-                print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
-                                                     int(int(time.time() - s_time) % 60)))
-                print("Extract Done.")
-            else:
-                print("This is not zip.")
-        else:
-            print("Zip has been extracted.")
-
-    if config.need_modelarts_dataset_unzip:
-        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
-        save_dir_1 = os.path.join(config.data_path)
-
-        sync_lock = "/tmp/unzip_sync.lock"
-
-        # Each server contains 8 devices as most.
-        if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
-            print("Zip file path: ", zip_file_1)
-            print("Unzip file save dir: ", save_dir_1)
-            unzip(zip_file_1, save_dir_1)
-            print("===Finish extract data synchronization===")
-            try:
-                os.mknod(sync_lock)
-            except IOError:
-                pass
-
-        while True:
-            if os.path.exists(sync_lock):
-                break
-            time.sleep(1)
-
-        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
-
-@moxing_wrapper(pre_process=modelarts_pre_process)
-def run_tacotron2_infer():
-    ''' run tacotron2 inference '''
-    model = load_model(config.model_ckpt)
-    print('Successfully loaded checkpoint {}'.format(config.model_ckpt))
-    print(config.output_path)
-    start = time.time()
-    outputs = infer(config.text, model)
-    end = time.time()
-    print('inference elapsed :{}s'.format(end - start))
-    plot(outputs, config.output_path, config.audioname)
-    audio(outputs, config.output_path, config.audioname)
-    save_mel(outputs, config.output_path, config.audioname)
-
-if __name__ == '__main__':
-    run_tacotron2_infer()
diff --git a/research/audio/tacotron2/generate_hdf5.py b/research/audio/tacotron2/generate_hdf5.py
deleted file mode 100644
index 900b0fa033dd3f1f26ac7dc3779086cfa7e7d2e2..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/generate_hdf5.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-''' generate hdf5 file '''
-import os
-import argparse
-import random
-import h5py
-from tqdm import tqdm
-
-import numpy as np
-import librosa
-from src.utils.audio import load_wav, melspectrogram
-from src.hparams import hparams as hps
-from src.text import text_to_sequence
-
-random.seed(0)
-
-
-def files_to_list(fdir):
-    ''' collect text and filepath to list'''
-    f_list = []
-    with open(os.path.join(fdir, 'metadata.csv'), encoding='utf-8') as f:
-        for line in f:
-            parts = line.strip().split('|')
-            wav_path = os.path.join(fdir, 'wavs', '%s.wav' % parts[0])
-            f_list.append([wav_path, parts[1]])
-    return f_list
-
-
-def get_mel_text_pair(filename_and_text):
-    '''preprocessing mel and text '''
-    filename, text = filename_and_text[0], filename_and_text[1]
-    text += '~'
-    text = get_text(text)
-    mel = get_mel(filename)
-    return (text, mel)
-
-
-def get_text(text):
-    '''encode text to sequence'''
-    return text_to_sequence(text, hps.text_cleaners)
-
-
-def get_mel(filename):
-    ''' extract mel spectrogram '''
-    wav = load_wav(filename)
-    trim_wav, _ = librosa.effects.trim(
-        wav, top_db=60, frame_length=2048, hop_length=512)
-    wav = np.concatenate(
-        (trim_wav,
-         np.zeros(
-             (5 * hps.hop_length),
-             np.float32)),
-        0)
-    mel = melspectrogram(wav).astype(np.float32)
-    return mel
-
-
-def generate_hdf5(fdir):
-    '''generate hdf5 file'''
-    f_list = files_to_list(fdir)
-    random.shuffle(f_list)
-
-    max_text, max_mel = 0, 0
-    for idx, filename_and_text in tqdm(enumerate(f_list)):
-        text, mel = get_mel_text_pair(filename_and_text)
-
-        max_text = max(max_text, len(text))
-        max_mel = max(max_mel, mel.shape[1])
-
-        with h5py.File('ljdataset.hdf5', 'a') as hf:
-            hf.create_dataset('{}_mel'.format(idx), data=mel)
-            hf.create_dataset('{}_text'.format(idx), data=text)
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '--data_path',
-        type=str,
-        default='',
-        help='Path to LJSpeech-1.1')
-    args = parser.parse_args()
-    generate_hdf5(args.data_path)
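A minimal sketch of reading the generated file back (assuming `ljdataset.hdf5` was produced by the script above; the key layout follows the `create_dataset` calls in `generate_hdf5`):

```python
import h5py

with h5py.File('ljdataset.hdf5', 'r') as hf:
    text = hf['0_text'][()]   # encoded text sequence of sample 0
    mel = hf['0_mel'][()]     # mel spectrogram of sample 0, shape (n_mels, n_frames)
    print(text.shape, mel.shape)
```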
diff --git a/research/audio/tacotron2/ljspeech_config.yaml b/research/audio/tacotron2/ljspeech_config.yaml
deleted file mode 100644
index 50833d838d9cdcfbe296590ba2d4e7c2b4c554ef..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/ljspeech_config.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-# Builtin Configurations(DO NOT CHANGE THESE CONFIGURATIONS unless you know exactly what you are doing)
-enable_modelarts: False
-# Url for modelarts
-data_url: ""        # set on the page
-train_url: ""       # set on the page
-checkpoint_url: ""  # set on the page
-# Path for local
-data_path: "/cache/data"              # download data to data_path from data_url(obs address) 
-output_path: "/cache/train"           # upload output data from output_path dirs to train_url(obs address)
-load_path: "/cache/checkpoint_path"   # download checkpoint to load_path from checkpoint_url(obs address)
-device_target: "Ascend"
-need_modelarts_dataset_unzip: False
-modelarts_dataset_unzip_name: ""
-
-# ==============================================================================
-# options
-epoch_num: 2000
-warmup_epochs: 30
-batch_size: 16
-lr: 0.002
-pretrain_ckpt: "" # if modelarts: '/cache/checkpoint_path/model.ckpt'" set on the page; else: set /path/to/model.ckpt
-save_ckpt_dir: "./ckpt/"
-keep_ckpt_max: 10
-text: "Once move to the directory, then, execute the following main script with a chainer backend.~"
-audioname: "text2speech"
-dataset_path: ""   # set dataset dir; if modelarts: set /cache/data/ on the page; else: set dataset dir '../'
-data_name: "ljspeech"
-run_distribute: True
-model_ckpt: ""  #  used when eval; if modelarts: set /cache/checkpoint_path/model.ckpt; else: set /path/to/model.ckpt 
-# export option
-device_id: 0
-ckpt_file: ""
-file_name: "tacotron2"
-file_format: "MINDIR"
----
-
-# Help description for each configuration
-device_target: "Device target"
-dataset_path: "FastText input data file path."
-data_name: "dataset name. choice in ['ljspeech']"
-run_distribute: "Run distribute, default: false."
-model_ckpt: "existed checkpoint address."
-# export option
-device_id: "Device id"
-ckpt_file: "Checkpoint file path"
-file_name: "Output file name"
-file_format: "Output file format, choice in ['AIR', 'ONNX', 'MINDIR']"
diff --git a/research/audio/tacotron2/model_utils/__init__.py b/research/audio/tacotron2/model_utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/research/audio/tacotron2/model_utils/config.py b/research/audio/tacotron2/model_utils/config.py
deleted file mode 100644
index 2895b1e4a4fa69551be64485d8716b08fa3956a2..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/model_utils/config.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-"""Parse arguments"""
-
-import os
-import ast
-import argparse
-from pprint import pprint, pformat
-import yaml
-
-class Config:
-    """
-    Configuration namespace. Convert dictionary to members.
-    """
-    def __init__(self, cfg_dict):
-        for k, v in cfg_dict.items():
-            if isinstance(v, (list, tuple)):
-                setattr(self, k, [Config(x) if isinstance(x, dict) else x for x in v])
-            else:
-                setattr(self, k, Config(v) if isinstance(v, dict) else v)
-
-    def __str__(self):
-        return pformat(self.__dict__)
-
-    def __repr__(self):
-        return self.__str__()
-
-
-def parse_cli_to_yaml(parser, cfg, helper=None, choices=None, cfg_path="default_config.yaml"):
-    """
-    Parse command line arguments to the configuration according to the default yaml.
-
-    Args:
-        parser: Parent parser.
-        cfg: Base configuration.
-        helper: Helper description.
-        cfg_path: Path to the default yaml config.
-    """
-    parser = argparse.ArgumentParser(description="[REPLACE THIS at config.py]",
-                                     parents=[parser])
-    helper = {} if helper is None else helper
-    choices = {} if choices is None else choices
-    for item in cfg:
-        if not isinstance(cfg[item], list) and not isinstance(cfg[item], dict):
-            help_description = helper[item] if item in helper else "Please reference to {}".format(cfg_path)
-            choice = choices[item] if item in choices else None
-            if isinstance(cfg[item], bool):
-                parser.add_argument("--" + item, type=ast.literal_eval, default=cfg[item], choices=choice,
-                                    help=help_description)
-            else:
-                parser.add_argument("--" + item, type=type(cfg[item]), default=cfg[item], choices=choice,
-                                    help=help_description)
-    args = parser.parse_args()
-    return args
-
-
-def parse_yaml(yaml_path):
-    """
-    Parse the yaml config file.
-
-    Args:
-        yaml_path: Path to the yaml config.
-    """
-    with open(yaml_path, 'r') as fin:
-        try:
-            cfgs = yaml.load_all(fin.read(), Loader=yaml.FullLoader)
-            cfgs = [x for x in cfgs]
-            if len(cfgs) == 1:
-                cfg_helper = {}
-                cfg = cfgs[0]
-                cfg_choices = {}
-            elif len(cfgs) == 2:
-                cfg, cfg_helper = cfgs
-                cfg_choices = {}
-            elif len(cfgs) == 3:
-                cfg, cfg_helper, cfg_choices = cfgs
-            else:
-                raise ValueError("At most 3 docs (config, description for help, choices) are supported in config yaml")
-            print(cfg_helper)
-        except:
-            raise ValueError("Failed to parse yaml")
-    return cfg, cfg_helper, cfg_choices
-
-
-def merge(args, cfg):
-    """
-    Merge the base config from yaml file and command line arguments.
-
-    Args:
-        args: Command line arguments.
-        cfg: Base configuration.
-    """
-    args_var = vars(args)
-    for item in args_var:
-        cfg[item] = args_var[item]
-    return cfg
-
-
-def get_config():
-    """
-    Get Config according to the yaml file and cli arguments.
-    """
-    parser = argparse.ArgumentParser(description="default name", add_help=False)
-    current_dir = os.path.dirname(os.path.abspath(__file__))
-    parser.add_argument("--config_path", type=str, default=os.path.join(current_dir, "../default_config.yaml"),
-                        help="Config file path")
-    path_args, _ = parser.parse_known_args()
-    default, helper, choices = parse_yaml(path_args.config_path)
-    args = parse_cli_to_yaml(parser=parser, cfg=default, helper=helper, choices=choices, cfg_path=path_args.config_path)
-    final_config = merge(args, default)
-    pprint(final_config)
-    print("Please check the above information for the configurations", flush=True)
-    return Config(final_config)
-
-config = get_config()
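A minimal sketch of how the YAML-plus-CLI flow behaves (illustrative values; `Config` and `merge` above are assumed to be importable): every scalar key in the YAML becomes a `--key` command-line flag, and CLI values override the YAML defaults.

```python
from argparse import Namespace

defaults = {"lr": 0.002, "batch_size": 16}       # values loaded from the YAML file
cli_args = Namespace(lr=0.001, batch_size=16)    # values produced by argparse

cfg = Config(merge(cli_args, defaults))          # CLI value overrides the YAML default
print(cfg.lr, cfg.batch_size)                    # 0.001 16
```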
diff --git a/research/audio/tacotron2/model_utils/device_adapter.py b/research/audio/tacotron2/model_utils/device_adapter.py
deleted file mode 100644
index 7c5d7f837ddaa8f53cf8dc5573cac0e36881e7b1..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/model_utils/device_adapter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-"""Device adapter for ModelArts"""
-
-from .config import config
-
-if config.enable_modelarts:
-    from .moxing_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
-else:
-    from .local_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
-
-__all__ = [
-    "get_device_id", "get_device_num", "get_rank_id", "get_job_id"
-]
diff --git a/research/audio/tacotron2/model_utils/local_adapter.py b/research/audio/tacotron2/model_utils/local_adapter.py
deleted file mode 100644
index 769fa6dc78e59eb66dbc8e6773accdc1d08b649e..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/model_utils/local_adapter.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-"""Local adapter"""
-
-import os
-
-def get_device_id():
-    device_id = os.getenv('DEVICE_ID', '0')
-    return int(device_id)
-
-
-def get_device_num():
-    device_num = os.getenv('RANK_SIZE', '1')
-    return int(device_num)
-
-
-def get_rank_id():
-    global_rank_id = os.getenv('RANK_ID', '0')
-    return int(global_rank_id)
-
-
-def get_job_id():
-    return "Local Job"
diff --git a/research/audio/tacotron2/model_utils/moxing_adapter.py b/research/audio/tacotron2/model_utils/moxing_adapter.py
deleted file mode 100644
index 25838a7da99a27a1bb744684c1f75f80f5704688..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/model_utils/moxing_adapter.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-"""Moxing adapter for ModelArts"""
-
-import os
-import functools
-from mindspore import context
-from .config import config
-
-_global_sync_count = 0
-
-def get_device_id():
-    device_id = os.getenv('DEVICE_ID', '0')
-    return int(device_id)
-
-
-def get_device_num():
-    device_num = os.getenv('RANK_SIZE', '1')
-    return int(device_num)
-
-
-def get_rank_id():
-    global_rank_id = os.getenv('RANK_ID', '0')
-    return int(global_rank_id)
-
-
-def get_job_id():
-    job_id = os.getenv('JOB_ID')
-    job_id = job_id if job_id != "" else "default"
-    return job_id
-
-def sync_data(from_path, to_path):
-    """
-    Download data from remote OBS to a local directory if the first url is a remote url and the second one is a local path;
-    otherwise, upload data from the local directory to remote OBS.
-    """
-    import moxing as mox
-    import time
-    global _global_sync_count
-    sync_lock = "/tmp/copy_sync.lock" + str(_global_sync_count)
-    _global_sync_count += 1
-
-    # Each server contains 8 devices as most.
-    if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
-        print("from path: ", from_path)
-        print("to path: ", to_path)
-        mox.file.copy_parallel(from_path, to_path)
-        print("===finish data synchronization===")
-        try:
-            os.mknod(sync_lock)
-        except IOError:
-            pass
-        print("===save flag===")
-
-    while True:
-        if os.path.exists(sync_lock):
-            break
-        time.sleep(1)
-
-    print("Finish sync data from {} to {}.".format(from_path, to_path))
-
-
-def moxing_wrapper(pre_process=None, post_process=None):
-    """
-    Moxing wrapper to download dataset and upload outputs.
-    """
-    def wrapper(run_func):
-        @functools.wraps(run_func)
-        def wrapped_func(*args, **kwargs):
-            # Download data from data_url
-            if config.enable_modelarts:
-                if config.data_url:
-                    sync_data(config.data_url, config.data_path)
-                    print("Dataset downloaded: ", os.listdir(config.data_path))
-                if config.checkpoint_url:
-                    sync_data(config.checkpoint_url, config.load_path)
-                    print("Preload downloaded: ", os.listdir(config.load_path))
-                if config.train_url:
-                    sync_data(config.train_url, config.output_path)
-                    print("Workspace downloaded: ", os.listdir(config.output_path))
-
-                context.set_context(save_graphs_path=os.path.join(config.output_path, str(get_rank_id())))
-                config.device_num = get_device_num()
-                config.device_id = get_device_id()
-                if not os.path.exists(config.output_path):
-                    os.makedirs(config.output_path)
-
-                if pre_process:
-                    pre_process()
-
-            # Run the main function
-            run_func(*args, **kwargs)
-
-            # Upload data to train_url
-            if config.enable_modelarts:
-                if post_process:
-                    post_process()
-
-                if config.train_url:
-                    print("Start to copy output directory")
-                    sync_data(config.output_path, config.train_url)
-        return wrapped_func
-    return wrapper
diff --git a/research/audio/tacotron2/requirements.txt b/research/audio/tacotron2/requirements.txt
deleted file mode 100644
index 24979b70345d07cc2fe02313d3992511005932da..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/requirements.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-numpy==1.19.5
-librosa==0.6.0
-numba==0.48.0
-llvmlite==0.31.0
-h5py==3.1.0
-scipy==1.6.0
-docopt==0.6.2
-Unidecode==1.3.2
-inflect==5.3.0
-matplotlib
\ No newline at end of file
diff --git a/research/audio/tacotron2/scripts/run_distribute_train.sh b/research/audio/tacotron2/scripts/run_distribute_train.sh
deleted file mode 100644
index 1caa218d0c94a1374996cde94d9db28467018d11..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/scripts/run_distribute_train.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-echo "=============================================================================================================="
-echo "Please run the script as: "
-echo "sh run_distributed_train.sh DATASET_PATH RANK_TABLE_PATH DATANAME RANK_SIZE DEVICE_BEGIN"
-echo "for example: sh run_distributed_train.sh /dir/to/dataset /home/workspace/rank_table_file.json ljspeech 8 0"
-echo "It is better to use absolute path."
-echo "Please pay attention that the dataset should correspond to dataset_name"
-echo "=============================================================================================================="
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-if [ $3 != "ljspeech" ]
-then
-  echo "Unrecognized dataset name, the name can be chosen from [ljspeech]"
-exit 1
-fi
-
-DATASET=$(get_real_path $1)
-echo $DATASET
-RANK_TABLE_PATH=$(get_real_path $2)
-if [ ! -d $DATASET ]
-then
-  echo "Error: DATA_PATH=$DATASET is not a directory"
-exit 1
-fi
-current_exec_path=$(pwd)
-echo ${current_exec_path}
-
-export RANK_TABLE_FILE=$RANK_TABLE_PATH
-
-
-echo $RANK_TABLE_FILE
-export RANK_SIZE=$4
-export DEVICE_NUM=$4
-
-if [ $# -ge 1 ]; then
-  if [ $3 == 'ljspeech' ]; then
-    DATANAME='ljspeech'
-  else
-    echo "Unrecognized dataset name, the name can be chosen from [ljspeech]"
-    exit 1
-  fi
-fi
-
-config_path="./${DATANAME}_config.yaml"
-echo "config path is : ${config_path}"
-
-BEGIN=$5
-for((i=$BEGIN;i<RANK_SIZE+BEGIN;i++));
-do
-    rm -rf ${current_exec_path}/device$i
-    mkdir ${current_exec_path}/device$i
-    cd ${current_exec_path}/device$i || exit
-    cp ../../*.py ./
-    cp ../../*.yaml ./
-    cp -r ../../src ./
-    cp -r ../../model_utils ./
-    cp -r ../*.sh ./
-    let rank=$i-$BEGIN
-    export RANK_ID=$rank
-    export DEVICE_ID=$i
-    echo "start training for rank $i, device $DEVICE_ID"
-    env > env.log
-    python ../../train.py --config_path $config_path --dataset_path $DATASET --data_name $DATANAME > distributed_tacotron2.log 2>&1 &
-    cd ${current_exec_path} || exit
-done
-cd ${current_exec_path} || exit
diff --git a/research/audio/tacotron2/scripts/run_eval.sh b/research/audio/tacotron2/scripts/run_eval.sh
deleted file mode 100644
index 0139831cd5bc8b8be2b6203454b224a971a6fac3..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/scripts/run_eval.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-echo "=============================================================================================================="
-echo "Please run the script as: "
-echo "sh run_eval.sh OUTPUT_PATH DATANAME MODEL_CKPT DEVICE_ID"
-echo "for example: bash run_eval.sh output ljspeech device0/ckpt0/tacotron2-5-118.ckpt 0"
-echo "It is better to use absolute path."
-echo "=============================================================================================================="
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-OUTPUT_PATH=$1
-echo $PWD/eval/$OUTPUT_PATH
-DATANAME=$2
-MODEL_CKPT=$(get_real_path $3)
-DEVICEID=$4
-export DEVICE_NUM=1
-export DEVICE_ID=$DEVICEID
-export RANK_ID=0
-export RANK_SIZE=1
-
-config_path="./${DATANAME}_config.yaml"
-echo "config path is : ${config_path}"
-
-if [ -d "eval" ];
-then
-    rm -rf ./eval
-fi
-mkdir -p ./eval/$OUTPUT_PATH
-cp ../*.py ./eval
-cp ../*.yaml ./eval
-cp -r ../src ./eval
-cp -r ../model_utils ./eval
-cp -r ../scripts/*.sh ./eval
-cd ./eval || exit
-echo "start evaling for device $DEVICE_ID"
-env > env.log
-python ../../eval.py --config_path $config_path --output_path $PWD/$OUTPUT_PATH  --model_ckpt $MODEL_CKPT> eval_tacotron2.log 2>&1 &
-cd ..
diff --git a/research/audio/tacotron2/scripts/run_standalone_train.sh b/research/audio/tacotron2/scripts/run_standalone_train.sh
deleted file mode 100644
index af7ff887b3b553b5f3f9077139df76bc75a21064..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/scripts/run_standalone_train.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-echo "=============================================================================================================="
-echo "Please run the script as: "
-echo "sh run_standalone_train.sh DATASET_PATH DEVICE_ID DATANAME"
-echo "for example: sh run_standalone_train.sh /home/workspace/ag 0 ljspeech"
-echo "It is better to use absolute path."
-echo "Please pay attention that the dataset should correspond to dataset_name"
-echo "=============================================================================================================="
-if [[ $# -lt 3 ]]; then
-  echo "Usage: bash run_standalone_train.sh [DATA_PATH] [DEVICE_ID] [DATANAME]
-  DATANAME can choose from [ljspeech]"
-exit 1
-fi
-
-if [ $3 != "ljspeech" ]
-then
-  echo "Unrecognized dataset name, the name can be chosen from [ljspeech]"
-exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-DATASET=$(get_real_path $1)
-DATANAME=$(basename $DATASET)
-DEVICEID=$2
-if [ $# -ge 1 ]; then
-  if [ $3 == 'ljspeech' ]; then
-    DATANAME='ljspeech'
-  else
-    echo "Unrecognized dataset name"
-    exit 1
-  fi
-fi
-
-config_path="./${DATANAME}_config.yaml"
-echo "config path is : ${config_path}"
-
-export DEVICE_NUM=1
-export DEVICE_ID=$DEVICEID
-export RANK_ID=0
-export RANK_SIZE=1
-
-
-if [ -d "train" ];
-then
-    rm -rf ./train
-fi
-mkdir ./train
-cp ../*.py ./train
-cp ../*.yaml ./train
-cp -r ../src ./train
-cp -r ../model_utils ./train
-cp -r ../scripts/*.sh ./train
-cd ./train || exit
-echo "start training for device $DEVICE_ID"
-env > env.log
-python train.py --config_path $config_path --dataset_path $DATASET --data_name $DATANAME > standalone_tacotron2.log 2>&1 &
-cd ..
diff --git a/research/audio/tacotron2/src/callback.py b/research/audio/tacotron2/src/callback.py
deleted file mode 100644
index 313388883032ad1c3e0be4d4b1da0c6807f04d6e..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/callback.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-''' callback function '''
-import time
-import math
-
-from mindspore.train.callback import Callback
-from mindspore import Tensor
-import numpy as np
-
-
-
-def get_lr(init_lr, total_epoch, step_per_epoch,
-           anneal_step=250):
-    ''' warmup lr schedule'''
-    total_step = total_epoch * step_per_epoch
-    lr_step = []
-
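-    # warmup schedule: lr ramps up linearly for the first anneal_step steps, then decays as 1/sqrt(step)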
-    for step in range(total_step):
-        lambda_lr = anneal_step**0.5 * \
-            min((step + 1) * anneal_step**-1.5, (step + 1)**-0.5)
-        lr_step.append(init_lr * lambda_lr)
-    learning_rate = np.array(lr_step).astype(np.float32)
-    return learning_rate
-
-
-class TimeMonitor(Callback):
-    """
-    Time monitor for calculating cost of each epoch.
-
-    Args:
-        data_size (int): number of steps in an epoch.
-    """
-
-    def __init__(self, data_size):
-        super(TimeMonitor, self).__init__()
-        self.data_size = data_size
-
-    def epoch_begin(self, run_context):
-        self.epoch_time = time.time()
-
-    def epoch_end(self, run_context):
-        epoch_seconds = (time.time() - self.epoch_time)
-        per_step_seconds = epoch_seconds / self.data_size
-        print("epoch time: {}, per step time: {}".format(epoch_seconds, per_step_seconds), flush=True)
-
-    def step_begin(self, run_context):
-        self.step_time = time.time()
-
-    def step_end(self, run_context):
-        step_seconds = (time.time() - self.step_time)
-        print("step time {}".format(step_seconds), flush=True)
-
-class LossCallBack(Callback):
-    """
-    Monitor the loss in training.
-    If the loss in NAN or INF terminating training.
-    Note:
-        if per_print_times is 0 do not print loss.
-    Args:
-        per_print_times (int): Print loss every times. Default: 1.
-    """
-    def __init__(self, dataset_size=-1):
-        super(LossCallBack, self).__init__()
-        self._dataset_size = dataset_size
-    def step_end(self, run_context):
-        """
-        Print loss after each step
-        """
-        cb_params = run_context.original_args()
-        if self._dataset_size > 0:
-            percent, epoch_num = math.modf(cb_params.cur_step_num / self._dataset_size)
-            if percent == 0:
-                percent = 1
-                epoch_num -= 1
-            print("epoch: {}, current epoch percent: {}, step: {}, outputs are {}"
-                  .format(int(epoch_num), "%.3f" % percent, cb_params.cur_step_num, str(cb_params.net_outputs)),
-                  flush=True)
-        else:
-            print("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num,
-                                                               str(cb_params.net_outputs)), flush=True)
-
-class Monitor(Callback):
-    """
-    Monitor loss and time.
-
-    Args:
-        lr_init (numpy array): learning rate schedule used during training.
-
-    Returns:
-        None
-
-    Examples:
-        >>> Monitor(lr_init=Tensor([0.05]*100).asnumpy())
-    """
-
-    def __init__(self, lr_init=None):
-        super(Monitor, self).__init__()
-        self.lr_init = lr_init
-        self.lr_init_len = len(lr_init)
-
-    def epoch_begin(self, run_context):
-        self.losses = []
-        self.epoch_time = time.time()
-
-    def epoch_end(self, run_context):
-        cb_params = run_context.original_args()
-
-        epoch_mseconds = (time.time() - self.epoch_time)
-        per_step_mseconds = epoch_mseconds / cb_params.batch_num
-        print("epoch time: {:5.3f}, per step time: {:5.3f}, avg loss: {:5.6f}".format(\
-            epoch_mseconds, per_step_mseconds, np.mean(self.losses)))
-
-    def step_begin(self, run_context):
-        self.step_time = time.time()
-
-    def step_end(self, run_context):
-        """step end"""
-        cb_params = run_context.original_args()
-        step_mseconds = (time.time() - self.step_time)
-        step_loss = cb_params.net_outputs
-
-        if isinstance(
-                step_loss, (tuple, list)) and isinstance(
-                    step_loss[0], Tensor):
-            step_loss = step_loss[0]
-        if isinstance(step_loss, Tensor):
-            step_loss = np.mean(step_loss.asnumpy())
-
-        self.losses.append(step_loss)
-        cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num
-
-        print("epoch: [{:3d}/{:3d}], step:[{:5d}/{:5d}], loss:[{:5.6f}/{:5.6f}], time:[{:5.3f}], lr:[{:.9f}]".format(\
-            cb_params.cur_epoch_num -\
-            1, cb_params.epoch_num, cur_step_in_epoch, cb_params.batch_num, step_loss,\
-            np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1].asnumpy()))
diff --git a/research/audio/tacotron2/src/dataset.py b/research/audio/tacotron2/src/dataset.py
deleted file mode 100644
index 0a37888607d28c0d997e4056203fa034665d63a6..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/dataset.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-''' define dataset and sampler function '''
-import time
-import math
-import numpy as np
-import h5py
-from src.hparams import hparams as hps
-
-
-class Sampler():
-    ''' sampler '''
-    def __init__(self, sample_nums, rank, group_size, random_seed=0):
-        self.batch_size = hps.batch_size
-        self.rank = rank
-        self.group_size = group_size
-        self.seed = random_seed
-        self.sample_nums = sample_nums
-
-        self.sample_indexes = np.arange(self.sample_nums).tolist()
-        self.total_sample_nums = int(
-            math.ceil(self.sample_nums / self.batch_size)) * self.batch_size
-
-        self.sample_indexes += self.sample_indexes[:(
-            self.total_sample_nums - self.sample_nums)]
-        print('total training samples: {}'.format(len(self.sample_indexes)))
-        print('batch_size : {}'.format(self.batch_size))
-        num_steps_per_epoch = int(
-            math.ceil(
-                self.sample_nums /
-                self.batch_size))
-        self.total_steps = int(
-            math.ceil(
-                num_steps_per_epoch / self.group_size)) * self.group_size
-        self.step_indexes = np.arange(num_steps_per_epoch).tolist()
-        self.step_indexes += self.step_indexes[:(
-            self.total_steps - len(self.step_indexes))]
-
-    def __iter__(self):
-        self.seed = (self.seed + 1) & 0xffffffff
-        np.random.seed(self.seed)
-        np.random.shuffle(self.step_indexes)
-        step_indexes = self.step_indexes
-        np.random.seed(self.seed)
-        np.random.shuffle(self.sample_indexes)
-        sample_indexes = self.sample_indexes
-        sample_indexes_bins = [sample_indexes[i:i + self.batch_size]
-                               for i in range(0, len(sample_indexes), self.batch_size)]
-
-        index = []
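-        # each rank takes every group_size-th shuffled step, so devices draw disjoint batches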
-        for step_idx in step_indexes[self.rank::self.group_size]:
-            index.extend(sample_indexes_bins[step_idx])
-        return iter(index)
-
-
-class ljdataset():
-    ''' ljspeech-1.1 dataset'''
-    def __init__(self, hdf5_pth, group_size):
-        self.max_text_len = 0
-        self.max_mel_len = 0
-        load_time = time.time()
-        self.dataset = []
-        with h5py.File(hdf5_pth, 'r') as ff:
-            self.dsname = sorted(ff.keys())
-            self.length = len(ff.keys()) // 2
-            for idx in range(0, self.length * 2, 2):
-                self.dataset.append({
-                    'mel': ff[self.dsname[idx]][:],
-                    'text': ff[self.dsname[idx + 1]][:]
-                })
-                self.max_mel_len = max(
-                    ff[self.dsname[idx]][:].shape[1], self.max_mel_len)
-                self.max_text_len = max(
-                    len(ff[self.dsname[idx + 1]][:]), self.max_text_len)
-        self.sample_nums = len(self.dataset)
-
-        hps.max_text_len = self.max_text_len
-
-        print('Training number: {}'.format(self.sample_nums))
-        print('Load target time: {:.3f} s'.format(time.time() - load_time))
-        print('max text length : {}'.format(self.max_text_len))
-        print('max mel length : {}'.format(self.max_mel_len))
-        self.n_frames_per_step = hps.n_frames_per_step
-        if self.max_mel_len % self.n_frames_per_step != 0:
-            self.max_mel_len += self.n_frames_per_step - \
-                self.max_mel_len % self.n_frames_per_step
-            assert self.max_mel_len % self.n_frames_per_step == 0
-
-        num_steps_per_epoch = int(math.ceil(self.sample_nums / hps.batch_size))
-        self.group_size = group_size
-        print('{} steps per epoch'.format(num_steps_per_epoch))
-
-    def __getitem__(self, index):
-        meta = self.dataset[index]
-        text_padded, input_lengths, mel_padded, gate_padded, text_mask, mel_mask, rnn_mask = self.sort_and_pad(
-            meta)
-
-        return text_padded, input_lengths, mel_padded, gate_padded, text_mask, mel_mask, rnn_mask
-
-    def __len__(self):
-        return int(math.ceil(int(math.ceil(self.sample_nums / \
-                   hps.batch_size)) / self.group_size)) * hps.batch_size
-
-    def sort_and_pad(self, meta):
-        ''' pad text sequence and mel spectrogram'''
-        text = meta['text']
-        mel = meta['mel']
-
-        text_len = len(text)
-
-        input_lengths = np.array(text_len, np.int32)
-
-        n_mels, n_frames = mel.shape
-
-        max_input_len = self.max_text_len
-
-        text_padded = np.ones((max_input_len), np.int32)
-        text_padded[:text_len] = text
-
-        max_target_len = self.max_mel_len
-
-        mel_padded = np.zeros((n_mels, max_target_len), np.float32)
-        mel_padded[:, :n_frames] = mel
-        gate_padded = np.zeros((max_target_len), np.float32)
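-        # stop-token target: 1 from the last real mel frame onward, 0 before it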
-        gate_padded[n_frames - 1:] = 1
-
-        text_mask = np.zeros((max_input_len)).astype(np.bool_)
-        text_mask[:text_len] = True
-        mel_mask = np.zeros((max_target_len)).astype(np.bool_)
-        mel_mask[:n_frames] = True
-        mel_mask = np.expand_dims(mel_mask, 0).repeat(80, 0)
-
-        rnn_mask = np.zeros((max_input_len)).astype(np.bool_)
-        rnn_mask[:text_len] = True
-        rnn_mask = np.expand_dims(rnn_mask, 1).repeat(512, 1)
-
-        return text_padded, input_lengths, mel_padded, gate_padded, text_mask, mel_mask, rnn_mask
diff --git a/research/audio/tacotron2/src/hparams.py b/research/audio/tacotron2/src/hparams.py
deleted file mode 100644
index fc31910284b865007ca4fde4cdac3fe2e4bb4a7d..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/hparams.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-''' configs file '''
-from src.text import symbols
-
-
-class hparams:
-    ''' configs '''
-    text_cleaners = ['english_cleaners']
-
-    # Preprocessing parameters
-    num_mels = 80
-    num_freq = 513
-    sample_rate = 22050
-    frame_length_ms = 50
-    frame_shift_ms = 12.5
-    preemphasis = 0.85
-    min_level_db = -100
-    ref_level_db = 20
-    power = 1.5
-    gl_iters = 100
-    fmin = 125
-    fmax = 7600
-    hop_length = 256
-    win_length = 1024
-
-    # Model Parameters
-
-    symbols_embedding_dim = 512
-
-    # Encoder parameters
-    encoder_kernel_size = 5
-    encoder_n_convolutions = 3
-    encoder_embedding_dim = 512
-
-    # Decoder parameters
-    n_frames_per_step = 3
-    decoder_rnn_dim = 1024
-    prenet_dim = 256
-    max_decoder_steps = 1000
-    gate_threshold = 0.5
-    p_attention_dropout = 0.1
-    p_decoder_dropout = 0.1
-
-    # Attention parameters
-    attention_rnn_dim = 1024
-    attention_dim = 256
-
-    # Location Layer parameters
-    attention_location_n_filters = 32
-    attention_location_kernel_size = 31
-
-    # Mel-post processing network parameters
-    postnet_embedding_dim = 512
-    postnet_kernel_size = 5
-    postnet_n_convolutions = 5
-
-    lr = 0.002
-    epoch_num = 2000
-    batch_size = 16
-    test_batch_size = 1
-    mask_padding = True
-    p = 10  # mel spec loss penalty
-
-    max_text_len = 189
-    n_symbols = len(symbols)
diff --git a/research/audio/tacotron2/src/rnn_cells.py b/research/audio/tacotron2/src/rnn_cells.py
deleted file mode 100644
index 048a94a464036e9a585b66356beca7aadd520045..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/rnn_cells.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-''' basic rnn cells '''
-import math
-
-import numpy as np
-import mindspore
-import mindspore.nn as nn
-import mindspore.ops as P
-from mindspore import Tensor, Parameter
-from mindspore.common.initializer import initializer, Uniform
-
-
-def rnn_tanh_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
-    '''rnn tanh cell '''
-    if b_ih is None:
-        igates = P.MatMul(False, True)(inputs, w_ih)
-        hgates = P.MatMul(False, True)(hidden, w_hh)
-    else:
-        igates = P.MatMul(False, True)(inputs, w_ih) + b_ih
-        hgates = P.MatMul(False, True)(hidden, w_hh) + b_hh
-    return P.Tanh()(igates + hgates)
-
-
-def rnn_relu_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
-    '''rnn relu cell '''
-    if b_ih is None:
-        igates = P.MatMul(False, True)(inputs, w_ih)
-        hgates = P.MatMul(False, True)(hidden, w_hh)
-    else:
-        igates = P.MatMul(False, True)(inputs, w_ih) + b_ih
-        hgates = P.MatMul(False, True)(hidden, w_hh) + b_hh
-    return P.ReLU()(igates + hgates)
-
-
-class LSTMCell(nn.Cell):
-    '''lstm cell '''
-    def __init__(self):
-        super(LSTMCell, self).__init__()
-        self.matmul = P.MatMul(False, True)
-        self.split = P.Split(1, 4)
-        self.cast = P.Cast()
-        self.tanh = P.Tanh()
-        self.sigmoid = P.Sigmoid()
-
-    def construct(self, inputs, hidden, w_ih, w_hh, b_ih, b_hh):
-        ''' lstm '''
-        hx, cx = hidden
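-        # cast activations and weights to float16 so the matmuls below run in half precision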
-        inputs = self.cast(inputs, mindspore.float16)
-        hx = self.cast(hx, mindspore.float16)
-        cx = self.cast(cx, mindspore.float16)
-        w_ih = self.cast(w_ih, mindspore.float16)
-        w_hh = self.cast(w_hh, mindspore.float16)
-        b_ih = self.cast(b_ih, mindspore.float16)
-        b_hh = self.cast(b_hh, mindspore.float16)
-        if b_ih is None:
-            gates = self.matmul(inputs, w_ih) + self.matmul(hx, w_hh)
-        else:
-            gates = self.matmul(inputs, w_ih) + \
-                self.matmul(hx, w_hh) + b_ih + b_hh
-        gates = self.cast(gates, mindspore.float32)
-        ingate, forgetgate, cellgate, outgate = self.split(gates)
-
-        ingate = self.sigmoid(ingate)
-        forgetgate = self.sigmoid(forgetgate)
-        cellgate = self.tanh(cellgate)
-        outgate = self.sigmoid(outgate)
-
-        cy = (forgetgate * cx) + (ingate * cellgate)
-        hy = outgate * self.tanh(cy)
-        return hy, cy
-
-
-def lstm_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
-    ''' lstm cell '''
-    hx, cx = hidden
-
-    if b_ih is None:
-        gates = P.MatMul(False, True)(inputs, w_ih) + \
-            P.MatMul(False, True)(hx, w_hh)
-    else:
-        gates = P.MatMul(False, True)(inputs, w_ih) + \
-            P.MatMul(False, True)(hx, w_hh) + b_ih + b_hh
-
-    ingate, forgetgate, cellgate, outgate = P.Split(1, 4)(gates)
-
-    ingate = P.Sigmoid()(ingate)
-    forgetgate = P.Sigmoid()(forgetgate)
-    cellgate = P.Tanh()(cellgate)
-    outgate = P.Sigmoid()(outgate)
-
-    cy = (forgetgate * cx) + (ingate * cellgate)
-    hy = outgate * P.Tanh()(cy)
-
-    return hy, cy
-
-
-def gru_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
-    ''' gru cell '''
-    if b_ih is None:
-        gi = P.MatMul(False, True)(inputs, w_ih)
-        gh = P.MatMul(False, True)(hidden, w_hh)
-    else:
-        gi = P.MatMul(False, True)(inputs, w_ih) + b_ih
-        gh = P.MatMul(False, True)(hidden, w_hh) + b_hh
-    i_r, i_i, i_n = P.Split(1, 3)(gi)
-    h_r, h_i, h_n = P.Split(1, 3)(gh)
-
-    resetgate = P.Sigmoid()(i_r + h_r)
-    inputgate = P.Sigmoid()(i_i + h_i)
-    newgate = P.Tanh()(i_n + resetgate * h_n)
-    hy = newgate + inputgate * (hidden - newgate)
-
-    return hy
-
-
-class RNNCellBase(nn.Cell):
-    ''' rnn cell base '''
-    def __init__(
-            self,
-            input_size: int,
-            hidden_size: int,
-            bias: bool,
-            num_chunks: int):
-        super().__init__()
-        self.input_size = input_size
-        self.hidden_size = hidden_size
-        self.bias = bias
-        self.weight_ih = Parameter(
-            Tensor(
-                np.random.randn(
-                    num_chunks *
-                    hidden_size,
-                    input_size).astype(
-                        np.float32)))
-        self.weight_hh = Parameter(
-            Tensor(
-                np.random.randn(
-                    num_chunks *
-                    hidden_size,
-                    hidden_size).astype(
-                        np.float32)))
-        if bias:
-            self.bias_ih = Parameter(
-                Tensor(
-                    np.random.randn(
-                        num_chunks *
-                        hidden_size).astype(
-                            np.float32)))
-            self.bias_hh = Parameter(
-                Tensor(
-                    np.random.randn(
-                        num_chunks *
-                        hidden_size).astype(
-                            np.float32)))
-        self.reset_parameters()
-
-    def reset_parameters(self):
-        ''' init '''
-        stdv = 1 / math.sqrt(self.hidden_size)
-        for weight in self.get_parameters():
-            weight.set_data(initializer(Uniform(stdv), weight.shape))
-
-
-class RNNCell(RNNCellBase):
-    ''' rnn cell '''
-    _non_linearity = ['tanh', 'relu']
-
-    def __init__(
-            self,
-            input_size: int,
-            hidden_size: int,
-            bias: bool = True,
-            nonlinearity: str = "tanh"):
-        super().__init__(input_size, hidden_size, bias, num_chunks=1)
-        if nonlinearity not in self._non_linearity:
-            raise ValueError(
-                "Unknown nonlinearity: {}".format(
-                    nonlinearity))
-        self.nonlinearity = nonlinearity
-
-    def construct(self, inputs, hx):
-        ''' rnn cell '''
-        if self.nonlinearity == "tanh":
-            ret = rnn_tanh_cell(
-                inputs,
-                hx,
-                self.weight_ih,
-                self.weight_hh,
-                self.bias_ih,
-                self.bias_hh)
-        else:
-            ret = rnn_relu_cell(
-                inputs,
-                hx,
-                self.weight_ih,
-                self.weight_hh,
-                self.bias_ih,
-                self.bias_hh)
-        return ret
diff --git a/research/audio/tacotron2/src/rnns.py b/research/audio/tacotron2/src/rnns.py
deleted file mode 100644
index 9f837b85955652257cec5f2164b2db6b6228c963..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/rnns.py
+++ /dev/null
@@ -1,404 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-''' implement dynamic rnn'''
-import math
-import numpy as np
-import mindspore.nn as nn
-import mindspore.ops as P
-from mindspore import Tensor, Parameter, ParameterTuple
-from mindspore.common.initializer import initializer, Uniform
-from mindspore import log as logger
-from mindspore.ops.primitive import constexpr
-from src.rnn_cells import rnn_relu_cell, rnn_tanh_cell, gru_cell
-from src.rnn_cells import LSTMCell
-
-
-@constexpr
-def _init_state(shape, dtype, is_lstm):
-    ''' init state '''
-    hx = Tensor(np.zeros(shape), dtype)
-
-    if is_lstm:
-        ret = (hx, hx)
-    else:
-        ret = hx
-    return ret
-
-class DynamicRNN(nn.Cell):
-    ''' dynamic rnn '''
-
-    def __init__(self, mode):
-        super(DynamicRNN, self).__init__()
-        if mode == "RNN_RELU":
-            cell = rnn_relu_cell
-        elif mode == "RNN_TANH":
-            cell = rnn_tanh_cell
-        elif mode == "LSTM":
-            cell = LSTMCell()
-        elif mode == "GRU":
-            cell = gru_cell
-        else:
-            raise ValueError("Unrecognized RNN mode: " + mode)
-        self.cell = cell
-        self.is_lstm = mode == "LSTM"
-        self.concat_len = 50
-        self.pack = P.Stack()
-        self.concat = P.Concat()
-        self.squeeze = P.Squeeze()
-
-    def pack_list(self, alignments):
-        ''' pack tensor list '''
-        align_tuple = ()
-        n_frames = len(alignments)
-        for i in range(n_frames // self.concat_len):
-            start = i * self.concat_len
-            end = (i + 1) * self.concat_len
-            alignment = self.pack(alignments[start: end])
-            align_tuple += (alignment,)
-        if n_frames % self.concat_len != 0:
-            start = n_frames // self.concat_len * self.concat_len
-            alignment = self.pack(alignments[start:])
-            align_tuple += (alignment,)
-        alignments = self.concat(align_tuple)
-        return alignments
-
-    def recurrent(self, x, hidden, w_ih, w_hh, b_ih, b_hh):
-        ''' static rnn '''
-        time_step = range(x.shape[0])
-        outputs = ()
-        for t in time_step:
-            hidden = self.cell(x[t], hidden, w_ih, w_hh, b_ih, b_hh)
-            if self.is_lstm:
-                outputs += (hidden[0],)
-            else:
-                outputs += (hidden,)
-        outputs = self.pack_list(outputs)
-        return outputs, hidden
-
-    def variable_recurrent(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):
-        ''' dynamic rnn '''
-        time_step = range(x.shape[0])
-
-        h_t = h
-        if self.is_lstm:
-            hidden_size = h[0].shape[-1]
-            zero_output = P.ZerosLike()(h_t[0])
-        else:
-            hidden_size = h.shape[-1]
-            zero_output = P.ZerosLike()(h_t)
-
-        seq_length = P.BroadcastTo((hidden_size, -1))(seq_length)
-        seq_length = P.Transpose()(seq_length, (1, 0))
-
-        outputs = ()
-        state_t = h_t
-        for t in time_step:
-            h_t = self.cell(self.squeeze(
-                x[t:t + 1]), state_t, w_ih, w_hh, b_ih, b_hh)
-
-            seq_cond = seq_length > t
-
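-            # beyond a sample's true sequence length, keep its previous state and output zeros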
-            if self.is_lstm:
-                state_t_0 = P.Select()(seq_cond, h_t[0], state_t[0])
-                state_t_1 = P.Select()(seq_cond, h_t[1], state_t[1])
-                output = P.Select()(seq_cond, h_t[0], zero_output)
-                state_t = (state_t_0, state_t_1)
-            else:
-                state_t = P.Select()(seq_cond, h_t, state_t)
-                output = P.Select()(seq_cond, h_t, zero_output)
-            outputs += (output,)
-
-        outputs = self.pack_list(outputs)
-
-        return outputs, state_t
-
-    def construct(self, x, h, seq_length, w_ih, w_hh, b_ih, b_hh):
-        ''' rnn cells'''
-        if seq_length is None:
-            res = self.recurrent(x, h, w_ih, w_hh, b_ih, b_hh)
-        else:
-            res = self.variable_recurrent(
-                x, h, seq_length, w_ih, w_hh, b_ih, b_hh)
-        return res
-
-
-class RNNBase(nn.Cell):
-    ''' rnn base '''
-
-    def __init__(
-            self,
-            mode,
-            input_size,
-            hidden_size,
-            num_layers=1,
-            has_bias=True,
-            batch_first=False,
-            dropout=0,
-            bidirectional=False):
-        super(RNNBase, self).__init__()
-        if not 0 <= dropout <= 1:
-            raise ValueError(
-                "dropout should be a number in range [0, 1] "
-                "representing the probability of an element being "
-                "zeroed")
-
-        if dropout > 0 and num_layers == 1:
-            logger.warning("dropout option adds dropout after all but last "
-                           "recurrent layer, so non-zero dropout expects "
-                           "num_layers greater than 1, but got dropout={} and "
-                           "num_layers={}".format(dropout, num_layers))
-        if mode == "LSTM":
-            gate_size = 4 * hidden_size
-        elif mode == "GRU":
-            gate_size = 3 * hidden_size
-        elif mode == "RNN_TANH":
-            gate_size = hidden_size
-        elif mode == "RNN_RELU":
-            gate_size = hidden_size
-        else:
-            raise ValueError("Unrecognized RNN mode: " + mode)
-
-        self.hidden_size = hidden_size
-        self.batch_first = batch_first
-        self.num_layers = num_layers
-        self.dropout = dropout
-        self.dropout_op = nn.Dropout(float(1 - dropout))
-        self.bidirectional = bidirectional
-        self.has_bias = has_bias
-        self.rnn = DynamicRNN(mode)
-        self.squeeze = P.Squeeze()
-        num_directions = 2 if bidirectional else 1
-        self.is_lstm = mode == "LSTM"
-
-        self.expand_dims = P.ExpandDims()
-        self._all_weights = []
-        self.w_list = []
-        self.b_list = []
-
-        for layer in range(num_layers):
-            for direction in range(num_directions):
-                layer_input_size = input_size if layer == 0 else hidden_size * num_directions
-                suffix = '_reverse' if direction == 1 else ''
-                offset = 2**(num_directions) * layer + 2 * direction
-                w_ih = Parameter(
-                    Tensor(
-                        np.random.randn(
-                            gate_size, layer_input_size).astype(
-                                np.float32)), name='weight_ih_l{}{}'.format(
-                                    layer, suffix))
-                w_hh = Parameter(
-                    Tensor(
-                        np.random.randn(
-                            gate_size, hidden_size).astype(
-                                np.float32)), name='weight_hh_l{}{}'.format(
-                                    layer, suffix))
-                self.w_list.append(w_ih)
-                self.w_list.append(w_hh)
-                if has_bias:
-                    b_ih = Parameter(
-                        Tensor(
-                            np.random.randn(gate_size).astype(
-                                np.float32)), name='bias_ih_l{}{}'.format(
-                                    layer, suffix))
-                    b_hh = Parameter(
-                        Tensor(
-                            np.random.randn(gate_size).astype(
-                                np.float32)), name='bias_hh_l{}{}'.format(
-                                    layer, suffix))
-                    self.b_list.append(b_ih)
-                    self.b_list.append(b_hh)
-                    layer_params = (self.w_list[offset],
-                                    self.w_list[offset + 1],
-                                    self.b_list[offset],
-                                    self.b_list[offset + 1])
-
-                else:
-                    layer_params = (
-                        self.w_list[offset], self.w_list[offset + 1])
-
-                self._all_weights.append(ParameterTuple(layer_params))
-        self.w_list = ParameterTuple(self.w_list)
-        self.b_list = ParameterTuple(self.b_list)
-        self.reset_parameters()
-
-    def reset_parameters(self):
-        ''' init parameters '''
-        stdv = 1 / math.sqrt(self.hidden_size)
-        for weight in self.get_parameters():
-            weight.set_data(initializer(Uniform(stdv), weight.shape))
-
-    def _stacked_bi_dynamic_rnn(self, x, h, seq_length, weights):
-        """stacked bidirectional dynamic_rnn"""
-        pre_layer = x
-        h_n = ()
-        c_n = ()
-        output = 0
-        hidden, cell = h
-        for i in range(self.num_layers):
-            offset = i * 2
-            if self.has_bias:
-                w_f_ih, w_f_hh, b_f_ih, b_f_hh = weights[offset]
-                w_b_ih, w_b_hh, b_b_ih, b_b_hh = weights[offset + 1]
-            else:
-                w_f_ih, w_f_hh = weights[offset]
-                w_b_ih, w_b_hh = weights[offset + 1]
-                b_f_ih, b_f_hh, b_b_ih, b_b_hh = None, None, None, None
-            if self.is_lstm:
-                h_f_i = (self.squeeze(
-                    hidden[offset:offset + 1]), self.squeeze(cell[offset:offset + 1]))
-                h_b_i = (self.squeeze(
-                    hidden[offset + 1:]), self.squeeze(cell[offset + 1:]))
-            else:
-                h_f_i = self.squeeze(h[offset:offset + 1])
-                h_b_i = self.squeeze(h[offset + 1:])
-
-            if len(h_f_i[0].shape) <= 1:
-                h_f_i = (
-                    self.expand_dims(
-                        h_f_i[0], 0), self.expand_dims(
-                            h_f_i[1], 0))
-                h_b_i = (
-                    self.expand_dims(
-                        h_b_i[0], 0), self.expand_dims(
-                            h_b_i[1], 0))
-            if seq_length is None:
-                x_b = P.ReverseV2([0])(pre_layer)
-            else:
-                x_b = P.ReverseSequence(0, 1)(pre_layer, seq_length)
-            output_f, h_t_f = self.rnn(
-                pre_layer, h_f_i, seq_length, w_f_ih, w_f_hh, b_f_ih, b_f_hh)
-            output_b, h_t_b = self.rnn(
-                x_b, h_b_i, seq_length, w_b_ih, w_b_hh, b_b_ih, b_b_hh)
-            hidden_f, cell_f = h_t_f
-            hidden_b, cell_b = h_t_b
-            if seq_length is None:
-                output_b = P.ReverseV2([0])(output_b)
-            else:
-                output_b = P.ReverseSequence(0, 1)(output_b, seq_length)
-            output = P.Concat(2)((output_f, output_b))
-            pre_layer = self.dropout_op(output) if (
-                self.dropout != 0 and i < self.num_layers - 1) else output
-            if self.is_lstm:
-                h_n += (hidden_f, hidden_b,)
-                c_n += (cell_f, cell_b,)
-            else:
-                h_n += (h_t_f, h_t_b,)
-        if self.is_lstm:
-
-            h_n = P.Concat(0)(h_n)
-            c_n = P.Concat(0)(c_n)
-            h_n = h_n.view(hidden.shape)
-            c_n = c_n.view(cell.shape)
-
-            return output, (h_n.view(hidden.shape), c_n.view(cell.shape))
-
-        h_n = P.Concat(0)(h_n)
-        return output, h_n.view(h.shape)
-
-    def _stacked_dynamic_rnn(self, x, h, seq_length, weights):
-        """stacked mutil_layer dynamic_rnn"""
-        pre_layer = x
-        h_n = ()
-        c_n = ()
-        output = 0
-        for i in range(self.num_layers):
-            if self.has_bias:
-                w_ih, w_hh, b_ih, b_hh = weights[i]
-            else:
-                w_ih, w_hh = weights[i]
-                b_ih, b_hh = None, None
-            if self.is_lstm:
-                h_i = (h[0][i], h[1][i])
-            else:
-                h_i = h[i]
-
-            output, h_t = self.rnn(
-                pre_layer, h_i, seq_length, w_ih, w_hh, b_ih, b_hh)
-            pre_layer = self.dropout_op(output) if (
-                self.dropout != 0 and i < self.num_layers - 1) else output
-            if self.is_lstm:
-                h_n += (h_t[0],)
-                c_n += (h_t[1],)
-            else:
-                h_n += (h_t,)
-        if self.is_lstm:
-            h_n = P.Concat(0)(h_n)
-            c_n = P.Concat(0)(c_n)
-            h_n = h_n.view(h[0].shape)
-            c_n = c_n.view(h[1].shape)
-            return output, (h_n.view(h[0].shape), c_n.view(h[1].shape))
-
-        h_n = P.Concat(0)(h_n)
-        return output, h_n.view(h.shape)
-
-    def construct(self, x, h=None, seq_length=None):
-        ''' rnns '''
-        max_batch_size = x.shape[0] if self.batch_first else x.shape[1]
-        num_directions = 2 if self.bidirectional else 1
-        if h is None:
-            h = _init_state(
-                (self.num_layers *
-                 num_directions,
-                 max_batch_size,
-                 self.hidden_size),
-                x.dtype,
-                self.is_lstm)
-        if self.batch_first:
-            x = P.Transpose()(x, (1, 0, 2))
-        if self.bidirectional:
-            x, h = self._stacked_bi_dynamic_rnn(
-                x, h, seq_length, self._all_weights)
-        else:
-            x, h = self._stacked_dynamic_rnn(
-                x, h, seq_length, self._all_weights)
-        if self.batch_first:
-            x = P.Transpose()(x, (1, 0, 2))
-        return x, h
-
-
-class RNN(RNNBase):
-    ''' RNN '''
-
-    def __init__(self, *args, **kwargs):
-        if 'nonlinearity' in kwargs:
-            if kwargs['nonlinearity'] == 'tanh':
-                mode = 'RNN_TANH'
-            elif kwargs['nonlinearity'] == 'relu':
-                mode = 'RNN_RELU'
-            else:
-                raise ValueError("Unknown nonlinearity '{}'".format(
-                    kwargs['nonlinearity']))
-            del kwargs['nonlinearity']
-        else:
-            mode = 'RNN_TANH'
-
-        super(RNN, self).__init__(mode, *args, **kwargs)
-
-
-class GRU(RNNBase):
-    ''' GRU '''
-
-    def __init__(self, *args, **kwargs):
-        mode = 'GRU'
-        super(GRU, self).__init__(mode, *args, **kwargs)
-
-
-class LSTM(RNNBase):
-    ''' LSTM '''
-
-    def __init__(self, *args, **kwargs):
-        mode = 'LSTM'
-        super(LSTM, self).__init__(mode, *args, **kwargs)
-        self.support_non_tensor_inputs = True
diff --git a/research/audio/tacotron2/src/tacotron2.py b/research/audio/tacotron2/src/tacotron2.py
deleted file mode 100644
index 9ca043b8356df557ddc2ce45de2c0cce258a1405..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/tacotron2.py
+++ /dev/null
@@ -1,1229 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''tacotron2 model '''
-import math
-
-import numpy as np
-import mindspore
-from mindspore import nn
-from mindspore.ops import operations as P
-from mindspore.ops import composite as C
-from mindspore.ops import functional as F
-from mindspore import context
-from mindspore.context import ParallelMode
-from mindspore.communication.management import get_group_size
-from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
-from mindspore.parallel._utils import  _get_gradients_mean
-from mindspore import Parameter, Tensor
-
-from src.rnns import LSTM
-from src.hparams import hparams as hps
-
-
-gain = {'linear': 1, 'sigmoid': 1, 'tanh': 5 / 3, 'relu': math.sqrt(2)}
-
-
-class LinearNorm(nn.Cell):
-    '''linear layer'''
-    def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
-        super(LinearNorm, self).__init__()
-
-        w_init = mindspore.common.initializer.XavierUniform(
-            gain=gain[w_init_gain])
-        self.linear_layer = nn.Dense(
-            in_dim,
-            out_dim,
-            has_bias=bias,
-            weight_init=w_init).to_float(
-                mindspore.float16)
-
-        self.cast = P.Cast()
-
-    def construct(self, x):
-        ''' construct '''
-        return self.cast(self.linear_layer(x), mindspore.float32)
-
-
-class ConvNorm(nn.Cell):
-    '''conv1d layer'''
-    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
-                 padding=None, dilation=1, bias=True, w_init_gain='linear'):
-        super(ConvNorm, self).__init__()
-        if padding is None:
-            padding = int(dilation * (kernel_size - 1) / 2)
-
-        w_init = mindspore.common.initializer.XavierUniform(
-            gain=gain[w_init_gain])
-        self.conv = nn.Conv1d(
-            in_channels,
-            out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            pad_mode='pad',
-            padding=padding,
-            dilation=dilation,
-            has_bias=bias,
-            weight_init=w_init)
-        self.cast = P.Cast()
-
-    def construct(self, signal):
-        ''' construct '''
-        conv_signal = self.conv(signal)
-
-        return conv_signal
-
-
-class Tacotron2Loss(nn.Cell):
-    ''' tacotron loss calculate '''
-    def __init__(self):
-        super(Tacotron2Loss, self).__init__()
-        self.reshape = P.Reshape()
-        self.mse = nn.MSELoss(reduction='mean')
-        self.bce = nn.BCEWithLogitsLoss(reduction='mean')
-
-        self.get_shape = P.Shape()
-        self.n_frames_per_step = hps.n_frames_per_step
-        self.p = hps.p
-
-    def construct(self, model_output, targets):
-        ''' construct '''
-        mel_target, gate_target = targets[0], targets[1]
-        mel_out, mel_out_postnet, gate_out, _ = model_output
-
-        gate_target = self.reshape(
-            gate_target[:, ::self.n_frames_per_step], (-1, 1))
-        gate_out = self.reshape(gate_out, (-1, 1))
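-        # the factor p up-weights the mel-spectrogram MSE relative to the gate BCE loss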
-        mel_loss = self.mse(self.p * mel_out, self.p * mel_target) + \
-            self.mse(self.p * mel_out_postnet, self.p * mel_target)
-        gate_loss = self.bce(gate_out, gate_target)
-        return mel_loss + gate_loss
-
-
-class LocationLayer(nn.Cell):
-    ''' location layer '''
-    def __init__(self, attention_n_filters, attention_kernel_size,
-                 attention_dim):
-        super(LocationLayer, self).__init__()
-        padding = int((attention_kernel_size - 1) / 2)
-        self.location_conv = ConvNorm(2, attention_n_filters,
-                                      kernel_size=attention_kernel_size,
-                                      padding=padding, bias=False, stride=1,
-                                      dilation=1)
-        self.location_dense = LinearNorm(attention_n_filters, attention_dim,
-                                         bias=False, w_init_gain='tanh')
-        self.transpose = P.Transpose()
-
-    def construct(self, attention_weights_cat):
-        ''' construct '''
-        processed_attention = self.location_conv(attention_weights_cat)
-        processed_attention = self.transpose(processed_attention, (0, 2, 1))
-        processed_attention = self.location_dense(processed_attention)
-        return processed_attention
-
-
-class Attention(nn.Cell):
-    '''attention layer '''
-    def __init__(
-            self,
-            memory_layer,
-            attention_rnn_dim,
-            embedding_dim,
-            attention_dim,
-            attention_location_n_filters,
-            attention_location_kernel_size):
-        super(Attention, self).__init__()
-        self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
-                                      bias=False, w_init_gain='tanh')
-        self.memory_layer = memory_layer
-
-        self.v = LinearNorm(attention_dim, 1, bias=False)
-        self.location_layer = LocationLayer(
-            attention_location_n_filters,
-            attention_location_kernel_size,
-            attention_dim)
-
-        self.expand_dims = P.ExpandDims()
-        self.tanh = P.Tanh()
-        self.reshape = P.Reshape()
-        self.squeeze = P.Squeeze(-1)
-        self.softmax = P.Softmax(-1)
-        self.bmm = P.BatchMatMul()
-        self.squeeze_ = P.Squeeze(1)
-        self.select = P.Select()
-        self.fill = P.Fill()
-        self.get_shape = P.Shape()
-        self.score_values = -float('inf')
-
-    def get_alignment_energies(self, query, processed_memory,
-                               attention_weights_cat):
-        '''get alignment '''
-        processed_query = self.expand_dims(self.query_layer(query), 1)
-
-        processed_attention_weights = self.location_layer(
-            attention_weights_cat)
-        processed_attention = self.tanh(
-            processed_query +
-            processed_attention_weights +
-            processed_memory)
-        energies = self.v(processed_attention)
-        energies = self.squeeze(energies)
-        return energies
-
-    def construct(
-            self,
-            attention_hidden_state,
-            memory,
-            processed_memory,
-            attention_weights_cat,
-            mask=None):
-        ''' construct '''
-        alignment = self.get_alignment_energies(
-            attention_hidden_state, processed_memory, attention_weights_cat)
-
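-        # padded positions receive -inf energy so softmax assigns them zero attention weight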
-        if mask is not None:
-            alignment = self.select(
-                mask,
-                alignment,
-                self.fill(
-                    mindspore.float32,
-                    self.get_shape(mask),
-                    self.score_values))
-
-        attention_weights = self.softmax(alignment)
-        attention_context = self.bmm(
-            self.expand_dims(
-                attention_weights, 1), memory)
-        attention_context = self.squeeze_(attention_context)
-        return attention_context, attention_weights
-
-    def inference(
-            self,
-            attention_hidden_state,
-            memory,
-            processed_memory,
-            attention_weights_cat,
-            mask=None):
-        ''' inference '''
-        alignment = self.get_alignment_energies(
-            attention_hidden_state, processed_memory, attention_weights_cat)
-
-        attention_weights = self.softmax(alignment)
-        attention_context = self.bmm(
-            self.expand_dims(
-                attention_weights, 1), memory)
-        attention_context = self.squeeze_(attention_context)
-        return attention_context, attention_weights
-
-class Prenet(nn.Cell):
-    ''' prenet '''
-    def __init__(self, in_dim, sizes):
-        super(Prenet, self).__init__()
-        in_sizes = [in_dim] + sizes[:-1]
-        layers = [nn.SequentialCell([LinearNorm(in_size, out_size, bias=False)])
-                  for (in_size, out_size) in zip(in_sizes, sizes)]
-        self.relu = nn.ReLU()
-        self.dropout = nn.Dropout(keep_prob=0.5)
-        self.size = sizes[-1]
-        self.layers = nn.CellList(layers)
-
-    def construct(self, x):
-        ''' construct '''
-        for linear in self.layers:
-            x = self.dropout(self.relu(linear(x)))
-        return x
-
-
-class Postnet(nn.Cell):
-    ''' postnet '''
-    def __init__(self):
-        super(Postnet, self).__init__()
-        conv_layer = []
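-        # ExpandDims/Squeeze wrap BatchNorm2d so it behaves like a 1D batch norm over the conv outputs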
-        conv_layer.extend(nn.SequentialCell([
-            ConvNorm(hps.num_mels, hps.postnet_embedding_dim,
-                     kernel_size=hps.postnet_kernel_size, stride=1,
-                     padding=int((hps.postnet_kernel_size - 1) / 2),
-                     dilation=1, w_init_gain='tanh'),
-            ExpandDims(),
-            nn.BatchNorm2d(hps.postnet_embedding_dim),
-            Squeeze(),
-            nn.Tanh(),
-            nn.Dropout(keep_prob=0.5)
-        ]))
-
-        for _ in range(1, hps.postnet_n_convolutions - 1):
-            conv_layer.extend(nn.SequentialCell([
-                ConvNorm(hps.postnet_embedding_dim,
-                         hps.postnet_embedding_dim,
-                         kernel_size=hps.postnet_kernel_size, stride=1,
-                         padding=int((hps.postnet_kernel_size - 1) / 2),
-                         dilation=1, w_init_gain='tanh'),
-                ExpandDims(),
-                nn.BatchNorm2d(hps.postnet_embedding_dim),
-                Squeeze(),
-                nn.Tanh(),
-                nn.Dropout(keep_prob=0.5)]))
-
-        conv_layer.extend(
-            nn.SequentialCell(
-                [
-                    ConvNorm(
-                        hps.postnet_embedding_dim,
-                        hps.num_mels,
-                        kernel_size=hps.postnet_kernel_size,
-                        stride=1,
-                        padding=int(
-                            (hps.postnet_kernel_size - 1) / 2),
-                        dilation=1,
-                        w_init_gain='linear'),
-                    ExpandDims(),
-                    nn.BatchNorm2d(
-                        hps.num_mels),
-                    Squeeze(),
-                    nn.Dropout(
-                        keep_prob=0.5)]))
-        self.convolutions = nn.CellList(conv_layer)
-
-    def construct(self, x):
-        ''' construct '''
-        for i in range(len(self.convolutions)):
-            x = self.convolutions[i](x)
-        return x
-
-    def inference(self, x):
-        '''inference '''
-        for i in range(len(self.convolutions)):
-            x = self.convolutions[i](x)
-        return x
-
-
-class ExpandDims(nn.Cell):
-    '''expand dim'''
-    def __init__(self):
-        super(ExpandDims, self).__init__()
-        self.expand_dim = P.ExpandDims()
-
-    def construct(self, x):
-        ''' construct '''
-        return self.expand_dim(x, -1)
-
-
-class Squeeze(nn.Cell):
-    ''' squeeze dim '''
-    def __init__(self):
-        super(Squeeze, self).__init__()
-        self.squeeze = P.Squeeze(-1)
-
-    def construct(self, x):
-        ''' construct '''
-        return self.squeeze(x)
-
-
-class Encoder(nn.Cell):
-    ''' encoder '''
-    def __init__(self):
-        super(Encoder, self).__init__()
-
-        conv_layer = []
-        for _ in range(hps.encoder_n_convolutions):
-            conv_layer.extend(nn.SequentialCell([
-                ConvNorm(hps.encoder_embedding_dim,
-                         hps.encoder_embedding_dim,
-                         kernel_size=hps.encoder_kernel_size, stride=1,
-                         padding=int((hps.encoder_kernel_size - 1) / 2),
-                         dilation=1, w_init_gain='relu'),
-                ExpandDims(),
-                nn.BatchNorm2d(hps.encoder_embedding_dim),
-                Squeeze(),
-                nn.ReLU(),
-                nn.Dropout(keep_prob=0.5)]))
-
-        self.convolutions = nn.CellList(conv_layer)
-
-        self.lstm = LSTM(
-            input_size=hps.encoder_embedding_dim,
-            hidden_size=int(
-                hps.encoder_embedding_dim / 2),
-            num_layers=1,
-            batch_first=True,
-            bidirectional=True)
-        self.transpose = P.Transpose()
-        self.cast = P.Cast()
-        self.h, self.c = self.lstm_default_state(hps.batch_size, int(
-            hps.encoder_embedding_dim / 2), 1, bidirectional=True)
-        self.h_test, self.c_test = self.lstm_default_state(
-            hps.test_batch_size, int(
-                hps.encoder_embedding_dim / 2), 1, bidirectional=True)
-        self.fullzeros = Tensor(
-            np.zeros(
-                (hps.batch_size,
-                 hps.max_text_len,
-                 512)),
-            mindspore.float32)
-        self.select = P.Select()
-
-    def lstm_default_state(
-            self,
-            batch_size,
-            hidden_size,
-            num_layers,
-            bidirectional):
-        ''' init lstm '''
-        num_directions = 2 if bidirectional else 1
-        h = Tensor(
-            np.zeros(
-                (num_layers *
-                 num_directions,
-                 batch_size,
-                 hidden_size)),
-            mindspore.float32)
-        c = Tensor(
-            np.zeros(
-                (num_layers *
-                 num_directions,
-                 batch_size,
-                 hidden_size)),
-            mindspore.float32)
-        return h, c
-
-    def construct(self, x, input_length, mask):
-        ''' construct '''
-        for i in range(len(self.convolutions)):
-            x = self.convolutions[i](x)
-        x = self.transpose(x, (0, 2, 1))
-
-        outputs, _ = self.lstm(x, h=(self.h, self.c), seq_length=input_length)
-
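-        # zero out encoder outputs at padded time steps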
-        outputs = self.select(mask, outputs, self.fullzeros)
-
-        outputs = self.cast(outputs, mindspore.float32)
-        return outputs
-
-    def inference(self, x):
-        '''inference '''
-        for layer in self.convolutions:
-            x = layer(x)
-        x = self.transpose(x, (0, 2, 1))
-        outputs, _ = self.lstm(x, h=(self.h_test, self.c_test))
-
-        outputs = self.cast(outputs, mindspore.float32)
-
-        return outputs
-
-
-class LSTMCell(nn.Cell):
-    '''lstm cell '''
-    def __init__(self, input_size, hidden_size):
-        super(LSTMCell, self).__init__()
-        self.hidden_size = hidden_size
-        w_init = mindspore.common.initializer.Uniform(
-            scale=1 / math.sqrt(hidden_size))
-        self.linear1 = nn.Dense(
-            input_size,
-            4 * hidden_size,
-            weight_init=w_init).to_float(
-                mindspore.float16)
-        self.linear2 = nn.Dense(
-            hidden_size,
-            4 * hidden_size,
-            weight_init=w_init).to_float(
-                mindspore.float16)
-
-        self.sigmoid = nn.Sigmoid()
-        self.tanh = nn.Tanh()
-        self.split = P.Split(1, 4)
-        self.cast = P.Cast()
-
-    def construct(self, inputs, hx, cx):
-        ''' construct '''
-        gates = self.cast(self.linear2(hx), mindspore.float32) + \
-            self.cast(self.linear1(inputs), mindspore.float32)
-        ingate, forgetgate, cellgate, outgate = self.split(gates)
-        ingate = self.sigmoid(ingate)
-        forgetgate = self.sigmoid(forgetgate)
-        cellgate = self.tanh(cellgate)
-        outgate = self.sigmoid(outgate)
-
-        cy = (forgetgate * cx) + (ingate * cellgate)
-        hy = outgate * self.tanh(cy)
-        return hy, cy
-
-
-class Decode(nn.Cell):
-    ''' decode at each step '''
-    def __init__(self, memory_layer):
-        super(Decode, self).__init__()
-        self.num_mels = hps.num_mels
-        self.n_frames_per_step = hps.n_frames_per_step
-        self.encoder_embedding_dim = hps.encoder_embedding_dim
-        self.attention_rnn_dim = hps.attention_rnn_dim
-        self.decoder_rnn_dim = hps.decoder_rnn_dim
-        self.prenet_dim = hps.prenet_dim
-        self.max_decoder_steps = hps.max_decoder_steps
-        self.gate_threshold = hps.gate_threshold
-        self.p_attention_dropout = hps.p_attention_dropout
-        self.p_decoder_dropout = hps.p_decoder_dropout
-
-        self.attention_rnn = LSTMCell(
-            hps.prenet_dim + hps.encoder_embedding_dim,
-            hps.attention_rnn_dim)
-
-        self.attention_layer = Attention(
-            memory_layer,
-            hps.attention_rnn_dim, hps.encoder_embedding_dim,
-            hps.attention_dim, hps.attention_location_n_filters,
-            hps.attention_location_kernel_size)
-
-        self.decoder_rnn = LSTMCell(
-            hps.attention_rnn_dim + hps.encoder_embedding_dim,
-            hps.decoder_rnn_dim)
-
-        self.linear_projection = LinearNorm(
-            hps.decoder_rnn_dim + hps.encoder_embedding_dim,
-            hps.num_mels * hps.n_frames_per_step)
-
-        self.gate_layer = LinearNorm(
-            hps.decoder_rnn_dim + hps.encoder_embedding_dim, 1,
-            bias=True, w_init_gain='sigmoid')
-
-        self.dropout_attention = nn.Dropout(
-            keep_prob=1 - self.p_attention_dropout)
-        self.dropout_decoder = nn.Dropout(keep_prob=1 - self.p_decoder_dropout)
-
-        self.concat_ = P.Concat(-1)
-        self.concat_dim1 = P.Concat(axis=1)
-        self.expand_dims = P.ExpandDims()
-        self.squeeze = P.Squeeze()
-        self.squeeze_dim1 = P.Squeeze(1)
-
-    def construct(self, decoder_input, attention_hidden,
-                  attention_cell, attention_weights, attention_weights_cum,
-                  attention_context, memory, processed_memory,
-                  decoder_hidden, decoder_cell, mask):
-        ''' construct '''
-        cell_input = self.concat_((decoder_input, attention_context))
-        attention_hidden, attention_cell = self.attention_rnn(
-            cell_input, attention_hidden, attention_cell)
-
-        attention_hidden = self.dropout_attention(attention_hidden)
-
-        attention_weights_cat = self.concat_dim1(
-            (self.expand_dims(attention_weights, 1),
-             self.expand_dims(attention_weights_cum, 1)))
-
-        attention_context, attention_weights = self.attention_layer(
-            attention_hidden, memory, processed_memory,
-            attention_weights_cat, mask)
-
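-        # accumulate attention weights across decoder steps for location-sensitive attention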
-        attention_weights_cum += attention_weights
-        decoder_input = self.concat_(
-            (attention_hidden, attention_context))
-
-        decoder_hidden, decoder_cell = self.decoder_rnn(
-            decoder_input, decoder_hidden, decoder_cell)
-
-        decoder_hidden = self.dropout_decoder(decoder_hidden)
-
-        decoder_hidden_attention_context = self.concat_dim1(
-            (decoder_hidden, attention_context))
-
-        decoder_output = self.linear_projection(
-            decoder_hidden_attention_context)
-
-        gate_prediction = self.gate_layer(decoder_hidden_attention_context)
-
-        return (
-            decoder_output,
-            gate_prediction,
-            attention_weights,
-            attention_weights_cum,
-            attention_context,
-            decoder_hidden,
-            decoder_cell,
-            attention_hidden,
-            attention_cell)
-
-    def inference(self, decoder_input, attention_hidden,
-                  attention_cell, attention_weights, attention_weights_cum,
-                  attention_context, memory, processed_memory,
-                  decoder_hidden, decoder_cell, mask):
-        ''' inference '''
-        cell_input = self.concat_((decoder_input, attention_context))
-        attention_hidden, attention_cell = self.attention_rnn(
-            cell_input, attention_hidden, attention_cell)
-
-        attention_hidden = self.dropout_attention(attention_hidden)
-
-        attention_weights_cat = self.concat_dim1(
-            (self.expand_dims(attention_weights, 1),
-             self.expand_dims(attention_weights_cum, 1)))
-
-        attention_context, attention_weights = self.attention_layer.inference(
-            attention_hidden, memory, processed_memory,
-            attention_weights_cat, mask)
-
-        attention_weights_cum += attention_weights
-        decoder_input = self.concat_(
-            (attention_hidden, attention_context))
-
-        decoder_hidden, decoder_cell = self.decoder_rnn(
-            decoder_input, decoder_hidden, decoder_cell)
-
-        decoder_hidden = self.dropout_decoder(decoder_hidden)
-
-        decoder_hidden_attention_context = self.concat_dim1(
-            (decoder_hidden, attention_context))
-
-        decoder_output = self.linear_projection(
-            decoder_hidden_attention_context)
-
-        gate_prediction = self.gate_layer(decoder_hidden_attention_context)
-
-        return (
-            decoder_output,
-            gate_prediction,
-            attention_weights,
-            attention_weights_cum,
-            attention_context,
-            decoder_hidden,
-            decoder_cell,
-            attention_hidden,
-            attention_cell)
-
-class Decoder(nn.Cell):
-    ''' decoder '''
-    def __init__(self):
-        super(Decoder, self).__init__()
-
-        self.num_mels = hps.num_mels
-        self.n_frames_per_step = hps.n_frames_per_step
-        self.encoder_embedding_dim = hps.encoder_embedding_dim
-        self.attention_rnn_dim = hps.attention_rnn_dim
-        self.decoder_rnn_dim = hps.decoder_rnn_dim
-        self.prenet_dim = hps.prenet_dim
-        self.max_decoder_steps = hps.max_decoder_steps
-        self.gate_threshold = hps.gate_threshold
-        self.p_attention_dropout = hps.p_attention_dropout
-        self.p_decoder_dropout = hps.p_decoder_dropout
-
-        self.memory_layer = LinearNorm(
-            hps.encoder_embedding_dim,
-            hps.attention_dim,
-            bias=False,
-            w_init_gain='tanh')
-
-        self.prenet = Prenet(
-            hps.num_mels * hps.n_frames_per_step,
-            [hps.prenet_dim, hps.prenet_dim])
-        self.reshape = P.Reshape()
-        self.get_shape = P.Shape()
-        self.transpose = P.Transpose()
-        self.concat = P.Concat()
-        self.concat_ = P.Concat(-1)
-        self.concat_dim1 = P.Concat(axis=1)
-        self.expand_dims = P.ExpandDims()
-        self.squeeze = P.Squeeze()
-        self.squeeze_dim1 = P.Squeeze(1)
-        self.fill = P.Fill()
-        self.zeros = P.Zeros()
-        self.pack = P.Stack()
-        self.decode = Decode(self.memory_layer)
-        self.sigmoid = P.Sigmoid()
-        self.concat_len = 50
-        self.attention_zero_tensor = Tensor(
-            np.zeros((hps.batch_size, self.attention_rnn_dim)), mindspore.float32)
-        self.decoder_zero_tensor = Tensor(
-            np.zeros((hps.batch_size, self.decoder_rnn_dim)), mindspore.float32)
-        self.attention_context = Tensor(
-            np.zeros((hps.batch_size, self.encoder_embedding_dim)), mindspore.float32)
-        self.go_frame = Tensor(
-            np.zeros(
-                (hps.batch_size,
-                 self.num_mels *
-                 self.n_frames_per_step)),
-            mindspore.float32)
-
-    def parse_decoder_inputs(self, decoder_inputs):
-        ''' parse decoder inputs '''
-        decoder_inputs = self.transpose(decoder_inputs, (0, 2, 1))
-
-        B, n_frame, _ = self.get_shape(decoder_inputs)
-
-        decoder_inputs = self.reshape(
-            decoder_inputs, (B, n_frame // self.n_frames_per_step, -1))
-        decoder_inputs = self.transpose(decoder_inputs, (1, 0, 2))
-        return decoder_inputs
-
-    def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
-        ''' pack outputs '''
-        align_tuple = ()
-        n_frames = len(alignments)
-        for i in range(n_frames // self.concat_len):
-            start = i * self.concat_len
-            end = (i + 1) * self.concat_len
-            alignment = self.pack(alignments[start: end])
-            align_tuple += (alignment,)
-        if n_frames % self.concat_len != 0:
-            start = n_frames // self.concat_len * self.concat_len
-            alignment = self.pack(alignments[start:])
-            align_tuple += (alignment,)
-        alignments = self.concat(align_tuple)
-        alignments = self.transpose(alignments, (1, 0, 2))
-
-        gate_tuple = ()
-        for i in range(n_frames // self.concat_len):
-            start = i * self.concat_len
-            end = (i + 1) * self.concat_len
-            gate_output = self.pack(gate_outputs[start: end])
-            gate_tuple += (gate_output,)
-        if n_frames % self.concat_len != 0:
-            start = n_frames // self.concat_len * self.concat_len
-            gate_output = self.pack(gate_outputs[start:])
-            gate_tuple += (gate_output,)
-
-        gate_outputs = self.concat(gate_tuple)
-        if len(self.get_shape(gate_outputs)) <= 1:
-            gate_outputs = self.expand_dims(gate_outputs, 0)
-        gate_outputs = self.transpose(gate_outputs, (1, 0))
-
-        mel_tuple = ()
-        for i in range(n_frames // self.concat_len):
-            start = i * self.concat_len
-            end = (i + 1) * self.concat_len
-            mel_output = self.pack(mel_outputs[start: end])
-            mel_tuple += (mel_output,)
-        if n_frames % self.concat_len != 0:
-            start = n_frames // self.concat_len * self.concat_len
-            mel_output = self.pack(mel_outputs[start:])
-            mel_tuple += (mel_output,)
-        mel_outputs = self.concat(mel_tuple)
-        mel_outputs = self.transpose(mel_outputs, (1, 0, 2))
-        mel_outputs = self.reshape(
-            mel_outputs, (self.get_shape(mel_outputs)[0], -1, self.num_mels))
-        mel_outputs = self.transpose(mel_outputs, (0, 2, 1))
-
-        return mel_outputs, gate_outputs, alignments
-
-    def construct(self, memory, decoder_inputs, text_mask):
-        ''' construct '''
-        decoder_input = self.expand_dims(self.go_frame, 0)
-        decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
-        decoder_inputs_ = self.concat((decoder_input, decoder_inputs))
-        decoder_inputs = self.prenet(decoder_inputs_)
-
-        B, MAX_TIME, _ = self.get_shape(memory)
-
-        attention_hidden = self.attention_zero_tensor
-        attention_cell = self.attention_zero_tensor
-
-        decoder_hidden = self.decoder_zero_tensor
-        decoder_cell = self.decoder_zero_tensor
-
-        attention_weights = self.fill(mindspore.float32, (B, MAX_TIME), 0.0)
-        attention_weights_cum = self.fill(mindspore.float32, (B, MAX_TIME), 0.0)
-        attention_context = self.attention_context
-
-        processed_memory = self.memory_layer(memory)
-
-        mask = text_mask
-
-        mel_outputs, gate_outputs, alignments = (), (), ()
-
-        n_frame, _, _ = self.get_shape(decoder_inputs)
-
-        for i in range(n_frame - 1):
-            decoder_input = self.squeeze(decoder_inputs[i:i + 1])
-            mel_output, gate_output, attention_weights, attention_weights_cum, \
-            attention_context, decoder_hidden, decoder_cell, attention_hidden, \
-            attention_cell = self.decode(decoder_input,
-                                         attention_hidden, attention_cell,
-                                         attention_weights, attention_weights_cum, attention_context,
-                                         memory, processed_memory,
-                                         decoder_hidden, decoder_cell, mask)
-
-            mel_outputs += (mel_output,)
-            gate_outputs += (self.squeeze(gate_output),)
-            alignments += (attention_weights,)
-
-        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
-            mel_outputs, gate_outputs, alignments)
-
-        return mel_outputs, gate_outputs, alignments
-
-    def inference(self, memory, text_mask):
-        '''inference '''
-        B, MAX_TIME, _ = self.get_shape(memory)
-
-        decoder_input = self.fill(
-            mindspore.float32, (B, self.num_mels * self.n_frames_per_step), 0)
-
-        attention_hidden = self.zeros((B, self.attention_rnn_dim), mindspore.float32)
-        attention_cell = self.zeros((B, self.attention_rnn_dim), mindspore.float32)
-
-        decoder_hidden = self.zeros((B, self.decoder_rnn_dim), mindspore.float32)
-        decoder_cell = self.zeros((B, self.decoder_rnn_dim), mindspore.float32)
-
-        attention_weights = self.fill(mindspore.float32, (B, MAX_TIME), 0.0)
-        attention_weights_cum = self.fill(mindspore.float32, (B, MAX_TIME), 0.0)
-        attention_context = self.zeros(
-            (B, self.encoder_embedding_dim), mindspore.float32)
-
-        processed_memory = self.memory_layer(memory)
-
-        mask = text_mask
-        mel_outputs, gate_outputs, alignments = (), (), ()
-        while True:
-            decoder_input = self.prenet(decoder_input)
-            mel_output, gate_output, attention_weights, attention_weights_cum, \
-            attention_context, decoder_hidden, decoder_cell, attention_hidden, \
-            attention_cell = self.decode.inference(decoder_input,
-                                                   attention_hidden, attention_cell,
-                                                   attention_weights, attention_weights_cum, attention_context,
-                                                   memory, processed_memory,
-                                                   decoder_hidden, decoder_cell, mask)
-
-            mel_outputs += (mel_output,)
-            gate_outputs += (self.squeeze(gate_output),)
-            alignments += (attention_weights,)
-
-            if self.sigmoid(gate_output[0]) > self.gate_threshold:
-                P.Print()('Terminated by gate.')
-                break
-            if len(mel_outputs) > 1 and (mel_output <= 0.2).all():
-                P.Print()('Warning: End with low power.')
-                break
-            if len(mel_outputs) == self.max_decoder_steps:
-                P.Print()('Warning: Reached max decoder steps.')
-                break
-
-            decoder_input = mel_output
-
-        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
-            mel_outputs, gate_outputs, alignments)
-        return mel_outputs, gate_outputs, alignments
-
-    def inferencev2(self, memory, text_mask):
-        '''inferencev2 '''
-        B, MAX_TIME, _ = self.get_shape(memory)
-
-        decoder_input = self.fill(
-            mindspore.float32, (B, self.num_mels * self.n_frames_per_step), 0)
-
-        attention_hidden = self.zeros((B, self.attention_rnn_dim), mindspore.float32)
-        attention_cell = self.zeros((B, self.attention_rnn_dim), mindspore.float32)
-
-        decoder_hidden = self.zeros((B, self.decoder_rnn_dim), mindspore.float32)
-        decoder_cell = self.zeros((B, self.decoder_rnn_dim), mindspore.float32)
-
-        attention_weights = self.fill(mindspore.float32, (B, MAX_TIME), 0.0)
-        attention_weights_cum = self.fill(mindspore.float32, (B, MAX_TIME), 0.0)
-        attention_context = self.zeros(
-            (B, self.encoder_embedding_dim), mindspore.float32)
-
-        processed_memory = self.memory_layer(memory)
-
-        mask = text_mask
-        mel_outputs, gate_outputs, alignments = (), (), ()
-        for _ in range(292):
-            decoder_input = self.prenet(decoder_input)
-            mel_output, gate_output, attention_weights, attention_weights_cum, \
-            attention_context, decoder_hidden, decoder_cell, attention_hidden, \
-            attention_cell = self.decode(decoder_input,
-                                         attention_hidden, attention_cell,
-                                         attention_weights, attention_weights_cum, attention_context,
-                                         memory, processed_memory,
-                                         decoder_hidden, decoder_cell, mask)
-
-            mel_outputs += (mel_output,)
-            gate_outputs += (self.squeeze(gate_output),)
-            alignments += (attention_weights,)
-
-            decoder_input = mel_output
-
-        mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
-            mel_outputs, gate_outputs, alignments)
-        return mel_outputs, gate_outputs, alignments
-
-
-class Tacotron2(nn.Cell):
-    '''tacotron2 '''
-    def __init__(self):
-        super(Tacotron2, self).__init__()
-        self.num_mels = hps.num_mels
-        self.mask_padding = hps.mask_padding
-        self.n_frames_per_step = hps.n_frames_per_step
-
-        std = math.sqrt(2.0 / (hps.n_symbols + hps.symbols_embedding_dim))
-        val = math.sqrt(3.0) * std
-        w_init = mindspore.common.initializer.Uniform(scale=val)
-        self.embedding = nn.Embedding(
-            hps.n_symbols, hps.symbols_embedding_dim, embedding_table=w_init)
-        self.encoder = Encoder()
-        self.decoder = Decoder()
-        self.postnet = Postnet()
-
-        self.transpose = P.Transpose()
-        self.select = P.Select()
-        self.fill = P.Fill()
-        self.get_shape = P.Shape()
-
-    def parse_output(self, outputs, mel_mask=None):
-        ''' parse output '''
-        if mel_mask is not None:
-            outputs[0] = self.select(
-                mel_mask, outputs[0], self.fill(
-                    mindspore.float32, self.get_shape(
-                        outputs[0]), 0.0))
-            outputs[1] = self.select(
-                mel_mask, outputs[1], self.fill(
-                    mindspore.float32, self.get_shape(
-                        outputs[1]), 0.0))
-            outputs[2] = self.select(mel_mask[:,
-                                              0,
-                                              ::self.n_frames_per_step],
-                                     outputs[2],
-                                     self.fill(mindspore.float32,
-                                               self.get_shape(outputs[2]),
-                                               1e3))
-
-        return outputs
-
-    def construct(
-            self,
-            text_inputs,
-            input_length,
-            mel_padded,
-            text_mask,
-            mel_mask,
-            rnn_mask):
-        ''' construct '''
-        embedded_inputs = self.transpose(
-            self.embedding(text_inputs), (0, 2, 1))
-
-        encoder_outputs = self.encoder(embedded_inputs, input_length, rnn_mask)
-
-        mel_outputs, gate_outputs, alignments = self.decoder(
-            encoder_outputs, mel_padded, text_mask)
-
-        mel_outputs_postnet = self.postnet(mel_outputs)
-        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
-
-        return self.parse_output(
-            [mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
-            mel_mask)
-
-    def inference(self, inputs, text_mask):
-        '''inference '''
-        embedded_inputs = self.transpose(self.embedding(inputs), (0, 2, 1))
-        encoder_outputs = self.encoder.inference(embedded_inputs)
-        mel_outputs, gate_outputs, alignments = self.decoder.inference(
-            encoder_outputs, text_mask)
-
-        mel_outputs_postnet = self.postnet.inference(mel_outputs)
-        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
-
-        outputs = self.parse_output(
-            [mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
-
-        return outputs
-
-
-class NetWithLossClass(nn.Cell):
-    ''' net with loss'''
-    def __init__(self, model, loss_fn):
-        super(NetWithLossClass, self).__init__()
-        self.model = model
-        self.loss_fn = loss_fn
-        self.sigmoid = P.Sigmoid()
-
-    def construct(
-            self,
-            text_padded,
-            input_length,
-            mel_padded,
-            gate_padded,
-            text_mask,
-            mel_mask,
-            rnn_mask):
-        ''' construct '''
-        out = self.model(
-            text_padded,
-            input_length,
-            mel_padded,
-            text_mask,
-            mel_mask,
-            rnn_mask)
-        loss = self.loss_fn(out, (mel_padded, gate_padded))
-        return loss
-
-
-class PredictMel(nn.Cell):
-    '''predict cell for inference '''
-    def __init__(self):
-        super(PredictMel, self).__init__()
-        self.num_mels = hps.num_mels
-        self.mask_padding = hps.mask_padding
-        self.n_frames_per_step = hps.n_frames_per_step
-
-        std = math.sqrt(2.0 / (hps.n_symbols + hps.symbols_embedding_dim))
-        val = math.sqrt(3.0) * std
-        w_init = mindspore.common.initializer.Uniform(scale=val)
-        self.embedding = nn.Embedding(
-            hps.n_symbols, hps.symbols_embedding_dim, embedding_table=w_init)
-        self.encoder = Encoder()
-        self.decoder = Decoder()
-        self.postnet = Postnet()
-
-        self.transpose = P.Transpose()
-        self.select = P.Select()
-        self.fill = P.Fill()
-        self.get_shape = P.Shape()
-
-    def parse_output(self, outputs, mel_mask=None):
-        ''' parse output '''
-        if mel_mask is not None:
-            outputs[0] = self.select(
-                mel_mask, outputs[0], self.fill(
-                    mindspore.float32, self.get_shape(
-                        outputs[0]), 0.0))
-            outputs[1] = self.select(
-                mel_mask, outputs[1], self.fill(
-                    mindspore.float32, self.get_shape(
-                        outputs[1]), 0.0))
-            outputs[2] = self.select(mel_mask[:,
-                                              0,
-                                              ::self.n_frames_per_step],
-                                     outputs[2],
-                                     self.fill(mindspore.float32,
-                                               self.get_shape(outputs[2]),
-                                               1e3))
-
-        return outputs
-
-    def construct(self, inputs, text_mask):
-        ''' construct '''
-        embedded_inputs = self.transpose(self.embedding(inputs), (0, 2, 1))
-        encoder_outputs = self.encoder.inference(embedded_inputs)
-        mel_outputs, gate_outputs, alignments = self.decoder.inferencev2(
-            encoder_outputs, text_mask)
-
-        mel_outputs_postnet = self.postnet.inference(mel_outputs)
-        mel_outputs_postnet = mel_outputs + mel_outputs_postnet
-
-        outputs = self.parse_output(
-            [mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
-        return outputs
-
-
-GRADIENT_CLIP_TYPE = 1
-GRADIENT_CLIP_VALUE = 1.0
-clip_grad = C.MultitypeFuncGraph("clip_grad")
-
-
-@clip_grad.register("Number", "Number", "Tensor")
-def _clip_grad(clip_type, clip_value, grad):
-    """
-    Clip gradients.
-
-    Inputs:
-        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
-        clip_value (float): Specifies how much to clip.
-        grad (Tensor): Gradient to be clipped.
-
-    Outputs:
-        Tensor, the clipped gradient.
-    """
-    if clip_type not in [0, 1]:
-        return grad
-    dt = F.dtype(grad)
-    if clip_type == 0:
-        new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array(
-            (-clip_value,)), dt), F.cast(F.tuple_to_array((clip_value,)), dt))
-    else:
-        new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt))
-    return new_grad
-
-
-grad_scale = C.MultitypeFuncGraph("grad_scale")
-reciprocal = P.Reciprocal()
-
-
-@grad_scale.register("Tensor", "Tensor")
-def tensor_grad_scale(scale, grad):
-    ''' scale grad '''
-    return grad * F.cast(reciprocal(scale), F.dtype(grad))
-
-
-_grad_overflow = C.MultitypeFuncGraph("_grad_overflow")
-grad_overflow = P.FloatStatus()
-
-
-@_grad_overflow.register("Tensor")
-def _tensor_grad_overflow(grad):
-    ''' grad overflow '''
-    return grad_overflow(grad)
-
-
-compute_norm = C.MultitypeFuncGraph("compute_norm")
-
-
-@compute_norm.register("Tensor")
-def _compute_norm(grad):
-    norm = nn.Norm()
-    norm = norm(F.cast(grad, mindspore.float32))
-    ret = F.expand_dims(F.cast(norm, mindspore.float32), 0)
-    return ret
-
-
-grad_div = C.MultitypeFuncGraph("grad_div")
-
-
-@grad_div.register("Tensor", "Tensor")
-def _grad_div(val, grad):
-    div = P.RealDiv()
-    mul = P.Mul()
-    scale = div(1.0, val)
-    ret = mul(grad, scale)
-    return ret
-
-
-class TrainStepWrap(nn.Cell):
-    """
-    TrainStepWrap definition
-    """
-
-    def __init__(self, network, optimizer, scale_update_cell):  # 16384.0
-        super(TrainStepWrap, self).__init__(auto_prefix=False)
-        self.network = network
-        self.network.set_grad()
-        self.network.add_flags(defer_inline=True)
-        self.add_flags(has_effect=True)
-        self.weights = optimizer.parameters
-        self.optimizer = optimizer
-
-        self.hyper_map = C.HyperMap()
-        self.grad = C.GradOperation(get_by_list=True, sens_param=True)
-
-        self.sens = 1.0
-        self.fill = P.Fill()
-        self.dtype = P.DType()
-        self.get_shape = P.Shape()
-        self.cast = P.Cast()
-        self.concat = P.Concat()
-        self.less_equal = P.LessEqual()
-        self.reduce_sum = P.ReduceSum(keep_dims=False)
-        self.greater = P.Greater()
-        self.select = P.Select()
-        self.alloc_status = P.NPUAllocFloatStatus()
-        self.get_status = P.NPUGetFloatStatus()
-        self.clear_before_grad = P.NPUClearFloatStatus()
-        self.is_distributed = False
-        self.norm = nn.Norm(keep_dims=True)
-        self.base = Tensor(1, mindspore.float32)
-
-        self.all_reduce = P.AllReduce()
-
-        self.loss_scaling_manager = scale_update_cell
-        self.loss_scale = Parameter(
-            Tensor(
-                scale_update_cell.get_loss_scale(),
-                dtype=mindspore.float32))
-
-        self.reducer_flag = False
-        self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
-        if self.parallel_mode in [
-                ParallelMode.DATA_PARALLEL,
-                ParallelMode.HYBRID_PARALLEL]:
-            self.reducer_flag = True
-            self.is_distributed = True
-        self.grad_reducer = F.identity
-        self.degree = 1
-        if self.reducer_flag:
-            self.degree = get_group_size()
-            mean = _get_gradients_mean()
-            self.grad_reducer = DistributedGradReducer(
-                self.weights, mean, self.degree)
-
-    def construct(
-            self,
-            text_padded,
-            input_length,
-            mel_padded,
-            gate_padded,
-            text_mask,
-            mel_mask,
-            rnn_mask):
-        ''' construct '''
-        weights = self.weights
-        loss = self.network(
-            text_padded,
-            input_length,
-            mel_padded,
-            gate_padded,
-            text_mask,
-            mel_mask,
-            rnn_mask)
-
-        scale_sense = self.loss_scale
-
-        init = self.alloc_status()
-        init = F.depend(init, loss)
-
-        clear_status = self.clear_before_grad(init)
-        scale_sense = F.depend(scale_sense, clear_status)
-
-        grads = self.grad(
-            self.network,
-            weights)(
-                text_padded,
-                input_length,
-                mel_padded,
-                gate_padded,
-                text_mask,
-                mel_mask,
-                rnn_mask,
-                self.cast(
-                    scale_sense,
-                    mindspore.float32))
-        grads = self.grad_reducer(grads)
-        grads = self.hyper_map(
-            F.partial(
-                grad_scale,
-                self.degree *
-                scale_sense),
-            grads)
-        grads = self.hyper_map(
-            F.partial(
-                clip_grad,
-                GRADIENT_CLIP_TYPE,
-                GRADIENT_CLIP_VALUE),
-            grads)
-
-        init = F.depend(init, grads)
-        get_status = self.get_status(init)
-        init = F.depend(init, get_status)
-        flag_sum = self.reduce_sum(init, (0,))
-
-        if self.is_distributed:
-            flag_reduce = self.all_reduce(flag_sum)
-            cond = self.less_equal(self.base, flag_reduce)
-        else:
-            cond = self.less_equal(self.base, flag_sum)
-
-        overflow = self.loss_scaling_manager(self.loss_scale, cond)
-
-        if not overflow:
-            self.optimizer(grads)
-
-        return loss, scale_sense
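For reference, the per-tensor clip-by-norm that `clip_grad` applies above (with `GRADIENT_CLIP_TYPE = 1` and `GRADIENT_CLIP_VALUE = 1.0`) behaves like the following NumPy sketch; the helper name and sample values are illustrative only and not part of the deleted module.

```python
# Illustrative NumPy sketch of per-tensor clip-by-norm; hypothetical helper.
import numpy as np

def clip_by_norm(grad, clip_value=1.0):
    """Scale `grad` so its L2 norm does not exceed `clip_value`."""
    norm = np.linalg.norm(grad)
    if norm <= clip_value:
        return grad
    return grad * (clip_value / norm)

g = np.array([3.0, 4.0])   # L2 norm = 5.0
print(clip_by_norm(g))     # [0.6 0.8], norm clipped to 1.0
```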
diff --git a/research/audio/tacotron2/src/text/__init__.py b/research/audio/tacotron2/src/text/__init__.py
deleted file mode 100644
index ca8a8e7e24c4719ff46c77a56072e1bcd181aabc..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/text/__init__.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-""" from https://github.com/keithito/tacotron """
-import re
-
-from src.text import cleaners
-from src.text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-# Regular expression matching text enclosed in curly braces:
-_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
-
-
-def text_to_sequence(text, cleaner_names):
-    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
-
-      The text can optionally have ARPAbet sequences enclosed in curly braces embedded
-      in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
-
-      Args:
-        text: string to convert to a sequence
-        cleaner_names: names of the cleaner functions to run the text through
-
-      Returns:
-        List of integers corresponding to the symbols in the text
-    '''
-    sequence = []
-
-    # Check for curly braces and treat their contents as ARPAbet:
-    length = len(text)
-    while length:
-        m = _curly_re.match(text)
-        if not m:
-            sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
-            break
-        sequence += _symbols_to_sequence(
-            _clean_text(m.group(1), cleaner_names))
-        sequence += _arpabet_to_sequence(m.group(2))
-        text = m.group(3)
-        length = len(text)
-    return sequence
-
-
-def sequence_to_text(sequence):
-    '''Converts a sequence of IDs back to a string'''
-    result = ''
-    for symbol_id in sequence:
-        if symbol_id in _id_to_symbol:
-            s = _id_to_symbol[symbol_id]
-            # Enclose ARPAbet back in curly braces:
-            if len(s) > 1 and s[0] == '@':
-                s = '{%s}' % s[1:]
-            result += s
-    return result.replace('}{', ' ')
-
-
-def _clean_text(text, cleaner_names):
-    ''' clean text '''
-    for name in cleaner_names:
-        cleaner = getattr(cleaners, name)
-        if not cleaner:
-            raise Exception('Unknown cleaner: %s' % name)
-        text = cleaner(text)
-    return text
-
-
-def _symbols_to_sequence(symbol):
-    ''' symbols_to_sequence'''
-    return [_symbol_to_id[s] for s in symbol if _should_keep_symbol(s)]
-
-
-def _arpabet_to_sequence(text):
-    ''' arpabet_to_sequence'''
-    return _symbols_to_sequence(['@' + s for s in text.split()])
-
-
-def _should_keep_symbol(s):
-    ''' keep symbols '''
-    return s in _symbol_to_id and s != '_'
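The removed `src/text` front end above converts raw text (optionally containing ARPAbet in curly braces) to symbol IDs and back. A minimal usage sketch, assuming the package is still importable and the `english_cleaners` pipeline is available alongside it:

```python
# Minimal usage sketch of the removed text front end (assumes src.text is importable).
from src.text import text_to_sequence, sequence_to_text

ids = text_to_sequence('Turn left on {HH AW1 S S T AH0 N} Street.',
                       ['english_cleaners'])
print(ids)                    # list of integer symbol IDs
print(sequence_to_text(ids))  # cleaned text, ARPAbet re-wrapped in {...}
```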
diff --git a/research/audio/tacotron2/src/text/cleaners.py b/research/audio/tacotron2/src/text/cleaners.py
deleted file mode 100644
index dbd5ef5cd45aadc6655aaddfc04965c56a92e624..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/text/cleaners.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-""" from https://github.com/keithito/tacotron """
-
-
-# Regular expression matching whitespace:
-import re
-from unidecode import unidecode
-from .numbers import normalize_numbers
-
-_whitespace_re = re.compile(r'\s+')
-
-# List of (regular expression, replacement) pairs for abbreviations:
-_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
-    ('mrs', 'misess'),
-    ('mr', 'mister'),
-    ('dr', 'doctor'),
-    ('st', 'saint'),
-    ('co', 'company'),
-    ('jr', 'junior'),
-    ('maj', 'major'),
-    ('gen', 'general'),
-    ('drs', 'doctors'),
-    ('rev', 'reverend'),
-    ('lt', 'lieutenant'),
-    ('hon', 'honorable'),
-    ('sgt', 'sergeant'),
-    ('capt', 'captain'),
-    ('esq', 'esquire'),
-    ('ltd', 'limited'),
-    ('col', 'colonel'),
-    ('ft', 'fort'),
-]]
-
-
-def expand_abbreviations(text):
-    ''' expand abbreviation'''
-    for regex, replacement in _abbreviations:
-        text = re.sub(regex, replacement, text)
-    return text
-
-
-def expand_numbers(text):
-    ''' expand numbers '''
-    return normalize_numbers(text)
-
-
-def lowercase(text):
-    ''' lowercase '''
-    return text.lower()
-
-
-def collapse_whitespace(text):
-    ''' remove whitespace '''
-    return re.sub(_whitespace_re, ' ', text)
-
-
-def convert_to_ascii(text):
-    ''' convert '''
-    return unidecode(text)
-
-
-def basic_cleaners(text):
-    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
-    text = lowercase(text)
-    text = collapse_whitespace(text)
-    return text
-
-
-def transliteration_cleaners(text):
-    '''Pipeline for non-English text that transliterates to ASCII.'''
-    text = convert_to_ascii(text)
-    text = lowercase(text)
-    text = collapse_whitespace(text)
-    return text
-
-
-def english_cleaners(text):
-    '''Pipeline for English text, including number and abbreviation expansion.'''
-    text = convert_to_ascii(text)
-    text = lowercase(text)
-    text = expand_numbers(text)
-    text = expand_abbreviations(text)
-    text = collapse_whitespace(text)
-    return text
diff --git a/research/audio/tacotron2/src/text/cmudict.py b/research/audio/tacotron2/src/text/cmudict.py
deleted file mode 100644
index 0f982ac96d822f1c643de40e38703644c1383930..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/text/cmudict.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-""" from https://github.com/keithito/tacotron """
-
-import re
-
-
-valid_symbols = [
-    'AA',
-    'AA0',
-    'AA1',
-    'AA2',
-    'AE',
-    'AE0',
-    'AE1',
-    'AE2',
-    'AH',
-    'AH0',
-    'AH1',
-    'AH2',
-    'AO',
-    'AO0',
-    'AO1',
-    'AO2',
-    'AW',
-    'AW0',
-    'AW1',
-    'AW2',
-    'AY',
-    'AY0',
-    'AY1',
-    'AY2',
-    'B',
-    'CH',
-    'D',
-    'DH',
-    'EH',
-    'EH0',
-    'EH1',
-    'EH2',
-    'ER',
-    'ER0',
-    'ER1',
-    'ER2',
-    'EY',
-    'EY0',
-    'EY1',
-    'EY2',
-    'F',
-    'G',
-    'HH',
-    'IH',
-    'IH0',
-    'IH1',
-    'IH2',
-    'IY',
-    'IY0',
-    'IY1',
-    'IY2',
-    'JH',
-    'K',
-    'L',
-    'M',
-    'N',
-    'NG',
-    'OW',
-    'OW0',
-    'OW1',
-    'OW2',
-    'OY',
-    'OY0',
-    'OY1',
-    'OY2',
-    'P',
-    'R',
-    'S',
-    'SH',
-    'T',
-    'TH',
-    'UH',
-    'UH0',
-    'UH1',
-    'UH2',
-    'UW',
-    'UW0',
-    'UW1',
-    'UW2',
-    'V',
-    'W',
-    'Y',
-    'Z',
-    'ZH']
-
-_valid_symbol_set = set(valid_symbols)
-
-
-class CMUDict:
-    '''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
-
-    def __init__(self, file_or_path, keep_ambiguous=True):
-        if isinstance(file_or_path, str):
-            with open(file_or_path, encoding='latin-1') as f:
-                entries = _parse_cmudict(f)
-        else:
-            entries = _parse_cmudict(file_or_path)
-        if not keep_ambiguous:
-            entries = {
-                word: pron for word,
-                pron in entries.items() if len(pron) == 1}
-        self._entries = entries
-
-    def __len__(self):
-        return len(self._entries)
-
-    def lookup(self, word):
-        '''Returns list of ARPAbet pronunciations of the given word.'''
-        return self._entries.get(word.upper())
-
-
-_alt_re = re.compile(r'\([0-9]+\)')
-
-
-def _parse_cmudict(lines):
-    ''' parse cmudict '''
-    cmudict = {}
-    for line in lines:
-        if not line:
-            continue
-        if (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
-            parts = line.split('  ')
-            word = re.sub(_alt_re, '', parts[0])
-            pronunciation = _get_pronunciation(parts[1])
-            if pronunciation:
-                if word in cmudict:
-                    cmudict[word].append(pronunciation)
-                else:
-                    cmudict[word] = [pronunciation]
-
-    return cmudict
-
-
-def _get_pronunciation(s):
-    ''' get pronunciation '''
-    parts = s.strip().split(' ')
-    for part in parts:
-        if part not in _valid_symbol_set:
-            return None
-    return ' '.join(parts)
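A quick lookup against the `CMUDict` wrapper above; the dictionary file path is a placeholder and must point to a local copy of the CMU pronouncing dictionary:

```python
# Hypothetical lookup against the removed CMUDict wrapper; 'cmudict-0.7b' is a placeholder path.
from src.text.cmudict import CMUDict

cmu = CMUDict('cmudict-0.7b', keep_ambiguous=False)
print(len(cmu))              # number of unambiguous entries kept
print(cmu.lookup('speech'))  # list of ARPAbet pronunciations, or None if absent
```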
diff --git a/research/audio/tacotron2/src/text/numbers.py b/research/audio/tacotron2/src/text/numbers.py
deleted file mode 100644
index d3ff6b9883a74f2c7d58c5311151b975d935c844..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/text/numbers.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-""" from https://github.com/keithito/tacotron """
-import re
-import inflect
-
-
-
-_inflect = inflect.engine()
-_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
-_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
-_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
-_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
-_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
-_number_re = re.compile(r'[0-9]+')
-
-
-def _remove_commas(m):
-    ''' remove commas'''
-    return m.group(1).replace(',', '')
-
-
-def _expand_decimal_point(m):
-    ''' expand decimal point '''
-    return m.group(1).replace('.', ' point ')
-
-
-def _expand_dollars(m):
-    '''expand dollars '''
-    match = m.group(1)
-    parts = match.split('.')
-    if len(parts) > 2:
-        return match + ' dollars'  # Unexpected format
-    dollars = int(parts[0]) if parts[0] else 0
-    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
-    if dollars and cents:
-        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
-        cent_unit = 'cent' if cents == 1 else 'cents'
-        ret_dollar = '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
-    elif dollars:
-        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
-        ret_dollar = '%s %s' % (dollars, dollar_unit)
-    elif cents:
-        cent_unit = 'cent' if cents == 1 else 'cents'
-        ret_dollar = '%s %s' % (cents, cent_unit)
-    else:
-        ret_dollar = 'zero dollars'
-    return ret_dollar
-
-
-
-def _expand_ordinal(m):
-    ''' expand ordinal '''
-    return _inflect.number_to_words(m.group(0))
-
-
-def _expand_number(m):
-    ''' expand number '''
-    num = int(m.group(0))
-    if 1000 < num < 3000:
-        if num == 2000:
-            ret_number = 'two thousand'
-        elif 2000 < num < 2010:
-            ret_number = 'two thousand ' + _inflect.number_to_words(num % 100)
-        elif num % 100 == 0:
-            ret_number = _inflect.number_to_words(num // 100) + ' hundred'
-        else:
-            ret_number = _inflect.number_to_words(
-                num,
-                andword='',
-                zero='oh',
-                group=2).replace(
-                    ', ',
-                    ' ')
-        return ret_number
-    return _inflect.number_to_words(num, andword='')
-
-
-def normalize_numbers(text):
-    ''' normalize numbers '''
-    text = re.sub(_comma_number_re, _remove_commas, text)
-    text = re.sub(_pounds_re, r'\1 pounds', text)
-    text = re.sub(_dollars_re, _expand_dollars, text)
-    text = re.sub(_decimal_number_re, _expand_decimal_point, text)
-    text = re.sub(_ordinal_re, _expand_ordinal, text)
-    text = re.sub(_number_re, _expand_number, text)
-    return text
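The normalization order above matters (commas and currency are expanded before bare numbers). A small sketch of the end-to-end behaviour, with the exact wording depending on the installed `inflect` version:

```python
# Sketch of normalize_numbers; output wording depends on the inflect version.
from src.text.numbers import normalize_numbers

print(normalize_numbers('It cost $15.50 on March 3rd, 2005.'))
# e.g. 'It cost fifteen dollars, fifty cents on March third, two thousand five.'
```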
diff --git a/research/audio/tacotron2/src/text/symbols.py b/research/audio/tacotron2/src/text/symbols.py
deleted file mode 100644
index 9051dd7cb0424e07f97f95bce2b58a3ba618d6a3..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/text/symbols.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-""" from https://github.com/keithito/tacotron """
-from src.text import cmudict
-
-_pad = '_'
-_punctuation = '!\'(),.:;? '
-_special = '-'
-_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
-_end = '~'
-# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same
-# as uppercase letters):
-_arpabet = ['@' + s for s in cmudict.valid_symbols]
-
-# Export all symbols:
-symbols = [_pad] + list(_special) + list(_punctuation) + \
-    list(_letters) + _arpabet + list(_end)
diff --git a/research/audio/tacotron2/src/utils/audio.py b/research/audio/tacotron2/src/utils/audio.py
deleted file mode 100644
index d976f71c3429044648411f53a4225fe77bd37af7..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/utils/audio.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''audio'''
-
-import librosa
-import librosa.filters
-import numpy as np
-import scipy
-from scipy.io import wavfile
-
-from src.hparams import hparams as hps
-
-
-def load_wav(path):
-    ''' load wav '''
-    _, wav = wavfile.read(path)
-    signed_int16_max = 2**15
-    if wav.dtype == np.int16:
-        wav = wav.astype(np.float32) / signed_int16_max
-
-    wav = wav / np.max(np.abs(wav))
-    return wav
-
-
-def save_wav(wav, path):
-    ''' save wav'''
-    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
-    wavfile.write(path, hps.sample_rate, wav.astype(np.int16))
-
-
-def preemphasis(x):
-    ''' preemphasis '''
-    return scipy.signal.lfilter([1, -hps.preemphasis], [1], x)
-
-
-def inv_preemphasis(x):
-    ''' inv preemphasis '''
-    return scipy.signal.lfilter([1], [1, -hps.preemphasis], x)
-
-
-def spectrogram(y):
-    ''' extract spectrogram '''
-    D = _stft(preemphasis(y))
-    S = _amp_to_db(np.abs(D)) - hps.ref_level_db
-    return _normalize(S)
-
-
-def inv_spectrogram(spec):
-    '''Converts spectrogram to waveform using librosa'''
-    S = _db_to_amp(_denormalize(spec) + hps.ref_level_db)
-    return inv_preemphasis(_griffin_lim(S ** hps.power))
-
-
-def melspectrogram(y):
-    '''extract normalized mel spectrogram'''
-    D = _stft(y)
-    S = _amp_to_db(_linear_to_mel(np.abs(D))) - hps.ref_level_db
-    return _normalize(S)
-
-
-def inv_melspectrogram(spec):
-    '''convert mel spectrogram to waveform '''
-    mel = _db_to_amp(_denormalize(spec) + hps.ref_level_db)
-    S = _mel_to_linear(mel)
-    return _griffin_lim(S ** hps.power)
-
-
-def find_endpoint(wav, threshold_db=-40, min_silence_sec=0.8):
-    ''' find endpoint '''
-    window_length = int(hps.sample_rate * min_silence_sec)
-    hop_length = int(window_length / 4)
-    threshold = _db_to_amp(threshold_db)
-    for x in range(hop_length, len(wav) - window_length, hop_length):
-        if np.max(wav[x:x + window_length]) < threshold:
-            return x + hop_length
-    return len(wav)
-
-
-def _griffin_lim(S):
-    '''librosa implementation of Griffin-Lim
-    Based on https://github.com/librosa/librosa/issues/434
-    '''
-    angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
-    S_complex = np.abs(S).astype(complex)
-    y = _istft(S_complex * angles)
-    for _ in range(hps.gl_iters):
-        angles = np.exp(1j * np.angle(_stft(y)))
-        y = _istft(S_complex * angles)
-    return y
-
-
-def _stft(y):
-    ''' stft using librosa '''
-    n_fft, hop_length, win_length = _stft_parameters()
-    return librosa.stft(
-        y=y,
-        n_fft=n_fft,
-        hop_length=hop_length,
-        win_length=win_length,
-        pad_mode='reflect')
-
-
-def _istft(y):
-    ''' istft using librosa '''
-    _, hop_length, win_length = _stft_parameters()
-    return librosa.istft(y, hop_length=hop_length, win_length=win_length)
-
-
-def _stft_parameters():
-    '''  get stft parameters'''
-    n_fft = (hps.num_freq - 1) * 2
-    hop_length = hps.hop_length
-    win_length = hps.win_length
-    return n_fft, hop_length, win_length
-
-
-_mel_basis = None
-
-
-def _linear_to_mel(spec):
-    ''' linear spectrogram to mel spectrogram'''
-    global _mel_basis
-    if _mel_basis is None:
-        _mel_basis = _build_mel_basis()
-    return np.dot(_mel_basis, spec)
-
-
-def _mel_to_linear(spec):
-    ''' mel spectrogram to linear spectrogram '''
-    global _mel_basis
-    if _mel_basis is None:
-        _mel_basis = _build_mel_basis()
-    inv_mel_basis = np.linalg.pinv(_mel_basis)
-    inverse = np.dot(inv_mel_basis, spec)
-    inverse = np.maximum(1e-10, inverse)
-    return inverse
-
-
-def _build_mel_basis():
-    ''' build mel filters '''
-    n_fft = (hps.num_freq - 1) * 2
-    return librosa.filters.mel(
-        hps.sample_rate,
-        n_fft,
-        fmin=hps.fmin,
-        fmax=hps.fmax,
-        n_mels=hps.num_mels)
-
-
-def _amp_to_db(x):
-    ''' amp to db'''
-    return 20 * np.log10(np.maximum(1e-5, x))
-
-
-def _db_to_amp(x):
-    ''' db to amp '''
-    return np.power(10.0, x * 0.05)
-
-
-def _normalize(S):
-    ''' normalize '''
-    return np.clip((S - hps.min_level_db) / -hps.min_level_db, 0, 1)
-
-
-def _denormalize(S):
-    '''denormalize '''
-    return (np.clip(S, 0, 1) * -hps.min_level_db) + hps.min_level_db
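A minimal round trip through the audio helpers above (mel extraction followed by Griffin-Lim inversion); file names are placeholders and all STFT/mel parameters come from `src.hparams`:

```python
# Minimal round trip through the removed audio helpers; file names are placeholders.
from src.utils.audio import load_wav, melspectrogram, inv_melspectrogram, save_wav

wav = load_wav('sample.wav')      # float32 waveform normalized to [-1, 1]
mel = melspectrogram(wav)         # (num_mels, frames), normalized to [0, 1]
recon = inv_melspectrogram(mel)   # Griffin-Lim reconstruction of the waveform
save_wav(recon, 'sample_gl.wav')
```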
diff --git a/research/audio/tacotron2/src/utils/convert.py b/research/audio/tacotron2/src/utils/convert.py
deleted file mode 100644
index 5f61f1905697a1a1e4bd439e69b41f6ab2e6a2af..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/src/utils/convert.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''Preprocess the mel spectrogram synthesized by Tacotron2 to make it suitable for WaveNet inference;
-the statistics (meanvar.joblib) are obtained from the WaveNet (Ascend version) dataset preprocessing phase.
-'''
-import os
-import os.path
-import argparse
-import numpy as np
-
-import joblib
-
-from audio import inv_melspectrogram
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-f', '--file_pth', type=str, default='',
-                        required=True, help='path to load checkpoints')
-    args = parser.parse_args()
-    mel = np.load(args.file_pth)['arr_0']
-
-    dir_name, fname = os.path.split(args.file_pth)
-
-    wav = inv_melspectrogram(mel)
-
-    scalar = joblib.load('meanvar.joblib')
-    mel = scalar.transform(mel)
-
-    np.save(
-        os.path.join(
-            dir_name,
-            'output-feats.npy'),
-        mel.T,
-        allow_pickle=False)
-    np.save(os.path.join(dir_name, 'output-wave.npy'), wav, allow_pickle=False)
diff --git a/research/audio/tacotron2/train.py b/research/audio/tacotron2/train.py
deleted file mode 100644
index 18d1756b978903212cdeac078de3f3e7878215f3..0000000000000000000000000000000000000000
--- a/research/audio/tacotron2/train.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-'''training model'''
-import os
-import os.path
-import time
-import numpy as np
-import mindspore
-import mindspore.dataset as ds
-
-from mindspore.context import ParallelMode
-from mindspore.communication import management as MultiDevice
-from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
-from mindspore import context
-from mindspore import Model
-from mindspore import Tensor
-from mindspore.nn.optim import Adam
-
-from src.hparams import hparams as hps
-from src.dataset import ljdataset, Sampler
-from src.callback import get_lr, LossCallBack
-from src.tacotron2 import Tacotron2, Tacotron2Loss, NetWithLossClass, TrainStepWrap
-
-from model_utils.config import config
-from model_utils.moxing_adapter import moxing_wrapper
-from model_utils.device_adapter import get_device_id, get_device_num
-
-def get_ms_timestamp():
-    '''get timestamp'''
-    t = time.time()
-    return int(round(t * 1000))
-
-np.random.seed(0)
-mindspore.common.set_seed(1024)
-time_stamp_init = False
-time_stamp_first = 0
-
-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=config.device_target)
-
-def prepare_dataloaders(dataset_path, rank_id, group_size):
-    '''prepare dataloaders'''
-    dataset = ljdataset(dataset_path, group_size)
-    ds_dataset = ds.GeneratorDataset(dataset,
-                                     ['text_padded',
-                                      'input_lengths',
-                                      'mel_padded',
-                                      'gate_padded',
-                                      'text_mask',
-                                      'mel_mask',
-                                      'rnn_mask'],
-                                     num_parallel_workers=4,
-                                     sampler=Sampler(dataset.sample_nums,
-                                                     rank_id,
-                                                     group_size))
-    ds_dataset = ds_dataset.batch(hps.batch_size)
-    return ds_dataset
-
-
-def modelarts_pre_process():
-    '''modelarts pre process function.'''
-    def unzip(zip_file, save_dir):
-        import zipfile
-        s_time = time.time()
-        if not os.path.exists(os.path.join(save_dir, config.modelarts_dataset_unzip_name)):
-            zip_isexist = zipfile.is_zipfile(zip_file)
-            if zip_isexist:
-                fz = zipfile.ZipFile(zip_file, 'r')
-                data_num = len(fz.namelist())
-                print("Extract Start...")
-                print("unzip file num: {}".format(data_num))
-                data_print = int(data_num / 100) if data_num > 100 else 1
-                i = 0
-                for file in fz.namelist():
-                    if i % data_print == 0:
-                        print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
-                    i += 1
-                    fz.extract(file, save_dir)
-                print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
-                                                     int(int(time.time() - s_time) % 60)))
-                print("Extract Done.")
-            else:
-                print("This is not zip.")
-        else:
-            print("Zip has been extracted.")
-
-    if config.need_modelarts_dataset_unzip:
-        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
-        save_dir_1 = os.path.join(config.data_path)
-
-        sync_lock = "/tmp/unzip_sync.lock"
-
-        # Each server contains 8 devices as most.
-        if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
-            print("Zip file path: ", zip_file_1)
-            print("Unzip file save dir: ", save_dir_1)
-            unzip(zip_file_1, save_dir_1)
-            print("===Finish extract data synchronization===")
-            try:
-                os.mknod(sync_lock)
-            except IOError:
-                pass
-
-        while True:
-            if os.path.exists(sync_lock):
-                break
-            time.sleep(1)
-
-        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
-
-    config.save_ckpt_dir = os.path.join(config.output_path, config.save_ckpt_dir)
-
-def _build_training_pipeline(pre_dataset, run_distribute=False):
-    ''' training '''
-
-    epoch_num = config.epoch_num
-
-    steps_per_epoch = pre_dataset.get_dataset_size()
-
-    learning_rate = get_lr(config.lr, epoch_num, steps_per_epoch, steps_per_epoch * config.warmup_epochs)
-    learning_rate = Tensor(learning_rate)
-
-    scale_update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2**12,
-                                                   scale_factor=2,
-                                                   scale_window=1000)
-    net = Tacotron2()
-    loss_fn = Tacotron2Loss()
-    loss_net = NetWithLossClass(net, loss_fn)
-
-
-    resume_epoch = None
-    if config.pretrain_ckpt:
-        resume_epoch = int(config.pretrain_ckpt.split('-')[-1].split('_')[0])
-        learning_rate = learning_rate[resume_epoch * steps_per_epoch:]
-        param_dict = load_checkpoint(config.pretrain_ckpt)
-        load_param_into_net(net, param_dict)
-        print(
-            'Successfully loaded the pretrained model {}'.format(
-                config.pretrain_ckpt))
-
-    optimizer = Adam(params=net.trainable_params(), learning_rate=learning_rate)
-
-    train_net = TrainStepWrap(loss_net, optimizer, scale_update_cell)
-    train_net.set_train()
-
-    model = Model(train_net)
-
-    ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch,
-                                   keep_checkpoint_max=config.keep_ckpt_max)
-
-
-    callbacks = [LossCallBack(steps_per_epoch),
-                 TimeMonitor(data_size=steps_per_epoch)]
-
-    ckpt_callback = ModelCheckpoint(prefix='tacotron2',
-                                    directory=os.path.join(config.save_ckpt_dir,
-                                                           'ckpt_{}'.format(os.getenv("DEVICE_ID"))),
-                                    config=ckpt_config)
-
-    callbacks.append(ckpt_callback)
-
-
-    print("Prepare to Training....")
-    if resume_epoch is not None:
-        epoch_num = epoch_num - resume_epoch
-
-    print("Epoch size ", epoch_num)
-    if run_distribute:
-        print(f" | Rank {MultiDevice.get_rank()} Call model train.")
-
-    model.train(
-        epoch_num,
-        pre_dataset,
-        callbacks=callbacks,
-        dataset_sink_mode=True)
-
-
-def set_parallel_env():
-    '''set parallel context'''
-    context.reset_auto_parallel_context()
-    MultiDevice.init()
-    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
-                                      device_num=MultiDevice.get_group_size(),
-                                      gradients_mean=True)
-
-def train_paralle(input_file_path):
-    """
-    Train model on multi device
-    Args:
-        input_file_path: preprocessed dataset path
-    """
-    set_parallel_env()
-    print("Starting traning on multiple devices. |~ _ ~| |~ _ ~| |~ _ ~| |~ _ ~|")
-    hps.batch_size = config.batch_size
-
-    dataset_path = os.path.join(input_file_path, 'ljdataset.hdf5')
-    print('dataset_path : {}'.format(dataset_path))
-    preprocessed_data = prepare_dataloaders(dataset_path,
-                                            MultiDevice.get_rank(),
-                                            MultiDevice.get_group_size())
-
-    _build_training_pipeline(preprocessed_data, True)
-
-def train_single(input_file_path):
-    """
-    Train model on single device
-    Args:
-        input_file_path: preprocessed dataset path
-    """
-    print("Staring training on single device.")
-    hps.batch_size = config.batch_size
-
-    dataset_path = os.path.join(input_file_path, 'ljdataset.hdf5')
-    print('dataset_path : {}'.format(dataset_path))
-    preprocessed_data = prepare_dataloaders(dataset_path,
-                                            rank_id=0,
-                                            group_size=1)
-
-    _build_training_pipeline(preprocessed_data)
-
-@moxing_wrapper(pre_process=modelarts_pre_process)
-def run_train():
-    '''run train.'''
-    if config.device_target == "Ascend":
-        config.rank_id = get_device_id()
-    else:
-        raise ValueError("Not support device target: {}".format(config.device_target))
-
-    device_num = get_device_num()
-    if device_num > 1:
-        train_paralle(config.dataset_path)
-    else:
-        train_single(config.dataset_path)
-
-
-if __name__ == '__main__':
-    run_train()
diff --git a/research/cv/ManiDP/Readme.md b/research/cv/ManiDP/README.md
similarity index 100%
rename from research/cv/ManiDP/Readme.md
rename to research/cv/ManiDP/README.md
diff --git a/research/cv/ViG/README_CN.md b/research/cv/ViG/README_CN.md
index a76d93ebc50a15d4bd42b3399bb2221cac8c8ca8..8e2445807968152c96632b86c209459d7b469607 100644
--- a/research/cv/ViG/README_CN.md
+++ b/research/cv/ViG/README_CN.md
@@ -137,7 +137,7 @@
     batch_size: 128                     # batch size
     # ===== Hardware setup ===== #
     num_parallel_workers: 16            # number of data preprocessing threads
-    device_target: Ascend               # GPU or Ascend
+    device_target: Ascend               # Ascend
   ```
 
 鏇村閰嶇疆缁嗚妭璇峰弬鑰冭剼鏈琡vig_s_patch16_224.yaml`銆� 閫氳繃瀹樻柟缃戠珯瀹夎MindSpore鍚庯紝鎮ㄥ彲浠ユ寜鐓у涓嬫楠よ繘琛岃缁冨拰璇勪及锛�
diff --git a/research/cv/augvit/readme.md b/research/cv/augvit/README.md
similarity index 100%
rename from research/cv/augvit/readme.md
rename to research/cv/augvit/README.md
diff --git a/research/cv/flownet2/README.md b/research/cv/flownet2/README.md
index 4a6924f7683ec38e3fbff16df58b7eb0ab199309..866f018d79558e35272dc3ef80adba159c007f02 100644
--- a/research/cv/flownet2/README.md
+++ b/research/cv/flownet2/README.md
@@ -86,8 +86,8 @@ Dataset used: [MpiSintel](http://sintel.cs.washington.edu)
 
 # [Environment Requirements](#contents)
 
-- Hardware（Ascend/GPU/CPU）
-    - Prepare hardware environment with Ascend/GPU/CPU processor.
+- Hardware（GPU）
+    - Prepare hardware environment with GPU processor.
 - Framework
     - [MindSpore](https://www.mindspore.cn/install/en)
 - For more information, please check the resources below：
@@ -153,8 +153,6 @@ After installing MindSpore via the official website, you can start training and
 
 - running on GPU
 
-  For running on GPU, please change `device_target` from `Ascend` to `GPU` in configuration file default_config.yaml
-
   ```python
   # run training example
   export CUDA_VISIBLE_DEVICES=0
diff --git a/research/cv/renas/Readme.md b/research/cv/renas/README.md
similarity index 100%
rename from research/cv/renas/Readme.md
rename to research/cv/renas/README.md
diff --git a/research/cv/resnet50_adv_pruning/Readme.md b/research/cv/resnet50_adv_pruning/README.md
similarity index 100%
rename from research/cv/resnet50_adv_pruning/Readme.md
rename to research/cv/resnet50_adv_pruning/README.md
diff --git a/research/cv/snn_mlp/readme.md b/research/cv/snn_mlp/README.md
similarity index 100%
rename from research/cv/snn_mlp/readme.md
rename to research/cv/snn_mlp/README.md
diff --git a/research/cv/squeezenet/README.md b/research/cv/squeezenet/README.md
deleted file mode 100644
index 81fb98d38b946be7988919aba97785b2e5050455..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/README.md
+++ /dev/null
@@ -1,708 +0,0 @@
-# Contents
-
-- [Contents](#contents)
-- [SqueezeNet Description](#squeezenet-description)
-- [Model Architecture](#model-architecture)
-- [Dataset](#dataset)
-- [Features](#features)
-    - [Mixed Precision](#mixed-precision)
-- [Environment Requirements](#environment-requirements)
-- [Quick Start](#quick-start)
-- [Script Description](#script-description)
-    - [Script and Sample Code](#script-and-sample-code)
-    - [Script Parameters](#script-parameters)
-    - [Training Process](#training-process)
-        - [Usage](#usage)
-            - [Running on Ascend](#running-on-ascend)
-            - [Running on GPU](#running-on-gpu)
-        - [Result](#result)
-    - [Evaluation Process](#evaluation-process)
-        - [Usage](#usage-1)
-            - [Running on Ascend](#running-on-ascend-1)
-            - [Running on GPU](#running-on-gpu-1)
-        - [Result](#result-1)
-- [Model Description](#model-description)
-    - [Performance](#performance)
-        - [Evaluation Performance](#evaluation-performance)
-            - [SqueezeNet on CIFAR-10](#squeezenet-on-cifar-10)
-            - [SqueezeNet on ImageNet](#squeezenet-on-imagenet)
-            - [SqueezeNet_Residual on CIFAR-10](#squeezenet_residual-on-cifar-10)
-            - [SqueezeNet_Residual on ImageNet](#squeezenet_residual-on-imagenet)
-        - [Inference Performance](#inference-performance)
-            - [SqueezeNet on CIFAR-10](#squeezenet-on-cifar-10-1)
-            - [SqueezeNet on ImageNet](#squeezenet-on-imagenet-1)
-            - [SqueezeNet_Residual on CIFAR-10](#squeezenet_residual-on-cifar-10-1)
-            - [SqueezeNet_Residual on ImageNet](#squeezenet_residual-on-imagenet-1)
-    - [How to use](#how-to-use)
-        - [Inference](#inference)
-        - [Continue Training on the Pretrained Model](#continue-training-on-the-pretrained-model)
-        - [Transfer Learning](#transfer-learning)
-- [Description of Random Situation](#description-of-random-situation)
-- [ModelZoo Homepage](#modelzoo-homepage)
-
-# [SqueezeNet Description](#contents)
-
-SqueezeNet is a lightweight and efficient CNN model proposed by Han et al., published in ICLR-2017. SqueezeNet has 50x fewer parameters than AlexNet, but the model performance (accuracy) is close to AlexNet.
-
-These are examples of training SqueezeNet/SqueezeNet_Residual with CIFAR-10/ImageNet dataset in MindSpore. SqueezeNet_Residual adds residual operation on the basis of SqueezeNet, which can improve the accuracy of the model without increasing the amount of parameters.
-
-[Paper](https://arxiv.org/abs/1602.07360):  Forrest N. Iandola and Song Han and Matthew W. Moskewicz and Khalid Ashraf and William J. Dally and Kurt Keutzer. "SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size"
-
-# [Model Architecture](#contents)
-
-SqueezeNet is composed of fire modules. A fire module mainly includes two layers of convolution operations: one is the squeeze layer using a **1x1 convolution** kernel; the other is an expand layer using a mixture of **1x1** and **3x3 convolution** kernels.
-
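As an illustration of the block just described (a sketch only; the repository's actual implementation lives in `src/squeezenet.py`, and the channel arguments below are hypothetical), a fire module can be written as:

```py
import mindspore.nn as nn
import mindspore.ops as ops

class Fire(nn.Cell):
    """Squeeze with a 1x1 convolution, then expand with parallel 1x1 and 3x3 convolutions."""
    def __init__(self, in_channels, squeeze_channels, expand_channels):
        super(Fire, self).__init__()
        self.squeeze = nn.Conv2d(in_channels, squeeze_channels, kernel_size=1)
        self.expand1x1 = nn.Conv2d(squeeze_channels, expand_channels, kernel_size=1)
        self.expand3x3 = nn.Conv2d(squeeze_channels, expand_channels, kernel_size=3, pad_mode='same')
        self.relu = nn.ReLU()
        self.concat = ops.Concat(axis=1)

    def construct(self, x):
        x = self.relu(self.squeeze(x))
        # concatenate the two expand branches along the channel axis
        return self.concat((self.relu(self.expand1x1(x)), self.relu(self.expand3x3(x))))
```
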
-# [Dataset](#contents)
-
-Dataset used: [CIFAR-10](<http://www.cs.toronto.edu/~kriz/cifar.html>)
-
-- Dataset size：175M，60,000 32*32 colorful images in 10 classes
-    - Train：146M，50,000 images
-    - Test：29M，10,000 images
-- Data format：binary files
-    - Note：Data will be processed in src/dataset.py
-
-Dataset used: [ImageNet2012](http://www.image-net.org/)
-
-- Dataset size: 125G, 1250k colorful images in 1000 classes
-    - Train: 120G, 1200k images
-    - Test: 5G, 50k images
-- Data format: RGB images.
-    - Note: Data will be processed in src/dataset.py
-
-# [Features](#contents)
-
-## Mixed Precision
-
-The [mixed precision](https://www.mindspore.cn/tutorials/experts/en/master/others/mixed_precision.html) training method accelerates the deep learning neural network training process by using both the single-precision and half-precision data formats, and maintains the network precision achieved by the single-precision training at the same time. Mixed precision training can accelerate the computation process, reduce memory usage, and enable a larger model or batch size to be trained on specific hardware.
-For FP16 operators, if the input data type is FP32, the MindSpore backend will automatically handle it with reduced precision. Users can check the reduced-precision operators by enabling the INFO log and searching for 'reduce precision'.
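
As a minimal sketch of how this is wired up (mirroring the `amp_level="O2"` pattern used in the "Continue Training on the Pretrained Model" section below; the tiny `nn.Dense` network here is only a stand-in for illustration):

```py
import mindspore.nn as nn
from mindspore.train.model import Model

net = nn.Dense(84, 10)   # stand-in network; use squeezenet(num_classes=...) in practice
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)

# 'O2' casts the network to float16, keeps the loss computation in float32,
# and uses a dynamic loss scale
model = Model(net, loss_fn=loss, optimizer=opt,
              amp_level="O2",
              keep_batchnorm_fp32=False)   # only relevant when the network contains BatchNorm
```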
-
-# [Environment Requirements](#contents)
-
-- Hardware（Ascend/GPU）
-    - Prepare hardware environment with an Ascend or GPU processor. SqueezeNet training on GPU does not perform well yet and is still under investigation.
-- Framework
-    - [MindSpore](https://www.mindspore.cn/install/en)
-- For more information, please check the resources below：
-    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/en/master/index.html)
-    - [MindSpore Python API](https://www.mindspore.cn/docs/en/master/index.html)
-
-# [Quick Start](#contents)
-
-After installing MindSpore via the official website, you can start training and evaluation as follows:
-
-- running on Ascend
-
-  ```bash
-  # distributed training
-  Usage: bash scripts/run_distribute_train.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [RANK_TABLE_FILE] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)
-
-  # standalone training
-  Usage: bash scripts/run_standalone_train.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)
-
-  # run evaluation example
-  Usage: bash scripts/run_eval.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [CHECKPOINT_PATH]
-  ```
-
-- running on GPU
-
-  ```bash
-  # distributed training example
-  bash scripts/run_distribute_train_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)
-
-  # standalone training example
-  bash scripts/run_standalone_train_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)
-
-  # run evaluation example
-  bash scripts/run_eval_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [CHECKPOINT_PATH]
-  ```
-
-# [Script Description](#contents)
-
-## [Script and Sample Code](#contents)
-
-```shell
-.
-└── squeezenet
-  ├── README.md
-  ├── scripts
-    ├── run_distribute_train.sh            # launch ascend distributed training(8 pcs)
-    ├── run_standalone_train.sh            # launch ascend standalone training(1 pcs)
-    ├── run_distribute_train_gpu.sh        # launch gpu distributed training(8 pcs)
-    ├── run_standalone_train_gpu.sh        # launch gpu standalone training(1 pcs)
-    ├── run_eval.sh                        # launch ascend evaluation
-    └── run_eval_gpu.sh                    # launch gpu evaluation
-  ├── src
-    ├── config.py                          # parameter configuration
-    ├── dataset.py                         # data preprocessing
-    ├── CrossEntropySmooth.py              # loss definition for ImageNet dataset
-    ├── lr_generator.py                    # generate learning rate for each step
-    └── squeezenet.py                      # squeezenet architecture, including squeezenet and squeezenet_residual
-  ├── train.py                             # train net
-  ├── eval.py                              # eval net
-  └── export.py                            # export checkpoint files into geir/onnx
-```
-
-## [Script Parameters](#contents)
-
-Parameters for both training and evaluation can be set in config.py
-
-- config for SqueezeNet, CIFAR-10 dataset
-
-  ```py
-  "class_num": 10,                  # dataset class num
-  "batch_size": 32,                 # batch size of input tensor
-  "loss_scale": 1024,               # loss scale
-  "momentum": 0.9,                  # momentum
-  "weight_decay": 1e-4,             # weight decay
-  "epoch_size": 120,                # only valid for taining, which is always 1 for inference
-  "pretrain_epoch_size": 0,         # epoch size that model has been trained before loading pretrained checkpoint, actual training epoch size is equal to epoch_size minus pretrain_epoch_size
-  "save_checkpoint": True,          # whether save checkpoint or not
-  "save_checkpoint_epochs": 1,      # the epoch interval between two checkpoints. By default, the last checkpoint will be saved after the last step
-  "keep_checkpoint_max": 10,        # only keep the last keep_checkpoint_max checkpoint
-  "save_checkpoint_path": "./",     # path to save checkpoint
-  "warmup_epochs": 5,               # number of warmup epoch
-  "lr_decay_mode": "poly"           # decay mode for generating learning rate
-  "lr_init": 0,                     # initial learning rate
-  "lr_end": 0,                      # final learning rate
-  "lr_max": 0.01,                   # maximum learning rate
-  ```
-
-- config for SqueezeNet, ImageNet dataset
-
-  ```py
-  "class_num": 1000,                # dataset class num
-  "batch_size": 32,                 # batch size of input tensor
-  "loss_scale": 1024,               # loss scale
-  "momentum": 0.9,                  # momentum
-  "weight_decay": 7e-5,             # weight decay
-  "epoch_size": 200,                # only valid for taining, which is always 1 for inference
-  "pretrain_epoch_size": 0,         # epoch size that model has been trained before loading pretrained checkpoint, actual training epoch size is equal to epoch_size minus pretrain_epoch_size
-  "save_checkpoint": True,          # whether save checkpoint or not
-  "save_checkpoint_epochs": 1,      # the epoch interval between two checkpoints. By default, the last checkpoint will be saved after the last step
-  "keep_checkpoint_max": 10,        # only keep the last keep_checkpoint_max checkpoint
-  "save_checkpoint_path": "./",     # path to save checkpoint
-  "warmup_epochs": 0,               # number of warmup epoch
-  "lr_decay_mode": "poly"           # decay mode for generating learning rate
-  "use_label_smooth": True,         # label smooth
-  "label_smooth_factor": 0.1,       # label smooth factor
-  "lr_init": 0,                     # initial learning rate
-  "lr_end": 0,                      # final learning rate
-  "lr_max": 0.01,                   # maximum learning rate
-  ```
-
-- config for SqueezeNet_Residual, CIFAR-10 dataset
-
-  ```py
-  "class_num": 10,                  # dataset class num
-  "batch_size": 32,                 # batch size of input tensor
-  "loss_scale": 1024,               # loss scale
-  "momentum": 0.9,                  # momentum
-  "weight_decay": 1e-4,             # weight decay
-  "epoch_size": 150,                # only valid for taining, which is always 1 for inference
-  "pretrain_epoch_size": 0,         # epoch size that model has been trained before loading pretrained checkpoint, actual training epoch size is equal to epoch_size minus pretrain_epoch_size
-  "save_checkpoint": True,          # whether save checkpoint or not
-  "save_checkpoint_epochs": 1,      # the epoch interval between two checkpoints. By default, the last checkpoint will be saved after the last step
-  "keep_checkpoint_max": 10,        # only keep the last keep_checkpoint_max checkpoint
-  "save_checkpoint_path": "./",     # path to save checkpoint
-  "warmup_epochs": 5,               # number of warmup epoch
-  "lr_decay_mode": "linear"         # decay mode for generating learning rate
-  "lr_init": 0,                     # initial learning rate
-  "lr_end": 0,                      # final learning rate
-  "lr_max": 0.01,                   # maximum learning rate
-  ```
-
-- config for SqueezeNet_Residual, ImageNet dataset
-
-  ```py
-  "class_num": 1000,                # dataset class num
-  "batch_size": 32,                 # batch size of input tensor
-  "loss_scale": 1024,               # loss scale
-  "momentum": 0.9,                  # momentum
-  "weight_decay": 7e-5,             # weight decay
-  "epoch_size": 300,                # only valid for taining, which is always 1 for inference
-  "pretrain_epoch_size": 0,         # epoch size that model has been trained before loading pretrained checkpoint, actual training epoch size is equal to epoch_size minus pretrain_epoch_size
-  "save_checkpoint": True,          # whether save checkpoint or not
-  "save_checkpoint_epochs": 1,      # the epoch interval between two checkpoints. By default, the last checkpoint will be saved after the last step
-  "keep_checkpoint_max": 10,        # only keep the last keep_checkpoint_max checkpoint
-  "save_checkpoint_path": "./",     # path to save checkpoint
-  "warmup_epochs": 0,               # number of warmup epoch
-  "lr_decay_mode": "cosine"         # decay mode for generating learning rate
-  "use_label_smooth": True,         # label smooth
-  "label_smooth_factor": 0.1,       # label smooth factor
-  "lr_init": 0,                     # initial learning rate
-  "lr_end": 0,                      # final learning rate
-  "lr_max": 0.01,                   # maximum learning rate
-  ```
-
-For more configuration details, please refer to the script `config.py`.
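
The `lr_decay_mode` values above (poly/linear/cosine) are turned into a per-step schedule by `src/lr_generator.py`. As a rough, illustrative sketch only (the warmup shape and the decay power of 2 are assumptions, not the repository's exact code), a "poly" schedule with warmup could look like this:

```py
import numpy as np

def poly_lr(lr_init, lr_end, lr_max, total_epochs, warmup_epochs, steps_per_epoch, power=2.0):
    """Return one learning-rate value per training step (linear warmup, then polynomial decay)."""
    total_steps = total_epochs * steps_per_epoch
    warmup_steps = warmup_epochs * steps_per_epoch
    lr = []
    for step in range(total_steps):
        if step < warmup_steps:
            # linear warmup from lr_init up to lr_max
            cur = lr_init + (lr_max - lr_init) * step / max(warmup_steps, 1)
        else:
            # polynomial decay from lr_max down to lr_end
            frac = (step - warmup_steps) / max(total_steps - warmup_steps, 1)
            cur = (lr_max - lr_end) * (1.0 - frac) ** power + lr_end
        lr.append(cur)
    return np.array(lr, dtype=np.float32)

# e.g. the SqueezeNet CIFAR-10 settings above: 120 epochs, 5 warmup epochs, lr_max=0.01
schedule = poly_lr(0, 0, 0.01, 120, 5, 1562)
```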
-
-## [Training Process](#contents)
-
-### Usage
-
-#### Running on Ascend
-
-  ```bash
-  # distributed training
-  Usage: bash scripts/run_distribute_train.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [RANK_TABLE_FILE] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)
-
-  # standalone training
-  Usage: bash scripts/run_standalone_train.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)
-  ```
-
-For distributed training, a hccl configuration file with JSON format needs to be created in advance.
-
-Please follow the instructions in the link [hccl_tools](https://gitee.com/mindspore/models/tree/master/utils/hccl_tools).
-
-Training results will be stored in the example path, in a folder whose name begins with "train" or "train_parallel". There you can find checkpoint files together with results like the following in the log.
-
-#### Running on GPU
-
-```bash
-# distributed training example
-bash scripts/run_distribute_train_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)
-
-# standalone training example
-bash scripts/run_standalone_train_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)
-```
-
-### Result
-
-- Training SqueezeNet with CIFAR-10 dataset
-
-```shell
-# standalone training result
-epoch: 1 step 1562, loss is 1.7103254795074463
-epoch: 2 step 1562, loss is 2.06101131439209
-epoch: 3 step 1562, loss is 1.5594401359558105
-epoch: 4 step 1562, loss is 1.4127278327941895
-epoch: 5 step 1562, loss is 1.2140142917633057
-...
-```
-
-- Training SqueezeNet with ImageNet dataset
-
-```shell
-# distribute training result(8 pcs)
-epoch: 1 step 5004, loss is 5.716324329376221
-epoch: 2 step 5004, loss is 5.350603103637695
-epoch: 3 step 5004, loss is 4.580031394958496
-epoch: 4 step 5004, loss is 4.784664154052734
-epoch: 5 step 5004, loss is 4.136358261108398
-...
-```
-
-- Training SqueezeNet_Residual with CIFAR-10 dataset
-
-```shell
-# standalone training result
-epoch: 1 step 1562, loss is 2.298271656036377
-epoch: 2 step 1562, loss is 2.2728664875030518
-epoch: 3 step 1562, loss is 1.9493038654327393
-epoch: 4 step 1562, loss is 1.7553865909576416
-epoch: 5 step 1562, loss is 1.3370063304901123
-...
-```
-
-- Training SqueezeNet_Residual with ImageNet dataset
-
-```shell
-# distribute training result(8 pcs)
-epoch: 1 step 5004, loss is 6.802495002746582
-epoch: 2 step 5004, loss is 6.386072158813477
-epoch: 3 step 5004, loss is 5.513605117797852
-epoch: 4 step 5004, loss is 5.312961101531982
-epoch: 5 step 5004, loss is 4.888848304748535
-...
-```
-
-## [Evaluation Process](#contents)
-
-### Usage
-
-#### Running on Ascend
-
-```shell
-# evaluation
-Usage: bash scripts/run_eval.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-```shell
-# evaluation example
-bash scripts/run_eval.sh squeezenet cifar10 0 ~/cifar-10-verify-bin train/squeezenet_cifar10-120_1562.ckpt
-```
-
-Checkpoints are produced during the training process.
-
-#### Running on GPU
-
-```shell
-bash scripts/run_eval_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [CHECKPOINT_PATH]
-```
-
-### Result
-
-Evaluation results will be stored in the example path, in a folder named "eval". There you can find results like the following in the log.
-
-- Evaluating SqueezeNet with CIFAR-10 dataset
-
-```shell
-result: {'top_1_accuracy': 0.8896233974358975, 'top_5_accuracy': 0.9965945512820513}
-```
-
-- Evaluating SqueezeNet with ImageNet dataset
-
-```shell
-result: {'top_1_accuracy': 0.5851472471190781, 'top_5_accuracy': 0.8105393725992317}
-```
-
-- Evaluating SqueezeNet_Residual with CIFAR-10 dataset
-
-```shell
-result: {'top_1_accuracy': 0.9077524038461539, 'top_5_accuracy': 0.9969951923076923}
-```
-
-- Evaluating SqueezeNet_Residual with ImageNet dataset
-
-```shell
-result: {'top_1_accuracy': 0.6094950384122919, 'top_5_accuracy': 0.826324423815621}
-```
-
-# [Model Description](#contents)
-
-## [Performance](#contents)
-
-### Evaluation Performance
-
-#### SqueezeNet on CIFAR-10
-
-| Parameters                 | Contents                                                      |
-| -------------------------- | ----------------------------------------------------------- |
-| Model Version              | SqueezeNet                                                  |
-| Resource                   | Ascend 910; CPU 2.60GHz, 192cores; Memory 755G; OS Euler2.8 |
-| uploaded Date              | 11/06/2020 (month/day/year)                                 |
-| MindSpore Version          | 1.0.1                                                      |
-| Dataset                    | CIFAR-10                                                    |
-| Training Parameters        | epoch=120, steps=195, batch_size=32, lr=0.01                |
-| Optimizer                  | Momentum                                                    |
-| Loss Function              | Softmax Cross Entropy                                       |
-| outputs                    | probability                                                 |
-| Loss                       | 0.0496                                                      |
-| Speed(Ascend)              | 1pc: 16.7 ms/step;  8pcs: 17.0 ms/step                      |
-| Speed(GPU)                 | 1pc: 44.27 ms/step;                                         |
-| Total time(Ascend)         | 1pc: 55.5 mins;  8pcs: 15.0 mins                            |
-| Parameters (M)             | 4.8                                                         |
-| Checkpoint for Fine tuning | 6.4M (.ckpt file)                                           |
-
-#### SqueezeNet on ImageNet
-
-| Parameters                 | Contents                                                      |
-| -------------------------- | ----------------------------------------------------------- |
-| Model Version              | SqueezeNet                                                  |
-| Resource                   | Ascend 910; CPU 2.60GHz, 192cores; Memory 755G; OS Euler2.8                |
-| uploaded Date              | 11/06/2020 (month/day/year)                                 |
-| MindSpore Version          | 1.0.1                                                       |
-| Dataset                    | ImageNet                                                    |
-| Training Parameters        | epoch=200, steps=5004, batch_size=32, lr=0.01               |
-| Optimizer                  | Momentum                                                    |
-| Loss Function              | Softmax Cross Entropy                                       |
-| outputs                    | probability                                                 |
-| Loss                       | 2.9150                                                      |
-| Speed(Ascend)              | 8pcs: 19.9 ms/step                                          |
-| Speed(GPU)                 | 1pcs: 47.59 ms/step                                          |
-| Total time(Ascend)         | 8pcs: 5.2 hours                                             |
-| Parameters (M)             | 4.8                                                         |
-| Checkpoint for Fine tuning | 13.3M (.ckpt file)                                          |
-
-#### SqueezeNet_Residual on CIFAR-10
-
-| Parameters                 | Contents                                                    |
-| -------------------------- | ----------------------------------------------------------- |
-| Model Version              | SqueezeNet_Residual                                         |
-| Resource                   | Ascend 910; CPU 2.60GHz, 192cores; Memory 755G; OS Euler2.8              |
-| uploaded Date              | 11/06/2020 (month/day/year)                                 |
-| MindSpore Version          | 1.0.1                                                       |
-| Dataset                    | CIFAR-10                                                    |
-| Training Parameters        | epoch=150, steps=195, batch_size=32, lr=0.01                |
-| Optimizer                  | Momentum                                                    |
-| Loss Function              | Softmax Cross Entropy                                       |
-| outputs                    | probability                                                 |
-| Loss                       | 0.0641                                                      |
-| Speed(Ascend)              | 1pc: 16.9 ms/step;  8pcs: 17.3 ms/step                      |
-| Speed(GPU)                 | 1pc: 45.23 ms/step;                                         |
-| Total time(Ascend)         | 1pc: 68.6 mins;  8pcs: 20.9 mins                            |
-| Parameters (M)             | 4.8                                                         |
-| Checkpoint for Fine tuning | 6.5M (.ckpt file)                                           |
-
-#### SqueezeNet_Residual on ImageNet
-
-| Parameters                 | Contents                                                      |
-| -------------------------- | ----------------------------------------------------------- |
-| Model Version              | SqueezeNet_Residual                                         |
-| Resource                   | Ascend 910; CPU 2.60GHz, 192cores; Memory 755G; OS Euler2.8           |
-| uploaded Date              | 11/06/2020 (month/day/year)                                 |
-| MindSpore Version          | 1.0.1                                                       |
-| Dataset                    | ImageNet                                                    |
-| Training Parameters        | epoch=300, steps=5004, batch_size=32, lr=0.01               |
-| Optimizer                  | Momentum                                                    |
-| Loss Function              | Softmax Cross Entropy                                       |
-| outputs                    | probability                                                 |
-| Loss                       | 2.9040                                                      |
-| Speed(Ascend)              | 8pcs: 20.2 ms/step                                          |
-| Total time(Ascend)         | 8pcs: 8.0 hours                                             |
-| Parameters (M)             | 4.8                                                         |
-| Checkpoint for Fine tuning | 15.3M (.ckpt file)                                          |
-
-### Inference Performance
-
-#### SqueezeNet on CIFAR-10
-
-| Parameters          | Contents                      |
-| ------------------- | --------------------------- |
-| Model Version       | SqueezeNet                  |
-| Resource            | Ascend 910; OS Euler2.8                     |
-| Uploaded Date       | 11/06/2020 (month/day/year) |
-| MindSpore Version   | 1.0.1                       |
-| Dataset             | CIFAR-10                    |
-| batch_size          | 32                          |
-| outputs             | probability                 |
-| Accuracy            | 1pc: 89.0%;  8pcs: 84.4%    |
-
-#### SqueezeNet on ImageNet
-
-| Parameters          | Contents                      |
-| ------------------- | --------------------------- |
-| Model Version       | SqueezeNet                  |
-| Resource            | Ascend 910; OS Euler2.8                     |
-| Uploaded Date       | 11/06/2020 (month/day/year) |
-| MindSpore Version   | 1.0.1                       |
-| Dataset             | ImageNet                    |
-| batch_size          | 32                          |
-| outputs             | probability                 |
-| Accuracy            | 8pcs: 58.5%(TOP1), 81.1%(TOP5)       |
-
-#### SqueezeNet_Residual on CIFAR-10
-
-| Parameters          | Contents                      |
-| ------------------- | --------------------------- |
-| Model Version       | SqueezeNet_Residual         |
-| Resource            | Ascend 910; OS Euler2.8                     |
-| Uploaded Date       | 11/06/2020 (month/day/year) |
-| MindSpore Version   | 1.0.1                       |
-| Dataset             | CIFAR-10                    |
-| batch_size          | 32                          |
-| outputs             | probability                 |
-| Accuracy            | 1pc: 90.8%;  8pcs: 87.4%    |
-
-#### SqueezeNet_Residual on ImageNet
-
-| Parameters          | Contents                      |
-| ------------------- | --------------------------- |
-| Model Version       | SqueezeNet_Residual         |
-| Resource            | Ascend 910; OS Euler2.8                     |
-| Uploaded Date       | 11/06/2020 (month/day/year) |
-| MindSpore Version   | 1.0.1                       |
-| Dataset             | ImageNet                    |
-| batch_size          | 32                          |
-| outputs             | probability                 |
-| Accuracy            | 8pcs: 60.9%(TOP1), 82.6%(TOP5)       |
-
-## [How to use](#contents)
-
-### Inference
-
-If you need to use the trained model to perform inference on multiple hardware platforms, such as GPU, Ascend 910 or Ascend 310, you can refer to this [Link](https://www.mindspore.cn/tutorials/experts/en/master/infer/inference.html). Below is a simple example of the required steps:
-
-- Running on Ascend
-
-  ```py
-  # Set context
-  device_id = int(os.getenv('DEVICE_ID'))
-  context.set_context(mode=context.GRAPH_MODE,
-                      device_target='Ascend',
-                      device_id=device_id)
-
-  # Load unseen dataset for inference
-  dataset = create_dataset(dataset_path=args_opt.dataset_path,
-                           do_train=False,
-                           batch_size=config.batch_size,
-                           target='Ascend')
-
-  # Define model
-  net = squeezenet(num_classes=config.class_num)
-  loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-  model = Model(net,
-                loss_fn=loss,
-                metrics={'top_1_accuracy', 'top_5_accuracy'})
-
-  # Load pre-trained model
-  param_dict = load_checkpoint(args_opt.checkpoint_path)
-  load_param_into_net(net, param_dict)
-  net.set_train(False)
-
-  # Make predictions on the unseen dataset
-  acc = model.eval(dataset)
-  print("accuracy: ", acc)
-  ```
-
-- Running on GPU:
-
-  ```py
-  # Set context
-  device_id = int(os.getenv('DEVICE_ID'))
-  context.set_context(mode=context.GRAPH_MODE,
-                      device_target='GPU',
-                      device_id=device_id)
-
-  # Load unseen dataset for inference
-  dataset = create_dataset(dataset_path=args_opt.dataset_path,
-                           do_train=False,
-                           batch_size=config.batch_size,
-                           target='GPU')
-
-  # Define model
-  net = squeezenet(num_classes=config.class_num)
-  loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-  model = Model(net,
-                loss_fn=loss,
-                metrics={'top_1_accuracy', 'top_5_accuracy'})
-
-  # Load pre-trained model
-  param_dict = load_checkpoint(args_opt.checkpoint_path)
-  load_param_into_net(net, param_dict)
-  net.set_train(False)
-
-  # Make predictions on the unseen dataset
-  acc = model.eval(dataset)
-  print("accuracy: ", acc)
-  ```
-
-### Continue Training on the Pretrained Model
-
-- running on Ascend
-
-  ```py
-  # Load dataset
-  dataset = create_dataset(dataset_path=args_opt.dataset_path,
-                           do_train=True,
-                           repeat_num=1,
-                           batch_size=config.batch_size,
-                           target='Ascend')
-  step_size = dataset.get_dataset_size()
-
-  # define net
-  net = squeezenet(num_classes=config.class_num)
-
-  # load checkpoint
-  if args_opt.pre_trained:
-      param_dict = load_checkpoint(args_opt.pre_trained)
-      load_param_into_net(net, param_dict)
-
-  # init lr
-  lr = get_lr(lr_init=config.lr_init,
-              lr_end=config.lr_end,
-              lr_max=config.lr_max,
-              total_epochs=config.epoch_size,
-              warmup_epochs=config.warmup_epochs,
-              pretrain_epochs=config.pretrain_epoch_size,
-              steps_per_epoch=step_size,
-              lr_decay_mode=config.lr_decay_mode)
-  lr = Tensor(lr)
-  loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-  loss_scale = FixedLossScaleManager(config.loss_scale,
-                                     drop_overflow_update=False)
-  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
-                 lr,
-                 config.momentum,
-                 config.weight_decay,
-                 config.loss_scale,
-                 use_nesterov=True)
-  model = Model(net,
-                loss_fn=loss,
-                optimizer=opt,
-                loss_scale_manager=loss_scale,
-                metrics={'acc'},
-                amp_level="O2",
-                keep_batchnorm_fp32=False)
-
-  # Set callbacks
-  config_ck = CheckpointConfig(
-      save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
-      keep_checkpoint_max=config.keep_checkpoint_max)
-  time_cb = TimeMonitor(data_size=step_size)
-  ckpt_cb = ModelCheckpoint(prefix=args_opt.net + '_' + args_opt.dataset,
-                            directory=ckpt_save_dir,
-                            config=config_ck)
-  loss_cb = LossMonitor()
-
-  # Start training
-  model.train(config.epoch_size - config.pretrain_epoch_size, dataset,
-              callbacks=[time_cb, ckpt_cb, loss_cb])
-  print("train success")
-  ```
-
-- running on GPU
-
-  ```py
-  # Load dataset
-  dataset = create_dataset(dataset_path=args_opt.dataset_path,
-                           do_train=True,
-                           repeat_num=1,
-                           batch_size=config.batch_size,
-                           target='GPU')
-  step_size = dataset.get_dataset_size()
-
-  # define net
-  net = squeezenet(num_classes=config.class_num)
-
-  # load checkpoint
-  if args_opt.pre_trained:
-      param_dict = load_checkpoint(args_opt.pre_trained)
-      load_param_into_net(net, param_dict)
-
-  # init lr
-  lr = get_lr(lr_init=config.lr_init,
-              lr_end=config.lr_end,
-              lr_max=config.lr_max,
-              total_epochs=config.epoch_size,
-              warmup_epochs=config.warmup_epochs,
-              pretrain_epochs=config.pretrain_epoch_size,
-              steps_per_epoch=step_size,
-              lr_decay_mode=config.lr_decay_mode)
-  lr = Tensor(lr)
-  loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-  opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
-                 lr,
-                 config.momentum,
-                 config.weight_decay,
-                 use_nesterov=True)
-  model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
-
-  # Set callbacks
-  config_ck = CheckpointConfig(
-      save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
-      keep_checkpoint_max=config.keep_checkpoint_max)
-  time_cb = TimeMonitor(data_size=step_size)
-  ckpt_cb = ModelCheckpoint(prefix=args_opt.net + '_' + args_opt.dataset,
-                            directory=ckpt_save_dir,
-                            config=config_ck)
-  loss_cb = LossMonitor()
-
-  # Start training
-  model.train(config.epoch_size - config.pretrain_epoch_size, dataset,
-              callbacks=[time_cb, ckpt_cb, loss_cb])
-  print("train success")
-  ```
-
-### Transfer Learning
-
-To be added.
-
-# [Description of Random Situation](#contents)
-
-In dataset.py, we set the seed inside the "create_dataset" function. We also use a random seed in train.py.
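
For reference, the seeding mirrors what `eval.py` does:

```py
from mindspore.common import set_seed

set_seed(1)   # fix the global seed so dataset shuffling and weight initialization are reproducible
```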
-
-# [ModelZoo Homepage](#contents)
-
- Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/research/cv/squeezenet/eval.py b/research/cv/squeezenet/eval.py
deleted file mode 100644
index a1eda27f3e1151b604fb17ede7aa05556db6985f..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/eval.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""eval squeezenet."""
-import os
-import argparse
-from mindspore import context
-from mindspore.common import set_seed
-from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
-from mindspore.train.model import Model
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from src.CrossEntropySmooth import CrossEntropySmooth
-
-parser = argparse.ArgumentParser(description='Image classification')
-parser.add_argument('--net', type=str, default='squeezenet', choices=['squeezenet', 'squeezenet_residual'],
-                    help='Model.')
-parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'imagenet'], help='Dataset.')
-parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
-parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
-parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
-args_opt = parser.parse_args()
-
-set_seed(1)
-
-if args_opt.net == "squeezenet":
-    from src.squeezenet import SqueezeNet as squeezenet
-    if args_opt.dataset == "cifar10":
-        from src.config import config1 as config
-        from src.dataset import create_dataset_cifar as create_dataset
-    else:
-        from src.config import config2 as config
-        from src.dataset import create_dataset_imagenet as create_dataset
-else:
-    from src.squeezenet import SqueezeNet_Residual as squeezenet
-    if args_opt.dataset == "cifar10":
-        from src.config import config3 as config
-        from src.dataset import create_dataset_cifar as create_dataset
-    else:
-        from src.config import config4 as config
-        from src.dataset import create_dataset_imagenet as create_dataset
-
-if __name__ == '__main__':
-    target = args_opt.device_target
-
-    # init context
-    device_id = int(os.getenv('DEVICE_ID'))
-    context.set_context(mode=context.GRAPH_MODE,
-                        device_target=target,
-                        device_id=device_id)
-
-    # create dataset
-    dataset = create_dataset(dataset_path=args_opt.dataset_path,
-                             do_train=False,
-                             batch_size=config.batch_size,
-                             target=target)
-    step_size = dataset.get_dataset_size()
-
-    # define net
-    net = squeezenet(num_classes=config.class_num)
-
-    # load checkpoint
-    param_dict = load_checkpoint(args_opt.checkpoint_path)
-    load_param_into_net(net, param_dict)
-    net.set_train(False)
-
-    # define loss
-    if args_opt.dataset == "imagenet":
-        if not config.use_label_smooth:
-            config.label_smooth_factor = 0.0
-        loss = CrossEntropySmooth(sparse=True,
-                                  reduction='mean',
-                                  smooth_factor=config.label_smooth_factor,
-                                  num_classes=config.class_num)
-    else:
-        loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-
-    # define model
-    model = Model(net,
-                  loss_fn=loss,
-                  metrics={'top_1_accuracy', 'top_5_accuracy'})
-
-    # eval model
-    res = model.eval(dataset)
-    print("result:", res, "ckpt=", args_opt.checkpoint_path)
diff --git a/research/cv/squeezenet/export.py b/research/cv/squeezenet/export.py
deleted file mode 100644
index f93d0711856d796a17a1eb30e1fceac37224cef8..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/export.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-##############export checkpoint file into onnx and mindir models#################
-python export.py --net squeezenet --dataset cifar10 --checkpoint_path squeezenet_cifar10-120_1562.ckpt
-"""
-
-import argparse
-import numpy as np
-from mindspore import Tensor
-from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(description='Image classification')
-    parser.add_argument('--net', type=str, default='squeezenet', choices=['squeezenet', 'squeezenet_residual'],
-                        help='Model.')
-    parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'imagenet'], help='Dataset.')
-    parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
-    args_opt = parser.parse_args()
-
-    if args_opt.net == "squeezenet":
-        from src.squeezenet import SqueezeNet as squeezenet
-    else:
-        from src.squeezenet import SqueezeNet_Residual as squeezenet
-    if args_opt.dataset == "cifar10":
-        num_classes = 10
-    else:
-        num_classes = 1000
-
-    onnx_filename = args_opt.net + '_' + args_opt.dataset
-    air_filename = args_opt.net + '_' + args_opt.dataset
-
-    net = squeezenet(num_classes=num_classes)
-
-    assert args_opt.checkpoint_path is not None, "checkpoint_path is None."
-
-    param_dict = load_checkpoint(args_opt.checkpoint_path)
-    load_param_into_net(net, param_dict)
-
-    input_arr = Tensor(np.zeros([1, 3, 227, 227], np.float32))
-    export(net, input_arr, file_name=onnx_filename, file_format="ONNX")
-    export(net, input_arr, file_name=air_filename, file_format="MINDIR")
diff --git a/research/cv/squeezenet/requirements.txt b/research/cv/squeezenet/requirements.txt
deleted file mode 100644
index 34fbd13367133f98ccadd17dfcd80df72e3cd0ba..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-easydict
-numpy
diff --git a/research/cv/squeezenet/scripts/run_distribute_train.sh b/research/cv/squeezenet/scripts/run_distribute_train.sh
deleted file mode 100644
index ee8c13651f12f75168aa86be6dfd9386fc1903cf..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/scripts/run_distribute_train.sh
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 4 ] && [ $# != 5 ]
-then 
-    echo "Usage: sh scripts/run_distribute_train.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [RANK_TABLE_FILE] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)"
-    exit 1
-fi
-
-if [ $1 != "squeezenet" ] && [ $1 != "squeezenet_residual" ]
-then 
-    echo "error: the selected net is neither squeezenet nor squeezenet_residual"
-    exit 1
-fi
-
-if [ $2 != "cifar10" ] && [ $2 != "imagenet" ]
-then 
-    echo "error: the selected dataset is neither cifar10 nor imagenet"
-    exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $3)
-PATH2=$(get_real_path $4)
-
-if [ $# == 5 ]
-then 
-    PATH3=$(get_real_path $5)
-fi
-
-if [ ! -f $PATH1 ]
-then 
-    echo "error: RANK_TABLE_FILE=$PATH1 is not a file"
-    exit 1
-fi 
-
-if [ ! -d $PATH2 ]
-then 
-    echo "error: DATASET_PATH=$PATH2 is not a directory"
-    exit 1
-fi 
-
-if [ $# == 5 ] && [ ! -f $PATH3 ]
-then
-    echo "error: PRETRAINED_CKPT_PATH=$PATH3 is not a file"
-    exit 1
-fi
-
-ulimit -u unlimited
-export DEVICE_NUM=8
-export RANK_SIZE=8
-export RANK_TABLE_FILE=$PATH1
-
-export SERVER_ID=0
-rank_start=$((DEVICE_NUM * SERVER_ID))
-
-for((i=0; i<${DEVICE_NUM}; i++))
-do
-    export DEVICE_ID=${i}
-    export RANK_ID=$((rank_start + i))
-    rm -rf ./train_parallel$i
-    mkdir ./train_parallel$i
-    cp ./train.py ./train_parallel$i
-    cp -r ./src ./train_parallel$i
-    cd ./train_parallel$i || exit
-    echo "start training for rank $RANK_ID, device $DEVICE_ID"
-    env > env.log
-    if [ $# == 4 ]
-    then
-        python train.py --net=$1 --dataset=$2 --run_distribute=True --device_num=$DEVICE_NUM --dataset_path=$PATH2 &> log &
-    fi
-    
-    if [ $# == 5 ]
-    then
-        python train.py --net=$1 --dataset=$2 --run_distribute=True --device_num=$DEVICE_NUM --dataset_path=$PATH2 --pre_trained=$PATH3 &> log &
-    fi
-
-    cd ..
-done
diff --git a/research/cv/squeezenet/scripts/run_distribute_train_gpu.sh b/research/cv/squeezenet/scripts/run_distribute_train_gpu.sh
deleted file mode 100644
index 2df3f9719ebdadc8208f195a3ccf8711c48ad85c..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/scripts/run_distribute_train_gpu.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 3 ] && [ $# != 4 ]
-then 
-    echo "Usage: sh scripts/run_distribute_train_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)"
-    exit 1
-fi
-
-if [ $1 != "squeezenet" ] && [ $1 != "squeezenet_residual" ]
-then 
-    echo "error: the selected net is neither squeezenet nor squeezenet_residual"
-    exit 1
-fi
-
-if [ $2 != "cifar10" ] && [ $2 != "imagenet" ]
-then 
-    echo "error: the selected dataset is neither cifar10 nor imagenet"
-    exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $3)
-
-if [ $# == 4 ]
-then 
-    PATH2=$(get_real_path $4)
-fi
-
-
-if [ ! -d $PATH1 ]
-then 
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-    exit 1
-fi 
-
-if [ $# == 4 ] && [ ! -f $PATH2 ]
-then
-    echo "error: PRETRAINED_CKPT_PATH=$PATH2 is not a file"
-    exit 1
-fi
-
-ulimit -u unlimited
-export DEVICE_NUM=8
-export RANK_SIZE=8
-
-rm -rf ./train_parallel
-mkdir ./train_parallel
-cp ./train.py ./train_parallel
-cp -r ./src ./train_parallel
-cd ./train_parallel || exit
-
-if [ $# == 3 ]
-then
-    mpirun --allow-run-as-root -n $RANK_SIZE --output-filename log_output --merge-stderr-to-stdout \
-    python train.py --net=$1 --dataset=$2 --run_distribute=True \
-    --device_num=$DEVICE_NUM --device_target="GPU" --dataset_path=$PATH1 &> log &
-fi
-    
-if [ $# == 4 ]
-then
-    mpirun --allow-run-as-root -n $RANK_SIZE --output-filename log_output --merge-stderr-to-stdout \
-    python train.py --net=$1 --dataset=$2 --run_distribute=True \
-    --device_num=$DEVICE_NUM --device_target="GPU" --dataset_path=$PATH1 --pre_trained=$PATH2 &> log &
-fi
diff --git a/research/cv/squeezenet/scripts/run_eval.sh b/research/cv/squeezenet/scripts/run_eval.sh
deleted file mode 100644
index 8ac34e25ce2a25f05131e4e6848d8e9867302009..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/scripts/run_eval.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 5 ]
-then 
-    echo "Usage: sh scripts/run_eval.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [CHECKPOINT_PATH]"
-exit 1
-fi
-
-if [ $1 != "squeezenet" ] && [ $1 != "squeezenet_residual" ]
-then 
-    echo "error: the selected net is neither squeezenet nor squeezenet_residual"
-exit 1
-fi
-
-if [ $2 != "cifar10" ] && [ $2 != "imagenet" ]
-then 
-    echo "error: the selected dataset is neither cifar10 nor imagenet"
-exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $4)
-PATH2=$(get_real_path $5)
-
-
-if [ ! -d $PATH1 ]
-then 
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-exit 1
-fi 
-
-if [ ! -f $PATH2 ]
-then 
-    echo "error: CHECKPOINT_PATH=$PATH2 is not a file"
-exit 1
-fi 
-
-ulimit -u unlimited
-export DEVICE_NUM=1
-export DEVICE_ID=$3
-export RANK_SIZE=$DEVICE_NUM
-export RANK_ID=0
-
-if [ -d "eval" ];
-then
-    rm -rf ./eval
-fi
-mkdir ./eval
-cp ./eval.py ./eval
-cp -r ./src ./eval
-cd ./eval || exit
-env > env.log
-echo "start evaluation for device $DEVICE_ID"
-python eval.py --net=$1 --dataset=$2 --dataset_path=$PATH1 --checkpoint_path=$PATH2 &> log &
-cd ..
diff --git a/research/cv/squeezenet/scripts/run_eval_gpu.sh b/research/cv/squeezenet/scripts/run_eval_gpu.sh
deleted file mode 100644
index f5bfaa4ade055a800b03ac1dd45f152837cb6f91..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/scripts/run_eval_gpu.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 5 ]
-then 
-    echo "Usage: sh scripts/run_eval_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [CHECKPOINT_PATH]"
-exit 1
-fi
-
-if [ $1 != "squeezenet" ] && [ $1 != "squeezenet_residual" ]
-then 
-    echo "error: the selected net is neither squeezenet nor squeezenet_residual"
-exit 1
-fi
-
-if [ $2 != "cifar10" ] && [ $2 != "imagenet" ]
-then 
-    echo "error: the selected dataset is neither cifar10 nor imagenet"
-exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $4)
-PATH2=$(get_real_path $5)
-
-
-if [ ! -d $PATH1 ]
-then 
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-exit 1
-fi 
-
-if [ ! -f $PATH2 ]
-then 
-    echo "error: CHECKPOINT_PATH=$PATH2 is not a file"
-exit 1
-fi 
-
-ulimit -u unlimited
-export DEVICE_NUM=1
-export DEVICE_ID=$3
-export RANK_SIZE=$DEVICE_NUM
-export RANK_ID=0
-
-if [ -d "eval" ];
-then
-    rm -rf ./eval
-fi
-mkdir ./eval
-cp ./eval.py ./eval
-cp -r ./src ./eval
-cd ./eval || exit
-env > env.log
-echo "start evaluation for device $DEVICE_ID"
-python eval.py --net=$1 --dataset=$2 --dataset_path=$PATH1 --checkpoint_path=$PATH2 --device_target="GPU" &> log &
-cd ..
diff --git a/research/cv/squeezenet/scripts/run_standalone_train.sh b/research/cv/squeezenet/scripts/run_standalone_train.sh
deleted file mode 100644
index cd4c637075fbf3078c48beee3fdd18b8e23fd939..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/scripts/run_standalone_train.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 4 ] && [ $# != 5 ]
-then 
-    echo "Usage: sh scripts/run_standalone_train.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)"
-exit 1
-fi
-
-if [ $1 != "squeezenet" ] && [ $1 != "squeezenet_residual" ]
-then 
-    echo "error: the selected net is neither squeezenet nor squeezenet_residual"
-exit 1
-fi
-
-if [ $2 != "cifar10" ] && [ $2 != "imagenet" ]
-then 
-    echo "error: the selected dataset is neither cifar10 nor imagenet"
-exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $4)
-
-if [ $# == 5 ]
-then
-    PATH2=$(get_real_path $5)
-fi
-
-if [ ! -d $PATH1 ]
-then 
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-exit 1
-fi
-
-if [ $# == 5 ] && [ ! -f $PATH2 ]
-then
-    echo "error: PRETRAINED_CKPT_PATH=$PATH2 is not a file"
-exit 1
-fi
-
-ulimit -u unlimited
-export DEVICE_NUM=1
-export DEVICE_ID=$3
-export RANK_ID=0
-export RANK_SIZE=1
-
-if [ -d "train" ];
-then
-    rm -rf ./train
-fi
-mkdir ./train
-cp ./train.py ./train
-cp -r ./src ./train
-cd ./train || exit
-echo "start training for device $DEVICE_ID"
-env > env.log
-if [ $# == 4 ]
-then
-    python train.py --net=$1 --dataset=$2 --dataset_path=$PATH1 &> log &
-fi
-
-if [ $# == 5 ]
-then
-    python train.py --net=$1 --dataset=$2 --dataset_path=$PATH1 --pre_trained=$PATH2 &> log &
-fi
-cd ..
diff --git a/research/cv/squeezenet/scripts/run_standalone_train_gpu.sh b/research/cv/squeezenet/scripts/run_standalone_train_gpu.sh
deleted file mode 100644
index 8a80526e0486be005999b5efa109c19819263286..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/scripts/run_standalone_train_gpu.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-
-if [ $# != 4 ] && [ $# != 5 ]
-then 
-    echo "Usage: sh scripts/run_standalone_train_gpu.sh [squeezenet|squeezenet_residual] [cifar10|imagenet] [DEVICE_ID] [DATASET_PATH] [PRETRAINED_CKPT_PATH](optional)"
-exit 1
-fi
-
-if [ $1 != "squeezenet" ] && [ $1 != "squeezenet_residual" ]
-then 
-    echo "error: the selected net is neither squeezenet nor squeezenet_residual"
-exit 1
-fi
-
-if [ $2 != "cifar10" ] && [ $2 != "imagenet" ]
-then 
-    echo "error: the selected dataset is neither cifar10 nor imagenet"
-exit 1
-fi
-
-get_real_path(){
-  if [ "${1:0:1}" == "/" ]; then
-    echo "$1"
-  else
-    echo "$(realpath -m $PWD/$1)"
-  fi
-}
-
-PATH1=$(get_real_path $4)
-
-if [ $# == 5 ]
-then
-    PATH2=$(get_real_path $5)
-fi
-
-if [ ! -d $PATH1 ]
-then 
-    echo "error: DATASET_PATH=$PATH1 is not a directory"
-exit 1
-fi
-
-if [ $# == 5 ] && [ ! -f $PATH2 ]
-then
-    echo "error: PRETRAINED_CKPT_PATH=$PATH2 is not a file"
-exit 1
-fi
-
-ulimit -u unlimited
-export DEVICE_NUM=1
-export DEVICE_ID=$3
-export RANK_ID=0
-export RANK_SIZE=1
-
-if [ -d "train" ];
-then
-    rm -rf ./train
-fi
-mkdir ./train
-cp ./train.py ./train
-cp -r ./src ./train
-cd ./train || exit
-echo "start training for device $DEVICE_ID"
-env > env.log
-if [ $# == 4 ]
-then
-    python train.py --net=$1 --dataset=$2 --device_target="GPU" --dataset_path=$PATH1 &> log &
-fi
-
-if [ $# == 5 ]
-then
-    python train.py --net=$1 --dataset=$2 --device_target="GPU" --dataset_path=$PATH1 --pre_trained=$PATH2 &> log &
-fi
-cd ..
diff --git a/research/cv/squeezenet/src/CrossEntropySmooth.py b/research/cv/squeezenet/src/CrossEntropySmooth.py
deleted file mode 100644
index 31ce3ad0aad4c535be7a50abcbe7c74cb48386c3..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/src/CrossEntropySmooth.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""define loss function for network"""
-import mindspore.nn as nn
-from mindspore import Tensor
-from mindspore.common import dtype as mstype
-from mindspore.nn.cell import Cell
-from mindspore.ops import functional as F
-from mindspore.ops import operations as P
-
-class MyLoss(Cell):
-    """
-    Base class for other losses.
-    """
-    def __init__(self, reduction='mean'):
-        super(MyLoss, self).__init__()
-        if reduction is None:
-            reduction = 'none'
-
-        if reduction not in ('mean', 'sum', 'none'):
-            raise ValueError(f"reduction method for {reduction.lower()} is not supported")
-
-        self.average = True
-        self.reduce = True
-        if reduction == 'sum':
-            self.average = False
-        if reduction == 'none':
-            self.reduce = False
-
-        self.reduce_mean = P.ReduceMean()
-        self.reduce_sum = P.ReduceSum()
-        self.mul = P.Mul()
-        self.cast = P.Cast()
-
-    def get_axis(self, x):
-        shape = F.shape(x)
-        length = F.tuple_len(shape)
-        perm = F.make_range(0, length)
-        return perm
-
-    def get_loss(self, x, weights=1.0):
-        """
-        Computes the weighted loss
-        Args:
-            weights: Optional `Tensor` whose rank is either 0, or the same rank as inputs, and must be broadcastable to
-                inputs (i.e., all dimensions must be either `1`, or the same as the corresponding inputs dimension).
-        """
-        input_dtype = x.dtype
-        x = self.cast(x, mstype.float32)
-        weights = self.cast(weights, mstype.float32)
-        x = self.mul(weights, x)
-        if self.reduce and self.average:
-            x = self.reduce_mean(x, self.get_axis(x))
-        if self.reduce and not self.average:
-            x = self.reduce_sum(x, self.get_axis(x))
-        x = self.cast(x, input_dtype)
-        return x
-
-    def construct(self, base, target):
-        raise NotImplementedError
-
-class CrossEntropySmooth(MyLoss):
-    """CrossEntropy"""
-    def __init__(self, sparse=True, reduction='mean', smooth_factor=0., num_classes=1000):
-        super(CrossEntropySmooth, self).__init__()
-        self.onehot = P.OneHot()
-        self.sparse = sparse
-        self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
-        self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)
-        self.ce = nn.SoftmaxCrossEntropyWithLogits(reduction=reduction)
-
-    def construct(self, logit, label):
-        if self.sparse:
-            label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
-        loss = self.ce(logit, label)
-        return loss
diff --git a/research/cv/squeezenet/src/config.py b/research/cv/squeezenet/src/config.py
deleted file mode 100644
index 40d119c5bde7d5371a97290a563a169fe9051af1..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/src/config.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-network config setting, will be used in train.py and eval.py
-"""
-from easydict import EasyDict as ed
-
-# config for squeezenet, cifar10
-config1 = ed({
-    "class_num": 10,
-    "batch_size": 32,
-    "loss_scale": 1024,
-    "momentum": 0.9,
-    "weight_decay": 1e-4,
-    "epoch_size": 120,
-    "pretrain_epoch_size": 0,
-    "save_checkpoint": True,
-    "save_checkpoint_epochs": 1,
-    "keep_checkpoint_max": 10,
-    "save_checkpoint_path": "./",
-    "warmup_epochs": 5,
-    "lr_decay_mode": "poly",
-    "lr_init": 0,
-    "lr_end": 0,
-    "lr_max": 0.01
-})
-
-# config for squeezenet, imagenet
-config2 = ed({
-    "class_num": 1000,
-    "batch_size": 32,
-    "loss_scale": 1024,
-    "momentum": 0.9,
-    "weight_decay": 7e-5,
-    "epoch_size": 200,
-    "pretrain_epoch_size": 0,
-    "save_checkpoint": True,
-    "save_checkpoint_epochs": 1,
-    "keep_checkpoint_max": 10,
-    "save_checkpoint_path": "./",
-    "warmup_epochs": 0,
-    "lr_decay_mode": "poly",
-    "use_label_smooth": True,
-    "label_smooth_factor": 0.1,
-    "lr_init": 0,
-    "lr_end": 0,
-    "lr_max": 0.01
-})
-
-# config for squeezenet_residual, cifar10
-config3 = ed({
-    "class_num": 10,
-    "batch_size": 32,
-    "loss_scale": 1024,
-    "momentum": 0.9,
-    "weight_decay": 1e-4,
-    "epoch_size": 150,
-    "pretrain_epoch_size": 0,
-    "save_checkpoint": True,
-    "save_checkpoint_epochs": 1,
-    "keep_checkpoint_max": 10,
-    "save_checkpoint_path": "./",
-    "warmup_epochs": 5,
-    "lr_decay_mode": "linear",
-    "lr_init": 0,
-    "lr_end": 0,
-    "lr_max": 0.01
-})
-
-# config for squeezenet_residual, imagenet
-config4 = ed({
-    "class_num": 1000,
-    "batch_size": 32,
-    "loss_scale": 1024,
-    "momentum": 0.9,
-    "weight_decay": 7e-5,
-    "epoch_size": 300,
-    "pretrain_epoch_size": 0,
-    "save_checkpoint": True,
-    "save_checkpoint_epochs": 1,
-    "keep_checkpoint_max": 10,
-    "save_checkpoint_path": "./",
-    "warmup_epochs": 0,
-    "lr_decay_mode": "cosine",
-    "use_label_smooth": True,
-    "label_smooth_factor": 0.1,
-    "lr_init": 0,
-    "lr_end": 0,
-    "lr_max": 0.01
-})
diff --git a/research/cv/squeezenet/src/dataset.py b/research/cv/squeezenet/src/dataset.py
deleted file mode 100644
index e778661b4519b989aa1e83f0b92d5e9893b205b0..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/src/dataset.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Copyright 2020-2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-create train or eval dataset.
-"""
-import os
-import mindspore.common.dtype as mstype
-import mindspore.dataset as ds
-import mindspore.dataset.vision as C
-import mindspore.dataset.transforms as C2
-from mindspore.communication.management import init, get_rank, get_group_size
-
-
-def create_dataset_cifar(dataset_path,
-                         do_train,
-                         repeat_num=1,
-                         batch_size=32,
-                         target="Ascend"):
-    """
-    create a train or evaluate cifar10 dataset
-    Args:
-        dataset_path(string): the path of dataset.
-        do_train(bool): whether dataset is used for train or eval.
-        repeat_num(int): the repeat times of dataset. Default: 1
-        batch_size(int): the batch size of dataset. Default: 32
-        target(str): the device target. Default: Ascend
-
-    Returns:
-        dataset
-    """
-    if target == "Ascend":
-        device_num, rank_id = _get_rank_info()
-    else:
-        init()
-        rank_id = get_rank()
-        device_num = get_group_size()
-
-    if device_num == 1:
-        data_set = ds.Cifar10Dataset(dataset_path,
-                                     num_parallel_workers=8,
-                                     shuffle=True)
-    else:
-        data_set = ds.Cifar10Dataset(dataset_path,
-                                     num_parallel_workers=8,
-                                     shuffle=True,
-                                     num_shards=device_num,
-                                     shard_id=rank_id)
-
-    # define map operations
-    if do_train:
-        trans = [
-            C.RandomCrop((32, 32), (4, 4, 4, 4)),
-            C.RandomHorizontalFlip(prob=0.5),
-            C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
-            C.Resize((227, 227)),
-            C.Rescale(1.0 / 255.0, 0.0),
-            C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-            C.CutOut(112),
-            C.HWC2CHW()
-        ]
-    else:
-        trans = [
-            C.Resize((227, 227)),
-            C.Rescale(1.0 / 255.0, 0.0),
-            C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-            C.HWC2CHW()
-        ]
-
-    type_cast_op = C2.TypeCast(mstype.int32)
-
-    data_set = data_set.map(operations=type_cast_op,
-                            input_columns="label",
-                            num_parallel_workers=8)
-    data_set = data_set.map(operations=trans,
-                            input_columns="image",
-                            num_parallel_workers=8)
-
-    # apply batch operations
-    data_set = data_set.batch(batch_size, drop_remainder=True)
-
-    # apply dataset repeat operation
-    data_set = data_set.repeat(repeat_num)
-
-    return data_set
-
-
-def create_dataset_imagenet(dataset_path,
-                            do_train,
-                            repeat_num=1,
-                            batch_size=32,
-                            target="Ascend"):
-    """
-    create a train or eval imagenet dataset
-
-    Args:
-        dataset_path(string): the path of dataset.
-        do_train(bool): whether dataset is used for train or eval.
-        repeat_num(int): the repeat times of dataset. Default: 1
-        batch_size(int): the batch size of dataset. Default: 32
-        target(str): the device target. Default: Ascend
-
-    Returns:
-        dataset
-    """
-    if target == "Ascend":
-        device_num, rank_id = _get_rank_info()
-    else:
-        init()
-        rank_id = get_rank()
-        device_num = get_group_size()
-
-    if device_num == 1:
-        data_set = ds.ImageFolderDataset(dataset_path,
-                                         num_parallel_workers=8,
-                                         shuffle=True)
-    else:
-        data_set = ds.ImageFolderDataset(dataset_path,
-                                         num_parallel_workers=8,
-                                         shuffle=True,
-                                         num_shards=device_num,
-                                         shard_id=rank_id)
-
-    image_size = 227
-    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
-    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
-
-    # define map operations
-    if do_train:
-        trans = [
-            C.RandomCropDecodeResize(image_size,
-                                     scale=(0.08, 1.0),
-                                     ratio=(0.75, 1.333)),
-            C.RandomHorizontalFlip(prob=0.5),
-            C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
-            C.Normalize(mean=mean, std=std),
-            C.CutOut(112),
-            C.HWC2CHW()
-        ]
-    else:
-        trans = [
-            C.Decode(),
-            C.Resize((256, 256)),
-            C.CenterCrop(image_size),
-            C.Normalize(mean=mean, std=std),
-            C.HWC2CHW()
-        ]
-
-    type_cast_op = C2.TypeCast(mstype.int32)
-
-    data_set = data_set.map(operations=type_cast_op,
-                            input_columns="label",
-                            num_parallel_workers=8)
-    data_set = data_set.map(operations=trans,
-                            input_columns="image",
-                            num_parallel_workers=8)
-
-    # apply batch operations
-    data_set = data_set.batch(batch_size, drop_remainder=True)
-
-    # apply dataset repeat operation
-    data_set = data_set.repeat(repeat_num)
-
-    return data_set
-
-
-def _get_rank_info():
-    """
-    get rank size and rank id
-    """
-    rank_size = int(os.environ.get("RANK_SIZE", 1))
-
-    if rank_size > 1:
-        rank_size = get_group_size()
-        rank_id = get_rank()
-    else:
-        rank_size = 1
-        rank_id = 0
-
-    return rank_size, rank_id
diff --git a/research/cv/squeezenet/src/lr_generator.py b/research/cv/squeezenet/src/lr_generator.py
deleted file mode 100644
index 238d038961bf348f0815da62860c6981ba4c4cf2..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/src/lr_generator.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""learning rate generator"""
-import math
-import numpy as np
-
-
-def get_lr(lr_init, lr_end, lr_max, total_epochs, warmup_epochs,
-           pretrain_epochs, steps_per_epoch, lr_decay_mode):
-    """
-    generate learning rate array
-
-    Args:
-        lr_init(float): init learning rate
-        lr_end(float): end learning rate
-        lr_max(float): max learning rate
-        total_epochs(int): total epoch of training
-        warmup_epochs(int): number of warmup epochs
-        pretrain_epochs(int): number of pretrain epochs
-        steps_per_epoch(int): steps of one epoch
-        lr_decay_mode(string): learning rate decay mode,
-                               including steps, poly, linear or cosine
-
-    Returns:
-        np.array, learning rate array
-    """
-
-    lr_each_step = []
-    total_steps = steps_per_epoch * total_epochs
-    warmup_steps = steps_per_epoch * warmup_epochs
-    pretrain_steps = steps_per_epoch * pretrain_epochs
-    decay_steps = total_steps - warmup_steps
-
-    if lr_decay_mode == 'steps':
-        decay_epoch_index = [
-            0.3 * total_steps, 0.6 * total_steps, 0.8 * total_steps
-        ]
-        for i in range(total_steps):
-            if i < decay_epoch_index[0]:
-                lr = lr_max
-            elif i < decay_epoch_index[1]:
-                lr = lr_max * 0.1
-            elif i < decay_epoch_index[2]:
-                lr = lr_max * 0.01
-            else:
-                lr = lr_max * 0.001
-            lr_each_step.append(lr)
-
-    elif lr_decay_mode == 'poly':
-        for i in range(total_steps):
-            if i < warmup_steps:
-                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
-            else:
-                base = (1.0 - (i - warmup_steps) / decay_steps)
-                lr = lr_max * base * base
-            lr_each_step.append(lr)
-
-    elif lr_decay_mode == 'linear':
-        for i in range(total_steps):
-            if i < warmup_steps:
-                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
-            else:
-                lr = lr_max - (lr_max - lr_end) * (i -
-                                                   warmup_steps) / decay_steps
-            lr_each_step.append(lr)
-
-    elif lr_decay_mode == 'cosine':
-        for i in range(total_steps):
-            if i < warmup_steps:
-                lr = linear_warmup_lr(i, warmup_steps, lr_max, lr_init)
-            else:
-                linear_decay = (total_steps - i) / decay_steps
-                cosine_decay = 0.5 * (
-                    1 + math.cos(math.pi * 2 * 0.47 *
-                                 (i - warmup_steps) / decay_steps))
-                decayed = linear_decay * cosine_decay + 0.00001
-                lr = lr_max * decayed
-            lr_each_step.append(lr)
-
-    else:
-        raise NotImplementedError(
-            'Learning rate decay mode [{:s}] cannot be recognized'.format(
-                lr_decay_mode))
-
-    lr_each_step = np.array(lr_each_step).astype(np.float32)
-    learning_rate = lr_each_step[pretrain_steps:]
-
-    return learning_rate
-
-
-def linear_warmup_lr(current_step, warmup_steps, base_lr, init_lr):
-    lr_inc = (base_lr - init_lr) / warmup_steps
-    lr = init_lr + lr_inc * current_step
-    return lr
diff --git a/research/cv/squeezenet/src/squeezenet.py b/research/cv/squeezenet/src/squeezenet.py
deleted file mode 100644
index 396e7de6713da728ac4200b8b9e8cb5cb74939ad..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/src/squeezenet.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Squeezenet."""
-import mindspore.nn as nn
-from mindspore.common import initializer as weight_init
-from mindspore.ops import operations as P
-
-
-class Fire(nn.Cell):
-    """
-    Fire network definition.
-    """
-    def __init__(self, inplanes, squeeze_planes, expand1x1_planes,
-                 expand3x3_planes):
-        super(Fire, self).__init__()
-        self.inplanes = inplanes
-        self.squeeze = nn.Conv2d(inplanes,
-                                 squeeze_planes,
-                                 kernel_size=1,
-                                 has_bias=True)
-        self.squeeze_activation = nn.ReLU()
-        self.expand1x1 = nn.Conv2d(squeeze_planes,
-                                   expand1x1_planes,
-                                   kernel_size=1,
-                                   has_bias=True)
-        self.expand1x1_activation = nn.ReLU()
-        self.expand3x3 = nn.Conv2d(squeeze_planes,
-                                   expand3x3_planes,
-                                   kernel_size=3,
-                                   pad_mode='same',
-                                   has_bias=True)
-        self.expand3x3_activation = nn.ReLU()
-        self.concat = P.Concat(axis=1)
-
-    def construct(self, x):
-        x = self.squeeze_activation(self.squeeze(x))
-        return self.concat((self.expand1x1_activation(self.expand1x1(x)),
-                            self.expand3x3_activation(self.expand3x3(x))))
-
-
-class SqueezeNet(nn.Cell):
-    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
-    accuracy with 50x fewer parameters and <0.5MB model size"
-    <https://arxiv.org/abs/1602.07360>`_ paper.
-
-    Get SqueezeNet neural network.
-
-    Args:
-        num_classes (int): Class number.
-
-    Returns:
-        Cell, cell instance of SqueezeNet neural network.
-
-    Examples:
-        >>> net = SqueezeNet(10)
-    """
-    def __init__(self, num_classes=10):
-        super(SqueezeNet, self).__init__()
-
-        self.features = nn.SequentialCell([
-            nn.Conv2d(3,
-                      96,
-                      kernel_size=7,
-                      stride=2,
-                      pad_mode='valid',
-                      has_bias=True),
-            nn.ReLU(),
-            nn.MaxPool2d(kernel_size=3, stride=2),
-            Fire(96, 16, 64, 64),
-            Fire(128, 16, 64, 64),
-            Fire(128, 32, 128, 128),
-            nn.MaxPool2d(kernel_size=3, stride=2),
-            Fire(256, 32, 128, 128),
-            Fire(256, 48, 192, 192),
-            Fire(384, 48, 192, 192),
-            Fire(384, 64, 256, 256),
-            nn.MaxPool2d(kernel_size=3, stride=2),
-            Fire(512, 64, 256, 256),
-        ])
-
-        # Final convolution is initialized differently from the rest
-        self.final_conv = nn.Conv2d(512,
-                                    num_classes,
-                                    kernel_size=1,
-                                    has_bias=True)
-        self.dropout = nn.Dropout(keep_prob=0.5)
-        self.relu = nn.ReLU()
-        self.mean = P.ReduceMean(keep_dims=True)
-        self.flatten = nn.Flatten()
-        self.custom_init_weight()
-
-    def custom_init_weight(self):
-        """
-        Init the weight of Conv2d in the net.
-        """
-        for _, cell in self.cells_and_names():
-            if isinstance(cell, nn.Conv2d):
-                if cell is self.final_conv:
-                    cell.weight.set_data(
-                        weight_init.initializer('normal', cell.weight.shape,
-                                                cell.weight.dtype))
-                else:
-                    cell.weight.set_data(
-                        weight_init.initializer('he_uniform',
-                                                cell.weight.shape,
-                                                cell.weight.dtype))
-                if cell.bias is not None:
-                    cell.bias.set_data(
-                        weight_init.initializer('zeros', cell.bias.shape,
-                                                cell.bias.dtype))
-
-    def construct(self, x):
-        x = self.features(x)
-        x = self.dropout(x)
-        x = self.final_conv(x)
-        x = self.relu(x)
-        x = self.mean(x, (2, 3))
-        x = self.flatten(x)
-
-        return x
-
-
-class SqueezeNet_Residual(nn.Cell):
-    r"""SqueezeNet with simple bypass model architecture from the `"SqueezeNet:
-    AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size"
-    <https://arxiv.org/abs/1602.07360>`_ paper.
-
-    Get SqueezeNet with simple bypass neural network.
-
-    Args:
-        num_classes (int): Class number.
-
-    Returns:
-        Cell, cell instance of SqueezeNet with simple bypass neural network.
-
-    Examples:
-        >>> net = SqueezeNet_Residual(10)
-    """
-    def __init__(self, num_classes=10):
-        super(SqueezeNet_Residual, self).__init__()
-
-        self.conv1 = nn.Conv2d(3,
-                               96,
-                               kernel_size=7,
-                               stride=2,
-                               pad_mode='valid',
-                               has_bias=True)
-        self.fire2 = Fire(96, 16, 64, 64)
-        self.fire3 = Fire(128, 16, 64, 64)
-        self.fire4 = Fire(128, 32, 128, 128)
-        self.fire5 = Fire(256, 32, 128, 128)
-        self.fire6 = Fire(256, 48, 192, 192)
-        self.fire7 = Fire(384, 48, 192, 192)
-        self.fire8 = Fire(384, 64, 256, 256)
-        self.fire9 = Fire(512, 64, 256, 256)
-        # Final convolution is initialized differently from the rest
-        self.conv10 = nn.Conv2d(512, num_classes, kernel_size=1, has_bias=True)
-
-        self.relu = nn.ReLU()
-        self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2)
-        self.add = P.Add()
-        self.dropout = nn.Dropout(keep_prob=0.5)
-        self.mean = P.ReduceMean(keep_dims=True)
-        self.flatten = nn.Flatten()
-        self.custom_init_weight()
-
-    def custom_init_weight(self):
-        """
-        Init the weight of Conv2d in the net.
-        """
-        for _, cell in self.cells_and_names():
-            if isinstance(cell, nn.Conv2d):
-                if cell is self.conv10:
-                    cell.weight.set_data(
-                        weight_init.initializer('normal', cell.weight.shape,
-                                                cell.weight.dtype))
-                else:
-                    cell.weight.set_data(
-                        weight_init.initializer('xavier_uniform',
-                                                cell.weight.shape,
-                                                cell.weight.dtype))
-                if cell.bias is not None:
-                    cell.bias.set_data(
-                        weight_init.initializer('zeros', cell.bias.shape,
-                                                cell.bias.dtype))
-
-    def construct(self, x):
-        """
-        Construct squeezenet_residual.
-        """
-        x = self.conv1(x)
-        x = self.relu(x)
-        x = self.max_pool2d(x)
-        x = self.fire2(x)
-        x = self.add(x, self.fire3(x))
-        x = self.fire4(x)
-        x = self.max_pool2d(x)
-        x = self.add(x, self.fire5(x))
-        x = self.fire6(x)
-        x = self.add(x, self.fire7(x))
-        x = self.fire8(x)
-        x = self.max_pool2d(x)
-        x = self.add(x, self.fire9(x))
-        x = self.dropout(x)
-        x = self.conv10(x)
-        x = self.relu(x)
-        x = self.mean(x, (2, 3))
-        x = self.flatten(x)
-
-        return x
diff --git a/research/cv/squeezenet/train.py b/research/cv/squeezenet/train.py
deleted file mode 100644
index dc71b0366d38ae8574f03286ec55fae4002533fe..0000000000000000000000000000000000000000
--- a/research/cv/squeezenet/train.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""train squeezenet."""
-import os
-import argparse
-from mindspore import context
-from mindspore import Tensor
-from mindspore.nn.optim.momentum import Momentum
-from mindspore.train.model import Model
-from mindspore.context import ParallelMode
-from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
-from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
-from mindspore.train.loss_scale_manager import FixedLossScaleManager
-from mindspore.train.serialization import load_checkpoint, load_param_into_net
-from mindspore.communication.management import init, get_rank, get_group_size
-from mindspore.common import set_seed
-from src.lr_generator import get_lr
-from src.CrossEntropySmooth import CrossEntropySmooth
-
-parser = argparse.ArgumentParser(description='Image classification')
-parser.add_argument('--net', type=str, default='squeezenet', choices=['squeezenet', 'squeezenet_residual'],
-                    help='Model.')
-parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'imagenet'], help='Dataset.')
-parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute')
-parser.add_argument('--device_num', type=int, default=1, help='Device num.')
-parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
-parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
-parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')
-args_opt = parser.parse_args()
-
-set_seed(1)
-
-if args_opt.net == "squeezenet":
-    from src.squeezenet import SqueezeNet as squeezenet
-    if args_opt.dataset == "cifar10":
-        from src.config import config1 as config
-        from src.dataset import create_dataset_cifar as create_dataset
-    else:
-        from src.config import config2 as config
-        from src.dataset import create_dataset_imagenet as create_dataset
-else:
-    from src.squeezenet import SqueezeNet_Residual as squeezenet
-    if args_opt.dataset == "cifar10":
-        from src.config import config3 as config
-        from src.dataset import create_dataset_cifar as create_dataset
-    else:
-        from src.config import config4 as config
-        from src.dataset import create_dataset_imagenet as create_dataset
-
-if __name__ == '__main__':
-    target = args_opt.device_target
-    ckpt_save_dir = config.save_checkpoint_path
-
-    # init context
-    context.set_context(mode=context.GRAPH_MODE,
-                        device_target=target)
-    if args_opt.run_distribute:
-        if target == "Ascend":
-            device_id = int(os.getenv('DEVICE_ID'))
-            context.set_context(device_id=device_id)
-            context.set_auto_parallel_context(
-                device_num=args_opt.device_num,
-                parallel_mode=ParallelMode.DATA_PARALLEL,
-                gradients_mean=True)
-            init()
-        # GPU target
-        else:
-            init()
-            context.set_auto_parallel_context(
-                device_num=get_group_size(),
-                parallel_mode=ParallelMode.DATA_PARALLEL,
-                gradients_mean=True)
-        ckpt_save_dir = config.save_checkpoint_path + "ckpt_" + str(
-            get_rank()) + "/"
-
-    # create dataset
-    dataset = create_dataset(dataset_path=args_opt.dataset_path,
-                             do_train=True,
-                             repeat_num=1,
-                             batch_size=config.batch_size,
-                             target=target)
-    step_size = dataset.get_dataset_size()
-
-    # define net
-    net = squeezenet(num_classes=config.class_num)
-
-    # load checkpoint
-    if args_opt.pre_trained:
-        param_dict = load_checkpoint(args_opt.pre_trained)
-        load_param_into_net(net, param_dict)
-
-    # init lr
-    lr = get_lr(lr_init=config.lr_init,
-                lr_end=config.lr_end,
-                lr_max=config.lr_max,
-                total_epochs=config.epoch_size,
-                warmup_epochs=config.warmup_epochs,
-                pretrain_epochs=config.pretrain_epoch_size,
-                steps_per_epoch=step_size,
-                lr_decay_mode=config.lr_decay_mode)
-    lr = Tensor(lr)
-
-    # define loss
-    if args_opt.dataset == "imagenet":
-        if not config.use_label_smooth:
-            config.label_smooth_factor = 0.0
-        loss = CrossEntropySmooth(sparse=True,
-                                  reduction='mean',
-                                  smooth_factor=config.label_smooth_factor,
-                                  num_classes=config.class_num)
-    else:
-        loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
-
-    # define opt, model
-    if target == "Ascend":
-        loss_scale = FixedLossScaleManager(config.loss_scale,
-                                           drop_overflow_update=False)
-        opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
-                       lr,
-                       config.momentum,
-                       config.weight_decay,
-                       config.loss_scale,
-                       use_nesterov=True)
-        model = Model(net,
-                      loss_fn=loss,
-                      optimizer=opt,
-                      loss_scale_manager=loss_scale,
-                      metrics={'acc'},
-                      amp_level="O2",
-                      keep_batchnorm_fp32=False)
-    else:
-        # GPU target
-        opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
-                       lr,
-                       config.momentum,
-                       config.weight_decay,
-                       use_nesterov=True)
-        model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
-
-    # define callbacks
-    time_cb = TimeMonitor(data_size=step_size)
-    loss_cb = LossMonitor()
-    cb = [time_cb, loss_cb]
-    if config.save_checkpoint:
-        config_ck = CheckpointConfig(
-            save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
-            keep_checkpoint_max=config.keep_checkpoint_max)
-        ckpt_cb = ModelCheckpoint(prefix=args_opt.net + '_' + args_opt.dataset,
-                                  directory=ckpt_save_dir,
-                                  config=config_ck)
-        cb += [ckpt_cb]
-
-    # train model
-    model.train(config.epoch_size - config.pretrain_epoch_size,
-                dataset,
-                callbacks=cb)
diff --git a/research/gnn/dgcn/readme_CN.md b/research/gnn/dgcn/README_CN.md
similarity index 100%
rename from research/gnn/dgcn/readme_CN.md
rename to research/gnn/dgcn/README_CN.md
diff --git a/research/nlp/textrcnn/readme.md b/research/nlp/textrcnn/README.md
similarity index 100%
rename from research/nlp/textrcnn/readme.md
rename to research/nlp/textrcnn/README.md