From 23a1e801e8cfd93f739e81b6427427f51c3ba91d Mon Sep 17 00:00:00 2001
From: Cathy Wong <cathy.wong@huawei.com>
Date: Tue, 31 May 2022 19:45:38 -0400
Subject: [PATCH] [MD] Transforms Unification Feature - Updates models to use
 unified dataset transforms

---
 .jenkins/check/config/filter_linklint.txt     |  3 +-
 .jenkins/check/config/whitelizard.txt         |  1 +
 benchmark/ascend/bert/src/dataset.py          |  4 +-
 benchmark/ascend/resnet/src/dataset.py        | 90 +++++++++----------
 benchmark/ascend/resnet/src/dataset_infer.py  | 62 ++++++-------
 official/cv/Deepsort/modelarts/start_train.py |  2 +-
 official/cv/Deepsort/src/deep/train.py        |  4 +-
 official/cv/alexnet/src/dataset.py            |  6 +-
 official/cv/brdnet/src/dataset.py             |  4 +-
 .../cv/cnn_direction_model/src/dataset.py     |  4 +-
 official/cv/crnn/src/dataset.py               |  6 +-
 official/cv/crnn_seq2seq_ocr/src/dataset.py   |  9 +-
 official/cv/cspdarknet53/src/dataset.py       |  6 +-
 official/cv/ctpn/src/dataset.py               |  6 +-
 official/cv/darknet53/src/dataset.py          |  6 +-
 official/cv/deeptext/src/dataset.py           |  6 +-
 .../densenet/src/datasets/classification.py   |  6 +-
 official/cv/depthnet/src/data_loader.py       |  6 +-
 official/cv/depthnet/train.py                 |  6 +-
 official/cv/dncnn/eval.py                     |  4 +-
 .../infer/data/preprocess/export_bin_file.py  |  2 +-
 official/cv/dncnn/src/dataset.py              |  4 +-
 official/cv/dpn/src/imagenet_dataset.py       |  6 +-
 official/cv/east/detect.py                    |  6 +-
 official/cv/east/src/dataset.py               |  4 +-
 official/cv/efficientnet/src/dataset.py       |  6 +-
 official/cv/efficientnet/src/transform.py     | 10 +--
 official/cv/faster_rcnn/src/dataset.py        |  4 +-
 official/cv/fastscnn/eval.py                  |  8 +-
 official/cv/fastscnn/modelarts/start_train.py |  8 +-
 official/cv/fastscnn/src/dataloader.py        |  4 +-
 official/cv/fastscnn/train.py                 |  8 +-
 official/cv/googlenet/src/dataset.py          |  6 +-
 official/cv/inceptionv3/src/dataset.py        |  6 +-
 official/cv/inceptionv4/src/dataset.py        |  6 +-
 official/cv/lenet/src/dataset.py              |  6 +-
 official/cv/maskrcnn/src/dataset.py           |  4 +-
 .../cv/maskrcnn_mobilenetv1/src/dataset.py    |  4 +-
 official/cv/mobilenetv1/src/dataset.py        | 34 +++----
 official/cv/mobilenetv2/src/dataset.py        | 24 ++---
 official/cv/mobilenetv3/src/dataset.py        | 50 +++++------
 official/cv/nasnet/src/dataset.py             |  6 +-
 official/cv/nima/src/dataset.py               |  6 +-
 official/cv/patchcore/preprocess.py           | 22 ++---
 official/cv/patchcore/src/dataset.py          | 22 ++---
 official/cv/posenet/src/dataset.py            |  4 +-
 official/cv/psenet/src/dataset.py             | 14 +--
 official/cv/pvnet/eval.py                     |  9 +-
 official/cv/pvnet/src/dataset.py              | 11 ++-
 official/cv/pwcnet/src/flyingchairs.py        | 20 ++---
 official/cv/pwcnet/src/sintel.py              | 20 ++---
 official/cv/resnet/gpu_resnet_benchmark.py    | 16 ++--
 official/cv/resnet/src/dataset.py             | 90 +++++++++----------
 official/cv/resnet/src/dataset_infer.py       | 62 ++++++-------
 official/cv/resnet_thor/src/dataset.py        |  6 +-
 official/cv/resnext/src/dataset.py            |  6 +-
 official/cv/retinanet/src/dataset.py          |  2 +-
 official/cv/se_resnext50/src/dataset.py       |  6 +-
 .../cv/semantic_human_matting/src/dataset.py  |  4 +-
 official/cv/shufflenetv1/src/dataset.py       |  6 +-
 official/cv/shufflenetv2/src/dataset.py       |  6 +-
 official/cv/simclr/src/dataset.py             | 11 ++-
 official/cv/simple_pose/src/dataset.py        |  4 +-
 .../sphereface/src/datasets/classification.py |  6 +-
 official/cv/squeezenet/src/dataset.py         |  6 +-
 official/cv/ssd/src/dataset.py                | 10 +--
 official/cv/ssim-ae/src/dataset.py            |  2 +-
 official/cv/tinydarknet/src/dataset.py        |  6 +-
 official/cv/unet/src/data_loader.py           |  4 +-
 official/cv/unet3d/src/dataset.py             |  4 +-
 official/cv/vgg16/src/dataset.py              |  6 +-
 official/cv/vit/src/dataset.py                |  7 +-
 official/cv/warpctc/src/dataset.py            |  6 +-
 official/cv/xception/src/dataset.py           |  6 +-
 .../cv/yolov3_darknet53/src/yolo_dataset.py   |  6 +-
 official/cv/yolov3_resnet18/src/dataset.py    |  4 +-
 official/cv/yolov4/src/yolo_dataset.py        |  4 +-
 official/cv/yolov5/src/transforms.py          |  6 +-
 official/cv/yolov5/src/yolo_dataset.py        |  6 +-
 official/nlp/bert/src/dataset.py              |  4 +-
 .../nlp/bert/src/finetune_data_preprocess.py  |  4 +-
 official/nlp/bert_thor/pretrain_eval.py       |  4 +-
 official/nlp/bert_thor/src/dataset.py         |  4 +-
 official/nlp/cpm/train.py                     |  4 +-
 official/nlp/dgu/src/utils.py                 |  4 +-
 official/nlp/duconv/src/dataset.py            |  4 +-
 official/nlp/emotect/src/dataset.py           |  4 +-
 official/nlp/ernie/src/dataset.py             |  4 +-
 official/nlp/fasttext/eval.py                 |  4 +-
 .../nlp/gnmt_v2/src/dataset/load_dataset.py   |  4 +-
 official/nlp/gpt/src/dataset.py               |  4 +-
 official/nlp/gru/src/dataset.py               |  4 +-
 official/nlp/mass/src/dataset/load_dataset.py |  4 +-
 official/nlp/pangu_alpha/src/dataset.py       |  4 +-
 .../prophetnet/src/dataset/load_dataset.py    |  4 +-
 official/nlp/tinybert/src/dataset.py          |  4 +-
 official/nlp/transformer/eval.py              |  2 +-
 official/nlp/transformer/src/dataset.py       |  2 +-
 research/audio/ctcmodel/src/dataset.py        |  4 +-
 .../audio/speech_transformer/src/dataset.py   |  4 +-
 research/cv/3dcnn/src/dataset.py              |  4 +-
 .../APDrawingGAN/src/data/aligned_dataset.py  | 14 +--
 .../cv/APDrawingGAN/src/data/base_dataset.py  | 22 ++---
 research/cv/AVA_cifar/src/datasets.py         | 48 +++++-----
 research/cv/AVA_hpa/src/datasets.py           | 50 +++++------
 .../cv/AlignedReID++/src/dataset_loader.py    | 20 ++---
 research/cv/AlignedReID/src/dataset.py        |  4 +-
 .../infer/sdk/postprocess/src/dataset.py      |  4 +-
 research/cv/AlphaPose/src/dataset.py          |  4 +-
 research/cv/AttGAN/src/data.py                | 28 +++---
 research/cv/AttentionCluster/make_dataset.py  |  4 +-
 research/cv/AutoSlim/src/dataset.py           | 25 +++---
 research/cv/CBAM/src/data.py                  |  2 +-
 research/cv/CGAN/src/dataset.py               |  4 +-
 research/cv/CMT/src/dataset.py                | 17 ++--
 research/cv/CascadeRCNN/src/dataset.py        |  4 +-
 .../CycleGAN/src/dataset/cyclegan_dataset.py  |  4 +-
 research/cv/DBPN/src/dataset/dataset.py       | 12 +--
 research/cv/DDAG/eval.py                      | 12 +--
 research/cv/DDAG/train.py                     | 40 ++++-----
 research/cv/DDRNet/src/data/imagenet.py       | 25 +++---
 research/cv/DRNet/src/dataset.py              | 19 ++--
 research/cv/DeepID/src/dataset.py             | 16 ++--
 research/cv/EfficientDet_d0/src/dataset.py    |  4 +-
 research/cv/FDA-BNN/src/dataset.py            | 58 ++++++------
 research/cv/FaceAttribute/preprocess.py       | 10 +--
 research/cv/FaceAttribute/src/dataset_eval.py | 10 +--
 .../cv/FaceAttribute/src/dataset_train.py     | 10 +--
 research/cv/FaceDetection/preprocess.py       |  6 +-
 .../cv/FaceDetection/src/data_preprocess.py   | 10 +--
 research/cv/FaceNet/src/LFWDataset.py         |  9 +-
 research/cv/FaceNet/src/data_loader.py        | 15 ++--
 .../data_loader_generate_triplets_online.py   | 15 ++--
 .../cv/FaceQualityAssessment/src/dataset.py   |  4 +-
 research/cv/FaceRecognition/eval.py           |  9 +-
 .../cv/FaceRecognition/src/dataset_factory.py |  8 +-
 .../cv/FaceRecognitionForTracking/eval.py     |  6 +-
 .../FaceRecognitionForTracking/preprocess.py  |  8 +-
 .../FaceRecognitionForTracking/src/dataset.py |  4 +-
 research/cv/GENet_Res50/src/dataset.py        |  6 +-
 research/cv/HRNetW48_cls/src/dataset.py       |  6 +-
 research/cv/HireMLP/src/dataset.py            | 17 ++--
 research/cv/HourNAS/src/dataset.py            | 60 ++++++-------
 research/cv/ICNet/Res50V1_PRE/src/dataset.py  |  6 +-
 research/cv/ICNet/eval.py                     |  8 +-
 .../cv/ICNet/src/cityscapes_mindrecord.py     | 11 +--
 research/cv/ICNet/src/visualize.py            |  8 +-
 research/cv/ISyNet/src/dataset.py             | 15 ++--
 research/cv/ISyNet/src/transform.py           |  8 +-
 research/cv/ISyNet/utils/preprocess_310.py    |  4 +-
 research/cv/Inception-v2/src/dataset.py       |  4 +-
 research/cv/JDE/eval_detect.py                |  4 +-
 research/cv/JDE/train.py                      |  4 +-
 research/cv/LightCNN/src/dataset.py           | 24 ++---
 research/cv/MGN/src/dataset.py                |  2 +-
 research/cv/MVD/eval.py                       | 12 +--
 research/cv/MVD/train.py                      | 36 ++++----
 research/cv/ManiDP/src/dataset.py             | 56 ++++++------
 .../cv/MaskedFaceRecognition/test_dataset.py  |  6 +-
 .../cv/MaskedFaceRecognition/train_dataset.py |  6 +-
 research/cv/NFNet/src/data/imagenet.py        | 13 ++-
 research/cv/Neighbor2Neighbor/src/dataset.py  |  4 +-
 research/cv/PAMTRI/MultiTaskNet/preprocess.py |  4 +-
 .../MultiTaskNet/src/dataset/dataset.py       |  4 +-
 .../MultiTaskNet/src/dataset/transforms.py    |  8 +-
 .../PAMTRI/PoseEstNet/src/dataset/dataset.py  | 10 +--
 research/cv/PAMTRI/PoseEstNet/trans.py        | 10 +--
 research/cv/PDarts/src/dataset.py             |  4 +-
 .../cv/Pix2Pix/src/dataset/pix2pix_dataset.py |  2 +-
 research/cv/ReIDStrongBaseline/src/dataset.py |  2 +-
 research/cv/RefineDet/src/dataset.py          |  4 +-
 research/cv/RefineNet/src/dataset.py          |  2 +-
 research/cv/ResNeSt50/src/datasets/autoaug.py |  8 +-
 research/cv/ResNeSt50/src/datasets/dataset.py | 15 ++--
 research/cv/SE-Net/src/dataset.py             |  6 +-
 research/cv/SE_ResNeXt50/src/dataset.py       |  6 +-
 research/cv/SPPNet/src/dataset.py             |  4 +-
 research/cv/STGAN/modelarts/dataset/celeba.py |  4 +-
 research/cv/STGAN/src/dataset/celeba.py       |  4 +-
 research/cv/SiamFC/ModelArts/start_train.py   |  8 +-
 research/cv/SiamFC/train.py                   |  8 +-
 research/cv/StarGAN/src/dataset.py            | 20 ++---
 research/cv/TCN/src/dataset.py                |  6 +-
 research/cv/TNT/src/data/imagenet.py          | 13 ++-
 research/cv/U-GAT-IT/src/dataset/dataset.py   | 30 +++----
 research/cv/UNet3+/src/dataset.py             |  4 +-
 research/cv/VehicleNet/src/dataset.py         | 11 ++-
 research/cv/ViG/src/data/imagenet.py          | 11 ++-
 research/cv/Yolact++/src/dataset.py           |  4 +-
 research/cv/advanced_east/src/dataset.py      |  4 +-
 research/cv/arcface/src/dataset.py            |  6 +-
 research/cv/augvit/src/c10_dataset.py         |  8 +-
 .../src/dataset/autoaugment/aug.py            |  8 +-
 .../src/dataset/autoaugment/ops/__init__.py   | 16 ++--
 .../src/dataset/autoaugment/ops/crop.py       |  8 +-
 .../src/dataset/autoaugment/ops/cutout.py     |  2 +-
 .../src/dataset/autoaugment/ops/ops_test.py   |  4 +-
 .../src/dataset/autoaugment/ops/transform.py  | 15 ++--
 .../cv/autoaugment/src/dataset/cifar10.py     |  4 +-
 .../autoaugment/src/dataset/svhn_dataset.py   |  4 +-
 research/cv/cait/src/data/imagenet.py         | 21 +++--
 research/cv/cct/src/data/cifar10.py           | 20 ++---
 research/cv/cct/src/data/imagenet.py          | 23 +++--
 research/cv/convnext/src/data/imagenet.py     | 11 ++-
 research/cv/dcgan/src/dataset.py              |  6 +-
 .../cv/delf/src/data_augmentation_parallel.py |  6 +-
 research/cv/ecolite/src/transforms.py         |  6 +-
 research/cv/efficientnet-b0/src/dataset.py    |  6 +-
 research/cv/efficientnet-b1/src/dataset.py    |  6 +-
 research/cv/efficientnet-b2/src/dataset.py    |  6 +-
 research/cv/efficientnet-b3/src/dataset.py    |  6 +-
 .../src/data/imagenet_finetune.py             |  6 +-
 research/cv/eppmvsnet/src/blendedmvs.py       | 16 ++--
 research/cv/faster_rcnn_dcn/src/dataset.py    |  4 +-
 research/cv/fishnet99/src/dataset.py          |  6 +-
 research/cv/ghostnet/src/dataset.py           |  6 +-
 research/cv/ghostnet_quant/src/dataset.py     | 19 ++--
 research/cv/glore_res/src/autoaugment.py      | 82 ++++++++---------
 research/cv/glore_res/src/dataset.py          |  6 +-
 research/cv/glore_res/src/transform.py        | 10 +--
 research/cv/hardnet/src/dataset.py            |  4 +-
 research/cv/hed/src/dataset.py                |  4 +-
 research/cv/ibnnet/src/dataset.py             |  6 +-
 .../cv/inception_resnet_v2/src/dataset.py     |  6 +-
 research/cv/lresnet100e_ir/src/dataset.py     |  4 +-
 research/cv/mae/src/datasets/dataset.py       | 13 ++-
 research/cv/mae/src/datasets/imagenet.py      |  2 +-
 .../meta-baseline/src/data/mini_Imagenet.py   | 16 ++--
 research/cv/metric_learn/src/dataset.py       |  6 +-
 research/cv/mnasnet/src/dataset.py            |  6 +-
 .../cv/mobilenetV3_small_x1_0/src/dataset.py  |  6 +-
 research/cv/mobilenetv3_large/src/dataset.py  |  6 +-
 research/cv/nas-fpn/src/dataset.py            |  4 +-
 research/cv/nima_vgg16/src/MyDataset.py       |  4 +-
 research/cv/ntsnet/src/dataset.py             |  4 +-
 research/cv/ntsnet/src/dataset_gpu.py         |  4 +-
 research/cv/osnet/model_utils/transforms.py   |  6 +-
 research/cv/pcb_rpp/src/dataset.py            |  4 +-
 research/cv/pnasnet/src/dataset.py            |  6 +-
 research/cv/proxylessnas/src/dataset.py       |  6 +-
 research/cv/ras/src/dataset_test.py           |  4 +-
 research/cv/ras/src/dataset_train.py          |  4 +-
 research/cv/rcnn/eval.py                      |  2 +-
 research/cv/relationnet/src/dataset.py        | 10 +--
 research/cv/renas/src/dataset.py              | 58 ++++++------
 research/cv/repvgg/src/data/imagenet.py       | 11 ++-
 research/cv/res2net/src/dataset.py            |  6 +-
 research/cv/res2net/src/dataset_infer.py      |  6 +-
 .../cv/res2net_faster_rcnn/src/dataset.py     |  4 +-
 .../cv/res2net_yolov3/src/yolo_dataset.py     |  4 +-
 research/cv/resnet3d/src/dataset.py           |  4 +-
 research/cv/resnet3d/src/pil_transforms.py    | 32 +++----
 .../resnet50_adv_pruning/src/pet_dataset.py   | 20 ++---
 research/cv/resnet50_bam/src/dataset.py       |  6 +-
 research/cv/resnetv2/src/dataset.py           |  6 +-
 research/cv/resnetv2_50_frn/src/dataset.py    |  6 +-
 research/cv/resnext152_64x4d/src/dataset.py   |  6 +-
 .../cv/retinanet_resnet101/src/dataset.py     |  4 +-
 .../cv/retinanet_resnet152/src/dataset.py     |  4 +-
 research/cv/rfcn/src/dataset.py               |  2 +-
 research/cv/simple_baselines/src/dataset.py   |  4 +-
 research/cv/single_path_nas/src/dataset.py    |  6 +-
 research/cv/sknet/src/dataset.py              |  6 +-
 research/cv/squeezenet/src/dataset.py         |  6 +-
 research/cv/squeezenet1_1/src/dataset.py      |  6 +-
 research/cv/ssc_resnet50/src/dataset.py       | 32 +++----
 research/cv/ssd_ghostnet/src/dataset.py       |  4 +-
 research/cv/ssd_inception_v2/src/dataset.py   |  4 +-
 research/cv/ssd_inceptionv2/src/dataset.py    |  4 +-
 research/cv/ssd_mobilenetV2/src/dataset.py    |  4 +-
 .../cv/ssd_mobilenetV2_FPNlite/src/dataset.py |  4 +-
 research/cv/ssd_resnet34/src/dataset.py       |  4 +-
 research/cv/ssd_resnet50/src/dataset.py       |  4 +-
 research/cv/ssd_resnet_34/src/dataset.py      |  4 +-
 research/cv/stpm/src/dataset.py               | 18 ++--
 .../cv/swin_transformer/src/data/imagenet.py  | 17 ++--
 research/cv/textfusenet/src/dataset.py        |  4 +-
 research/cv/tinynet/src/dataset.py            | 53 ++++++-----
 research/cv/tracktor/src/dataset.py           |  2 +-
 research/cv/u2net/src/data_loader.py          |  4 +-
 research/cv/vgg19/src/dataset.py              |  6 +-
 research/cv/vit_base/src/dataset.py           |  6 +-
 research/cv/wave_mlp/src/dataset.py           | 21 +++--
 research/cv/wgan/src/dataset.py               |  6 +-
 research/cv/wideresnet/src/dataset.py         |  6 +-
 research/cv/yolov3_tiny/src/transforms.py     |  6 +-
 research/cv/yolov3_tiny/src/yolo_dataset.py   |  4 +-
 research/mm/wukong/src/dataset/dataset.py     |  4 +-
 research/nlp/DYR/src/dataset.py               |  4 +-
 research/nlp/albert/src/dataset.py            |  4 +-
 research/nlp/gpt2/src/dataset.py              |  4 +-
 research/nlp/hypertext/src/dataset.py         |  4 +-
 research/nlp/ktnet/src/dataset.py             |  4 +-
 .../src/reading_comprehension/dataLoader.py   |  4 +-
 .../nlp/seq2seq/src/dataset/load_dataset.py   |  4 +-
 research/recommend/mmoe/src/load_dataset.py   |  2 +-
 .../model_scaffolding/example/src/dataset.py  |  6 +-
 297 files changed, 1494 insertions(+), 1531 deletions(-)

diff --git a/.jenkins/check/config/filter_linklint.txt b/.jenkins/check/config/filter_linklint.txt
index bbd5911cd..e5a98919a 100644
--- a/.jenkins/check/config/filter_linklint.txt
+++ b/.jenkins/check/config/filter_linklint.txt
@@ -1,2 +1,3 @@
 http://www.vision.caltech.edu/visipedia/CUB-200-2011.html
-http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
\ No newline at end of file
+http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
+https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py_key
diff --git a/.jenkins/check/config/whitelizard.txt b/.jenkins/check/config/whitelizard.txt
index 5f42aed7c..a571f39f3 100644
--- a/.jenkins/check/config/whitelizard.txt
+++ b/.jenkins/check/config/whitelizard.txt
@@ -55,4 +55,5 @@ models/research/cvtmodel/resnet_ipl/src/resnet26t.py:__init__
 models/research/cvtmodel/resnet_ipl/src/resnet101d.py:__init__
 models/research/cvtmodel/resnet_ipl/src/resnetrs50.py:__init__
 models/official/audio/lpcnet/ascend310_infer/src/main.cc:main
+models/official/nlp/bert/src/finetune_data_preprocess.py:process_msra
 
diff --git a/benchmark/ascend/bert/src/dataset.py b/benchmark/ascend/bert/src/dataset.py
index a611c1ef5..433ebe2cc 100644
--- a/benchmark/ascend/bert/src/dataset.py
+++ b/benchmark/ascend/bert/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import math
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 
 
diff --git a/benchmark/ascend/resnet/src/dataset.py b/benchmark/ascend/resnet/src/dataset.py
index 77809beb7..d96526cb3 100644
--- a/benchmark/ascend/resnet/src/dataset.py
+++ b/benchmark/ascend/resnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -49,18 +49,18 @@ def create_dataset1(dataset_path, do_train, batch_size=32, train_image_size=224,
     trans = []
     if do_train:
         trans += [
-            ds.vision.c_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
 
     trans += [
-        ds.vision.c_transforms.Resize((train_image_size, train_image_size)),
-        ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-        ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-        ds.vision.c_transforms.HWC2CHW()
+        ds.vision.Resize((train_image_size, train_image_size)),
+        ds.vision.Rescale(1.0 / 255.0, 0.0),
+        ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+        ds.vision.HWC2CHW()
     ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=type_cast_op, input_columns="label",
                             num_parallel_workers=get_num_parallel_workers(8))
@@ -115,18 +115,18 @@ def create_dataset2(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size)
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size)
         ]
-    trans_norm = [ds.vision.c_transforms.Normalize(mean=mean, std=std), ds.vision.c_transforms.HWC2CHW()]
+    trans_norm = [ds.vision.Normalize(mean=mean, std=std), ds.vision.HWC2CHW()]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     if device_num == 1:
         trans_work_num = 24
     else:
@@ -187,21 +187,21 @@ def create_dataset_pynative(dataset_path, do_train, batch_size=32, train_image_s
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=4)
     # only enable cache for eval
@@ -253,21 +253,21 @@ def create_dataset3(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(rank_id / (rank_id + 1)),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(rank_id / (rank_id + 1)),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=get_num_parallel_workers(8))
     # only enable cache for eval
@@ -321,21 +321,21 @@ def create_dataset4(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(292),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(292),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=get_num_parallel_workers(12))
     # only enable cache for eval
     if do_train:
diff --git a/benchmark/ascend/resnet/src/dataset_infer.py b/benchmark/ascend/resnet/src/dataset_infer.py
index 5d0a655e8..ce032b1db 100644
--- a/benchmark/ascend/resnet/src/dataset_infer.py
+++ b/benchmark/ascend/resnet/src/dataset_infer.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -130,21 +130,21 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -202,21 +202,21 @@ def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(rank_id / (rank_id + 1)),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(rank_id / (rank_id + 1)),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -271,21 +271,21 @@ def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(292),
-            ds.vision.c_transforms.CenterCrop(256),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(292),
+            ds.vision.CenterCrop(256),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=12)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)
     if do_train:
diff --git a/official/cv/Deepsort/modelarts/start_train.py b/official/cv/Deepsort/modelarts/start_train.py
index 879d8f232..bed103450 100644
--- a/official/cv/Deepsort/modelarts/start_train.py
+++ b/official/cv/Deepsort/modelarts/start_train.py
@@ -21,7 +21,7 @@ import numpy as np
 import moxing as mox
 import mindspore.nn as nn
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 from mindspore.common import set_seed
 from mindspore.common import dtype as mstype
diff --git a/official/cv/Deepsort/src/deep/train.py b/official/cv/Deepsort/src/deep/train.py
index 3d5e0a227..9488a1f22 100644
--- a/official/cv/Deepsort/src/deep/train.py
+++ b/official/cv/Deepsort/src/deep/train.py
@@ -16,7 +16,7 @@ import argparse
 import os
 import ast
 import numpy as np
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as ds
 import mindspore.nn as nn
 from mindspore import Tensor, context
@@ -24,7 +24,7 @@ from mindspore.communication.management import init, get_rank
 from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor
 from mindspore.train.model import Model
 from mindspore.context import ParallelMode
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 from mindspore.common import set_seed
 import mindspore.common.dtype as mstype
 from original_model import Net
diff --git a/official/cv/alexnet/src/dataset.py b/official/cv/alexnet/src/dataset.py
index 149ffdb05..6fcc88205 100644
--- a/official/cv/alexnet/src/dataset.py
+++ b/official/cv/alexnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ Produce the dataset
 import os
 from multiprocessing import cpu_count
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 from mindspore.common import dtype as mstype
 from mindspore.communication.management import get_rank, get_group_size
 
diff --git a/official/cv/brdnet/src/dataset.py b/official/cv/brdnet/src/dataset.py
index 9e870625f..dbdde6da5 100644
--- a/official/cv/brdnet/src/dataset.py
+++ b/official/cv/brdnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import glob
 import numpy as np
 import PIL.Image as Image
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 class BRDNetDataset:
     """ BRDNetDataset.
diff --git a/official/cv/cnn_direction_model/src/dataset.py b/official/cv/cnn_direction_model/src/dataset.py
index ec1bbdefe..b91ec5985 100644
--- a/official/cv/cnn_direction_model/src/dataset.py
+++ b/official/cv/cnn_direction_model/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import cv2
 import numpy as np
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.dataset_utils import lucky, noise_blur, noise_speckle, noise_gamma, noise_gaussian, noise_salt_pepper, \
     shift_color, enhance_brightness, enhance_sharpness, enhance_contrast, enhance_color, gaussian_blur, \
     randcrop, resize, rdistort, rgeometry, rotate_about_center, whole_rdistort, warp_perspective, random_contrast, \
diff --git a/official/cv/crnn/src/dataset.py b/official/cv/crnn/src/dataset.py
index 1d07a34f9..2f07b734a 100644
--- a/official/cv/crnn/src/dataset.py
+++ b/official/cv/crnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import numpy as np
 from PIL import Image, ImageFile
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vc
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vc
 from src.model_utils.config import config as config1
 from src.ic03_dataset import IC03Dataset
 from src.ic13_dataset import IC13Dataset
diff --git a/official/cv/crnn_seq2seq_ocr/src/dataset.py b/official/cv/crnn_seq2seq_ocr/src/dataset.py
index 40abc60fd..9d12f36c2 100644
--- a/official/cv/crnn_seq2seq_ocr/src/dataset.py
+++ b/official/cv/crnn_seq2seq_ocr/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import numpy as np
 from PIL import Image
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.transforms.c_transforms as ops
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as ops
 import mindspore.common.dtype as mstype
 
 from src.model_utils.config import config
@@ -36,7 +35,7 @@ class AugmentationOps():
         self.min_area_ratio = min_area_ratio
         self.aspect_ratio_range = aspect_ratio_range
         self.img_tile_shape = img_tile_shape
-        self.random_image_distortion_ops = P.RandomColorAdjust(brightness=brightness,
+        self.random_image_distortion_ops = C.RandomColorAdjust(brightness=brightness,
                                                                contrast=contrast,
                                                                saturation=saturation,
                                                                hue=hue)
diff --git a/official/cv/cspdarknet53/src/dataset.py b/official/cv/cspdarknet53/src/dataset.py
index 9025cffdd..e1c3c8e85 100644
--- a/official/cv/cspdarknet53/src/dataset.py
+++ b/official/cv/cspdarknet53/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from PIL import Image, ImageFile
 from .utils.sampler import DistributedSampler
 
diff --git a/official/cv/ctpn/src/dataset.py b/official/cv/ctpn/src/dataset.py
index a7936800e..dfdf942a4 100644
--- a/official/cv/ctpn/src/dataset.py
+++ b/official/cv/ctpn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import numpy as np
 from numpy import random
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as CC
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as CC
 import mindspore.common.dtype as mstype
 from src.model_utils.config import config
 
diff --git a/official/cv/darknet53/src/dataset.py b/official/cv/darknet53/src/dataset.py
index d5bf8dde2..8984090c9 100644
--- a/official/cv/darknet53/src/dataset.py
+++ b/official/cv/darknet53/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="GPU", distribute=False):
diff --git a/official/cv/deeptext/src/dataset.py b/official/cv/deeptext/src/dataset.py
index 7198f4d8d..7e598b96a 100644
--- a/official/cv/deeptext/src/dataset.py
+++ b/official/cv/deeptext/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,8 +22,8 @@ from numpy import random
 
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as CC
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as CC
 import mindspore.common.dtype as mstype
 from mindspore.mindrecord import FileWriter
 from model_utils.config import config
diff --git a/official/cv/densenet/src/datasets/classification.py b/official/cv/densenet/src/datasets/classification.py
index 438689992..84444a6dc 100644
--- a/official/cv/densenet/src/datasets/classification.py
+++ b/official/cv/densenet/src/datasets/classification.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,8 +21,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as vision_C
-import mindspore.dataset.transforms.c_transforms as normal_C
+import mindspore.dataset.vision as vision_C
+import mindspore.dataset.transforms as normal_C
 from src.datasets.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/depthnet/src/data_loader.py b/official/cv/depthnet/src/data_loader.py
index 41ae8f41f..f5a87c896 100644
--- a/official/cv/depthnet/src/data_loader.py
+++ b/official/cv/depthnet/src/data_loader.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ import os
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 import mindspore.dataset as ds
 from mindspore import dtype as mstype
 
diff --git a/official/cv/depthnet/train.py b/official/cv/depthnet/train.py
index 4308d7337..aaa809214 100644
--- a/official/cv/depthnet/train.py
+++ b/official/cv/depthnet/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import time
 
 import mindspore.numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 import mindspore as ms
 from mindspore import nn, Tensor, Model
 from mindspore import dtype as mstype
diff --git a/official/cv/dncnn/eval.py b/official/cv/dncnn/eval.py
index 14683113f..dca93f8a5 100644
--- a/official/cv/dncnn/eval.py
+++ b/official/cv/dncnn/eval.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@ import mindspore
 import mindspore.dataset as ds
 from mindspore import context
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from src.model import DnCNN
 
 class DnCNN_eval_Dataset():
diff --git a/official/cv/dncnn/infer/data/preprocess/export_bin_file.py b/official/cv/dncnn/infer/data/preprocess/export_bin_file.py
index b4b5462ec..14c0cb596 100644
--- a/official/cv/dncnn/infer/data/preprocess/export_bin_file.py
+++ b/official/cv/dncnn/infer/data/preprocess/export_bin_file.py
@@ -23,7 +23,7 @@ import numpy as np
 import cv2
 import mindspore
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 
 def ResziePadding(img, fixed_side=256):
diff --git a/official/cv/dncnn/src/dataset.py b/official/cv/dncnn/src/dataset.py
index b8240c79e..69b6565c7 100644
--- a/official/cv/dncnn/src/dataset.py
+++ b/official/cv/dncnn/src/dataset.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import cv2
 import PIL
 import mindspore
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 def create_train_dataset(data_path, model_type, noise_level=25, batch_size=128):
     # define dataset
diff --git a/official/cv/dpn/src/imagenet_dataset.py b/official/cv/dpn/src/imagenet_dataset.py
index 42ad7f9b9..cd73134f4 100644
--- a/official/cv/dpn/src/imagenet_dataset.py
+++ b/official/cv/dpn/src/imagenet_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import cv2
 from PIL import ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/east/detect.py b/official/cv/east/detect.py
index 48f825168..eb807cd62 100644
--- a/official/cv/east/detect.py
+++ b/official/cv/east/detect.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import numpy as np
 
 import mindspore.ops as P
 from mindspore import Tensor
-import mindspore.dataset.vision.py_transforms as V
+import mindspore.dataset.vision as V
 from src.dataset import get_rotate_mat
 
 import lanms
@@ -44,7 +44,7 @@ def load_pil(img):
     """convert PIL Image to Tensor
     """
     img = V.ToTensor()(img)
-    img = V.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(img)
+    img = V.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)(img)
     img = Tensor(img)
     img = P.ExpandDims()(img, 0)
     return img
diff --git a/official/cv/east/src/dataset.py b/official/cv/east/src/dataset.py
index 79c6ddfbc..2f0da3520 100644
--- a/official/cv/east/src/dataset.py
+++ b/official/cv/east/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import cv2
 from PIL import Image
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 from src.distributed_sampler import DistributedSampler
 
 
diff --git a/official/cv/efficientnet/src/dataset.py b/official/cv/efficientnet/src/dataset.py
index 76a67f149..7b8bd3108 100644
--- a/official/cv/efficientnet/src/dataset.py
+++ b/official/cv/efficientnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 from mindspore.communication.management import get_group_size, get_rank
 from mindspore.dataset.vision import Inter
 
diff --git a/official/cv/efficientnet/src/transform.py b/official/cv/efficientnet/src/transform.py
index c34a8fe94..39a1eebcb 100644
--- a/official/cv/efficientnet/src/transform.py
+++ b/official/cv/efficientnet/src/transform.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 random augment class
 """
 import numpy as np
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as vision
 from src import transform_utils
 
 
@@ -35,9 +35,9 @@ class RandAugment:
         # assert the imgs object are pil_images
         ret_imgs = []
         ret_labels = []
-        py_to_pil_op = P.ToPIL()
-        to_tensor = P.ToTensor()
-        normalize_op = P.Normalize(self.mean, self.std)
+        py_to_pil_op = vision.ToPIL()
+        to_tensor = vision.ToTensor()
+        normalize_op = vision.Normalize(self.mean, self.std, is_hwc=False)
         rand_augment_ops = transform_utils.rand_augment_transform(self.config_str, self.hparams)
         for i, image in enumerate(imgs):
             img_pil = py_to_pil_op(image)
diff --git a/official/cv/faster_rcnn/src/dataset.py b/official/cv/faster_rcnn/src/dataset.py
index 64c955a4c..783f81f8c 100644
--- a/official/cv/faster_rcnn/src/dataset.py
+++ b/official/cv/faster_rcnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -552,7 +552,7 @@ def create_fasterrcnn_dataset(config, mindrecord_file, batch_size=2, device_num=
     de.config.set_prefetch_size(8)
     ds = de.MindDataset(mindrecord_file, columns_list=["image", "annotation"], num_shards=device_num, shard_id=rank_id,
                         num_parallel_workers=4, shuffle=is_training)
-    decode = ms.dataset.vision.c_transforms.Decode()
+    decode = ms.dataset.vision.Decode()
     ds = ds.map(input_columns=["image"], operations=decode)
     compose_map_func = (lambda image, annotation: preprocess_fn(image, annotation, is_training, config=config))
 
diff --git a/official/cv/fastscnn/eval.py b/official/cv/fastscnn/eval.py
index 86b94b430..9e6993bc2 100644
--- a/official/cv/fastscnn/eval.py
+++ b/official/cv/fastscnn/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,8 +23,8 @@ import mindspore.ops as ops
 from mindspore.context import ParallelMode
 from mindspore import load_checkpoint, load_param_into_net
 from mindspore.communication.management import init, get_rank, get_group_size
-from mindspore.dataset.transforms.py_transforms import Compose
-from mindspore.dataset.vision.py_transforms import ToTensor, Normalize
+from mindspore.dataset.transforms.transforms import Compose
+from mindspore.dataset.vision import ToTensor, Normalize
 
 from src.dataloader import create_CitySegmentation
 from src.fast_scnn import FastSCNN
@@ -140,7 +140,7 @@ def validation():
     # image transform
     input_transform = Compose([
         ToTensor(),
-        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], is_hwc=False),
     ])
     if args.use_modelarts:
         import moxing as mox
diff --git a/official/cv/fastscnn/modelarts/start_train.py b/official/cv/fastscnn/modelarts/start_train.py
index 665956f5e..8a17519e9 100644
--- a/official/cv/fastscnn/modelarts/start_train.py
+++ b/official/cv/fastscnn/modelarts/start_train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,8 +30,8 @@ from mindspore.common.tensor import Tensor
 from mindspore.context import ParallelMode
 from mindspore import FixedLossScaleManager
 from mindspore import load_checkpoint, load_param_into_net
-from mindspore.dataset.transforms.py_transforms import Compose
-from mindspore.dataset.vision.py_transforms import ToTensor, Normalize
+from mindspore.dataset.transforms.transforms import Compose
+from mindspore.dataset.vision import ToTensor, Normalize
 from mindspore.communication.management import init, get_rank, get_group_size
 from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
 
@@ -138,7 +138,7 @@ def train():
     # image transform
     input_transform = Compose([
         ToTensor(),
-        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], is_hwc=False),
     ])
 
     train_dataset, args.steps_per_epoch = create_CitySegmentation(args, data_path=args.dataset, \
diff --git a/official/cv/fastscnn/src/dataloader.py b/official/cv/fastscnn/src/dataloader.py
index bdaae9a50..b6a770a63 100644
--- a/official/cv/fastscnn/src/dataloader.py
+++ b/official/cv/fastscnn/src/dataloader.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import numpy as np
 from PIL import Image
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 from src.seg_data_base import SegmentationDataset
 
diff --git a/official/cv/fastscnn/train.py b/official/cv/fastscnn/train.py
index 8ba9af1a4..17b98e5e0 100644
--- a/official/cv/fastscnn/train.py
+++ b/official/cv/fastscnn/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,8 +27,8 @@ from mindspore.common.tensor import Tensor
 from mindspore.context import ParallelMode
 from mindspore import FixedLossScaleManager
 from mindspore import load_checkpoint, load_param_into_net
-from mindspore.dataset.transforms.py_transforms import Compose
-from mindspore.dataset.vision.py_transforms import ToTensor, Normalize
+from mindspore.dataset.transforms.transforms import Compose
+from mindspore.dataset.vision import ToTensor, Normalize
 from mindspore.communication.management import init, get_rank, get_group_size
 from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
 
@@ -130,7 +130,7 @@ def train():
     # image transform
     input_transform = Compose([
         ToTensor(),
-        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], is_hwc=False),
     ])
 
     if args.use_modelarts:
diff --git a/official/cv/googlenet/src/dataset.py b/official/cv/googlenet/src/dataset.py
index e7a82f05d..bd6bb2b52 100644
--- a/official/cv/googlenet/src/dataset.py
+++ b/official/cv/googlenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=None):
     """Data operations."""
diff --git a/official/cv/inceptionv3/src/dataset.py b/official/cv/inceptionv3/src/dataset.py
index e7fb6076d..f248974fb 100644
--- a/official/cv/inceptionv3/src/dataset.py
+++ b/official/cv/inceptionv3/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset_imagenet(dataset_path, do_train, cfg, repeat_num=1):
diff --git a/official/cv/inceptionv4/src/dataset.py b/official/cv/inceptionv4/src/dataset.py
index a27e4f150..3939cafba 100644
--- a/official/cv/inceptionv4/src/dataset.py
+++ b/official/cv/inceptionv4/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset_imagenet(dataset_path, do_train, cfg, repeat_num=1):
diff --git a/official/cv/lenet/src/dataset.py b/official/cv/lenet/src/dataset.py
index b3801105b..623ee7b73 100644
--- a/official/cv/lenet/src/dataset.py
+++ b/official/cv/lenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Produce the dataset
 """
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as CV
+import mindspore.dataset.transforms as C
 from mindspore.dataset.vision import Inter
 from mindspore.common import dtype as mstype
 
diff --git a/official/cv/maskrcnn/src/dataset.py b/official/cv/maskrcnn/src/dataset.py
index bc05b98c7..e6178203f 100644
--- a/official/cv/maskrcnn/src/dataset.py
+++ b/official/cv/maskrcnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import numpy as np
 from numpy import random
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 from .model_utils.config import config
diff --git a/official/cv/maskrcnn_mobilenetv1/src/dataset.py b/official/cv/maskrcnn_mobilenetv1/src/dataset.py
index 5acba3a77..19e6e1c30 100644
--- a/official/cv/maskrcnn_mobilenetv1/src/dataset.py
+++ b/official/cv/maskrcnn_mobilenetv1/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-21 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import numpy as np
 from numpy import random
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from mindspore import context
 
diff --git a/official/cv/mobilenetv1/src/dataset.py b/official/cv/mobilenetv1/src/dataset.py
index 3d7c67150..3b62fd343 100644
--- a/official/cv/mobilenetv1/src/dataset.py
+++ b/official/cv/mobilenetv1/src/dataset.py
@@ -47,18 +47,18 @@ def create_dataset1(dataset_path, do_train, device_num=1, batch_size=32, target=
     trans = []
     if do_train:
         trans += [
-            ds.vision.c_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
 
     trans += [
-        ds.vision.c_transforms.Resize((224, 224)),
-        ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-        ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-        ds.vision.c_transforms.HWC2CHW()
+        ds.vision.Resize((224, 224)),
+        ds.vision.Rescale(1.0 / 255.0, 0.0),
+        ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+        ds.vision.HWC2CHW()
     ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=THREAD_NUM)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=THREAD_NUM)
@@ -97,21 +97,21 @@ def create_dataset2(dataset_path, do_train, device_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=THREAD_NUM)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=THREAD_NUM)
diff --git a/official/cv/mobilenetv2/src/dataset.py b/official/cv/mobilenetv2/src/dataset.py
index 6149458d6..8d09a7e8c 100644
--- a/official/cv/mobilenetv2/src/dataset.py
+++ b/official/cv/mobilenetv2/src/dataset.py
@@ -52,24 +52,24 @@ def create_dataset(dataset_path, do_train, config, enable_cache=False, cache_ses
     buffer_size = 1000
 
     # define map operations
-    decode_op = ds.vision.c_transforms.Decode()
-    resize_crop_op = ds.vision.c_transforms.RandomCropDecodeResize(resize_height,
-                                                                   scale=(0.08, 1.0), ratio=(0.75, 1.333))
-    horizontal_flip_op = ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
-
-    resize_op = ds.vision.c_transforms.Resize((256, 256))
-    center_crop = ds.vision.c_transforms.CenterCrop(resize_width)
-    rescale_op = ds.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
-    normalize_op = ds.vision.c_transforms.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
-                                                    std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
-    change_swap_op = ds.vision.c_transforms.HWC2CHW()
+    decode_op = ds.vision.Decode()
+    resize_crop_op = ds.vision.RandomCropDecodeResize(resize_height,
+                                                      scale=(0.08, 1.0), ratio=(0.75, 1.333))
+    horizontal_flip_op = ds.vision.RandomHorizontalFlip(prob=0.5)
+
+    resize_op = ds.vision.Resize((256, 256))
+    center_crop = ds.vision.CenterCrop(resize_width)
+    rescale_op = ds.vision.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
+    normalize_op = ds.vision.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                       std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
+    change_swap_op = ds.vision.HWC2CHW()
 
     if do_train:
         trans = [resize_crop_op, horizontal_flip_op, rescale_op, normalize_op, change_swap_op]
     else:
         trans = [decode_op, resize_op, center_crop, normalize_op, change_swap_op]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=num_workers)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_workers)
diff --git a/official/cv/mobilenetv3/src/dataset.py b/official/cv/mobilenetv3/src/dataset.py
index 8061ca3eb..e29ab6767 100644
--- a/official/cv/mobilenetv3/src/dataset.py
+++ b/official/cv/mobilenetv3/src/dataset.py
@@ -49,24 +49,24 @@ def create_dataset(dataset_path, do_train, config, device_target, batch_size=32,
     buffer_size = 1000
 
     # define map operations
-    decode_op = ds.vision.c_transforms.Decode()
-    resize_crop_op = ds.vision.c_transforms.RandomCropDecodeResize(resize_height,
-                                                                   scale=(0.08, 1.0), ratio=(0.75, 1.333))
-    horizontal_flip_op = ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
-
-    resize_op = ds.vision.c_transforms.Resize(256)
-    center_crop = ds.vision.c_transforms.CenterCrop(resize_width)
-    rescale_op = ds.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
-    normalize_op = ds.vision.c_transforms.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
-                                                    std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
-    change_swap_op = ds.vision.c_transforms.HWC2CHW()
+    decode_op = ds.vision.Decode()
+    resize_crop_op = ds.vision.RandomCropDecodeResize(resize_height,
+                                                      scale=(0.08, 1.0), ratio=(0.75, 1.333))
+    horizontal_flip_op = ds.vision.RandomHorizontalFlip(prob=0.5)
+
+    resize_op = ds.vision.Resize(256)
+    center_crop = ds.vision.CenterCrop(resize_width)
+    rescale_op = ds.vision.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
+    normalize_op = ds.vision.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                       std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
+    change_swap_op = ds.vision.HWC2CHW()
 
     if do_train:
         trans = [resize_crop_op, horizontal_flip_op, rescale_op, normalize_op, change_swap_op]
     else:
         trans = [decode_op, resize_op, center_crop, normalize_op, change_swap_op]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -99,24 +99,24 @@ def create_dataset_cifar(dataset_path,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
-            ds.vision.c_transforms.Resize((224, 224)),
-            ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-            ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-            ds.vision.c_transforms.CutOut(112),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
+            ds.vision.Resize((224, 224)),
+            ds.vision.Rescale(1.0 / 255.0, 0.0),
+            ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+            ds.vision.CutOut(112),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Resize((224, 224)),
-            ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-            ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Resize((224, 224)),
+            ds.vision.Rescale(1.0 / 255.0, 0.0),
+            ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=type_cast_op,
                             input_columns="label",
diff --git a/official/cv/nasnet/src/dataset.py b/official/cv/nasnet/src/dataset.py
index efa36ab28..63b32b9f6 100644
--- a/official/cv/nasnet/src/dataset.py
+++ b/official/cv/nasnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ Data operations, will be used in train.py and eval.py
 import mindspore
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 def create_dataset(dataset_path, do_train, rank, group_size,
                    num_parallel_workers=8, batch_size=128,
diff --git a/official/cv/nima/src/dataset.py b/official/cv/nima/src/dataset.py
index be1be6b7f..35c480c19 100644
--- a/official/cv/nima/src/dataset.py
+++ b/official/cv/nima/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import numpy as np
 import mindspore
 import mindspore.dataset as ds
 from mindspore.dataset.vision import Inter
-from mindspore.dataset.vision import c_transforms as v_ct
-from mindspore.dataset.transforms import c_transforms as t_ct
+from mindspore.dataset.vision import transforms as v_ct
+from mindspore.dataset.transforms import transforms as t_ct
 
 
 class Dataset:
diff --git a/official/cv/patchcore/preprocess.py b/official/cv/patchcore/preprocess.py
index a4f35c46f..03586d845 100644
--- a/official/cv/patchcore/preprocess.py
+++ b/official/cv/patchcore/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,10 +20,10 @@ from pathlib import Path
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as vision
 from mindspore.common import set_seed
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.dataset.vision import Inter
 
 from src.config import _C as cfg
@@ -89,15 +89,15 @@ def createDataset(dataset_path, category):
     std = cfg.std_dft
 
     data_transforms = Compose([
-        py_vision.Resize((256, 256), interpolation=Inter.ANTIALIAS),
-        py_vision.CenterCrop(224),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=mean, std=std)
+        vision.Resize((256, 256), interpolation=Inter.ANTIALIAS),
+        vision.CenterCrop(224),
+        vision.ToTensor(),
+        vision.Normalize(mean=mean, std=std, is_hwc=False)
     ])
     gt_transforms = Compose([
-        py_vision.Resize((256, 256)),
-        py_vision.CenterCrop(224),
-        py_vision.ToTensor()
+        vision.Resize((256, 256)),
+        vision.CenterCrop(224),
+        vision.ToTensor()
     ])
 
     train_json_path, test_json_path = createDatasetJson(dataset_path, category, data_transforms, gt_transforms)
diff --git a/official/cv/patchcore/src/dataset.py b/official/cv/patchcore/src/dataset.py
index c8c4b48a9..29a05d788 100644
--- a/official/cv/patchcore/src/dataset.py
+++ b/official/cv/patchcore/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,9 +20,9 @@ from pathlib import Path
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.dataset.vision import Inter
 from PIL import Image
 
@@ -137,15 +137,15 @@ def createDataset(dataset_path, category):
     std = [0.229, 0.224, 0.225]
 
     data_transforms = Compose([
-        py_vision.Resize((256, 256), interpolation=Inter.ANTIALIAS),
-        py_vision.CenterCrop(224),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=mean, std=std)
+        vision.Resize((256, 256), interpolation=Inter.ANTIALIAS),
+        vision.CenterCrop(224),
+        vision.ToTensor(),
+        vision.Normalize(mean=mean, std=std, is_hwc=False)
     ])
     gt_transforms = Compose([
-        py_vision.Resize((256, 256)),
-        py_vision.CenterCrop(224),
-        py_vision.ToTensor()
+        vision.Resize((256, 256)),
+        vision.CenterCrop(224),
+        vision.ToTensor()
     ])
 
     train_json_path, test_json_path = createDatasetJson(dataset_path, category, data_transforms, gt_transforms)
diff --git a/official/cv/posenet/src/dataset.py b/official/cv/posenet/src/dataset.py
index c9454fadc..573d18992 100644
--- a/official/cv/posenet/src/dataset.py
+++ b/official/cv/posenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import numpy as np
 from mindspore.mindrecord import FileWriter
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 class Dataset:
     """dataset read"""
diff --git a/official/cv/psenet/src/dataset.py b/official/cv/psenet/src/dataset.py
index 8c88120b7..e0e0f94e7 100644
--- a/official/cv/psenet/src/dataset.py
+++ b/official/cv/psenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import numpy as np
 import Polygon as plg
 import pyclipper
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_transforms
+import mindspore.dataset.vision as vision
 from src.model_utils.config import config
 
 __all__ = ['train_dataset_creator', 'test_dataset_creator']
@@ -255,13 +255,13 @@ class TrainDataset:
         if self.is_transform:
             img = Image.fromarray(img)
             img = img.convert('RGB')
-            img = py_transforms.RandomColorAdjust(brightness=32.0 / 255, saturation=0.5)(img)
+            img = vision.RandomColorAdjust(brightness=32.0 / 255, saturation=0.5)(img)
         else:
             img = Image.fromarray(img)
             img = img.convert('RGB')
 
-        img = py_transforms.ToTensor()(img)
-        img = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)
+        img = vision.ToTensor()(img)
+        img = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)(img)
 
         gt_text = gt_text.astype(np.float32)
         gt_kernels = gt_kernels.astype(np.float32)
@@ -306,8 +306,8 @@ def IC15_TEST_Generator():
 
         img_resized = Image.fromarray(img_resized)
         img_resized = img_resized.convert('RGB')
-        img_resized = py_transforms.ToTensor()(img_resized)
-        img_resized = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img_resized)
+        img_resized = vision.ToTensor()(img_resized)
+        img_resized = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)(img_resized)
 
         yield img, img_resized, img_name
 
diff --git a/official/cv/pvnet/eval.py b/official/cv/pvnet/eval.py
index b687eee4a..52c273dc3 100644
--- a/official/cv/pvnet/eval.py
+++ b/official/cv/pvnet/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,7 @@ import time
 
 import numpy as np
 import mindspore
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.transforms as C
 from mindspore import context
 
 from model_utils.config import config as cfg
@@ -82,9 +81,9 @@ def test(args):
         pose = test_db[idx]['RT'].copy()
 
         rgb = read_rgb_np(rgb_path)
-        rgb = P.ToTensor()(rgb)
+        rgb = C.ToTensor()(rgb)
         rgb = C.TypeCast(mindspore.dtype.float32)(rgb)
-        rgb = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(rgb)
+        rgb = C.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)(rgb)
         rgb = np.expand_dims(rgb, axis=0)
         rgb = mindspore.Tensor(rgb)
 
diff --git a/official/cv/pvnet/src/dataset.py b/official/cv/pvnet/src/dataset.py
index 4a3d59580..29ee73acd 100644
--- a/official/cv/pvnet/src/dataset.py
+++ b/official/cv/pvnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,9 +18,8 @@ import os
 import cv2
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 import numpy as np
 
 from model_utils.config import config as cfg
@@ -242,9 +241,9 @@ def create_dataset(cls_list, batch_size=16, workers=16, devices=1, rank=0, multi
         CV.RandomColorAdjust(
             cfg.brightness, cfg.contrast,
             cfg.saturation, cfg.hue),
-        P.ToTensor(),  # 0~255 HWC to 0~1 CHW
+        C.ToTensor(),  # 0~255 HWC to 0~1 CHW
         C.TypeCast(mstype.float32),
-        P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+        C.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False),
     ])
 
     mask_transforms = [
diff --git a/official/cv/pwcnet/src/flyingchairs.py b/official/cv/pwcnet/src/flyingchairs.py
index c1d68d031..fdcefc667 100644
--- a/official/cv/pwcnet/src/flyingchairs.py
+++ b/official/cv/pwcnet/src/flyingchairs.py
@@ -18,8 +18,8 @@ from glob import glob
 
 import mindspore.dataset as de
 import mindspore
-import mindspore.dataset.vision.py_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as V
+import mindspore.dataset.transforms as T
 
 import src.common as common
 import src.transforms as transforms
@@ -131,16 +131,16 @@ class FlyingChairs():
         # photometric_augmentations
         if augmentations:
             self._photometric_transform = transforms.ConcatTransformSplitChainer([
-                CV.ToPIL(),
-                CV.RandomColorAdjust(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
-                CV.ToTensor(),
+                V.ToPIL(),
+                V.RandomColorAdjust(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
+                V.ToTensor(),
                 transforms.RandomGamma(min_gamma=0.7, max_gamma=1.5, clip_image=True)
                 ])
 
         else:
             self._photometric_transform = transforms.ConcatTransformSplitChainer([
-                CV.ToPIL(),
-                CV.ToTensor(),
+                V.ToPIL(),
+                V.ToTensor(),
                 ])
 
     def __getitem__(self, index):
@@ -176,9 +176,9 @@ def FlyingChairsTrain(dir_root, augmentations, dstype, batchsize, num_parallel_w
                                      shuffle=True, num_shards=world_size, shard_id=local_rank)
 
     # apply map operations on images
-    de_dataset = de_dataset.map(input_columns="im1", operations=C.TypeCast(mindspore.float32))
-    de_dataset = de_dataset.map(input_columns="im2", operations=C.TypeCast(mindspore.float32))
-    de_dataset = de_dataset.map(input_columns="flo", operations=C.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="im1", operations=T.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="im2", operations=T.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="flo", operations=T.TypeCast(mindspore.float32))
 
     de_dataset = de_dataset.batch(batchsize, drop_remainder=True)
     return de_dataset, dataset_len
diff --git a/official/cv/pwcnet/src/sintel.py b/official/cv/pwcnet/src/sintel.py
index 0c7b6caee..cce038c86 100644
--- a/official/cv/pwcnet/src/sintel.py
+++ b/official/cv/pwcnet/src/sintel.py
@@ -19,8 +19,8 @@ import numpy as np
 
 import mindspore.dataset as de
 import mindspore
-import mindspore.dataset.vision.py_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as V
+import mindspore.dataset.transforms as T
 
 import src.common as common
 import src.transforms as transforms
@@ -129,16 +129,16 @@ class Sintel():
         # photometric_augmentations
         if augmentations:
             self._photometric_transform = transforms.ConcatTransformSplitChainer([
-                CV.ToPIL(),
-                CV.RandomColorAdjust(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
-                CV.ToTensor(),
+                V.ToPIL(),
+                V.RandomColorAdjust(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
+                V.ToTensor(),
                 transforms.RandomGamma(min_gamma=0.7, max_gamma=1.5, clip_image=True)
                 ])
 
         else:
             self._photometric_transform = transforms.ConcatTransformSplitChainer([
-                CV.ToPIL(),
-                CV.ToTensor(),
+                V.ToPIL(),
+                V.ToTensor(),
                 ])
 
         self._size = len(self._image_list)
@@ -182,8 +182,8 @@ def SintelTraining(dir_root, augmentations, imgtype, dstype, batchsize, num_para
                                      shuffle=True, num_shards=world_size, shard_id=local_rank)
 
     # apply map operations on images
-    de_dataset = de_dataset.map(input_columns="im1", operations=C.TypeCast(mindspore.float32))
-    de_dataset = de_dataset.map(input_columns="im2", operations=C.TypeCast(mindspore.float32))
-    de_dataset = de_dataset.map(input_columns="flo", operations=C.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="im1", operations=T.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="im2", operations=T.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="flo", operations=T.TypeCast(mindspore.float32))
     de_dataset = de_dataset.batch(batchsize, drop_remainder=True)
     return de_dataset, dataset_len
diff --git a/official/cv/resnet/gpu_resnet_benchmark.py b/official/cv/resnet/gpu_resnet_benchmark.py
index 01e3be045..094301e02 100644
--- a/official/cv/resnet/gpu_resnet_benchmark.py
+++ b/official/cv/resnet/gpu_resnet_benchmark.py
@@ -90,28 +90,28 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="
     std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
 
     # define map operations
-    normalize_op = ds.vision.c_transforms.Normalize(mean=mean, std=std)
+    normalize_op = ds.vision.Normalize(mean=mean, std=std)
     if dtype == "fp16":
         if config.eval:
             x_dtype = "float32"
         else:
             x_dtype = "float16"
-        normalize_op = ds.vision.c_transforms.NormalizePad(mean=mean, std=std, dtype=x_dtype)
+        normalize_op = ds.vision.NormalizePad(mean=mean, std=std, dtype=x_dtype)
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
             normalize_op,
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
             normalize_op,
         ]
     if dtype == "fp32":
-        trans.append(ds.vision.c_transforms.HWC2CHW())
+        trans.append(ds.vision.HWC2CHW())
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=map_num_parallel_worker)
     # apply batch operations
     data_set = data_set.batch(batch_size, drop_remainder=True, num_parallel_workers=batch_num_parallel_worker)
diff --git a/official/cv/resnet/src/dataset.py b/official/cv/resnet/src/dataset.py
index a13626f18..7cf8bc978 100644
--- a/official/cv/resnet/src/dataset.py
+++ b/official/cv/resnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -50,18 +50,18 @@ def create_dataset1(dataset_path, do_train, batch_size=32, train_image_size=224,
     trans = []
     if do_train:
         trans += [
-            ds.vision.c_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
 
     trans += [
-        ds.vision.c_transforms.Resize((train_image_size, train_image_size)),
-        ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-        ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-        ds.vision.c_transforms.HWC2CHW()
+        ds.vision.Resize((train_image_size, train_image_size)),
+        ds.vision.Rescale(1.0 / 255.0, 0.0),
+        ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+        ds.vision.HWC2CHW()
     ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=type_cast_op, input_columns="label",
                             num_parallel_workers=get_num_parallel_workers(8))
@@ -117,18 +117,18 @@ def create_dataset2(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size)
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size)
         ]
-    trans_norm = [ds.vision.c_transforms.Normalize(mean=mean, std=std), ds.vision.c_transforms.HWC2CHW()]
+    trans_norm = [ds.vision.Normalize(mean=mean, std=std), ds.vision.HWC2CHW()]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     if device_num == 1:
         trans_work_num = 24
     else:
@@ -190,21 +190,21 @@ def create_dataset_pynative(dataset_path, do_train, batch_size=32, train_image_s
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=4)
     # only enable cache for eval
@@ -257,21 +257,21 @@ def create_dataset3(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(rank_id / (rank_id + 1)),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(rank_id / (rank_id + 1)),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=get_num_parallel_workers(8))
     # only enable cache for eval
@@ -326,21 +326,21 @@ def create_dataset4(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(292),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(292),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=get_num_parallel_workers(12))
     # only enable cache for eval
     if do_train:
diff --git a/official/cv/resnet/src/dataset_infer.py b/official/cv/resnet/src/dataset_infer.py
index 5d0a655e8..ce032b1db 100644
--- a/official/cv/resnet/src/dataset_infer.py
+++ b/official/cv/resnet/src/dataset_infer.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -130,21 +130,21 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -202,21 +202,21 @@ def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(rank_id / (rank_id + 1)),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(rank_id / (rank_id + 1)),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -271,21 +271,21 @@ def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(292),
-            ds.vision.c_transforms.CenterCrop(256),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(292),
+            ds.vision.CenterCrop(256),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=12)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)
     if do_train:
diff --git a/official/cv/resnet_thor/src/dataset.py b/official/cv/resnet_thor/src/dataset.py
index 443817150..ae1dbee0a 100644
--- a/official/cv/resnet_thor/src/dataset.py
+++ b/official/cv/resnet_thor/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/official/cv/resnext/src/dataset.py b/official/cv/resnext/src/dataset.py
index d7f5b4678..a3aba86c3 100644
--- a/official/cv/resnext/src/dataset.py
+++ b/official/cv/resnext/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from src.utils.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/retinanet/src/dataset.py b/official/cv/retinanet/src/dataset.py
index c5105e9c4..312c35d89 100644
--- a/official/cv/retinanet/src/dataset.py
+++ b/official/cv/retinanet/src/dataset.py
@@ -23,7 +23,7 @@ import xml.etree.ElementTree as et
 import numpy as np
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.model_utils.config import config
 from .box_utils import jaccard_numpy, retinanet_bboxes_encode
diff --git a/official/cv/se_resnext50/src/dataset.py b/official/cv/se_resnext50/src/dataset.py
index 4ce7b43c0..9fc23bc1d 100644
--- a/official/cv/se_resnext50/src/dataset.py
+++ b/official/cv/se_resnext50/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from src.utils.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/semantic_human_matting/src/dataset.py b/official/cv/semantic_human_matting/src/dataset.py
index 31e97d1fa..ac7202e9d 100644
--- a/official/cv/semantic_human_matting/src/dataset.py
+++ b/official/cv/semantic_human_matting/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import cv2
 import numpy as np
 
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import dtype as mstype
 
 
diff --git a/official/cv/shufflenetv1/src/dataset.py b/official/cv/shufflenetv1/src/dataset.py
index 48588b5ab..6656d3503 100644
--- a/official/cv/shufflenetv1/src/dataset.py
+++ b/official/cv/shufflenetv1/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 from src.model_utils.config import config
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, device_num=1, rank=0):
diff --git a/official/cv/shufflenetv2/src/dataset.py b/official/cv/shufflenetv2/src/dataset.py
index 0fbe28daa..96b8835a9 100644
--- a/official/cv/shufflenetv2/src/dataset.py
+++ b/official/cv/shufflenetv2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import numpy as np
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 class toBGR():
     def __call__(self, img):
diff --git a/official/cv/simclr/src/dataset.py b/official/cv/simclr/src/dataset.py
index 1b14f0a5e..ef9133359 100644
--- a/official/cv/simclr/src/dataset.py
+++ b/official/cv/simclr/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,9 +17,8 @@ create train or eval dataset.
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision import Inter
 import cv2
 import numpy as np
@@ -70,8 +69,8 @@ def create_dataset(args, dataset_mode, repeat_num=1):
             color_jitter = C.RandomColorAdjust(0.8 * scale, 0.8 * scale, 0.8 * scale, 0.2 * scale)
             trans += [C2.RandomApply([color_jitter], prob=0.8)]
         if args.use_color_gray:
-            trans += [py_vision.ToPIL(),
-                      py_vision.RandomGrayscale(prob=0.2),
+            trans += [C.ToPIL(),
+                      C.RandomGrayscale(prob=0.2),
                       np.array]  # need to convert PIL image to a NumPy array to pass it to C++ operation
         if args.use_blur:
             trans += [C2.RandomApply([gaussian_blur], prob=0.8)]
diff --git a/official/cv/simple_pose/src/dataset.py b/official/cv/simple_pose/src/dataset.py
index 35b85ba24..9e8bbc741 100644
--- a/official/cv/simple_pose/src/dataset.py
+++ b/official/cv/simple_pose/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.vision as V_C
 
 from src.utils.transform import fliplr_joints, get_affine_transform, affine_transform
 
diff --git a/official/cv/sphereface/src/datasets/classification.py b/official/cv/sphereface/src/datasets/classification.py
index c5bc2f984..3d9c04eab 100644
--- a/official/cv/sphereface/src/datasets/classification.py
+++ b/official/cv/sphereface/src/datasets/classification.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,8 +22,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as vision_C
-import mindspore.dataset.transforms.c_transforms as normal_C
+import mindspore.dataset.vision as vision_C
+import mindspore.dataset.transforms as normal_C
 from src.datasets.sampler import DistributedSampler
 from src.model_utils.matlab_cp2tform import get_similarity_transform_for_cv2
 import cv2
diff --git a/official/cv/squeezenet/src/dataset.py b/official/cv/squeezenet/src/dataset.py
index ac70267a3..407091415 100644
--- a/official/cv/squeezenet/src/dataset.py
+++ b/official/cv/squeezenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import multiprocessing
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/official/cv/ssd/src/dataset.py b/official/cv/ssd/src/dataset.py
index 11a1945e2..a79f605f5 100644
--- a/official/cv/ssd/src/dataset.py
+++ b/official/cv/ssd/src/dataset.py
@@ -398,12 +398,12 @@ def create_ssd_dataset(mindrecord_file, batch_size=32, device_num=1, rank=0,
         num_parallel_workers = cores
     ds = de.MindDataset(mindrecord_file, columns_list=["img_id", "image", "annotation"], num_shards=device_num,
                         shard_id=rank, num_parallel_workers=num_parallel_workers, shuffle=is_training)
-    decode = de.vision.c_transforms.Decode()
+    decode = de.vision.Decode()
     ds = ds.map(operations=decode, input_columns=["image"])
-    change_swap_op = de.vision.c_transforms.HWC2CHW()
-    normalize_op = de.vision.c_transforms.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
-                                                    std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
-    color_adjust_op = de.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
+    change_swap_op = de.vision.HWC2CHW()
+    normalize_op = de.vision.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                       std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
+    color_adjust_op = de.vision.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
     compose_map_func = (lambda img_id, image, annotation: preprocess_fn(img_id, image, annotation, is_training))
     if is_training:
         output_columns = ["image", "box", "label", "num_match"]
diff --git a/official/cv/ssim-ae/src/dataset.py b/official/cv/ssim-ae/src/dataset.py
index 54a415dc7..aa783b2b6 100644
--- a/official/cv/ssim-ae/src/dataset.py
+++ b/official/cv/ssim-ae/src/dataset.py
@@ -20,7 +20,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c_trans
+import mindspore.dataset.vision as c_trans
 
 from model_utils.config import config as cfg
 from src.utils import read_img, get_file_list
diff --git a/official/cv/tinydarknet/src/dataset.py b/official/cv/tinydarknet/src/dataset.py
index cadeb0834..48cf89368 100644
--- a/official/cv/tinydarknet/src/dataset.py
+++ b/official/cv/tinydarknet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from mindspore.communication.management import init, get_rank
 from src.model_utils.config import config as imagenet_cfg
diff --git a/official/cv/unet/src/data_loader.py b/official/cv/unet/src/data_loader.py
index 494c40643..16cd33e04 100644
--- a/official/cv/unet/src/data_loader.py
+++ b/official/cv/unet/src/data_loader.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import cv2
 import numpy as np
 from PIL import Image, ImageSequence
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c_vision
+import mindspore.dataset.vision as c_vision
 from mindspore.dataset.vision.utils import Inter
 from mindspore.communication.management import get_rank, get_group_size
 
diff --git a/official/cv/unet3d/src/dataset.py b/official/cv/unet3d/src/dataset.py
index b3b828e3c..d2165808b 100644
--- a/official/cv/unet3d/src/dataset.py
+++ b/official/cv/unet3d/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import glob
 import numpy as np
 import mindspore.dataset as ds
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from src.model_utils.config import config
 from src.transform import Dataset, ExpandChannel, LoadData, Orientation, ScaleIntensityRange, RandomCropSamples, OneHot
 
diff --git a/official/cv/vgg16/src/dataset.py b/official/cv/vgg16/src/dataset.py
index 6fb95f9ac..013205eb9 100644
--- a/official/cv/vgg16/src/dataset.py
+++ b/official/cv/vgg16/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.utils.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/vit/src/dataset.py b/official/cv/vit/src/dataset.py
index 305faebc2..cd63da9b1 100644
--- a/official/cv/vit/src/dataset.py
+++ b/official/cv/vit/src/dataset.py
@@ -22,9 +22,8 @@ import numpy as np
 
 import mindspore as ms
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision.utils import Inter
 
 from .autoaugment import ImageNetPolicy
@@ -107,7 +106,7 @@ def create_dataset(dataset_path,
         ]
         if autoaugment:
             trans += [
-                P.ToPIL(),
+                C.ToPIL(),
                 ImageNetPolicy(),
                 ToNumpy(),
             ]
diff --git a/official/cv/warpctc/src/dataset.py b/official/cv/warpctc/src/dataset.py
index c58348805..08eb86590 100644
--- a/official/cv/warpctc/src/dataset.py
+++ b/official/cv/warpctc/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import numpy as np
 from PIL import Image
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as c
-import mindspore.dataset.vision.c_transforms as vc
+import mindspore.dataset.transforms as c
+import mindspore.dataset.vision as vc
 from src.model_utils.config import config
 
 
diff --git a/official/cv/xception/src/dataset.py b/official/cv/xception/src/dataset.py
index adcc10f87..d5b76f8e1 100644
--- a/official/cv/xception/src/dataset.py
+++ b/official/cv/xception/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/official/cv/yolov3_darknet53/src/yolo_dataset.py b/official/cv/yolov3_darknet53/src/yolo_dataset.py
index 684b1b6eb..82f06c7f4 100644
--- a/official/cv/yolov3_darknet53/src/yolo_dataset.py
+++ b/official/cv/yolov3_darknet53/src/yolo_dataset.py
@@ -156,7 +156,7 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank,
 
     yolo_dataset = COCOYoloDataset(root=image_dir, ann_file=anno_path, filter_crowd_anno=filter_crowd,
                                    remove_images_without_annotations=remove_empty_anno, is_training=is_training)
-    hwc_to_chw = ds.vision.c_transforms.HWC2CHW()
+    hwc_to_chw = ds.vision.HWC2CHW()
 
     config.dataset_size = len(yolo_dataset)
     cores = multiprocessing.cpu_count()
@@ -168,12 +168,12 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank,
                                 "gt_box1", "gt_box2", "gt_box3"]
         if device_num != 8:
             dataset = ds.GeneratorDataset(yolo_dataset, column_names=dataset_column_names, sampler=distributed_sampler)
-            dataset = dataset.map(operations=ds.vision.c_transforms.Decode(), input_columns=["image"])
+            dataset = dataset.map(operations=ds.vision.Decode(), input_columns=["image"])
             dataset = dataset.batch(batch_size, per_batch_map=multi_scale_trans, input_columns=dataset_column_names,
                                     num_parallel_workers=min(32, num_parallel_workers), drop_remainder=True)
         else:
             dataset = ds.GeneratorDataset(yolo_dataset, column_names=dataset_column_names, sampler=distributed_sampler)
-            dataset = dataset.map(operations=ds.vision.c_transforms.Decode(), input_columns=["image"])
+            dataset = dataset.map(operations=ds.vision.Decode(), input_columns=["image"])
             dataset = dataset.batch(batch_size, per_batch_map=multi_scale_trans, input_columns=dataset_column_names,
                                     num_parallel_workers=min(8, num_parallel_workers), drop_remainder=True)
     else:
diff --git a/official/cv/yolov3_resnet18/src/dataset.py b/official/cv/yolov3_resnet18/src/dataset.py
index 4a2651957..e5d3f391d 100644
--- a/official/cv/yolov3_resnet18/src/dataset.py
+++ b/official/cv/yolov3_resnet18/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ import numpy as np
 from PIL import Image
 import mindspore.dataset as de
 from mindspore.mindrecord import FileWriter
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.config import ConfigYOLOV3ResNet18
 
 iter_cnt = 0
diff --git a/official/cv/yolov4/src/yolo_dataset.py b/official/cv/yolov4/src/yolo_dataset.py
index 1ddca7782..f01e55021 100644
--- a/official/cv/yolov4/src/yolo_dataset.py
+++ b/official/cv/yolov4/src/yolo_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import numpy as np
 from PIL import Image
 from pycocotools.coco import COCO
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 from model_utils.config import config
 from src.distributed_sampler import DistributedSampler
 from src.transforms import reshape_fn, MultiScaleTrans
diff --git a/official/cv/yolov5/src/transforms.py b/official/cv/yolov5/src/transforms.py
index 928dbab2c..ac2352f24 100644
--- a/official/cv/yolov5/src/transforms.py
+++ b/official/cv/yolov5/src/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import copy
 import numpy as np
 from PIL import Image
 import cv2
-import mindspore.dataset as ds
+import mindspore.dataset.vision as vision
 
 
 def _rand(a=0., b=1.):
@@ -524,7 +524,7 @@ class MultiScaleTrans:
 
     def __call__(self, img, anno, input_size, mosaic_flag):
         if mosaic_flag[0] == 0:
-            img = ds.vision.py_transforms.Decode()(img)
+            img = vision.Decode(True)(img)
         img, anno = preprocess_fn(img, anno, self.config, input_size, self.device_num)
         return img, anno, np.array(img.shape[0:2])
 
diff --git a/official/cv/yolov5/src/yolo_dataset.py b/official/cv/yolov5/src/yolo_dataset.py
index f4c602c51..a0b8dbf11 100644
--- a/official/cv/yolov5/src/yolo_dataset.py
+++ b/official/cv/yolov5/src/yolo_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -240,7 +240,7 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank,
                                    remove_images_without_annotations=remove_empty_anno, is_training=is_training)
     distributed_sampler = DistributedSampler(len(yolo_dataset), device_num, rank, shuffle=shuffle)
     yolo_dataset.size = len(distributed_sampler)
-    hwc_to_chw = ds.vision.c_transforms.HWC2CHW()
+    hwc_to_chw = ds.vision.HWC2CHW()
 
     config.dataset_size = len(yolo_dataset)
     cores = multiprocessing.cpu_count()
@@ -267,7 +267,7 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank,
                               num_parallel_workers=min(4, num_parallel_workers), python_multiprocessing=False)
         mean = [m * 255 for m in [0.485, 0.456, 0.406]]
         std = [s * 255 for s in [0.229, 0.224, 0.225]]
-        dataset = dataset.map([ds.vision.c_transforms.Normalize(mean, std), hwc_to_chw],
+        dataset = dataset.map([ds.vision.Normalize(mean, std), hwc_to_chw],
                               num_parallel_workers=min(4, num_parallel_workers))
 
         def concatenate(images):
diff --git a/official/nlp/bert/src/dataset.py b/official/nlp/bert/src/dataset.py
index 2864d3e8c..ece989012 100644
--- a/official/nlp/bert/src/dataset.py
+++ b/official/nlp/bert/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import math
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 
 
diff --git a/official/nlp/bert/src/finetune_data_preprocess.py b/official/nlp/bert/src/finetune_data_preprocess.py
index 44cd375f7..3f9f682b5 100644
--- a/official/nlp/bert/src/finetune_data_preprocess.py
+++ b/official/nlp/bert/src/finetune_data_preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ from lxml import etree
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 import mindspore.dataset.text as text
-import mindspore.dataset.transforms.c_transforms as ops
+import mindspore.dataset.transforms as ops
 from utils import convert_labels_to_index
 
 
diff --git a/official/nlp/bert_thor/pretrain_eval.py b/official/nlp/bert_thor/pretrain_eval.py
index 73c236905..a4f824d73 100644
--- a/official/nlp/bert_thor/pretrain_eval.py
+++ b/official/nlp/bert_thor/pretrain_eval.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ from src.evaluation_config import cfg, bert_net_cfg
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.nn as nn
 from mindspore import context
 from mindspore.common.parameter import Parameter
diff --git a/official/nlp/bert_thor/src/dataset.py b/official/nlp/bert_thor/src/dataset.py
index 8e6dccec0..620b66807 100644
--- a/official/nlp/bert_thor/src/dataset.py
+++ b/official/nlp/bert_thor/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ Data operations, will be used in run_pretrain.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 from .config import cfg
 
diff --git a/official/nlp/cpm/train.py b/official/nlp/cpm/train.py
index 8ce5edc27..ca9b4d249 100644
--- a/official/nlp/cpm/train.py
+++ b/official/nlp/cpm/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ from mindspore.train.callback import TimeMonitor, ModelCheckpoint, CheckpointCon
 from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore.parallel import set_algo_parameters
 
 from src.cpm_train import CPMWithLoss, CPMTrainOneStepWithLossScaleCell, VirtualDatasetOneInputCell, \
diff --git a/official/nlp/dgu/src/utils.py b/official/nlp/dgu/src/utils.py
index 474bd2b7e..0d8174f4e 100644
--- a/official/nlp/dgu/src/utils.py
+++ b/official/nlp/dgu/src/utils.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import os
 import numpy as np
 
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.nn as nn
 import mindspore.ops as P
 
diff --git a/official/nlp/duconv/src/dataset.py b/official/nlp/duconv/src/dataset.py
index be6752663..4d8e69c99 100644
--- a/official/nlp/duconv/src/dataset.py
+++ b/official/nlp/duconv/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ Data loader
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 
 def create_dataset(batch_size, device_num=1, rank=0, num_workers=8, do_shuffle=True,
diff --git a/official/nlp/emotect/src/dataset.py b/official/nlp/emotect/src/dataset.py
index 7adeac9f3..e8be1e68f 100644
--- a/official/nlp/emotect/src/dataset.py
+++ b/official/nlp/emotect/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ Data operations, will be used in run_pretrain.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 def create_classification_dataset(batch_size=1,
                                   repeat_count=1,
diff --git a/official/nlp/ernie/src/dataset.py b/official/nlp/ernie/src/dataset.py
index 15b6c61dc..d672304a3 100644
--- a/official/nlp/ernie/src/dataset.py
+++ b/official/nlp/ernie/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ Data operations, will be used in run_pretrain.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 from .config import cfg
 
diff --git a/official/nlp/fasttext/eval.py b/official/nlp/fasttext/eval.py
index d4ca9d23a..92f28e027 100644
--- a/official/nlp/fasttext/eval.py
+++ b/official/nlp/fasttext/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ from mindspore.common.tensor import Tensor
 from mindspore.train.model import Model
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 from mindspore import context
 from src.fasttext_model import FastText
 
diff --git a/official/nlp/gnmt_v2/src/dataset/load_dataset.py b/official/nlp/gnmt_v2/src/dataset/load_dataset.py
index 8af9fe84c..79a690daf 100644
--- a/official/nlp/gnmt_v2/src/dataset/load_dataset.py
+++ b/official/nlp/gnmt_v2/src/dataset/load_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Dataset loader to feed into model."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 
 def _load_dataset(input_files, batch_size, sink_mode=False,
diff --git a/official/nlp/gpt/src/dataset.py b/official/nlp/gpt/src/dataset.py
index a2b3ec638..29c9fbbad 100644
--- a/official/nlp/gpt/src/dataset.py
+++ b/official/nlp/gpt/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ Create dataset for training and evaluating
 
 import os
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 
diff --git a/official/nlp/gru/src/dataset.py b/official/nlp/gru/src/dataset.py
index d63d5c2cc..6de44d1c9 100644
--- a/official/nlp/gru/src/dataset.py
+++ b/official/nlp/gru/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 from model_utils.config import config
 
 de.config.set_seed(1)
diff --git a/official/nlp/mass/src/dataset/load_dataset.py b/official/nlp/mass/src/dataset/load_dataset.py
index 879ccf41c..377b6123b 100644
--- a/official/nlp/mass/src/dataset/load_dataset.py
+++ b/official/nlp/mass/src/dataset/load_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Dataset loader to feed into model."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 
 def _load_dataset(input_files, batch_size, epoch_count=1,
diff --git a/official/nlp/pangu_alpha/src/dataset.py b/official/nlp/pangu_alpha/src/dataset.py
index b18fd397f..8e803d82d 100644
--- a/official/nlp/pangu_alpha/src/dataset.py
+++ b/official/nlp/pangu_alpha/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ Create dataset for training and evaluating
 import os
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 from mindspore import context
diff --git a/official/nlp/prophetnet/src/dataset/load_dataset.py b/official/nlp/prophetnet/src/dataset/load_dataset.py
index e585f50b9..84aaf94a7 100644
--- a/official/nlp/prophetnet/src/dataset/load_dataset.py
+++ b/official/nlp/prophetnet/src/dataset/load_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Dataset loader to feed into model."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 
 def _load_dataset(input_files, batch_size, epoch_count=1,
diff --git a/official/nlp/tinybert/src/dataset.py b/official/nlp/tinybert/src/dataset.py
index 2b023f699..62a0523c4 100644
--- a/official/nlp/tinybert/src/dataset.py
+++ b/official/nlp/tinybert/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import os
 from enum import Enum
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 
 class DataType(Enum):
diff --git a/official/nlp/transformer/eval.py b/official/nlp/transformer/eval.py
index 07a30c5d6..e3e6f367f 100644
--- a/official/nlp/transformer/eval.py
+++ b/official/nlp/transformer/eval.py
@@ -23,7 +23,7 @@ from mindspore.common.parameter import Parameter
 from mindspore.common.tensor import Tensor
 from mindspore.train.model import Model
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 from src.transformer_model import TransformerModel
 from src.model_utils.config import config
diff --git a/official/nlp/transformer/src/dataset.py b/official/nlp/transformer/src/dataset.py
index 551639072..4728db94e 100644
--- a/official/nlp/transformer/src/dataset.py
+++ b/official/nlp/transformer/src/dataset.py
@@ -33,7 +33,7 @@ def create_transformer_dataset(rank_size=1, rank_id=0, do_shuffle="true", datase
                                           "target_sos_ids", "target_sos_mask",
                                           "target_eos_ids", "target_eos_mask"],
                             shuffle=(do_shuffle == "true"), num_shards=rank_size, shard_id=rank_id)
-        type_cast_op = de.transforms.c_transforms.TypeCast(ms.int32)
+        type_cast_op = de.transforms.transforms.TypeCast(ms.int32)
         ds = ds.map(operations=type_cast_op, input_columns="source_eos_ids")
         ds = ds.map(operations=type_cast_op, input_columns="source_eos_mask")
         ds = ds.map(operations=type_cast_op, input_columns="target_sos_ids")
diff --git a/research/audio/ctcmodel/src/dataset.py b/research/audio/ctcmodel/src/dataset.py
index e0cc55926..117a3cc9d 100644
--- a/research/audio/ctcmodel/src/dataset.py
+++ b/research/audio/ctcmodel/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 """Dataset preprocessing."""
 
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 
diff --git a/research/audio/speech_transformer/src/dataset.py b/research/audio/speech_transformer/src/dataset.py
index 1ae5a5ee6..5354e39f6 100644
--- a/research/audio/speech_transformer/src/dataset.py
+++ b/research/audio/speech_transformer/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from pathlib import Path
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 import numpy as np
 
 from .model_utils.config import config
diff --git a/research/cv/3dcnn/src/dataset.py b/research/cv/3dcnn/src/dataset.py
index 0610c9b06..1879601b0 100644
--- a/research/cv/3dcnn/src/dataset.py
+++ b/research/cv/3dcnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from nibabel import load as load_nii
 
 import mindspore.dataset as ds
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 
 
 def norm(image):
diff --git a/research/cv/APDrawingGAN/src/data/aligned_dataset.py b/research/cv/APDrawingGAN/src/data/aligned_dataset.py
index 894c6c9b7..5e912e722 100644
--- a/research/cv/APDrawingGAN/src/data/aligned_dataset.py
+++ b/research/cv/APDrawingGAN/src/data/aligned_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import os.path
 import random
 import csv
 import cv2
-import mindspore.dataset.vision.py_transforms as P_VISION
+import mindspore.dataset.vision as vision
 import mindspore.ops as ops
 from mindspore import Tensor
 from mindspore import dtype as mstype
@@ -139,8 +139,8 @@ def init_AB(opt, AB_path):
         (opt.loadSize, opt.loadSize), Image.BICUBIC)
     B = AB.crop((w2, 0, w, h)).resize(
         (opt.loadSize, opt.loadSize), Image.BICUBIC)
-    A = P_VISION.ToTensor()(A)
-    B = P_VISION.ToTensor()(B)
+    A = vision.ToTensor()(A)
+    B = vision.ToTensor()(B)
     w_offset = random.randint(
         0, max(0, opt.loadSize - opt.fineSize - 1))
     h_offset = random.randint(
@@ -151,8 +151,8 @@ def init_AB(opt, AB_path):
     B = B[:, h_offset:h_offset + opt.fineSize,
           w_offset:w_offset + opt.fineSize]
 
-    A = P_VISION.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A)
-    B = P_VISION.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(B)
+    A = vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)(A)
+    B = vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)(B)
     return A, B
 
 def regions_process(opt, regions, feats, item, A, B, input_nc, output_nc):
@@ -270,7 +270,7 @@ class AlignedDataset(BaseDataset):
             bgdir = self.opt.bg_dir
             bgpath = os.path.join(bgdir, basen[:-4] + '.png')
             im_bg = Image.open(bgpath)
-            mask2 = P_VISION.ToTensor()(im_bg)  # mask out background
+            mask2 = vision.ToTensor()(im_bg)  # mask out background
 
             if flipped:
                 mask2 = np.take(mask2, idx, axis=2)
diff --git a/research/cv/APDrawingGAN/src/data/base_dataset.py b/research/cv/APDrawingGAN/src/data/base_dataset.py
index cd0c4d8f4..f782c1cd2 100644
--- a/research/cv/APDrawingGAN/src/data/base_dataset.py
+++ b/research/cv/APDrawingGAN/src/data/base_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 """base dataset"""
 
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as py_trans
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.dataset.vision import Inter
 
 
@@ -45,17 +45,17 @@ def get_transform(opt):
     transform_list = []
     if opt.resize_or_crop == 'resize_and_crop':
         osize = [opt.loadSize, opt.fineSize]
-        transform_list.append(py_trans.Resize(osize, Inter.BICUBIC))  # PIL
-        transform_list.append(py_trans.RandomCrop(opt.fineSize))  # PIL
+        transform_list.append(vision.Resize(osize, Inter.BICUBIC))  # PIL
+        transform_list.append(vision.RandomCrop(opt.fineSize))  # PIL
     elif opt.resize_or_crop == 'crop':
-        transform_list.append(py_trans.RandomCrop(opt.fineSize))
+        transform_list.append(vision.RandomCrop(opt.fineSize))
     elif opt.resize_or_crop == 'scale_width':
         transform_list.append(
             lambda img: __scale_width(img, opt.fineSize))
     elif opt.resize_or_crop == 'scale_width_and_crop':
         transform_list.append(
             lambda img: __scale_width(img, opt.loadSize))
-        transform_list.append(py_trans.RandomCrop(opt.fineSize))
+        transform_list.append(vision.RandomCrop(opt.fineSize))
     elif opt.resize_or_crop == 'none':
         transform_list.append(
             lambda img: __adjust(img))
@@ -63,11 +63,11 @@ def get_transform(opt):
         raise ValueError('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop)
 
     if opt.isTrain and not opt.no_flip:
-        transform_list.append(py_trans.RandomHorizontalFlip())
+        transform_list.append(vision.RandomHorizontalFlip())
 
-    transform_list += [py_trans.ToTensor(),
-                       py_trans.Normalize((0.5, 0.5, 0.5),
-                                          (0.5, 0.5, 0.5))]
+    transform_list += [vision.ToTensor(),
+                       vision.Normalize((0.5, 0.5, 0.5),
+                                        (0.5, 0.5, 0.5), is_hwc=False)]
     return Compose(transform_list)
 
 # just modify the width and height to be multiple of 4
diff --git a/research/cv/AVA_cifar/src/datasets.py b/research/cv/AVA_cifar/src/datasets.py
index 6761f9090..df2d7a6d3 100644
--- a/research/cv/AVA_cifar/src/datasets.py
+++ b/research/cv/AVA_cifar/src/datasets.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,9 +16,9 @@
 
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as transforms
-import mindspore.dataset.transforms.c_transforms as C
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.transforms as data_trans
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.common import dtype as mstype
 from src.RandAugment import RandAugment
 from src.autoaugment import CIFAR10Policy
@@ -32,39 +32,39 @@ class CIFAR10Dataset():
 
         if not training:
             trsfm = Compose([
-                transforms.ToTensor(),
-                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+                vision.ToTensor(),
+                vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
             ])
         else:
             if not use_third_trsfm:
                 trsfm = Compose([
-                    transforms.ToPIL(),
-                    transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
-                    transforms.RandomColorAdjust(0.4, 0.4, 0.4, 0.4),
-                    transforms.RandomGrayscale(prob=0.2),
-                    transforms.RandomHorizontalFlip(),
-                    transforms.ToTensor(),
-                    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+                    vision.ToPIL(),
+                    vision.RandomResizedCrop(size=32, scale=(0.2, 1.)),
+                    vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.4),
+                    vision.RandomGrayscale(prob=0.2),
+                    vision.RandomHorizontalFlip(),
+                    vision.ToTensor(),
+                    vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
                 ])
             else:
                 if use_auto_augment:
                     trsfm = Compose([
-                        transforms.ToPIL(),
-                        transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
-                        transforms.RandomHorizontalFlip(),
+                        vision.ToPIL(),
+                        vision.RandomResizedCrop(size=32, scale=(0.2, 1.)),
+                        vision.RandomHorizontalFlip(),
                         CIFAR10Policy(),
-                        transforms.ToTensor(),
-                        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+                        vision.ToTensor(),
+                        vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
                     ])
                 else:
                     rand_augment = RandAugment(n=2, m=10)
                     trsfm = Compose([
-                        transforms.ToPIL(),
-                        transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
-                        transforms.RandomHorizontalFlip(),
+                        vision.ToPIL(),
+                        vision.RandomResizedCrop(size=32, scale=(0.2, 1.)),
+                        vision.RandomHorizontalFlip(),
                         rand_augment,
-                        transforms.ToTensor(),
-                        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+                        vision.ToTensor(),
+                        vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
                     ])
 
         self.trsfm = trsfm
@@ -83,7 +83,7 @@ class CIFAR10Dataset():
                                     num_shards=self.device_num, shard_id=self.device_id)
 
         ds_ = ds_.map(input_columns=["image"], operations=self.trsfm)
-        typecast_op = C.TypeCast(mstype.int32)
+        typecast_op = data_trans.TypeCast(mstype.int32)
         ds_ = ds_.map(input_columns=["label"], operations=typecast_op)
         return ds_
 
diff --git a/research/cv/AVA_hpa/src/datasets.py b/research/cv/AVA_hpa/src/datasets.py
index 7225593e7..9394785d1 100644
--- a/research/cv/AVA_hpa/src/datasets.py
+++ b/research/cv/AVA_hpa/src/datasets.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,9 @@ from collections import Counter
 from PIL import Image
 import numpy as np
 import pandas as pd
-import mindspore.dataset.vision.py_transforms as transforms
+import mindspore.dataset.vision as vision
 from mindspore.dataset import GeneratorDataset
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from src.RandAugment import RandAugment
 
 # split train val test = 4:1:5
@@ -40,35 +40,35 @@ class TransformOnImg:
         self.mode = mode
         rand_augment = RandAugment(n=2, m=10)
         self.trsfm_basic = Compose([
-            transforms.ToPIL(),
-            transforms.Resize(256),
-            transforms.RandomResizedCrop(size=224, scale=(0.2, 1.)),
-            transforms.RandomColorAdjust(0.4, 0.4, 0.4, 0),
-            transforms.RandomHorizontalFlip(),
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+            vision.ToPIL(),
+            vision.Resize(256),
+            vision.RandomResizedCrop(size=224, scale=(0.2, 1.)),
+            vision.RandomColorAdjust(0.4, 0.4, 0.4, 0),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
         ])
         self.trsfm_aux = Compose([
-            transforms.ToPIL(),
-            transforms.Resize(256),
-            transforms.RandomResizedCrop(size=224, scale=(0.2, 1.)),
-            transforms.RandomHorizontalFlip(),
+            vision.ToPIL(),
+            vision.Resize(256),
+            vision.RandomResizedCrop(size=224, scale=(0.2, 1.)),
+            vision.RandomHorizontalFlip(),
             rand_augment,
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+            vision.ToTensor(),
+            vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
         ])
         self.trsfm_train = Compose([
-            transforms.ToPIL(),
-            transforms.Resize(256),
-            transforms.RandomResizedCrop(size=224, scale=(0.2, 1.)),
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+            vision.ToPIL(),
+            vision.Resize(256),
+            vision.RandomResizedCrop(size=224, scale=(0.2, 1.)),
+            vision.ToTensor(),
+            vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
         ])
         self.trsfm = Compose([
-            transforms.ToPIL(),
-            transforms.Resize(224),
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+            vision.ToPIL(),
+            vision.Resize(224),
+            vision.ToTensor(),
+            vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
         ])
 
     def __call__(self, img, use_aux=False):
diff --git a/research/cv/AlignedReID++/src/dataset_loader.py b/research/cv/AlignedReID++/src/dataset_loader.py
index a3dba42f7..34ac5bbc3 100644
--- a/research/cv/AlignedReID++/src/dataset_loader.py
+++ b/research/cv/AlignedReID++/src/dataset_loader.py
@@ -1,5 +1,5 @@
 """get the dataset"""
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,8 +21,8 @@ import os.path as osp
 from PIL import Image
 
 from mindspore.dataset import GeneratorDataset
-from mindspore.dataset.transforms.py_transforms import Compose
-import mindspore.dataset.vision.py_transforms as P1
+from mindspore.dataset.transforms.transforms import Compose
+import mindspore.dataset.vision as vision
 
 from .import data_manager
 from .import samplers
@@ -159,9 +159,9 @@ def create_train_dataset(real_path, args, rank_id, rank_size):
     transform_train = [
         decode,
         Random2DTranslation(args.height, args.width),
-        P1.RandomHorizontalFlip(0.5),
-        P1.ToTensor(),
-        P1.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+        vision.RandomHorizontalFlip(0.5),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
         RandomErasing()
     ]
     transform_train = Compose(transform_train)
@@ -186,9 +186,9 @@ def create_test_dataset(real_path, args):
 
     transform_test = [
         decode,
-        P1.Resize((args.height, args.width)),
-        P1.ToTensor(),
-        P1.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        vision.Resize((args.height, args.width)),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
     ]
     transform_test = Compose(transform_test)
 
@@ -212,4 +212,4 @@ def create_test_dataset(real_path, args):
     galleryloader = galleryloader.batch(batch_size=32, drop_remainder=True)
 
     return queryloader, galleryloader, dataset.num_train_pids
-          
\ No newline at end of file
+
diff --git a/research/cv/AlignedReID/src/dataset.py b/research/cv/AlignedReID/src/dataset.py
index 18afc481d..9014b2467 100644
--- a/research/cv/AlignedReID/src/dataset.py
+++ b/research/cv/AlignedReID/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import pickle
 from collections import defaultdict
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import numpy as np
 from PIL import Image
 
diff --git a/research/cv/AlphaPose/infer/sdk/postprocess/src/dataset.py b/research/cv/AlphaPose/infer/sdk/postprocess/src/dataset.py
index 7265dfeba..baa3310d4 100644
--- a/research/cv/AlphaPose/infer/sdk/postprocess/src/dataset.py
+++ b/research/cv/AlphaPose/infer/sdk/postprocess/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.utils.transforms import fliplr_joints, get_affine_transform, affine_transform
 from src.config import config
 
diff --git a/research/cv/AlphaPose/src/dataset.py b/research/cv/AlphaPose/src/dataset.py
index 7265dfeba..baa3310d4 100644
--- a/research/cv/AlphaPose/src/dataset.py
+++ b/research/cv/AlphaPose/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.utils.transforms import fliplr_joints, get_affine_transform, affine_transform
 from src.config import config
 
diff --git a/research/cv/AttGAN/src/data.py b/research/cv/AttGAN/src/data.py
index 72cb898c2..75f6a6881 100644
--- a/research/cv/AttGAN/src/data.py
+++ b/research/cv/AttGAN/src/data.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import numpy as np
 from PIL import Image
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms import py_transforms
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 
 from src.utils import DistributedSampler
 
@@ -40,11 +40,11 @@ class Custom:
 
         mean = [0.5, 0.5, 0.5]
         std = [0.5, 0.5, 0.5]
-        transform = [py_vision.ToPIL()]
-        transform.append(py_vision.Resize([128, 128]))
-        transform.append(py_vision.ToTensor())
-        transform.append(py_vision.Normalize(mean=mean, std=std))
-        transform = py_transforms.Compose(transform)
+        transform = [vision.ToPIL()]
+        transform.append(vision.Resize([128, 128]))
+        transform.append(vision.ToTensor())
+        transform.append(vision.Normalize(mean=mean, std=std, is_hwc=False))
+        transform = Compose(transform)
         self.transform = transform
         self.images = np.array([images]) if images.size == 1 else images[0:]
         self.labels = np.array([labels]) if images.size == 1 else labels[0:]
@@ -108,12 +108,12 @@ def get_loader(data_root, attr_path, selected_attrs, crop_size=170, image_size=1
 
     mean = [0.5, 0.5, 0.5]
     std = [0.5, 0.5, 0.5]
-    transform = [py_vision.ToPIL()]
-    transform.append(py_vision.CenterCrop((crop_size, crop_size)))
-    transform.append(py_vision.Resize([image_size, image_size]))
-    transform.append(py_vision.ToTensor())
-    transform.append(py_vision.Normalize(mean=mean, std=std))
-    transform = py_transforms.Compose(transform)
+    transform = [vision.ToPIL()]
+    transform.append(vision.CenterCrop((crop_size, crop_size)))
+    transform.append(vision.Resize([image_size, image_size]))
+    transform.append(vision.ToTensor())
+    transform.append(vision.Normalize(mean=mean, std=std, is_hwc=False))
+    transform = Compose(transform)
 
     dataset = CelebA(data_root, attr_path, image_size, mode, selected_attrs, transform, split_point=split_point)
 
diff --git a/research/cv/AttentionCluster/make_dataset.py b/research/cv/AttentionCluster/make_dataset.py
index af3b92140..d8e864ca7 100644
--- a/research/cv/AttentionCluster/make_dataset.py
+++ b/research/cv/AttentionCluster/make_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import numpy as np
 import mindspore
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c_trans
+import mindspore.dataset.vision as c_trans
 import mindspore.nn as nn
 import mindspore.context as context
 import mindspore.common as common
diff --git a/research/cv/AutoSlim/src/dataset.py b/research/cv/AutoSlim/src/dataset.py
index 82d921f7b..6f8d7e3d8 100644
--- a/research/cv/AutoSlim/src/dataset.py
+++ b/research/cv/AutoSlim/src/dataset.py
@@ -18,8 +18,7 @@ Produce the dataset
 import os
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c_vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 
 def data_transforms(args):
     """get transform of dataset"""
@@ -40,18 +39,18 @@ def data_transforms(args):
         std = [0.229, 0.224, 0.225]
         crop_scale = 0.25
         jitter_param = 0.4
-    train_transforms = [c_vision.RandomCropDecodeResize(224, scale=(crop_scale, 1.0)),
-                        c_vision.RandomColorAdjust(brightness=jitter_param,
-                                                   contrast=jitter_param,
-                                                   saturation=jitter_param),
-                        c_vision.RandomHorizontalFlip(),
-                        c_vision.HWC2CHW(),
+    train_transforms = [vision.RandomCropDecodeResize(224, scale=(crop_scale, 1.0)),
+                        vision.RandomColorAdjust(brightness=jitter_param,
+                                                 contrast=jitter_param,
+                                                 saturation=jitter_param),
+                        vision.RandomHorizontalFlip(),
+                        vision.HWC2CHW(),
                         ]
-    val_transforms = [py_vision.Decode(),
-                      py_vision.Resize(256),
-                      py_vision.CenterCrop(224),
-                      py_vision.ToTensor(),
-                      py_vision.Normalize(mean=mean, std=std)
+    val_transforms = [vision.Decode(True),
+                      vision.Resize(256),
+                      vision.CenterCrop(224),
+                      vision.ToTensor(),
+                      vision.Normalize(mean=mean, std=std, is_hwc=False)
                       ]
     return train_transforms, val_transforms
 
diff --git a/research/cv/CBAM/src/data.py b/research/cv/CBAM/src/data.py
index 960b51725..7a425b1b3 100644
--- a/research/cv/CBAM/src/data.py
+++ b/research/cv/CBAM/src/data.py
@@ -22,7 +22,7 @@ import numpy as np
 from mindspore.communication.management import get_rank, get_group_size
 import mindspore.dataset as de
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 
 def _get_rank_info(run_distribute):
diff --git a/research/cv/CGAN/src/dataset.py b/research/cv/CGAN/src/dataset.py
index 00e805587..e2b687cb3 100644
--- a/research/cv/CGAN/src/dataset.py
+++ b/research/cv/CGAN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import numpy as np
 import mindspore.dataset as ds
 from mindspore.common import dtype as mstype
-import mindspore.dataset.transforms.c_transforms as CT
+import mindspore.dataset.transforms as CT
 from mindspore.communication.management import get_rank, get_group_size
 
 
diff --git a/research/cv/CMT/src/dataset.py b/research/cv/CMT/src/dataset.py
index a27c6ecb2..3bc5b667c 100644
--- a/research/cv/CMT/src/dataset.py
+++ b/research/cv/CMT/src/dataset.py
@@ -16,11 +16,10 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as pytrans
+import mindspore.dataset.transforms as C2
 
-from mindspore.dataset.transforms.py_transforms import Compose
-import mindspore.dataset.vision.c_transforms as C
+from mindspore.dataset.transforms.transforms import Compose
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_id=0, batch_size=128):
@@ -59,13 +58,13 @@ def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_
         ]
     else:
         trans = [
-            pytrans.Decode(),
-            pytrans.Resize(235),
-            pytrans.CenterCrop(224)
+            C.Decode(True),
+            C.Resize(235),
+            C.CenterCrop(224)
         ]
     trans += [
-        pytrans.ToTensor(),
-        pytrans.Normalize(mean=mean, std=std),
+        C.ToTensor(),
+        C.Normalize(mean=mean, std=std, is_hwc=False),
     ]
     trans = Compose(trans)
 
diff --git a/research/cv/CascadeRCNN/src/dataset.py b/research/cv/CascadeRCNN/src/dataset.py
index 657c9619d..8fd59ebd1 100644
--- a/research/cv/CascadeRCNN/src/dataset.py
+++ b/research/cv/CascadeRCNN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ from numpy import random
 import cv2
 import mmcv
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.config import config
 
diff --git a/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py b/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
index ec2a889ba..2371995e0 100644
--- a/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
+++ b/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import multiprocessing
 import numpy as np
 from PIL import Image
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from .distributed_sampler import DistributedSampler
 
 IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.tif', '.tiff']
diff --git a/research/cv/DBPN/src/dataset/dataset.py b/research/cv/DBPN/src/dataset/dataset.py
index a9c57d850..a7f426930 100644
--- a/research/cv/DBPN/src/dataset/dataset.py
+++ b/research/cv/DBPN/src/dataset/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import os
 import random
 import numpy as np
 from PIL import Image, ImageOps
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as V
 from mindspore import dataset as de, context
 from mindspore.context import ParallelMode
 from mindspore.communication import get_rank, get_group_size
@@ -173,10 +173,10 @@ def create_train_dataset(dataset, args):
         mean = [0.5, 0.5, 0.5]
         std = [0.5, 0.5, 0.5]
     trans = [
-        P.ToTensor(),
+        V.ToTensor(),
     ]
     if args.isgan:
-        trans.append(P.Normalize(mean=mean, std=std))
+        trans.append(V.Normalize(mean=mean, std=std, is_hwc=False))
     train_ds = train_ds.map(operations=trans, input_columns=['target_image'])
     train_ds = train_ds.map(operations=trans, input_columns=['input_image'])
     train_ds = train_ds.map(operations=trans, input_columns=['bicubic_image'])
@@ -215,9 +215,9 @@ def create_val_dataset(dataset, args):
     if not args.vgg:
         mean = [0.5, 0.5, 0.5]
         std = [0.5, 0.5, 0.5]
-    trans = [P.ToTensor()]
+    trans = [V.ToTensor()]
     if args.isgan:
-        trans.append(P.Normalize(mean=mean, std=std))
+        trans.append(V.Normalize(mean=mean, std=std, is_hwc=False))
     val_ds = val_ds.map(operations=trans, input_columns=["target_image"])
     val_ds = val_ds.map(operations=trans, input_columns=["input_image"])
     val_ds = val_ds.map(operations=trans, input_columns=["bicubic_image"])
diff --git a/research/cv/DDAG/eval.py b/research/cv/DDAG/eval.py
index cc907a049..9a37a8e7c 100644
--- a/research/cv/DDAG/eval.py
+++ b/research/cv/DDAG/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,12 +21,12 @@ import argparse
 import psutil
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from mindspore import context, load_checkpoint, load_param_into_net, DatasetHelper
 from mindspore.context import ParallelMode
 from mindspore.communication.management import init, get_group_size
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from src.dataset import SYSUDatasetGenerator, RegDBDatasetGenerator, TestData
 from src.dataset import process_gallery_sysu, process_query_sysu, process_test_regdb
 from src.evalfunc import test
@@ -246,9 +246,9 @@ if __name__ == "__main__":
     transform_test = Compose(
         [
             decode,
-            py_trans.Resize((args.img_h, args.img_w)),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            vision.Resize((args.img_h, args.img_w)),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         ]
     )
 
diff --git a/research/cv/DDAG/train.py b/research/cv/DDAG/train.py
index 7c35b7357..ef393d619 100644
--- a/research/cv/DDAG/train.py
+++ b/research/cv/DDAG/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,12 +24,12 @@ import numpy as np
 import mindspore as ms
 import mindspore.nn as nn
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from mindspore import context, load_checkpoint, load_param_into_net, save_checkpoint, DatasetHelper, Tensor
 from mindspore.context import ParallelMode
 from mindspore.communication import init, get_group_size, get_rank
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.nn import SGD, Adam
 
 
@@ -375,35 +375,35 @@ if __name__ == "__main__":
     transform_train_rgb = Compose(
         [
             decode,
-            py_trans.Pad(10),
-            py_trans.RandomCrop((args.img_h, args.img_w)),
-            py_trans.RandomGrayscale(prob=0.5),
-            py_trans.RandomHorizontalFlip(),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-            py_trans.RandomErasing(prob=0.5)
+            vision.Pad(10),
+            vision.RandomCrop((args.img_h, args.img_w)),
+            vision.RandomGrayscale(prob=0.5),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
+            vision.RandomErasing(prob=0.5)
         ]
     )
 
     transform_train_ir = Compose(
         [
             decode,
-            py_trans.Pad(10),
-            py_trans.RandomCrop((args.img_h, args.img_w)),
-            py_trans.RandomGrayscale(prob=0.5),
-            py_trans.RandomHorizontalFlip(),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-            py_trans.RandomErasing(prob=0.5)
+            vision.Pad(10),
+            vision.RandomCrop((args.img_h, args.img_w)),
+            vision.RandomGrayscale(prob=0.5),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
+            vision.RandomErasing(prob=0.5)
         ]
     )
 
     transform_test = Compose(
         [
             decode,
-            py_trans.Resize((args.img_h, args.img_w)),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            vision.Resize((args.img_h, args.img_w)),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         ]
     )
 
diff --git a/research/cv/DDRNet/src/data/imagenet.py b/research/cv/DDRNet/src/data/imagenet.py
index b4d01c858..3f176dc05 100644
--- a/research/cv/DDRNet/src/data/imagenet.py
+++ b/research/cv/DDRNet/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.data.augment.auto_augment import rand_augment_transform
 from src.data.augment.mixup import Mixup
@@ -90,29 +89,29 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             aa_params["interpolation"] = _pil_interp(interpolation)
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             RandomResizedCropAndInterpolation(size=args.image_size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
                                               interpolation=interpolation),
-            py_vision.RandomHorizontalFlip(prob=0.5),
+            vision.RandomHorizontalFlip(prob=0.5),
         ]
         if isinstance(auto_augment, str) and auto_augment.startswith('rand'):
             transform_img += [rand_augment_transform(auto_augment, aa_params)]
         else:
-            transform_img += [py_vision.RandomColorAdjust(args.color_jitter, args.color_jitter, args.color_jitter)]
+            transform_img += [vision.RandomColorAdjust(args.color_jitter, args.color_jitter, args.color_jitter)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)]
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)]
         if args.re_prob > 0.:
             transform_img += [RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)]
     else:
         # test transform complete
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             Resize(int(args.image_size / args.crop_pct), interpolation="bicubic"),
-            py_vision.CenterCrop(image_size),
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)
+            vision.CenterCrop(image_size),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)
         ]
 
     transform_label = C.TypeCast(mstype.int32)
diff --git a/research/cv/DRNet/src/dataset.py b/research/cv/DRNet/src/dataset.py
index 1da459994..04802ca69 100644
--- a/research/cv/DRNet/src/dataset.py
+++ b/research/cv/DRNet/src/dataset.py
@@ -16,9 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision import Inter
 
 def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_id=0, batch_size=128):
@@ -55,16 +54,16 @@ def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_
         ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                    num_shards=device_num, shard_id=rank_id)
 
-    decode_p = py_vision.Decode()
-    resize_p = py_vision.Resize(int(256), interpolation=Inter.BILINEAR)
-    center_crop_p = py_vision.CenterCrop(224)
-    totensor = py_vision.ToTensor()
-    normalize_p = py_vision.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-    trans = py_transforms.Compose([decode_p, resize_p, center_crop_p, totensor, normalize_p])
+    decode_p = vision.Decode(True)
+    resize_p = vision.Resize(int(256), interpolation=Inter.BILINEAR)
+    center_crop_p = vision.CenterCrop(224)
+    totensor = vision.ToTensor()
+    normalize_p = vision.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], is_hwc=False)
+    trans = C2.Compose([decode_p, resize_p, center_crop_p, totensor, normalize_p])
     type_cast_op = C2.TypeCast(mstype.int32)
     ds = ds.map(input_columns="image", operations=trans, num_parallel_workers=8)
     ds = ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=8)
 
     ds = ds.batch(batch_size, drop_remainder=True)
     return ds
-    
\ No newline at end of file
+
diff --git a/research/cv/DeepID/src/dataset.py b/research/cv/DeepID/src/dataset.py
index 7541a7ff4..328410d7a 100644
--- a/research/cv/DeepID/src/dataset.py
+++ b/research/cv/DeepID/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import csv
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.dataset as de
 
 
@@ -112,12 +112,12 @@ def get_loader(data_root, mode='train'):
     """Build and return a data loader."""
     mean = [0.5, 0.5, 0.5]
     std = [0.5, 0.5, 0.5]
-    transform = [py_vision.ToPIL()]
+    transform = [vision.ToPIL()]
     if mode == 'train':
-        transform.append(py_vision.RandomHorizontalFlip())
-    transform.append(py_vision.ToTensor())
-    transform.append(py_vision.Normalize(mean=mean, std=std))
-    transform = py_transforms.Compose(transform)
+        transform.append(vision.RandomHorizontalFlip())
+    transform.append(vision.ToTensor())
+    transform.append(vision.Normalize(mean=mean, std=std, is_hwc=False))
+    transform = data_trans.Compose(transform)
 
     dataset = Youtube(data_root, mode, transform=transform)
 
diff --git a/research/cv/EfficientDet_d0/src/dataset.py b/research/cv/EfficientDet_d0/src/dataset.py
index 537857e52..5c5f87644 100644
--- a/research/cv/EfficientDet_d0/src/dataset.py
+++ b/research/cv/EfficientDet_d0/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import os
 import numpy as np
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from pycocotools.coco import COCO
 from src.config import config
diff --git a/research/cv/FDA-BNN/src/dataset.py b/research/cv/FDA-BNN/src/dataset.py
index 84177de83..c8f22b5aa 100755
--- a/research/cv/FDA-BNN/src/dataset.py
+++ b/research/cv/FDA-BNN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,12 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as vision
 
 
 # values that should remain constant
@@ -55,24 +53,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -122,16 +120,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
@@ -180,9 +178,9 @@ def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=Non
     random_horizontal_op = vision.RandomHorizontalFlip()
     resize_op = vision.Resize((resize_height, resize_width))  # interpolation default BILINEAR
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
-    normalize_op = vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+    normalize_op = vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=True)
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if training:
diff --git a/research/cv/FaceAttribute/preprocess.py b/research/cv/FaceAttribute/preprocess.py
index cbe80e324..1a2ebb578 100644
--- a/research/cv/FaceAttribute/preprocess.py
+++ b/research/cv/FaceAttribute/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as F
-import mindspore.dataset.transforms.py_transforms as F2
+import mindspore.dataset.vision as F
+import mindspore.dataset.transforms as F2
 
 from model_utils.config import config
 
@@ -28,10 +28,10 @@ def eval_data_generator(args):
     dst_h = args.dst_h
     batch_size = 1
     #attri_num = args.attri_num
-    transform_img = F2.Compose([F.Decode(),
+    transform_img = F2.Compose([F.Decode(True),
                                 F.Resize((dst_w, dst_h)),
                                 F.ToTensor(),
-                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)])
 
     de_dataset = de.MindDataset(mindrecord_path + "0", columns_list=["image", "label"])
     de_dataset = de_dataset.map(input_columns="image", operations=transform_img, num_parallel_workers=args.workers,
diff --git a/research/cv/FaceAttribute/src/dataset_eval.py b/research/cv/FaceAttribute/src/dataset_eval.py
index 2167a4b2f..f4ed3cd9f 100644
--- a/research/cv/FaceAttribute/src/dataset_eval.py
+++ b/research/cv/FaceAttribute/src/dataset_eval.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
 # ============================================================================
 """Face attribute dataset for eval"""
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as F
-import mindspore.dataset.transforms.py_transforms as F2
+import mindspore.dataset.vision as F
+import mindspore.dataset.transforms as F2
 
 __all__ = ['data_generator_eval']
 
@@ -27,10 +27,10 @@ def data_generator_eval(args):
     dst_h = args.dst_h
     batch_size = 1
     attri_num = args.attri_num
-    transform_img = F2.Compose([F.Decode(),
+    transform_img = F2.Compose([F.Decode(True),
                                 F.Resize((dst_w, dst_h)),
                                 F.ToTensor(),
-                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)])
 
     de_dataset = de.MindDataset(mindrecord_path + "0", columns_list=["image", "label"])
     de_dataset = de_dataset.map(input_columns="image", operations=transform_img, num_parallel_workers=args.workers,
diff --git a/research/cv/FaceAttribute/src/dataset_train.py b/research/cv/FaceAttribute/src/dataset_train.py
index bbd210a35..79617bdb7 100644
--- a/research/cv/FaceAttribute/src/dataset_train.py
+++ b/research/cv/FaceAttribute/src/dataset_train.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
 # ============================================================================
 """Face attribute dataset for train"""
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as F
-import mindspore.dataset.transforms.py_transforms as F2
+import mindspore.dataset.vision as F
+import mindspore.dataset.transforms as F2
 
 __all__ = ['data_generator']
 
@@ -28,11 +28,11 @@ def data_generator(args):
     batch_size = args.per_batch_size
     attri_num = args.attri_num
     max_epoch = args.max_epoch
-    transform_img = F2.Compose([F.Decode(),
+    transform_img = F2.Compose([F.Decode(True),
                                 F.Resize((dst_w, dst_h)),
                                 F.RandomHorizontalFlip(prob=0.5),
                                 F.ToTensor(),
-                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)])
 
     de_dataset = de.MindDataset(mindrecord_path + "0", columns_list=["image", "label"], num_shards=args.world_size,
                                 shard_id=args.local_rank)
diff --git a/research/cv/FaceDetection/preprocess.py b/research/cv/FaceDetection/preprocess.py
index 8f9961b26..205d31531 100644
--- a/research/cv/FaceDetection/preprocess.py
+++ b/research/cv/FaceDetection/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 import os
 import numpy as np
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as V
 import mindspore.dataset as de
 from model_utils.config import config
 
@@ -30,7 +30,7 @@ class SingleScaleTrans_Infer:
 
     def __call__(self, imgs, ann, image_names, image_size, batch_info):
 
-        decode = P.Decode()
+        decode = V.Decode(True)
         ret_imgs = []
         ret_anno = []
 
diff --git a/research/cv/FaceDetection/src/data_preprocess.py b/research/cv/FaceDetection/src/data_preprocess.py
index 1eba91758..8c1e7ebaa 100644
--- a/research/cv/FaceDetection/src/data_preprocess.py
+++ b/research/cv/FaceDetection/src/data_preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Face detection yolov3 data pre-process."""
 import multiprocessing
 import numpy as np
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as V
 import mindspore.dataset as de
 
 from src.transforms import RandomCropLetterbox, RandomFlip, HSVShift, ResizeLetterbox
@@ -31,10 +31,10 @@ class SingleScaleTrans:
     def __call__(self, imgs, ann, image_names, image_size, batch_info):
 
         size = self.resize
-        decode = P.Decode()
+        decode = V.Decode(True)
         resize_letter_box_op = ResizeLetterbox(input_dim=size)
 
-        to_tensor = P.ToTensor()
+        to_tensor = V.ToTensor()
         ret_imgs = []
         ret_anno = []
 
@@ -204,7 +204,7 @@ def preprocess_fn(image, annotation):
     anchors = config.anchors
     anchors_mask = config.anchors_mask
 
-    decode = P.Decode()
+    decode = V.Decode(True)
     random_crop_letter_box_op = RandomCropLetterbox(jitter=jitter, input_dim=size)
     random_flip_op = RandomFlip(flip)
     hsv_shift_op = HSVShift(hue, sat, val)
diff --git a/research/cv/FaceNet/src/LFWDataset.py b/research/cv/FaceNet/src/LFWDataset.py
index cda486727..7cc5444a1 100644
--- a/research/cv/FaceNet/src/LFWDataset.py
+++ b/research/cv/FaceNet/src/LFWDataset.py
@@ -17,8 +17,7 @@
 import os
 import numpy as np
 from PIL import Image
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as C
 import mindspore.dataset as de
 from mindspore.common import set_seed
 set_seed(0)
@@ -94,9 +93,9 @@ class LFWDataset:
 def get_lfw_dataloader(eval_root_dir, eval_pairs_path, eval_batch_size):
 
     data_transforms = [C.RandomResize(size=(224, 224)),
-                       P.ToPIL(),
-                       P.ToTensor(),
-                       P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
+                       C.ToPIL(),
+                       C.ToTensor(),
+                       C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)]
 
 
     face_dataset = LFWDataset(data_dir=eval_root_dir, pairs_path=eval_pairs_path)
diff --git a/research/cv/FaceNet/src/data_loader.py b/research/cv/FaceNet/src/data_loader.py
index 41968e5d5..abe10b2d3 100644
--- a/research/cv/FaceNet/src/data_loader.py
+++ b/research/cv/FaceNet/src/data_loader.py
@@ -18,8 +18,7 @@ import os
 import csv
 import numpy as np
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as de
 
 
@@ -68,17 +67,17 @@ def get_dataloader(train_root_dir, valid_root_dir,
         'train': [
             C.RandomResize(size=(224, 224)),
             C.RandomHorizontalFlip(),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)],
         'train_valid': [
             C.RandomResize(size=(224, 224)),
             C.RandomHorizontalFlip(),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)],
         'valid': [
             C.RandomResize(size=(224, 224)),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]}
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)]}
 
 
     dataset_column_names = ["anc_img", "pos_img", "neg_img", "pos_class", "neg_class"]
diff --git a/research/cv/FaceNet/src/data_loader_generate_triplets_online.py b/research/cv/FaceNet/src/data_loader_generate_triplets_online.py
index b12537e89..337b200ab 100644
--- a/research/cv/FaceNet/src/data_loader_generate_triplets_online.py
+++ b/research/cv/FaceNet/src/data_loader_generate_triplets_online.py
@@ -18,8 +18,7 @@ import os
 import numpy as np
 import pandas as pd
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as de
 
 
@@ -107,17 +106,17 @@ def get_dataloader(train_root_dir, valid_root_dir,
         'train': [
             C.RandomResize(size=(224, 224)),
             C.RandomHorizontalFlip(),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)],
         'train_valid': [
             C.RandomResize(size=(224, 224)),
             C.RandomHorizontalFlip(),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)],
         'valid': [
             C.RandomResize(size=(224, 224)),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]}
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)]}
 
     dataset_column_names = ["anc_img", "pos_img", "neg_img", "pos_class", "neg_class"]
 
diff --git a/research/cv/FaceQualityAssessment/src/dataset.py b/research/cv/FaceQualityAssessment/src/dataset.py
index 8ccf54460..149ae3410 100644
--- a/research/cv/FaceQualityAssessment/src/dataset.py
+++ b/research/cv/FaceQualityAssessment/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import numpy as np
 from PIL import Image, ImageFile
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as F
+import mindspore.dataset.vision as F
 
 warnings.filterwarnings('ignore')
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/research/cv/FaceRecognition/eval.py b/research/cv/FaceRecognition/eval.py
index 6c5dba139..0ca97bfce 100644
--- a/research/cv/FaceRecognition/eval.py
+++ b/research/cv/FaceRecognition/eval.py
@@ -21,8 +21,8 @@ import numpy as np
 import cv2
 
 from mindspore.common import dtype as mstype
-import mindspore.dataset.transforms.py_transforms as transforms
-import mindspore.dataset.vision.py_transforms as vision
+import mindspore.dataset.transforms as transforms
+import mindspore.dataset.vision as vision
 import mindspore.dataset as de
 from mindspore import Tensor, context
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
@@ -266,9 +266,8 @@ def run_eval(args):
     args.logger.info('INFO, graph compile finished, time used:{:.2f}s, start calculate img embedding'.
                      format(compile_time_used))
 
-    img_transforms = transforms.Compose([vision.ToTensor(), vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
-
-
+    img_transforms = transforms.Compose([vision.ToTensor(),
+        vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)])
 
     #for test images
     args.logger.info('INFO, start step1, calculate test img embedding, weight file = {}'.format(args.weight))
diff --git a/research/cv/FaceRecognition/src/dataset_factory.py b/research/cv/FaceRecognition/src/dataset_factory.py
index 0cf1ad0d3..64b0fb49f 100644
--- a/research/cv/FaceRecognition/src/dataset_factory.py
+++ b/research/cv/FaceRecognition/src/dataset_factory.py
@@ -18,8 +18,8 @@ import math
 import numpy as np
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as F
-import mindspore.dataset.transforms.py_transforms as F2
+import mindspore.dataset.vision as F
+import mindspore.dataset.transforms as F2
 
 from src.custom_dataset import DistributedCustomSampler, CustomDataset
 
@@ -27,7 +27,7 @@ __all__ = ['get_de_dataset']
 
 def get_de_dataset(args):
     '''get_de_dataset'''
-    lbl_transforms = [F.ToType(np.int32)]
+    lbl_transforms = [F2.TypeCast(np.int32)]
     transform_label = F2.Compose(lbl_transforms)
 
     drop_remainder = True
@@ -35,7 +35,7 @@ def get_de_dataset(args):
     transforms = [F.ToPIL(),
                   F.RandomHorizontalFlip(),
                   F.ToTensor(),
-                  F.Normalize(mean=[0.5], std=[0.5])]
+                  F.Normalize(mean=[0.5], std=[0.5], is_hwc=False)]
     transform = F2.Compose(transforms)
     cache_path = os.path.join('cache', os.path.basename(args.data_dir), 'data_cache.pkl')
     if args.device_target == 'GPU' and args.local_rank != 0:
diff --git a/research/cv/FaceRecognitionForTracking/eval.py b/research/cv/FaceRecognitionForTracking/eval.py
index 110e37c50..aba67c818 100644
--- a/research/cv/FaceRecognitionForTracking/eval.py
+++ b/research/cv/FaceRecognitionForTracking/eval.py
@@ -21,8 +21,8 @@ import numpy as np
 from PIL import Image
 from tqdm import tqdm
 
-import mindspore.dataset.vision.py_transforms as V
-import mindspore.dataset.transforms.py_transforms as T
+import mindspore.dataset.vision as V
+import mindspore.dataset.transforms as T
 from mindspore import context, Tensor
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 
@@ -98,7 +98,7 @@ def load_images(paths, batch_size=128):
     resize = V.Resize((96, 64))
     transform = T.Compose([
         V.ToTensor(),
-        V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
+        V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], is_hwc=False)])
     for i, _ in enumerate(paths):
         im = Image.open(paths[i])
         im = resize(im)
diff --git a/research/cv/FaceRecognitionForTracking/preprocess.py b/research/cv/FaceRecognitionForTracking/preprocess.py
index dad3c2795..140134811 100644
--- a/research/cv/FaceRecognitionForTracking/preprocess.py
+++ b/research/cv/FaceRecognitionForTracking/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import argparse
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.vision.py_transforms as V
-import mindspore.dataset.transforms.py_transforms as T
+import mindspore.dataset.vision as V
+import mindspore.dataset.transforms as T
 
 
 def load_images(paths, batch_size=1):
@@ -28,7 +28,7 @@ def load_images(paths, batch_size=1):
     resize = V.Resize((96, 64))
     transform = T.Compose([
         V.ToTensor(),
-        V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
+        V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], is_hwc=False)])
     for i, _ in enumerate(paths):
         im = Image.open(paths[i])
         im = resize(im)
diff --git a/research/cv/FaceRecognitionForTracking/src/dataset.py b/research/cv/FaceRecognitionForTracking/src/dataset.py
index 5cea07502..3ffd35795 100644
--- a/research/cv/FaceRecognitionForTracking/src/dataset.py
+++ b/research/cv/FaceRecognitionForTracking/src/dataset.py
@@ -19,8 +19,8 @@ from PIL import ImageFile
 
 from mindspore import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as VC
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as VC
+import mindspore.dataset.transforms as C
 
 sys.path.append('./')
 sys.path.append('../data/')
diff --git a/research/cv/GENet_Res50/src/dataset.py b/research/cv/GENet_Res50/src/dataset.py
index 3f032c27b..3396298b7 100644
--- a/research/cv/GENet_Res50/src/dataset.py
+++ b/research/cv/GENet_Res50/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/HRNetW48_cls/src/dataset.py b/research/cv/HRNetW48_cls/src/dataset.py
index 9d417f545..43f0d27e8 100644
--- a/research/cv/HRNetW48_cls/src/dataset.py
+++ b/research/cv/HRNetW48_cls/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/HireMLP/src/dataset.py b/research/cv/HireMLP/src/dataset.py
index a27c6ecb2..3bc5b667c 100644
--- a/research/cv/HireMLP/src/dataset.py
+++ b/research/cv/HireMLP/src/dataset.py
@@ -16,11 +16,10 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as pytrans
+import mindspore.dataset.transforms as C2
 
-from mindspore.dataset.transforms.py_transforms import Compose
-import mindspore.dataset.vision.c_transforms as C
+from mindspore.dataset.transforms.transforms import Compose
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_id=0, batch_size=128):
@@ -59,13 +58,13 @@ def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_
         ]
     else:
         trans = [
-            pytrans.Decode(),
-            pytrans.Resize(235),
-            pytrans.CenterCrop(224)
+            C.Decode(True),
+            C.Resize(235),
+            C.CenterCrop(224)
         ]
     trans += [
-        pytrans.ToTensor(),
-        pytrans.Normalize(mean=mean, std=std),
+        C.ToTensor(),
+        C.Normalize(mean=mean, std=std, is_hwc=False),
     ]
     trans = Compose(trans)
 
diff --git a/research/cv/HourNAS/src/dataset.py b/research/cv/HourNAS/src/dataset.py
index 663fa35f4..1ec1f68ff 100644
--- a/research/cv/HourNAS/src/dataset.py
+++ b/research/cv/HourNAS/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,12 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as vision
 
 # values that should remain constant
 DEFAULT_CROP_PCT = 0.875
@@ -54,24 +52,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -120,16 +118,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
@@ -175,10 +173,10 @@ def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=Non
     random_horizontal_op = vision.RandomHorizontalFlip()
     resize_op = vision.Resize((resize_height, resize_width))  # interpolation default BILINEAR
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
-    #normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
-    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.24703233, 0.24348505, 0.26158768))
+    #normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=True)
+    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.24703233, 0.24348505, 0.26158768), is_hwc=True)
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if training:
diff --git a/research/cv/ICNet/Res50V1_PRE/src/dataset.py b/research/cv/ICNet/Res50V1_PRE/src/dataset.py
index 3f032c27b..3396298b7 100644
--- a/research/cv/ICNet/Res50V1_PRE/src/dataset.py
+++ b/research/cv/ICNet/Res50V1_PRE/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/ICNet/eval.py b/research/cv/ICNet/eval.py
index c48be4d8e..21d0a21e0 100644
--- a/research/cv/ICNet/eval.py
+++ b/research/cv/ICNet/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import mindspore.ops as ops
 from mindspore import load_param_into_net
 from mindspore import load_checkpoint
 from mindspore import Tensor
-import mindspore.dataset.vision.py_transforms as transforms
+import mindspore.dataset.vision as vision
 
 parser = argparse.ArgumentParser(description="ICNet Evaluation")
 parser.add_argument("--dataset_path", type=str, default="/data/cityscapes/", help="dataset path")
@@ -98,8 +98,8 @@ class Evaluator:
 
     def _img_transform(self, image):
         """img_transform"""
-        to_tensor = transforms.ToTensor()
-        normalize = transforms.Normalize([.485, .456, .406], [.229, .224, .225])
+        to_tensor = vision.ToTensor()
+        normalize = vision.Normalize([.485, .456, .406], [.229, .224, .225], is_hwc=False)
         image = to_tensor(image)
         image = normalize(image)
         return image
diff --git a/research/cv/ICNet/src/cityscapes_mindrecord.py b/research/cv/ICNet/src/cityscapes_mindrecord.py
index 0ccc783dd..a3acc652e 100644
--- a/research/cv/ICNet/src/cityscapes_mindrecord.py
+++ b/research/cv/ICNet/src/cityscapes_mindrecord.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,8 +22,8 @@ from PIL import ImageOps
 from PIL import ImageFilter
 import mindspore.dataset as de
 from mindspore.mindrecord import FileWriter
-import mindspore.dataset.vision.py_transforms as transforms
-import mindspore.dataset.transforms.py_transforms as tc
+import mindspore.dataset.vision as transforms
+import mindspore.dataset.transforms as tc
 
 
 def _get_city_pairs(folder, split='train'):
@@ -103,7 +103,8 @@ def _sync_transform(img, mask):
 
 def _class_to_index(mask):
     """class to index"""
-    # reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
+    # Reference:
+    # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
     _key = np.array([-1, -1, -1, -1, -1, -1,
                      -1, -1, 0, 1, -1, -1,
                      2, 3, 4, -1, -1, -1,
@@ -136,7 +137,7 @@ def _img_mask_transform(img, mask):
     """img and mask transform"""
     input_transform = tc.Compose([
         transforms.ToTensor(),
-        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
+        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)])
     img = _img_transform(img)
     mask = _mask_transform(mask)
     img = input_transform(img)
diff --git a/research/cv/ICNet/src/visualize.py b/research/cv/ICNet/src/visualize.py
index 5748181e9..61adc9d70 100644
--- a/research/cv/ICNet/src/visualize.py
+++ b/research/cv/ICNet/src/visualize.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import mindspore.ops as ops
 from mindspore import Tensor
 from mindspore import load_param_into_net
 from mindspore import load_checkpoint
-import mindspore.dataset.vision.py_transforms as transforms
+import mindspore.dataset.vision as vision
 from models.icnet import ICNet
 
 __all__ = ['get_color_palette', 'set_img_color',
@@ -30,8 +30,8 @@ __all__ = ['get_color_palette', 'set_img_color',
 
 def _img_transform(img):
     """img_transform"""
-    totensor = transforms.ToTensor()
-    normalize = transforms.Normalize([.485, .456, .406], [.229, .224, .225])
+    totensor = vision.ToTensor()
+    normalize = vision.Normalize([.485, .456, .406], [.229, .224, .225], is_hwc=False)
     img = totensor(img)
     img = normalize(img)
     return img
diff --git a/research/cv/ISyNet/src/dataset.py b/research/cv/ISyNet/src/dataset.py
index 06d86d1c9..940c0acc4 100644
--- a/research/cv/ISyNet/src/dataset.py
+++ b/research/cv/ISyNet/src/dataset.py
@@ -19,9 +19,8 @@ import os
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision import Inter
 from mindspore.communication.management import init, get_rank, get_group_size
 from src.model_utils.config import config
@@ -83,7 +82,7 @@ def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     ]
     if autoaugment:
         trans += [
-            P.ToPIL(),
+            C.ToPIL(),
             ImageNetPolicy(),
             ToNumpy(),
         ]
@@ -171,7 +170,7 @@ def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target=
                 ]
             if autoaugment:
                 trans += [
-                    P.ToPIL(),
+                    C.ToPIL(),
                     ImageNetPolicy(),
                     ToNumpy(),
                     ]
@@ -267,7 +266,7 @@ def create_dataset_pynative(dataset_path, do_train, repeat_num=1, batch_size=32,
             ]
         if autoaugment:
             trans += [
-                P.ToPIL(),
+                C.ToPIL(),
                 ImageNetPolicy(),
                 ToNumpy(),
                 ]
@@ -351,7 +350,7 @@ def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target=
             ]
         if autoaugment:
             trans += [
-                P.ToPIL(),
+                C.ToPIL(),
                 ImageNetPolicy(),
                 ToNumpy(),
                 ]
@@ -437,7 +436,7 @@ def create_dataset4(dataset_path, do_train, repeat_num=1, batch_size=32, target=
                 ]
         if autoaugment:
             trans += [
-                P.ToPIL(),
+                C.ToPIL(),
                 ImageNetPolicy(),
                 ToNumpy(),
                 ]
diff --git a/research/cv/ISyNet/src/transform.py b/research/cv/ISyNet/src/transform.py
index 400e8b211..d3ea07319 100644
--- a/research/cv/ISyNet/src/transform.py
+++ b/research/cv/ISyNet/src/transform.py
@@ -16,7 +16,7 @@
 random augment class
 """
 import numpy as np
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as vision
 from src import transform_utils
 
 IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
@@ -37,9 +37,9 @@ class RandAugment:
         # assert the imgs object are pil_images
         ret_imgs = []
         ret_labels = []
-        py_to_pil_op = P.ToPIL()
-        to_tensor = P.ToTensor()
-        normalize_op = P.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+        py_to_pil_op = vision.ToPIL()
+        to_tensor = vision.ToTensor()
+        normalize_op = vision.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
         rand_augment_ops = transform_utils.rand_augment_transform(self.config_str, self.hparams)
         for i, image in enumerate(imgs):
             img_pil = py_to_pil_op(image)
diff --git a/research/cv/ISyNet/utils/preprocess_310.py b/research/cv/ISyNet/utils/preprocess_310.py
index 38c855977..897d2e5d4 100644
--- a/research/cv/ISyNet/utils/preprocess_310.py
+++ b/research/cv/ISyNet/utils/preprocess_310.py
@@ -18,8 +18,8 @@ import argparse
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 PARSER = argparse.ArgumentParser(description="ISyNet preprocess")
 PARSER.add_argument("--data_path", type=str, required=True, help="data path.")
diff --git a/research/cv/Inception-v2/src/dataset.py b/research/cv/Inception-v2/src/dataset.py
index 59a03b8e7..1ebd2a9f5 100644
--- a/research/cv/Inception-v2/src/dataset.py
+++ b/research/cv/Inception-v2/src/dataset.py
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 
 def create_dataset_cifar10(dataset_path, cfg, training, repeat_num=1):
diff --git a/research/cv/JDE/eval_detect.py b/research/cv/JDE/eval_detect.py
index 9425f78ca..f4487ec2d 100644
--- a/research/cv/JDE/eval_detect.py
+++ b/research/cv/JDE/eval_detect.py
@@ -23,7 +23,7 @@ from mindspore import dataset as ds
 from mindspore.common import set_seed
 from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
-from mindspore.dataset.vision import py_transforms as PY
+from mindspore.dataset.vision import transforms as vision
 from mindspore.train.serialization import load_checkpoint
 
 from cfg.config import config as default_config
@@ -69,7 +69,7 @@ def main(
         opt.dataset_root,
         test_paths,
         augment=False,
-        transforms=PY.ToTensor(),
+        transforms=vision.ToTensor(),
         config=opt,
     )
 
diff --git a/research/cv/JDE/train.py b/research/cv/JDE/train.py
index da70b3718..dbce644f2 100644
--- a/research/cv/JDE/train.py
+++ b/research/cv/JDE/train.py
@@ -25,7 +25,7 @@ from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
 from mindspore.communication.management import init
 from mindspore.context import ParallelMode
-from mindspore.dataset.vision import py_transforms as PY
+from mindspore.dataset.vision import transforms as vision
 from mindspore.train.callback import CheckpointConfig
 from mindspore.train.callback import LossMonitor
 from mindspore.train.callback import ModelCheckpoint
@@ -177,7 +177,7 @@ if __name__ == "__main__":
         trainset_paths,
         k_max=config.k_max,
         augment=True,
-        transforms=PY.ToTensor(),
+        transforms=vision.ToTensor(),
         config=config,
     )
 
diff --git a/research/cv/LightCNN/src/dataset.py b/research/cv/LightCNN/src/dataset.py
index 46192a0a0..110d14dd0 100644
--- a/research/cv/LightCNN/src/dataset.py
+++ b/research/cv/LightCNN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import cv2
 import numpy as np
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 
 
 def img_loader(path):
@@ -67,19 +67,19 @@ def create_dataset(mode, data_url, data_list, batch_size, resize_size=144,
     if mode == 'Train':
         shuffle = True
         drop_last = True
-        image_ops = Compose([py_vision.ToPIL(),
-                             py_vision.Resize(resize_size),
-                             py_vision.RandomCrop(input_size),
-                             py_vision.RandomHorizontalFlip(),
-                             py_vision.ToTensor()])
+        image_ops = Compose([vision.ToPIL(),
+                             vision.Resize(resize_size),
+                             vision.RandomCrop(input_size),
+                             vision.RandomHorizontalFlip(),
+                             vision.ToTensor()])
 
     elif mode == 'Val':
         shuffle = False
         drop_last = False
-        image_ops = Compose([py_vision.ToPIL(),
-                             py_vision.Resize(resize_size),
-                             py_vision.CenterCrop(input_size),
-                             py_vision.ToTensor()])
+        image_ops = Compose([vision.ToPIL(),
+                             vision.Resize(resize_size),
+                             vision.CenterCrop(input_size),
+                             vision.ToTensor()])
 
     dataset_generator = ImageList(root=data_url, fileList=data_list)
 
diff --git a/research/cv/MGN/src/dataset.py b/research/cv/MGN/src/dataset.py
index 2d4e8696f..f4ce073a5 100644
--- a/research/cv/MGN/src/dataset.py
+++ b/research/cv/MGN/src/dataset.py
@@ -20,7 +20,7 @@ import random
 import re
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import numpy as np
 from PIL import Image
 
diff --git a/research/cv/MVD/eval.py b/research/cv/MVD/eval.py
index 8151b8e6c..af9c2a75d 100644
--- a/research/cv/MVD/eval.py
+++ b/research/cv/MVD/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@ import argparse
 import numpy as np
 import psutil
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from mindspore import context, load_checkpoint, load_param_into_net, DatasetHelper
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 
 from PIL import Image
 
@@ -216,9 +216,9 @@ if __name__ == "__main__":
     transform_test = Compose(
         [
             decode,
-            py_trans.Resize((args.img_h, args.img_w)),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            vision.Resize((args.img_h, args.img_w)),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         ]
     )
 
diff --git a/research/cv/MVD/train.py b/research/cv/MVD/train.py
index ea0d5f1c4..c6ad55320 100644
--- a/research/cv/MVD/train.py
+++ b/research/cv/MVD/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,13 +24,13 @@ from tqdm import tqdm
 import mindspore as ms
 import mindspore.ops as P
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from mindspore import context, load_checkpoint, \
     load_param_into_net, save_checkpoint, DatasetHelper
 from mindspore.context import ParallelMode
 from mindspore.communication.management import init, get_group_size
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.nn import SGD, Adam
 from mindspore import nn
 
@@ -289,33 +289,33 @@ if __name__ == "__main__":
     transform_train_rgb = Compose(
         [
             decode,
-            py_trans.RandomCrop((args.img_h, args.img_w)),
-            py_trans.RandomGrayscale(prob=0.5),
-            py_trans.RandomHorizontalFlip(),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-            py_trans.RandomErasing(prob=0.5)
+            vision.RandomCrop((args.img_h, args.img_w)),
+            vision.RandomGrayscale(prob=0.5),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
+            vision.RandomErasing(prob=0.5)
         ]
     )
 
     transform_train_ir = Compose(
         [
             decode,
-            py_trans.RandomCrop((args.img_h, args.img_w)),
-            # py_trans.RandomGrayscale(prob=0.5),
-            py_trans.RandomHorizontalFlip(),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-            py_trans.RandomErasing(prob=0.5)
+            vision.RandomCrop((args.img_h, args.img_w)),
+            # vision.RandomGrayscale(prob=0.5),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
+            vision.RandomErasing(prob=0.5)
         ]
     )
 
     transform_test = Compose(
         [
             decode,
-            py_trans.Resize((args.img_h, args.img_w)),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            vision.Resize((args.img_h, args.img_w)),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         ]
     )
 
diff --git a/research/cv/ManiDP/src/dataset.py b/research/cv/ManiDP/src/dataset.py
index 220fd0147..58aa330bd 100644
--- a/research/cv/ManiDP/src/dataset.py
+++ b/research/cv/ManiDP/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,12 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as vision
 
 
 # values that should remain constant
@@ -55,24 +53,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -121,16 +119,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
@@ -178,7 +176,7 @@ def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=Non
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
     normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616))
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if training:
diff --git a/research/cv/MaskedFaceRecognition/test_dataset.py b/research/cv/MaskedFaceRecognition/test_dataset.py
index 33720e01c..f35c23b7e 100644
--- a/research/cv/MaskedFaceRecognition/test_dataset.py
+++ b/research/cv/MaskedFaceRecognition/test_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ create train or eval dataset.
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as CV
+import mindspore.dataset.transforms as C
 from config import config
 from dataset.Dataset import Dataset
 
diff --git a/research/cv/MaskedFaceRecognition/train_dataset.py b/research/cv/MaskedFaceRecognition/train_dataset.py
index 0d9d9664f..c3f8e5d92 100644
--- a/research/cv/MaskedFaceRecognition/train_dataset.py
+++ b/research/cv/MaskedFaceRecognition/train_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ create train or eval dataset.
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as CV
+import mindspore.dataset.transforms as C
 from config import config
 from dataset.MGDataset import DistributedPKSampler, MGDataset
 
diff --git a/research/cv/NFNet/src/data/imagenet.py b/research/cv/NFNet/src/data/imagenet.py
index 8edae82eb..213425e7e 100644
--- a/research/cv/NFNet/src/data/imagenet.py
+++ b/research/cv/NFNet/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import _pil_interp, rand_augment_transform
@@ -95,12 +94,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(input_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.BICUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/Neighbor2Neighbor/src/dataset.py b/research/cv/Neighbor2Neighbor/src/dataset.py
index cf438509a..5cda4531b 100644
--- a/research/cv/Neighbor2Neighbor/src/dataset.py
+++ b/research/cv/Neighbor2Neighbor/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import glob
 import numpy as np
 import PIL.Image as Image
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 class DataLoader_Imagenet_val:
     '''DataLoader_Imagenet_val'''
diff --git a/research/cv/PAMTRI/MultiTaskNet/preprocess.py b/research/cv/PAMTRI/MultiTaskNet/preprocess.py
index 26bd80b31..dc872e137 100644
--- a/research/cv/PAMTRI/MultiTaskNet/preprocess.py
+++ b/research/cv/PAMTRI/MultiTaskNet/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import argparse
 from pathlib import Path
 import mindspore.dataset as ds
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 from src.dataset.data_manager import DatasetManager
 from src.dataset.data_loader import ImageDataset
 from src.dataset.transforms import Compose_Keypt, Resize_Keypt, ToTensor_Keypt, Normalize_Keypt
diff --git a/research/cv/PAMTRI/MultiTaskNet/src/dataset/dataset.py b/research/cv/PAMTRI/MultiTaskNet/src/dataset/dataset.py
index ef92874e6..e34b6bbf8 100644
--- a/research/cv/PAMTRI/MultiTaskNet/src/dataset/dataset.py
+++ b/research/cv/PAMTRI/MultiTaskNet/src/dataset/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 import os
 import mindspore.dataset as ds
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 
 from .data_manager import DatasetManager
 from .data_loader import ImageDataset
diff --git a/research/cv/PAMTRI/MultiTaskNet/src/dataset/transforms.py b/research/cv/PAMTRI/MultiTaskNet/src/dataset/transforms.py
index e57f28f2a..303fb4314 100644
--- a/research/cv/PAMTRI/MultiTaskNet/src/dataset/transforms.py
+++ b/research/cv/PAMTRI/MultiTaskNet/src/dataset/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import random
 import collections
 import cv2
 
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 
 if sys.version_info < (3, 3):
     Iterable = collections.Iterable
@@ -54,7 +54,7 @@ class ToTensor_Keypt():
     In the other cases, tensors are returned without scaling.
     """
     def __init__(self):
-        self.to_tensor = py_vision.ToTensor()
+        self.to_tensor = vision.ToTensor()
 
     def __call__(self, img, vkeypt):
         """
@@ -104,7 +104,7 @@ class Normalize_Keypt():
             self.mean.extend([mean_avg] * (channels_new - channels_orig))
             self.std.extend([std_avg] * (channels_new - channels_orig))
 
-        normalize = py_vision.Normalize(self.mean, self.std)
+        normalize = vision.Normalize(self.mean, self.std, is_hwc=False)
         return normalize(tensor)
 
 class Resize_Keypt():
diff --git a/research/cv/PAMTRI/PoseEstNet/src/dataset/dataset.py b/research/cv/PAMTRI/PoseEstNet/src/dataset/dataset.py
index 75a78a144..26031cd94 100644
--- a/research/cv/PAMTRI/PoseEstNet/src/dataset/dataset.py
+++ b/research/cv/PAMTRI/PoseEstNet/src/dataset/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import copy
 import json
 from pathlib import Path
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 
 from .veri import VeRiDataset
 
@@ -42,8 +42,8 @@ def create_dataset(cfg, data_dir, is_train=True):
                 "joints", "joints_vis"], num_parallel_workers=1, shuffle=False, num_shards=1, shard_id=0)
 
     trans = Compose([
-        py_vision.ToTensor(),
-        py_vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+        vision.ToTensor(),
+        vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
     ])
 
     dataset = dataset.map(operations=trans, input_columns="input", num_parallel_workers=8)
diff --git a/research/cv/PAMTRI/PoseEstNet/trans.py b/research/cv/PAMTRI/PoseEstNet/trans.py
index 5f6c377c8..cb5a4b4ab 100644
--- a/research/cv/PAMTRI/PoseEstNet/trans.py
+++ b/research/cv/PAMTRI/PoseEstNet/trans.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@ python trans.py --cfg config.yaml --ckpt_path Your.ckpt --data_dir datapath
 import os
 import argparse
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 
 from mindspore import context
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 
 from src.model import get_pose_net
@@ -62,8 +62,8 @@ if __name__ == '__main__':
                                            num_parallel_workers=1, shuffle=False, num_shards=1, shard_id=0)
 
     trans = Compose([
-        py_vision.ToTensor(),
-        py_vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+        vision.ToTensor(),
+        vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
     ])
 
     test_dataloader = test_dataloader.map(operations=trans, input_columns="input", num_parallel_workers=1)
diff --git a/research/cv/PDarts/src/dataset.py b/research/cv/PDarts/src/dataset.py
index e08fd37c9..2309590cd 100644
--- a/research/cv/PDarts/src/dataset.py
+++ b/research/cv/PDarts/src/dataset.py
@@ -15,8 +15,8 @@
 """Read train and eval data"""
 import mindspore.dataset as ds
 from mindspore.common import dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 
diff --git a/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py b/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
index 2e4e2111c..66efd4c18 100644
--- a/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
+++ b/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
@@ -22,7 +22,7 @@ import numpy as np
 from PIL import Image
 import mindspore
 from mindspore import dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.utils.config import config
 
 class pix2pixDataset():
diff --git a/research/cv/ReIDStrongBaseline/src/dataset.py b/research/cv/ReIDStrongBaseline/src/dataset.py
index 25fd91b51..6955853c0 100644
--- a/research/cv/ReIDStrongBaseline/src/dataset.py
+++ b/research/cv/ReIDStrongBaseline/src/dataset.py
@@ -18,7 +18,7 @@ import math
 import random
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import numpy as np
 from PIL import Image
 
diff --git a/research/cv/RefineDet/src/dataset.py b/research/cv/RefineDet/src/dataset.py
index 9a400aa3f..bac268b49 100644
--- a/research/cv/RefineDet/src/dataset.py
+++ b/research/cv/RefineDet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .box_utils import jaccard_numpy, refinedet_bboxes_encode, box_init
 
diff --git a/research/cv/RefineNet/src/dataset.py b/research/cv/RefineNet/src/dataset.py
index 50f68bf46..22b40d204 100644
--- a/research/cv/RefineNet/src/dataset.py
+++ b/research/cv/RefineNet/src/dataset.py
@@ -15,7 +15,7 @@
 """ dataset """
 import numpy as np
 import cv2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as ds
 from mindspore.common import set_seed
 cv2.setNumThreads(0)
diff --git a/research/cv/ResNeSt50/src/datasets/autoaug.py b/research/cv/ResNeSt50/src/datasets/autoaug.py
index 1eb4c5a1b..bfc1acd66 100644
--- a/research/cv/ResNeSt50/src/datasets/autoaug.py
+++ b/research/cv/ResNeSt50/src/datasets/autoaug.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 import random
 import numpy as np
 import PIL
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 RESAMPLE_MODE = PIL.Image.BICUBIC
 
@@ -175,8 +175,8 @@ class RandAugment:
         self.n = n
         self.m = m
         self.augment_list = rand_augment_list()
-        self.to_pil = py_trans.ToPIL()
-        self.to_tensor = py_trans.ToTensor()
+        self.to_pil = vision.ToPIL()
+        self.to_tensor = vision.ToTensor()
         self.from_pil = from_pil
         self.as_pil = as_pil
 
diff --git a/research/cv/ResNeSt50/src/datasets/dataset.py b/research/cv/ResNeSt50/src/datasets/dataset.py
index 3fbea0409..3aaa5e8e8 100644
--- a/research/cv/ResNeSt50/src/datasets/dataset.py
+++ b/research/cv/ResNeSt50/src/datasets/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,9 +16,8 @@
 import os
 
 import mindspore.dataset as dataset
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
-import mindspore.dataset.vision.py_transforms as P_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from mindspore.common import dtype as mstype
 
 from src.datasets.autoaug import RandAugment
@@ -44,10 +43,10 @@ def ImageNet(root, mode,
                 V_C.RandomResizedCrop(crop_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
                 V_C.RandomHorizontalFlip(prob=0.5),
                 V_C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
-                P_C.ToPIL(),
+                V_C.ToPIL(),
                 RandAugment(2, 12, True, True),
-                P_C.ToTensor(),
-                P_C.Normalize(mean=mean, std=std)]
+                V_C.ToTensor(),
+                V_C.Normalize(mean=mean, std=std, is_hwc=False)]
         else:
             mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
             std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
@@ -55,7 +54,7 @@ def ImageNet(root, mode,
                 V_C.Decode(),
                 V_C.Resize((320, 320)),
                 V_C.CenterCrop(256),
-                V_C.Normalize(mean=mean, std=std),
+                V_C.Normalize(mean=mean, std=std, is_hwc=True),
                 V_C.HWC2CHW()]
     else:
         transform_img = transform
diff --git a/research/cv/SE-Net/src/dataset.py b/research/cv/SE-Net/src/dataset.py
index 0b8671e27..22750915f 100644
--- a/research/cv/SE-Net/src/dataset.py
+++ b/research/cv/SE-Net/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
diff --git a/research/cv/SE_ResNeXt50/src/dataset.py b/research/cv/SE_ResNeXt50/src/dataset.py
index 85f97ed70..9e12fefc0 100644
--- a/research/cv/SE_ResNeXt50/src/dataset.py
+++ b/research/cv/SE_ResNeXt50/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.config import imagenet_cfg
 
 
diff --git a/research/cv/SPPNet/src/dataset.py b/research/cv/SPPNet/src/dataset.py
index 108114c96..36aad4725 100644
--- a/research/cv/SPPNet/src/dataset.py
+++ b/research/cv/SPPNet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ Produce the dataset
 
 import os
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 from mindspore.communication.management import get_rank, get_group_size
 
 
diff --git a/research/cv/STGAN/modelarts/dataset/celeba.py b/research/cv/STGAN/modelarts/dataset/celeba.py
index bf65edaab..c342793e1 100644
--- a/research/cv/STGAN/modelarts/dataset/celeba.py
+++ b/research/cv/STGAN/modelarts/dataset/celeba.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import multiprocessing
 import numpy as np
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 from mindspore import context
 from mindspore.context import ParallelMode
diff --git a/research/cv/STGAN/src/dataset/celeba.py b/research/cv/STGAN/src/dataset/celeba.py
index bf65edaab..c342793e1 100644
--- a/research/cv/STGAN/src/dataset/celeba.py
+++ b/research/cv/STGAN/src/dataset/celeba.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import multiprocessing
 import numpy as np
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 from mindspore import context
 from mindspore.context import ParallelMode
diff --git a/research/cv/SiamFC/ModelArts/start_train.py b/research/cv/SiamFC/ModelArts/start_train.py
index 303e8df7c..99f0074c4 100644
--- a/research/cv/SiamFC/ModelArts/start_train.py
+++ b/research/cv/SiamFC/ModelArts/start_train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,7 +30,7 @@ from mindspore import nn
 from mindspore.train import Model
 from mindspore import Tensor
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.transforms as data_trans
 from mindspore.train.serialization import load_checkpoint, export, load_param_into_net
 from src.config import config
 from src.create_lmdb import create_lmdb
@@ -104,12 +104,12 @@ def train(args):
 
     set_seed(1234)
     random_crop_size = config.instance_size - 2 * config.total_stride
-    train_z_transforms = py_transforms.Compose([
+    train_z_transforms = data_trans.Compose([
         RandomStretch(),
         CenterCrop((config.exemplar_size, config.exemplar_size)),
         ToTensor()
     ])
-    train_x_transforms = py_transforms.Compose([
+    train_x_transforms = data_trans.Compose([
         RandomStretch(),
         RandomCrop((random_crop_size, random_crop_size),
                    config.max_translate),
diff --git a/research/cv/SiamFC/train.py b/research/cv/SiamFC/train.py
index c38b288d2..f4f2a6338 100644
--- a/research/cv/SiamFC/train.py
+++ b/research/cv/SiamFC/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import mindspore.dataset as ds
 from mindspore import nn
 from mindspore.train import Model
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.transforms as data_trans
 from src.config import config
 from src.alexnet import SiameseAlexNet
 from src.dataset import ImagnetVIDDataset
@@ -44,12 +44,12 @@ def train(data_dir):
 
     set_seed(1234)
     random_crop_size = config.instance_size - 2 * config.total_stride
-    train_z_transforms = py_transforms.Compose([
+    train_z_transforms = data_trans.Compose([
         RandomStretch(),
         CenterCrop((config.exemplar_size, config.exemplar_size)),
         ToTensor()
     ])
-    train_x_transforms = py_transforms.Compose([
+    train_x_transforms = data_trans.Compose([
         RandomStretch(),
         RandomCrop((random_crop_size, random_crop_size),
                    config.max_translate),
diff --git a/research/cv/StarGAN/src/dataset.py b/research/cv/StarGAN/src/dataset.py
index 22813c290..0d31e4564 100644
--- a/research/cv/StarGAN/src/dataset.py
+++ b/research/cv/StarGAN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import multiprocessing
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.dataset as de
 
 from src.utils import DistributedSampler
@@ -146,14 +146,14 @@ def get_loader(data_root, attr_path, selected_attrs, crop_size=178, image_size=1
     """Build and return a data loader."""
     mean = [0.5, 0.5, 0.5]
     std = [0.5, 0.5, 0.5]
-    transform = [py_vision.ToPIL()]
+    transform = [vision.ToPIL()]
     if mode == 'train':
-        transform.append(py_vision.RandomHorizontalFlip())
-        transform.append(py_vision.CenterCrop(crop_size))
-    transform.append(py_vision.Resize([image_size, image_size]))
-    transform.append(py_vision.ToTensor())
-    transform.append(py_vision.Normalize(mean=mean, std=std))
-    transform = py_transforms.Compose(transform)
+        transform.append(vision.RandomHorizontalFlip())
+        transform.append(vision.CenterCrop(crop_size))
+    transform.append(vision.Resize([image_size, image_size]))
+    transform.append(vision.ToTensor())
+    transform.append(vision.Normalize(mean=mean, std=std, is_hwc=False))
+    transform = data_trans.Compose(transform)
 
     if dataset == 'CelebA':
         dataset = CelebA(data_root, attr_path, selected_attrs, transform, mode)
diff --git a/research/cv/TCN/src/dataset.py b/research/cv/TCN/src/dataset.py
index f68fe3e6d..413923a1a 100644
--- a/research/cv/TCN/src/dataset.py
+++ b/research/cv/TCN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@
 """
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 from mindspore import dtype as mstype
 
 np.random.seed(0)
diff --git a/research/cv/TNT/src/data/imagenet.py b/research/cv/TNT/src/data/imagenet.py
index c0fb4832e..95cb688a7 100644
--- a/research/cv/TNT/src/data/imagenet.py
+++ b/research/cv/TNT/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import _pil_interp, rand_augment_transform
@@ -94,12 +93,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.BICUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/U-GAT-IT/src/dataset/dataset.py b/research/cv/U-GAT-IT/src/dataset/dataset.py
index 9a12bb4ab..fade60c4c 100644
--- a/research/cv/U-GAT-IT/src/dataset/dataset.py
+++ b/research/cv/U-GAT-IT/src/dataset/dataset.py
@@ -22,7 +22,7 @@ import math
 
 import numpy as np
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 
@@ -30,18 +30,18 @@ from mindspore.communication.management import get_rank, get_group_size
 def TrainDataLoader(img_size, data_path, dataset, batch_size, distributed):
     """ DataLoader """
     train_transform = [
-        py_vision.ToPIL(),
-        py_vision.RandomHorizontalFlip(),
-        py_vision.Resize((img_size + 30, img_size + 30)),
-        py_vision.RandomCrop(img_size),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        vision.ToPIL(),
+        vision.RandomHorizontalFlip(),
+        vision.Resize((img_size + 30, img_size + 30)),
+        vision.RandomCrop(img_size),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False),
     ]
     test_transform = [
-        py_vision.ToPIL(),
-        py_vision.Resize((img_size, img_size)),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        vision.ToPIL(),
+        vision.Resize((img_size, img_size)),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False),
     ]
     rank_size = 1
     if distributed:
@@ -76,10 +76,10 @@ def TrainDataLoader(img_size, data_path, dataset, batch_size, distributed):
 def TestDataLoader(img_size, data_path, dataset):
     """ DataLoader """
     test_transform = [
-        py_vision.ToPIL(),
-        py_vision.Resize((img_size, img_size)),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        vision.ToPIL(),
+        vision.Resize((img_size, img_size)),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False),
     ]
     testA_generator = GetDatasetGenerator(os.path.join(data_path, dataset), 'test')
     testA = ds.GeneratorDataset(testA_generator, ["image_A", "image_B"], shuffle=False, num_parallel_workers=12)
diff --git a/research/cv/UNet3+/src/dataset.py b/research/cv/UNet3+/src/dataset.py
index 277178821..eb8ccb3df 100644
--- a/research/cv/UNet3+/src/dataset.py
+++ b/research/cv/UNet3+/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from skimage.io import imread
 from skimage import color
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 class Dataset:
     '''Dataset'''
diff --git a/research/cv/VehicleNet/src/dataset.py b/research/cv/VehicleNet/src/dataset.py
index 58fc5ba38..04bd4ea81 100644
--- a/research/cv/VehicleNet/src/dataset.py
+++ b/research/cv/VehicleNet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,9 +22,8 @@ from mindspore.mindrecord import FileWriter
 import mindspore.dataset as ds
 from mindspore.dataset.vision import Inter
 import mindspore.common.dtype as mstype
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as P_C
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 class Dataset:
     """Dataset"""
@@ -151,9 +150,9 @@ def create_vehiclenet_dataset(mindrecord_file, batch_size=1, device_num=1, is_tr
 
     if is_training:
         if use_aug:
-            py_to_pil_op = P_C.ToPIL()
+            py_to_pil_op = C.ToPIL()
             autoaugment_op = ImageNetPolicy()
-            to_tensor_op = P_C.ToTensor()
+            to_tensor_op = C.ToTensor()
             transforms_list += [py_to_pil_op, autoaugment_op, to_tensor_op]
 
         resized_op = C.Resize([train_inputsize, train_inputsize], interpolation=Inter.BICUBIC)
diff --git a/research/cv/ViG/src/data/imagenet.py b/research/cv/ViG/src/data/imagenet.py
index 0e4ad3790..8fb4e3cb1 100644
--- a/research/cv/ViG/src/data/imagenet.py
+++ b/research/cv/ViG/src/data/imagenet.py
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import _pil_interp, rand_augment_transform
@@ -94,12 +93,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.BICUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/Yolact++/src/dataset.py b/research/cv/Yolact++/src/dataset.py
index 6566291c4..e7b277e3b 100644
--- a/research/cv/Yolact++/src/dataset.py
+++ b/research/cv/Yolact++/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import mmcv
 import numpy as np
 from numpy import random
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.config import MEANS
 from src.config import yolact_plus_resnet50_config as cfg
diff --git a/research/cv/advanced_east/src/dataset.py b/research/cv/advanced_east/src/dataset.py
index ea040089c..74949f617 100644
--- a/research/cv/advanced_east/src/dataset.py
+++ b/research/cv/advanced_east/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ dataset.
 import os
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.vision as vision
 from mindspore.mindrecord import FileWriter
 import numpy as np
 from PIL import Image, ImageFile
diff --git a/research/cv/arcface/src/dataset.py b/research/cv/arcface/src/dataset.py
index 5cef4e2f3..b7ce783e3 100644
--- a/research/cv/arcface/src/dataset.py
+++ b/research/cv/arcface/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ python dataset.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/augvit/src/c10_dataset.py b/research/cv/augvit/src/c10_dataset.py
index b6042682f..ad36d6e66 100644
--- a/research/cv/augvit/src/c10_dataset.py
+++ b/research/cv/augvit/src/c10_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as c_transforms
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as data_trans
+import mindspore.dataset.vision as vision
 
 def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch_size=1):
     """
@@ -63,7 +63,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
     normalize_op = vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if do_train:
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/aug.py b/research/cv/autoaugment/src/dataset/autoaugment/aug.py
index 2c6a33b5e..83bda35cb 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/aug.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/aug.py
@@ -18,7 +18,7 @@ The Augment operator.
 
 import random
 
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from .third_party.policies import good_policies
 from .third_party.policies import svhn_good_policies
@@ -61,8 +61,8 @@ class Augment:
             self.policies = policies
 
         self.oc = OperatorClasses()
-        self.to_pil = py_trans.ToPIL()
-        self.to_tensor = py_trans.ToTensor()
+        self.to_pil = vision.ToPIL()
+        self.to_tensor = vision.ToTensor()
 
         self.enable_basic = enable_basic
         self.random_crop = self.oc.RandomCrop(None)
@@ -73,7 +73,7 @@ class Augment:
         self.as_pil = as_pil
         self.normalize = None
         if mean is not None and std is not None:
-            self.normalize = py_trans.Normalize(mean, std)
+            self.normalize = vision.Normalize(mean, std, is_hwc=False)
 
     def _apply(self, name, prob, level, img):
         if random.random() > prob:
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/__init__.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/__init__.py
index f001d2520..fba0aa2e9 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/__init__.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/__init__.py
@@ -16,7 +16,7 @@
 Package initialization for custom PIL operators.
 """
 
-from mindspore.dataset.vision import py_transforms
+from mindspore.dataset.vision import transforms
 
 from .crop import RandomCrop
 from .cutout import RandomCutout
@@ -41,9 +41,9 @@ from .transform import (
 
 class OperatorClasses:
     """OperatorClasses gathers all unary-image transformations listed in the
-    Table 6 of https://arxiv.org/abs/1805.09501 and uses discrte levels for
-    these transformations (The Sample Pairing transformation is an
-    exception, which involes multiple images from a single mini-batch and
+    Table 6 of https://arxiv.org/abs/1805.09501 and uses discrete levels for
+    these transformations. (The Sample Pairing transformation is an
+    exception, which involves multiple images from a single mini-batch and
     is not exploited in this implementation.)
 
     Additionally, there are RandomHorizontalFlip and RandomCrop.
@@ -56,9 +56,9 @@ class OperatorClasses:
         self.TranslateX = self.decorate(TranslateX, max_val=10, rounding=True)
         self.TranslateY = self.decorate(TranslateY, max_val=10, rounding=True)
 
-        self.AutoContrast = self.decorate(py_transforms.AutoContrast)
-        self.Invert = self.decorate(py_transforms.Invert)
-        self.Equalize = self.decorate(py_transforms.Equalize)
+        self.AutoContrast = self.decorate(transforms.AutoContrast)
+        self.Invert = self.decorate(transforms.Invert)
+        self.Equalize = self.decorate(transforms.Equalize)
 
         self.Solarize = self.decorate(
             Solarize, max_val=256, rounding=True, post=lambda x: 256 - x)
@@ -76,7 +76,7 @@ class OperatorClasses:
         self.Cutout = self.decorate(RandomCutout, max_val=20, rounding=True)
 
         self.RandomHorizontalFlip = self.decorate(
-            py_transforms.RandomHorizontalFlip)
+            transforms.RandomHorizontalFlip)
         self.RandomCrop = self.decorate(RandomCrop)
 
     def vars(self):
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
index dd42ad5d3..137236bea 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
@@ -16,17 +16,17 @@
 RandomCrop operator.
 """
 
-from mindspore.dataset.vision import py_transforms
+from mindspore.dataset.vision import transforms
 from mindspore.dataset.vision import py_transforms_util
 from mindspore.dataset.vision import utils
 
 
-class RandomCrop(py_transforms.RandomCrop):
+class RandomCrop(transforms.RandomCrop):
     """
-    RandomCrop inherits from py_transforms.RandomCrop but derives/uses the
+    RandomCrop inherits from transforms.RandomCrop but derives/uses the
     original image size as the output size.
 
-    Please refer to py_transforms.RandomCrop for argument specifications.
+    Please refer to transforms.RandomCrop for argument specifications.
     """
 
     def __init__(self, padding=4, pad_if_needed=False,
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/cutout.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/cutout.py
index 8d3e2d594..7286d24a3 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/cutout.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/cutout.py
@@ -21,7 +21,7 @@ import random
 
 class RandomCutout:
     """
-    RandomCutout is similar to py_transforms.Cutout but is simplified and
+    RandomCutout is similar to transforms.CutOut but is simplified and
     crafted for PIL images.
 
     Args:
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/ops_test.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/ops_test.py
index ebe85e632..b4575363a 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/ops_test.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/ops_test.py
@@ -19,7 +19,7 @@ Visualization for testing purposes.
 import matplotlib.pyplot as plt
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 from mindspore import context
 context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU')
 
@@ -36,7 +36,7 @@ def compare(data_path, trans, output_path='./ops_test.png'):
 
     # Apply transformations
     dataset_augmented = dataset_orig.map(
-        operations=[py_trans.ToPIL()] + trans + [py_trans.ToTensor()],
+        operations=[vision.ToPIL()] + trans + [vision.ToTensor()],
         input_columns=['image'],
     )
 
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/transform.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/transform.py
index c69a7401c..7974af711 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/transform.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/transform.py
@@ -21,7 +21,6 @@ import random
 
 from PIL import Image, __version__
 
-from mindspore.dataset.vision.py_transforms import DE_PY_INTER_MODE
 from mindspore.dataset.vision.py_transforms_util import (
     augment_error_message,
     is_pil,
@@ -46,7 +45,7 @@ class ShearX:
             raise TypeError('shear must be a single number.')
 
         self.shear = shear
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.fill_value = fill_value
 
     def __call__(self, img):
@@ -91,7 +90,7 @@ class ShearY:
             raise TypeError('shear must be a single number.')
 
         self.shear = shear
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.fill_value = fill_value
 
     def __call__(self, img):
@@ -136,7 +135,7 @@ class TranslateX:
             raise TypeError('translate must be a single number.')
 
         self.translate = translate
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.fill_value = fill_value
 
     def __call__(self, img):
@@ -181,7 +180,7 @@ class TranslateY:
             raise TypeError('Translate must be a single number.')
 
         self.translate = translate
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.fill_value = fill_value
 
     def __call__(self, img):
@@ -212,12 +211,12 @@ class TranslateY:
 
 class Rotate:
     """
-    Rotate is similar to py_vision.RandomRotation but uses a fixed degree.
+    Rotate is similar to mindspore.dataset.vision.RandomRotation but uses a fixed degree.
 
     Args:
         degree (int): the degree to rotate.
 
-    Please refer to py_transforms.RandomRotation for more argument
+    Please refer to mindspore.dataset.vision.RandomRotation for more argument
     specifications.
     """
 
@@ -229,7 +228,7 @@ class Rotate:
             raise TypeError('degree must be a single number.')
 
         self.degree = degree
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.expand = expand
         self.center = center
         self.fill_value = fill_value
diff --git a/research/cv/autoaugment/src/dataset/cifar10.py b/research/cv/autoaugment/src/dataset/cifar10.py
index 25724a9cf..166c78659 100644
--- a/research/cv/autoaugment/src/dataset/cifar10.py
+++ b/research/cv/autoaugment/src/dataset/cifar10.py
@@ -20,8 +20,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
 from mindspore.communication.management import init
diff --git a/research/cv/autoaugment/src/dataset/svhn_dataset.py b/research/cv/autoaugment/src/dataset/svhn_dataset.py
index fa95bf169..070b877ef 100644
--- a/research/cv/autoaugment/src/dataset/svhn_dataset.py
+++ b/research/cv/autoaugment/src/dataset/svhn_dataset.py
@@ -20,8 +20,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
 from mindspore.communication.management import init
diff --git a/research/cv/cait/src/data/imagenet.py b/research/cv/cait/src/data/imagenet.py
index 1539daa2c..2eb8354c8 100644
--- a/research/cv/cait/src/data/imagenet.py
+++ b/research/cv/cait/src/data/imagenet.py
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.data.augment.auto_augment import rand_augment_transform
 from src.data.augment.mixup import Mixup
@@ -91,26 +90,26 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
         assert auto_augment.startswith('rand')
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             RandomResizedCropAndInterpolation(size=args.image_size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
                                               interpolation=interpolation),
-            py_vision.RandomHorizontalFlip(prob=0.5),
+            vision.RandomHorizontalFlip(prob=0.5),
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)]
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)]
         if args.re_prob > 0.:
             transform_img += [RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)]
     else:
         # test transform complete
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             Resize(int(args.image_size / args.crop_pct), interpolation="bicubic"),
-            py_vision.CenterCrop(image_size),
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)
+            vision.CenterCrop(image_size),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)
         ]
 
     transform_label = C.TypeCast(mstype.int32)
diff --git a/research/cv/cct/src/data/cifar10.py b/research/cv/cct/src/data/cifar10.py
index dceac7f86..dbfe0dcdb 100644
--- a/research/cv/cct/src/data/cifar10.py
+++ b/research/cv/cct/src/data/cifar10.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.data.augment.auto_augment import rand_augment_transform
 from src.data.augment.mixup import Mixup
@@ -89,15 +89,15 @@ def create_dataset_cifar10(dataset_dir, args, repeat_num=1, training=True):
         auto_augment = args.auto_augment
         assert auto_augment.startswith('rand')
         transform_img = [
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             RandomResizedCropAndInterpolation(size=args.image_size, scale=(0.8, 1.0), ratio=(3. / 4., 4. / 3.),
                                               interpolation=interpolation),
-            py_vision.RandomHorizontalFlip(prob=0.5),
+            vision.RandomHorizontalFlip(prob=0.5),
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)]
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)]
         if args.re_prob > 0.:
             transform_img += [RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)]
     else:
@@ -105,10 +105,10 @@ def create_dataset_cifar10(dataset_dir, args, repeat_num=1, training=True):
         std = [0.2470, 0.2435, 0.2616]
         # test transform complete
         transform_img = [
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             Resize(int(image_size), interpolation="bicubic"),
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)
         ]
 
     transform_label = C.TypeCast(mstype.int32)
diff --git a/research/cv/cct/src/data/imagenet.py b/research/cv/cct/src/data/imagenet.py
index e512c685c..7d1f0a2f3 100644
--- a/research/cv/cct/src/data/imagenet.py
+++ b/research/cv/cct/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.data.augment.auto_augment import rand_augment_transform
 from src.data.augment.mixup import Mixup
@@ -92,26 +91,26 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
         assert auto_augment.startswith('rand')
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             RandomResizedCropAndInterpolation(size=args.image_size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
                                               interpolation=interpolation),
-            py_vision.RandomHorizontalFlip(prob=0.5),
+            vision.RandomHorizontalFlip(prob=0.5),
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)]
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)]
         if args.re_prob > 0.:
             transform_img += [RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)]
     else:
         # test transform complete
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             Resize(int(args.image_size / args.crop_pct), interpolation="bicubic"),
-            py_vision.CenterCrop(image_size),
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)
+            vision.CenterCrop(image_size),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)
         ]
 
     transform_label = C.TypeCast(mstype.int32)
diff --git a/research/cv/convnext/src/data/imagenet.py b/research/cv/convnext/src/data/imagenet.py
index 7aa5c2c63..5a0cdcffe 100644
--- a/research/cv/convnext/src/data/imagenet.py
+++ b/research/cv/convnext/src/data/imagenet.py
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import pil_interp, rand_augment_transform
@@ -94,12 +93,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.PILCUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/dcgan/src/dataset.py b/research/cv/dcgan/src/dataset.py
index 74a467a75..4e0c06772 100644
--- a/research/cv/dcgan/src/dataset.py
+++ b/research/cv/dcgan/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import numpy as np
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.config import dcgan_imagenet_cfg, dcgan_cifar10_cfg
 
 
diff --git a/research/cv/delf/src/data_augmentation_parallel.py b/research/cv/delf/src/data_augmentation_parallel.py
index 1428b0727..fe5571299 100755
--- a/research/cv/delf/src/data_augmentation_parallel.py
+++ b/research/cv/delf/src/data_augmentation_parallel.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ import os
 
 from mindspore import dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 def create_dataset(data_path, image_size=321, batch_size=32, seed=0, augmentation=True, repeat=True):
     """create dataset"""
diff --git a/research/cv/ecolite/src/transforms.py b/research/cv/ecolite/src/transforms.py
index 06e4d14be..c72ee0100 100644
--- a/research/cv/ecolite/src/transforms.py
+++ b/research/cv/ecolite/src/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import numbers
 import math
 from PIL import Image, ImageOps
 import numpy as np
-from mindspore.dataset.vision import py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 
 class GroupRandomCrop:
@@ -55,7 +55,7 @@ class GroupCenterCrop:
     """GroupCenterCrop"""
 
     def __init__(self, size):
-        self.worker = py_trans.CenterCrop(size)
+        self.worker = vision.CenterCrop(size)
 
     def __call__(self, img_group):
         return [self.worker(img) for img in img_group]
diff --git a/research/cv/efficientnet-b0/src/dataset.py b/research/cv/efficientnet-b0/src/dataset.py
index 56602a36c..64906ba88 100644
--- a/research/cv/efficientnet-b0/src/dataset.py
+++ b/research/cv/efficientnet-b0/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/efficientnet-b1/src/dataset.py b/research/cv/efficientnet-b1/src/dataset.py
index 1c87e9973..373f0995f 100644
--- a/research/cv/efficientnet-b1/src/dataset.py
+++ b/research/cv/efficientnet-b1/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/efficientnet-b2/src/dataset.py b/research/cv/efficientnet-b2/src/dataset.py
index c72f7fd3b..696400856 100644
--- a/research/cv/efficientnet-b2/src/dataset.py
+++ b/research/cv/efficientnet-b2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/efficientnet-b3/src/dataset.py b/research/cv/efficientnet-b3/src/dataset.py
index 6c1347d02..5c571624e 100644
--- a/research/cv/efficientnet-b3/src/dataset.py
+++ b/research/cv/efficientnet-b3/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/efficientnetv2/src/data/imagenet_finetune.py b/research/cv/efficientnetv2/src/data/imagenet_finetune.py
index 600615183..1029f0081 100644
--- a/research/cv/efficientnetv2/src/data/imagenet_finetune.py
+++ b/research/cv/efficientnetv2/src/data/imagenet_finetune.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 import numpy as np
 
 from .data_utils.moxing_adapter import sync_data
diff --git a/research/cv/eppmvsnet/src/blendedmvs.py b/research/cv/eppmvsnet/src/blendedmvs.py
index 21acd1e04..123a6ba0c 100644
--- a/research/cv/eppmvsnet/src/blendedmvs.py
+++ b/research/cv/eppmvsnet/src/blendedmvs.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ import cv2
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 
 from src.utils import read_pfm
 
@@ -202,15 +202,15 @@ class BlendedMVSDataset:
     def define_transforms(self):
         if self.training_tag and self.split == 'train':  # you can add augmentation here
             self.transform = Compose([
-                py_vision.ToTensor(),
-                py_vision.Normalize(mean=[0.485, 0.456, 0.406],
-                                    std=[0.229, 0.224, 0.225]),
+                vision.ToTensor(),
+                vision.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225], is_hwc=False),
             ])
         else:
             self.transform = Compose([
-                py_vision.ToTensor(),
-                py_vision.Normalize(mean=[0.485, 0.456, 0.406],
-                                    std=[0.229, 0.224, 0.225]),
+                vision.ToTensor(),
+                vision.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225], is_hwc=False),
             ])
 
     def __len__(self):
diff --git a/research/cv/faster_rcnn_dcn/src/dataset.py b/research/cv/faster_rcnn_dcn/src/dataset.py
index 1f104189d..23697df6a 100644
--- a/research/cv/faster_rcnn_dcn/src/dataset.py
+++ b/research/cv/faster_rcnn_dcn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ from numpy import random
 
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 
diff --git a/research/cv/fishnet99/src/dataset.py b/research/cv/fishnet99/src/dataset.py
index a5e72f198..8be01542b 100644
--- a/research/cv/fishnet99/src/dataset.py
+++ b/research/cv/fishnet99/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.config import imagenet_cfg
 
 
diff --git a/research/cv/ghostnet/src/dataset.py b/research/cv/ghostnet/src/dataset.py
index 05acf36e1..28a272e3f 100644
--- a/research/cv/ghostnet/src/dataset.py
+++ b/research/cv/ghostnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_id=0, batch_size=128):
diff --git a/research/cv/ghostnet_quant/src/dataset.py b/research/cv/ghostnet_quant/src/dataset.py
index edee462b4..6c56662cb 100644
--- a/research/cv/ghostnet_quant/src/dataset.py
+++ b/research/cv/ghostnet_quant/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,9 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.vision.c_transforms as C
-import mindspore.dataset.transforms.vision.py_transforms as P
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
-from mindspore.dataset.transforms.vision import Inter
+from mindspore.dataset.vision import Inter
 
 
@@ -73,18 +72,18 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
     change_swap_op = C.HWC2CHW()
 
     # define python operations
-    decode_p = P.Decode()
+    decode_p = C.Decode(True)
     if model == 'ghostnet-600':
         s = 274
         c = 240
     else:
         s = 256
         c = 224
-    resize_p = P.Resize(s, interpolation=Inter.BICUBIC)
-    center_crop_p = P.CenterCrop(c)
-    totensor = P.ToTensor()
-    normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
-    composeop = P.ComposeOp(
+    resize_p = C.Resize(s, interpolation=Inter.BICUBIC)
+    center_crop_p = C.CenterCrop(c)
+    totensor = C.ToTensor()
+    normalize_p = C.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
+    composeop = C2.Compose(
         [decode_p, resize_p, center_crop_p, totensor, normalize_p])
     if do_train:
         trans = [resize_crop_op, horizontal_flip_op, color_op,
diff --git a/research/cv/glore_res/src/autoaugment.py b/research/cv/glore_res/src/autoaugment.py
index 35ef907c1..0cc9e6569 100644
--- a/research/cv/glore_res/src/autoaugment.py
+++ b/research/cv/glore_res/src/autoaugment.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 """define autoaugment"""
 import os
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as c_transforms
-import mindspore.dataset.vision.c_transforms as c_vision
+import mindspore.dataset.transforms as data_trans
+import mindspore.dataset.vision as vision
 from mindspore import dtype as mstype
 from mindspore.communication.management import init, get_rank, get_group_size
 
@@ -34,101 +34,101 @@ def int_parameter(level, maxval):
 
 def shear_x(level):
     v = float_parameter(level, 0.3)
-    return c_transforms.RandomChoice(
-        [c_vision.RandomAffine(degrees=0, shear=(-v, -v)), c_vision.RandomAffine(degrees=0, shear=(v, v))])
+    return data_trans.RandomChoice(
+        [vision.RandomAffine(degrees=0, shear=(-v, -v)), vision.RandomAffine(degrees=0, shear=(v, v))])
 
 
 def shear_y(level):
     v = float_parameter(level, 0.3)
-    return c_transforms.RandomChoice(
-        [c_vision.RandomAffine(degrees=0, shear=(0, 0, -v, -v)), c_vision.RandomAffine(degrees=0, shear=(0, 0, v, v))])
+    return data_trans.RandomChoice(
+        [vision.RandomAffine(degrees=0, shear=(0, 0, -v, -v)), vision.RandomAffine(degrees=0, shear=(0, 0, v, v))])
 
 
 def translate_x(level):
     v = float_parameter(level, 150 / 331)
-    return c_transforms.RandomChoice(
-        [c_vision.RandomAffine(degrees=0, translate=(-v, -v)), c_vision.RandomAffine(degrees=0, translate=(v, v))])
+    return data_trans.RandomChoice(
+        [vision.RandomAffine(degrees=0, translate=(-v, -v)), vision.RandomAffine(degrees=0, translate=(v, v))])
 
 
 def translate_y(level):
     v = float_parameter(level, 150 / 331)
-    return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, translate=(0, 0, -v, -v)),
-                                      c_vision.RandomAffine(degrees=0, translate=(0, 0, v, v))])
+    return data_trans.RandomChoice([vision.RandomAffine(degrees=0, translate=(0, 0, -v, -v)),
+                                    vision.RandomAffine(degrees=0, translate=(0, 0, v, v))])
 
 
 def color_impl(level):
     v = float_parameter(level, 1.8) + 0.1
-    return c_vision.RandomColor(degrees=(v, v))
+    return vision.RandomColor(degrees=(v, v))
 
 
 def rotate_impl(level):
     v = int_parameter(level, 30)
-    return c_transforms.RandomChoice(
-        [c_vision.RandomRotation(degrees=(-v, -v)), c_vision.RandomRotation(degrees=(v, v))])
+    return data_trans.RandomChoice(
+        [vision.RandomRotation(degrees=(-v, -v)), vision.RandomRotation(degrees=(v, v))])
 
 
 def solarize_impl(level):
     level = int_parameter(level, 256)
     v = 256 - level
-    return c_vision.RandomSolarize(threshold=(0, v))
+    return vision.RandomSolarize(threshold=(0, v))
 
 
 def posterize_impl(level):
     level = int_parameter(level, 4)
     v = 4 - level
-    return c_vision.RandomPosterize(bits=(v, v))
+    return vision.RandomPosterize(bits=(v, v))
 
 
 def contrast_impl(level):
     v = float_parameter(level, 1.8) + 0.1
-    return c_vision.RandomColorAdjust(contrast=(v, v))
+    return vision.RandomColorAdjust(contrast=(v, v))
 
 
 def autocontrast_impl(level):
-    return c_vision.AutoContrast()
+    return vision.AutoContrast()
 
 
 def sharpness_impl(level):
     v = float_parameter(level, 1.8) + 0.1
-    return c_vision.RandomSharpness(degrees=(v, v))
+    return vision.RandomSharpness(degrees=(v, v))
 
 
 def brightness_impl(level):
     v = float_parameter(level, 1.8) + 0.1
-    return c_vision.RandomColorAdjust(brightness=(v, v))
+    return vision.RandomColorAdjust(brightness=(v, v))
 
 
 # define the Auto Augmentation policy
 imagenet_policy = [
     [(posterize_impl(8), 0.4), (rotate_impl(9), 0.6)],
     [(solarize_impl(5), 0.6), (autocontrast_impl(5), 0.6)],
-    [(c_vision.Equalize(), 0.8), (c_vision.Equalize(), 0.6)],
+    [(vision.Equalize(), 0.8), (vision.Equalize(), 0.6)],
     [(posterize_impl(7), 0.6), (posterize_impl(6), 0.6)],
-    [(c_vision.Equalize(), 0.4), (solarize_impl(4), 0.2)],
+    [(vision.Equalize(), 0.4), (solarize_impl(4), 0.2)],
 
-    [(c_vision.Equalize(), 0.4), (rotate_impl(8), 0.8)],
-    [(solarize_impl(3), 0.6), (c_vision.Equalize(), 0.6)],
-    [(posterize_impl(5), 0.8), (c_vision.Equalize(), 1.0)],
+    [(vision.Equalize(), 0.4), (rotate_impl(8), 0.8)],
+    [(solarize_impl(3), 0.6), (vision.Equalize(), 0.6)],
+    [(posterize_impl(5), 0.8), (vision.Equalize(), 1.0)],
     [(rotate_impl(3), 0.2), (solarize_impl(8), 0.6)],
-    [(c_vision.Equalize(), 0.6), (posterize_impl(6), 0.4)],
+    [(vision.Equalize(), 0.6), (posterize_impl(6), 0.4)],
 
     [(rotate_impl(8), 0.8), (color_impl(0), 0.4)],
-    [(rotate_impl(9), 0.4), (c_vision.Equalize(), 0.6)],
-    [(c_vision.Equalize(), 0.0), (c_vision.Equalize(), 0.8)],
-    [(c_vision.Invert(), 0.6), (c_vision.Equalize(), 1.0)],
+    [(rotate_impl(9), 0.4), (vision.Equalize(), 0.6)],
+    [(vision.Equalize(), 0.0), (vision.Equalize(), 0.8)],
+    [(vision.Invert(), 0.6), (vision.Equalize(), 1.0)],
     [(color_impl(4), 0.6), (contrast_impl(8), 1.0)],
 
     [(rotate_impl(8), 0.8), (color_impl(2), 1.0)],
     [(color_impl(8), 0.8), (solarize_impl(7), 0.8)],
-    [(sharpness_impl(7), 0.4), (c_vision.Invert(), 0.6)],
-    [(shear_x(5), 0.6), (c_vision.Equalize(), 1.0)],
-    [(color_impl(0), 0.4), (c_vision.Equalize(), 0.6)],
+    [(sharpness_impl(7), 0.4), (vision.Invert(), 0.6)],
+    [(shear_x(5), 0.6), (vision.Equalize(), 1.0)],
+    [(color_impl(0), 0.4), (vision.Equalize(), 0.6)],
 
-    [(c_vision.Equalize(), 0.4), (solarize_impl(4), 0.2)],
+    [(vision.Equalize(), 0.4), (solarize_impl(4), 0.2)],
     [(solarize_impl(5), 0.6), (autocontrast_impl(5), 0.6)],
-    [(c_vision.Invert(), 0.6), (c_vision.Equalize(), 1.0)],
+    [(vision.Invert(), 0.6), (vision.Equalize(), 1.0)],
     [(color_impl(4), 0.6), (contrast_impl(8), 1.0)],
-    [(c_vision.Equalize(), 0.8), (c_vision.Equalize(), 0.6)],
+    [(vision.Equalize(), 0.8), (vision.Equalize(), 0.6)],
 ]
 
 
@@ -153,19 +153,19 @@ def autoaugment(dataset_path, repeat_num=1, batch_size=32, target="Ascend"):
     mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
     std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
     trans = [
-        c_vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+        vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
     ]
 
     post_trans = [
-        c_vision.RandomHorizontalFlip(prob=0.5),
-        c_vision.Normalize(mean=mean, std=std),
-        c_vision.HWC2CHW()
+        vision.RandomHorizontalFlip(prob=0.5),
+        vision.Normalize(mean=mean, std=std),
+        vision.HWC2CHW()
     ]
     dataset = ds.map(operations=trans, input_columns="image")
-    dataset = dataset.map(operations=c_vision.RandomSelectSubpolicy(imagenet_policy), input_columns=["image"])
+    dataset = dataset.map(operations=vision.RandomSelectSubpolicy(imagenet_policy), input_columns=["image"])
     dataset = dataset.map(operations=post_trans, input_columns="image")
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
     dataset = dataset.map(operations=type_cast_op, input_columns="label")
     # apply the batch operation
     dataset = dataset.batch(batch_size, drop_remainder=True)
diff --git a/research/cv/glore_res/src/dataset.py b/research/cv/glore_res/src/dataset.py
index b7a5fdc9c..9a6ecc0d0 100644
--- a/research/cv/glore_res/src/dataset.py
+++ b/research/cv/glore_res/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,9 +18,9 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 from src.transform import RandAugment
 from src.config import config
diff --git a/research/cv/glore_res/src/transform.py b/research/cv/glore_res/src/transform.py
index cba6ea73a..83939be14 100644
--- a/research/cv/glore_res/src/transform.py
+++ b/research/cv/glore_res/src/transform.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 random augment class
 """
 import numpy as np
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as V
 from src import transform_utils
 
 IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
@@ -37,9 +37,9 @@ class RandAugment:
         # assert the imgs object are pil_images
         ret_imgs = []
         ret_labels = []
-        py_to_pil_op = P.ToPIL()
-        to_tensor = P.ToTensor()
-        normalize_op = P.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+        py_to_pil_op = V.ToPIL()
+        to_tensor = V.ToTensor()
+        normalize_op = V.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
         rand_augment_ops = transform_utils.rand_augment_transform(self.config_str, self.hparams)
         for i, image in enumerate(imgs):
             img_pil = py_to_pil_op(image)
diff --git a/research/cv/hardnet/src/dataset.py b/research/cv/hardnet/src/dataset.py
index 1955ca545..227033753 100644
--- a/research/cv/hardnet/src/dataset.py
+++ b/research/cv/hardnet/src/dataset.py
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset_ImageNet(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
diff --git a/research/cv/hed/src/dataset.py b/research/cv/hed/src/dataset.py
index a74551d2a..842d1438d 100644
--- a/research/cv/hed/src/dataset.py
+++ b/research/cv/hed/src/dataset.py
@@ -19,8 +19,8 @@ import cv2
 import numpy as np
 import mindspore
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C2
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as C2
+import mindspore.dataset.transforms as C
 mindspore.set_seed(1)
 
 def prepare_image_cv2(im):
diff --git a/research/cv/ibnnet/src/dataset.py b/research/cv/ibnnet/src/dataset.py
index fef7aae68..62c6c4308 100644
--- a/research/cv/ibnnet/src/dataset.py
+++ b/research/cv/ibnnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ python dataset.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/inception_resnet_v2/src/dataset.py b/research/cv/inception_resnet_v2/src/dataset.py
index 81912007b..0316efc28 100644
--- a/research/cv/inception_resnet_v2/src/dataset.py
+++ b/research/cv/inception_resnet_v2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, config=None):
diff --git a/research/cv/lresnet100e_ir/src/dataset.py b/research/cv/lresnet100e_ir/src/dataset.py
index d0ddade90..a03dadfb5 100644
--- a/research/cv/lresnet100e_ir/src/dataset.py
+++ b/research/cv/lresnet100e_ir/src/dataset.py
@@ -15,8 +15,8 @@
 """Create train or eval dataset."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 
 def create_dataset(dataset_path, do_train, img_shape, repeat_num=1, batch_size=32, run_distribute=False):
diff --git a/research/cv/mae/src/datasets/dataset.py b/research/cv/mae/src/datasets/dataset.py
index d1bcd1664..bb9ab3112 100644
--- a/research/cv/mae/src/datasets/dataset.py
+++ b/research/cv/mae/src/datasets/dataset.py
@@ -23,9 +23,8 @@ import numpy as np
 import mindspore.dataset as de
 import mindspore.common.dtype as mstype
 from mindspore.dataset.vision.utils import Inter
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 from src.datasets.mixup import Mixup
 from src.datasets.random_erasing import RandomErasing
@@ -111,12 +110,12 @@ def create_dataset(dataset_path,
             C.RandomCropDecodeResize(image_size, scale=(crop_min, 1.0), ratio=(3 / 4, 4 / 3),
                                      interpolation=interpolation),
             C.RandomHorizontalFlip(prob=hflip),
-            P.ToPIL()
+            C.ToPIL()
         ]
         trans += [rand_augment_transform(auto_augment, aa_params)]
         trans += [
-            P.ToTensor(),
-            P.Normalize(mean=mean, std=std),
+            C.ToTensor(),
+            C.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(probability=re_prop, mode=re_mode, max_count=re_count)
         ]
 
@@ -127,7 +126,7 @@ def create_dataset(dataset_path,
             C.Decode(),
             C.Resize(int(256 / 224 * image_size), interpolation=interpolation),
             C.CenterCrop(image_size),
-            C.Normalize(mean=mean, std=std),
+            C.Normalize(mean=mean, std=std, is_hwc=True),
             C.HWC2CHW()
         ]
 
diff --git a/research/cv/mae/src/datasets/imagenet.py b/research/cv/mae/src/datasets/imagenet.py
index 3c6894daa..e39fc40c6 100644
--- a/research/cv/mae/src/datasets/imagenet.py
+++ b/research/cv/mae/src/datasets/imagenet.py
@@ -21,7 +21,7 @@ from PIL import Image
 
 import mindspore.dataset as de
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 
 class DataLoader:
diff --git a/research/cv/meta-baseline/src/data/mini_Imagenet.py b/research/cv/meta-baseline/src/data/mini_Imagenet.py
index 23275afed..da52ceda5 100644
--- a/research/cv/meta-baseline/src/data/mini_Imagenet.py
+++ b/research/cv/meta-baseline/src/data/mini_Imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ MiniImageNet
 import os
 import pickle
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_transforms
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from PIL import Image
 
 
@@ -45,19 +45,19 @@ class MiniImageNet:
         label = [x - min_label for x in label]
 
         image_size = 84
-        normalize = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        normalize = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         if split == 'train':
             self.transforms = Compose([
                 decode,
-                py_transforms.RandomCrop(image_size, padding=4),
-                py_transforms.ToTensor(),
+                vision.RandomCrop(image_size, padding=4),
+                vision.ToTensor(),
                 normalize
             ])
         else:
             self.transforms = Compose([
                 decode,
-                py_transforms.Resize(image_size),
-                py_transforms.ToTensor(),
+                vision.Resize(image_size),
+                vision.ToTensor(),
                 normalize
             ])
         data = [self.transforms(x)[0] for x in data]
diff --git a/research/cv/metric_learn/src/dataset.py b/research/cv/metric_learn/src/dataset.py
index 882363fd0..f2d4b75d4 100644
--- a/research/cv/metric_learn/src/dataset.py
+++ b/research/cv/metric_learn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ create train or eval dataset.
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset as dss
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision import Inter
 
 def create_dataset0(dataset_generator, do_train, batch_size=80, device_num=1, rank_id=0):
diff --git a/research/cv/mnasnet/src/dataset.py b/research/cv/mnasnet/src/dataset.py
index e9797e9ae..191a15a4d 100644
--- a/research/cv/mnasnet/src/dataset.py
+++ b/research/cv/mnasnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/mobilenetV3_small_x1_0/src/dataset.py b/research/cv/mobilenetV3_small_x1_0/src/dataset.py
index 5d3dd274e..626654039 100644
--- a/research/cv/mobilenetV3_small_x1_0/src/dataset.py
+++ b/research/cv/mobilenetV3_small_x1_0/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import multiprocessing
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/mobilenetv3_large/src/dataset.py b/research/cv/mobilenetv3_large/src/dataset.py
index 49ebdacc5..eb28ad6ea 100644
--- a/research/cv/mobilenetv3_large/src/dataset.py
+++ b/research/cv/mobilenetv3_large/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 """Create train or eval dataset."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import get_rank, get_group_size
 
 def create_dataset(dataset_path, do_train, config, repeat_num=1, batch_size=32, run_distribute=True):
diff --git a/research/cv/nas-fpn/src/dataset.py b/research/cv/nas-fpn/src/dataset.py
index 36c018347..239d7de55 100644
--- a/research/cv/nas-fpn/src/dataset.py
+++ b/research/cv/nas-fpn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import os
 import numpy as np
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.model_utils.config import config
 from src.box_utils import jaccard_numpy, retinanet_bboxes_encode
diff --git a/research/cv/nima_vgg16/src/MyDataset.py b/research/cv/nima_vgg16/src/MyDataset.py
index 36b8e9b80..39f7bb1d5 100644
--- a/research/cv/nima_vgg16/src/MyDataset.py
+++ b/research/cv/nima_vgg16/src/MyDataset.py
@@ -20,9 +20,9 @@ import cv2
 import numpy as np
 from mindspore import dataset as ds
 from mindspore import dtype as mstype
-from mindspore.dataset.transforms import c_transforms as t_ct
+from mindspore.dataset.transforms import transforms as t_ct
 from mindspore.dataset.vision import Inter
-from mindspore.dataset.vision import c_transforms as v_ct
+from mindspore.dataset.vision import transforms as v_ct
 
 
 class Dataset:
diff --git a/research/cv/ntsnet/src/dataset.py b/research/cv/ntsnet/src/dataset.py
index 9e0d068d0..6b1d0d017 100644
--- a/research/cv/ntsnet/src/dataset.py
+++ b/research/cv/ntsnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 
 """ntsnet dataset"""
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision import Inter
 
 
diff --git a/research/cv/ntsnet/src/dataset_gpu.py b/research/cv/ntsnet/src/dataset_gpu.py
index a33d39d76..daab745cb 100644
--- a/research/cv/ntsnet/src/dataset_gpu.py
+++ b/research/cv/ntsnet/src/dataset_gpu.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 import os
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision import Inter
 
 from src.config_gpu import config
diff --git a/research/cv/osnet/model_utils/transforms.py b/research/cv/osnet/model_utils/transforms.py
index 779d5e89f..d1fd32e60 100644
--- a/research/cv/osnet/model_utils/transforms.py
+++ b/research/cv/osnet/model_utils/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License Version 2.0(the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@
 
 import math
 import random
-from mindspore.dataset.vision.c_transforms import Resize, Rescale, Normalize, HWC2CHW, RandomHorizontalFlip
-from mindspore.dataset.transforms.c_transforms import Compose
+from mindspore.dataset.vision import Resize, Rescale, Normalize, HWC2CHW, RandomHorizontalFlip
+from mindspore.dataset.transforms import Compose
 
 
 class RandomErasing():
diff --git a/research/cv/pcb_rpp/src/dataset.py b/research/cv/pcb_rpp/src/dataset.py
index 6cc911977..04214054a 100644
--- a/research/cv/pcb_rpp/src/dataset.py
+++ b/research/cv/pcb_rpp/src/dataset.py
@@ -24,9 +24,9 @@ import numpy as np
 from mindspore import dataset as ds
 from mindspore.common import dtype as mstype
 from mindspore.communication.management import init, get_rank, get_group_size
-from mindspore.dataset.transforms import c_transforms as C2
+from mindspore.dataset.transforms import transforms as C2
 from mindspore.dataset.vision import Inter
-from mindspore.dataset.vision import c_transforms as C
+from mindspore.dataset.vision import transforms as C
 from mindspore.mindrecord import FileWriter
 
 from src import datasets
diff --git a/research/cv/pnasnet/src/dataset.py b/research/cv/pnasnet/src/dataset.py
index e991ae495..e3c8adc1a 100644
--- a/research/cv/pnasnet/src/dataset.py
+++ b/research/cv/pnasnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 def create_dataset(dataset_path, do_train, rank, group_size,
                    num_parallel_workers=8, batch_size=128,
diff --git a/research/cv/proxylessnas/src/dataset.py b/research/cv/proxylessnas/src/dataset.py
index f8e335f71..1bab7079f 100644
--- a/research/cv/proxylessnas/src/dataset.py
+++ b/research/cv/proxylessnas/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 def create_dataset(dataset_path, do_train, rank, group_size,
                    num_parallel_workers=8, batch_size=128,
diff --git a/research/cv/ras/src/dataset_test.py b/research/cv/ras/src/dataset_test.py
index 4a6b64138..53e0d2462 100644
--- a/research/cv/ras/src/dataset_test.py
+++ b/research/cv/ras/src/dataset_test.py
@@ -1,5 +1,5 @@
 """
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 
 import os
 import numpy as np
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as ds
 from PIL import Image
 
diff --git a/research/cv/ras/src/dataset_train.py b/research/cv/ras/src/dataset_train.py
index 9e6770e53..73ae09830 100644
--- a/research/cv/ras/src/dataset_train.py
+++ b/research/cv/ras/src/dataset_train.py
@@ -1,5 +1,5 @@
 """
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 
 import os
 import numpy as np
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as ds
 from mindspore.communication import get_rank, get_group_size
 from PIL import Image
diff --git a/research/cv/rcnn/eval.py b/research/cv/rcnn/eval.py
index d16327831..cc34f042d 100644
--- a/research/cv/rcnn/eval.py
+++ b/research/cv/rcnn/eval.py
@@ -25,7 +25,7 @@ from operator import itemgetter
 import cv2
 import mindspore
 import mindspore.dataset
-import mindspore.dataset.vision.c_transforms as c_trans
+import mindspore.dataset.vision as c_trans
 from mindspore import load_param_into_net, load_checkpoint, ops
 import numpy as np
 from tqdm import tqdm
diff --git a/research/cv/relationnet/src/dataset.py b/research/cv/relationnet/src/dataset.py
index 02c86b0a3..516f3c3b3 100644
--- a/research/cv/relationnet/src/dataset.py
+++ b/research/cv/relationnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import random
 import os
 from PIL import Image
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore import Tensor
 
 
@@ -176,8 +176,8 @@ class ClassBalancedSampler():
 def get_data_loader(task, num_per_class=1, split='train', shuffle=True, rotation=0, flip=None):
     '''get dataloader'''
     mean, std = [0.92206], [0.08426]
-    transform = Compose([py_vision.ToTensor(),  # numpy HWC-> Tensor CHW
-                         py_vision.Normalize(mean=mean, std=std)])
+    transform = Compose([vision.ToTensor(),  # numpy HWC-> Tensor CHW
+                         vision.Normalize(mean=mean, std=std, is_hwc=False)])
 
     dataset = Omniglot(task, split=split, transform=transform, rotation=rotation, flip=flip)
     if split == 'train':
diff --git a/research/cv/renas/src/dataset.py b/research/cv/renas/src/dataset.py
index c411d5b70..899927a4c 100644
--- a/research/cv/renas/src/dataset.py
+++ b/research/cv/renas/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,12 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as vision
 
 
 # values that should remain constant
@@ -55,24 +53,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -121,16 +119,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
@@ -176,9 +174,9 @@ def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=Non
     random_horizontal_op = vision.RandomHorizontalFlip()
     resize_op = vision.Resize((resize_height, resize_width))  # interpolation default BILINEAR
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
-    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
+    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=True)
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if training:
diff --git a/research/cv/repvgg/src/data/imagenet.py b/research/cv/repvgg/src/data/imagenet.py
index fabaef259..c8a6762f2 100644
--- a/research/cv/repvgg/src/data/imagenet.py
+++ b/research/cv/repvgg/src/data/imagenet.py
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import pil_interp, rand_augment_transform
@@ -93,13 +92,13 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.PILCUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         if auto_augment != "None":
             transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/res2net/src/dataset.py b/research/cv/res2net/src/dataset.py
index 3959fd266..8b4ee28f5 100644
--- a/research/cv/res2net/src/dataset.py
+++ b/research/cv/res2net/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import multiprocessing
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32, train_image_size=224, eval_image_size=224,
diff --git a/research/cv/res2net/src/dataset_infer.py b/research/cv/res2net/src/dataset_infer.py
index 98114b7c7..1b894af18 100644
--- a/research/cv/res2net/src/dataset_infer.py
+++ b/research/cv/res2net/src/dataset_infer.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 from src.model_utils.config import config
 
diff --git a/research/cv/res2net_faster_rcnn/src/dataset.py b/research/cv/res2net_faster_rcnn/src/dataset.py
index 090452458..7e7c421b7 100644
--- a/research/cv/res2net_faster_rcnn/src/dataset.py
+++ b/research/cv/res2net_faster_rcnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ from numpy import random
 
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 
diff --git a/research/cv/res2net_yolov3/src/yolo_dataset.py b/research/cv/res2net_yolov3/src/yolo_dataset.py
index 6dac2bad3..b97fd5e83 100644
--- a/research/cv/res2net_yolov3/src/yolo_dataset.py
+++ b/research/cv/res2net_yolov3/src/yolo_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from PIL import Image
 import numpy as np
 from pycocotools.coco import COCO
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 from src.distributed_sampler import DistributedSampler
 from src.transforms import reshape_fn, MultiScaleTrans
diff --git a/research/cv/resnet3d/src/dataset.py b/research/cv/resnet3d/src/dataset.py
index 4bfffda6b..f5034cee2 100644
--- a/research/cv/resnet3d/src/dataset.py
+++ b/research/cv/resnet3d/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import os
 
 import mindspore.dataset as ds
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import get_rank, get_group_size
 
 from .videodataset import DatasetGenerator
diff --git a/research/cv/resnet3d/src/pil_transforms.py b/research/cv/resnet3d/src/pil_transforms.py
index 270a3e3fb..cfd0f0fa7 100644
--- a/research/cv/resnet3d/src/pil_transforms.py
+++ b/research/cv/resnet3d/src/pil_transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ transforms by PIL.
 """
 import numpy as np
 
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 
 class PILTrans:
@@ -27,16 +27,16 @@ class PILTrans:
 
     def __init__(self, opt, mean, std):
         super(PILTrans).__init__()
-        self.to_pil = py_trans.ToPIL()
+        self.to_pil = vision.ToPIL()
         self.random_resized_crop = \
-            py_trans.RandomResizedCrop(opt.sample_size, scale=(opt.train_crop_min_scale, 1.0),
-                                       ratio=(opt.train_crop_min_ratio, 1.0 / opt.train_crop_min_ratio))
-        self.random_horizontal_flip = py_trans.RandomHorizontalFlip(prob=0.5)
-        self.color = py_trans.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)
-        self.normalize = py_trans.Normalize(mean=mean, std=std)
-        self.to_tensor = py_trans.ToTensor()
-        self.resize = py_trans.Resize(opt.sample_size)
-        self.center_crop = py_trans.CenterCrop(opt.sample_size)
+            vision.RandomResizedCrop(opt.sample_size, scale=(opt.train_crop_min_scale, 1.0),
+                                     ratio=(opt.train_crop_min_ratio, 1.0 / opt.train_crop_min_ratio))
+        self.random_horizontal_flip = vision.RandomHorizontalFlip(prob=0.5)
+        self.color = vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)
+        self.normalize = vision.Normalize(mean=mean, std=std, is_hwc=False)
+        self.to_tensor = vision.ToTensor()
+        self.resize = vision.Resize(opt.sample_size)
+        self.center_crop = vision.CenterCrop(opt.sample_size)
         self.opt = opt
 
     def __call__(self, data, labels, batchInfo):
@@ -72,11 +72,11 @@ class EvalPILTrans:
 
     def __init__(self, opt, mean, std):
         super(EvalPILTrans).__init__()
-        self.to_pil = py_trans.ToPIL()
-        self.resize = py_trans.Resize(opt.sample_size)
-        self.center_crop = py_trans.CenterCrop(opt.sample_size)
-        self.normalize = py_trans.Normalize(mean=mean, std=std)
-        self.to_tensor = py_trans.ToTensor()
+        self.to_pil = vision.ToPIL()
+        self.resize = vision.Resize(opt.sample_size)
+        self.center_crop = vision.CenterCrop(opt.sample_size)
+        self.normalize = vision.Normalize(mean=mean, std=std, is_hwc=False)
+        self.to_tensor = vision.ToTensor()
 
     def __call__(self, data, labels, batchInfo):
         data = data[0]
diff --git a/research/cv/resnet50_adv_pruning/src/pet_dataset.py b/research/cv/resnet50_adv_pruning/src/pet_dataset.py
index 8eb9eaecc..de6adced7 100644
--- a/research/cv/resnet50_adv_pruning/src/pet_dataset.py
+++ b/research/cv/resnet50_adv_pruning/src/pet_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,10 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.transforms.py_transforms as P2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision import Inter
 
 
@@ -74,12 +72,12 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
     change_swap_op = C.HWC2CHW()
 
     # define python operations
-    decode_p = P.Decode()
-    resize_p = P.Resize(256, interpolation=Inter.BILINEAR)
-    center_crop_p = P.CenterCrop(224)
-    totensor = P.ToTensor()
-    normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
-    composeop = P2.Compose(
+    decode_p = C.Decode(True)
+    resize_p = C.Resize(256, interpolation=Inter.BILINEAR)
+    center_crop_p = C.CenterCrop(224)
+    totensor = C.ToTensor()
+    normalize_p = C.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
+    composeop = C2.Compose(
         [decode_p, resize_p, center_crop_p, totensor, normalize_p])
     if do_train:
         trans = [resize_crop_op, horizontal_flip_op, color_op,
diff --git a/research/cv/resnet50_bam/src/dataset.py b/research/cv/resnet50_bam/src/dataset.py
index db662754f..6ec6428d7 100644
--- a/research/cv/resnet50_bam/src/dataset.py
+++ b/research/cv/resnet50_bam/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.config import imagenet_cfg
 
 
diff --git a/research/cv/resnetv2/src/dataset.py b/research/cv/resnetv2/src/dataset.py
index 784e564e9..cb3ff1d68 100644
--- a/research/cv/resnetv2/src/dataset.py
+++ b/research/cv/resnetv2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset1(dataset_path, do_train=True, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
diff --git a/research/cv/resnetv2_50_frn/src/dataset.py b/research/cv/resnetv2_50_frn/src/dataset.py
index 97221b5b5..b1166b11c 100644
--- a/research/cv/resnetv2_50_frn/src/dataset.py
+++ b/research/cv/resnetv2_50_frn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 def create_dataset(dataset_path, do_train, rank, group_size,
                    num_parallel_workers=8, batch_size=128,
diff --git a/research/cv/resnext152_64x4d/src/dataset.py b/research/cv/resnext152_64x4d/src/dataset.py
index 712bd775a..b7f1534e6 100644
--- a/research/cv/resnext152_64x4d/src/dataset.py
+++ b/research/cv/resnext152_64x4d/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ dataset processing.
 import os
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from PIL import Image, ImageFile
 from src.utils.sampler import DistributedSampler
 
diff --git a/research/cv/retinanet_resnet101/src/dataset.py b/research/cv/retinanet_resnet101/src/dataset.py
index 375f5337b..d86f6d8fd 100644
--- a/research/cv/retinanet_resnet101/src/dataset.py
+++ b/research/cv/retinanet_resnet101/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .model_utils.config import config
 from .box_utils import jaccard_numpy, retinanet_bboxes_encode
diff --git a/research/cv/retinanet_resnet152/src/dataset.py b/research/cv/retinanet_resnet152/src/dataset.py
index f1dad5f44..f9f186612 100644
--- a/research/cv/retinanet_resnet152/src/dataset.py
+++ b/research/cv/retinanet_resnet152/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .model_utils.config import config
 from .box_utils import jaccard_numpy, retinanet_bboxes_encode
diff --git a/research/cv/rfcn/src/dataset.py b/research/cv/rfcn/src/dataset.py
index a148b7fd3..dcc009b19 100644
--- a/research/cv/rfcn/src/dataset.py
+++ b/research/cv/rfcn/src/dataset.py
@@ -23,7 +23,7 @@ from numpy import random
 import cv2
 from PIL import Image
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
diff --git a/research/cv/simple_baselines/src/dataset.py b/research/cv/simple_baselines/src/dataset.py
index 84796c054..af06907cb 100644
--- a/research/cv/simple_baselines/src/dataset.py
+++ b/research/cv/simple_baselines/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.utils.transforms import fliplr_joints, get_affine_transform, affine_transform
 
 ds.config.set_seed(1) # Set Random Seed
diff --git a/research/cv/single_path_nas/src/dataset.py b/research/cv/single_path_nas/src/dataset.py
index ac51ad69e..eac12de24 100644
--- a/research/cv/single_path_nas/src/dataset.py
+++ b/research/cv/single_path_nas/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.config import imagenet_cfg
 
diff --git a/research/cv/sknet/src/dataset.py b/research/cv/sknet/src/dataset.py
index 7a67fb74a..611a93736 100644
--- a/research/cv/sknet/src/dataset.py
+++ b/research/cv/sknet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 from mindspore.communication.management import get_group_size, get_rank, init
 
 
diff --git a/research/cv/squeezenet/src/dataset.py b/research/cv/squeezenet/src/dataset.py
index 38eaef30f..e778661b4 100644
--- a/research/cv/squeezenet/src/dataset.py
+++ b/research/cv/squeezenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/squeezenet1_1/src/dataset.py b/research/cv/squeezenet1_1/src/dataset.py
index 81bf48ced..033f8a699 100644
--- a/research/cv/squeezenet1_1/src/dataset.py
+++ b/research/cv/squeezenet1_1/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset of imagenet and cifar10.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 def create_dataset_imagenet(dataset_path,
                             do_train,
diff --git a/research/cv/ssc_resnet50/src/dataset.py b/research/cv/ssc_resnet50/src/dataset.py
index 3ef167ceb..b714dbee4 100644
--- a/research/cv/ssc_resnet50/src/dataset.py
+++ b/research/cv/ssc_resnet50/src/dataset.py
@@ -23,8 +23,8 @@ import logging
 import numpy as np
 from PIL import Image
 from PIL import ImageFile
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_trans
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.ops
 import mindspore.dataset as de
 
@@ -91,12 +91,12 @@ class CoMatchDatasetImageNet:
         self.samples = samples
         logging.info("sample len: %d", len(self.samples))
 
-        self.random_resize_crop = py_vision.RandomResizedCrop(224, scale=(0.2, 1.))
-        self.random_horizontal_flip = py_vision.RandomHorizontalFlip()
-        self.to_tensor = py_vision.ToTensor()
-        self.normalize = py_vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-        self.random_apply = py_trans.RandomApply([py_vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)], prob=0.8)
-        self.random_grayscale = py_vision.RandomGrayscale(prob=0.2)
+        self.random_resize_crop = vision.RandomResizedCrop(224, scale=(0.2, 1.))
+        self.random_horizontal_flip = vision.RandomHorizontalFlip()
+        self.to_tensor = vision.ToTensor()
+        self.normalize = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
+        self.random_apply = data_trans.RandomApply([vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)], prob=0.8)
+        self.random_grayscale = vision.RandomGrayscale(prob=0.2)
 
         self.unlable_randomaugmentMC = RandAugmentMC(int(args.unlabel_randomaug_count),
                                                      int(args.unlabel_randomaug_intensity))
@@ -297,10 +297,10 @@ class CoMatchDatasetImageNetTest:
         logging.info("sample len: %d", len(self.samples))
 
         # for test
-        self.resize = py_vision.Resize(256)
-        self.center_crop = py_vision.CenterCrop(224)
-        self.to_tensor = py_vision.ToTensor()
-        self.normalize = py_vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        self.resize = vision.Resize(256)
+        self.center_crop = vision.CenterCrop(224)
+        self.to_tensor = vision.ToTensor()
+        self.normalize = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
 
     def __getitem__(self, index):
         """
@@ -362,11 +362,11 @@ class CoMatchSelectSample:
         self.samples = samples
 
         # for test
-        self.random_resize_crop = py_vision.RandomResizedCrop(224, scale=(0.2, 1.))
-        self.random_horizontal_flip = py_vision.RandomHorizontalFlip()
+        self.random_resize_crop = vision.RandomResizedCrop(224, scale=(0.2, 1.))
+        self.random_horizontal_flip = vision.RandomHorizontalFlip()
 
-        self.to_tensor = py_vision.ToTensor()
-        self.normalize = py_vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        self.to_tensor = vision.ToTensor()
+        self.normalize = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
 
     def __getitem__(self, index):
         """
diff --git a/research/cv/ssd_ghostnet/src/dataset.py b/research/cv/ssd_ghostnet/src/dataset.py
index 0350fe910..8eb7fa4dc 100644
--- a/research/cv/ssd_ghostnet/src/dataset.py
+++ b/research/cv/ssd_ghostnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C2
+import mindspore.dataset.vision as C2
 from mindspore.mindrecord import FileWriter
 from src.model_utils.config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_inception_v2/src/dataset.py b/research/cv/ssd_inception_v2/src/dataset.py
index 0c08dbae5..ebeaf3774 100644
--- a/research/cv/ssd_inception_v2/src/dataset.py
+++ b/research/cv/ssd_inception_v2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ import xml.etree.ElementTree as et
 import cv2
 import numpy as np
 from mindspore import dataset as de
-from mindspore.dataset.vision import c_transforms as C
+from mindspore.dataset.vision import transforms as C
 from mindspore.mindrecord import FileWriter
 
 from src.model_utils.config import config
diff --git a/research/cv/ssd_inceptionv2/src/dataset.py b/research/cv/ssd_inceptionv2/src/dataset.py
index d6aa4ee1b..1a0d2cb71 100644
--- a/research/cv/ssd_inceptionv2/src/dataset.py
+++ b/research/cv/ssd_inceptionv2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_mobilenetV2/src/dataset.py b/research/cv/ssd_mobilenetV2/src/dataset.py
index 9b665fa06..c3ab4b7fa 100644
--- a/research/cv/ssd_mobilenetV2/src/dataset.py
+++ b/research/cv/ssd_mobilenetV2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_mobilenetV2_FPNlite/src/dataset.py b/research/cv/ssd_mobilenetV2_FPNlite/src/dataset.py
index 1e55f9010..f0a96bb84 100644
--- a/research/cv/ssd_mobilenetV2_FPNlite/src/dataset.py
+++ b/research/cv/ssd_mobilenetV2_FPNlite/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ import cv2
 from tqdm import tqdm
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.model_utils.config import config as cfg
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_resnet34/src/dataset.py b/research/cv/ssd_resnet34/src/dataset.py
index 5b2a64255..26d042dec 100644
--- a/research/cv/ssd_resnet34/src/dataset.py
+++ b/research/cv/ssd_resnet34/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_resnet50/src/dataset.py b/research/cv/ssd_resnet50/src/dataset.py
index a102b4729..eaed11ad0 100644
--- a/research/cv/ssd_resnet50/src/dataset.py
+++ b/research/cv/ssd_resnet50/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_resnet_34/src/dataset.py b/research/cv/ssd_resnet_34/src/dataset.py
index 5df5d78df..d6f878705 100644
--- a/research/cv/ssd_resnet_34/src/dataset.py
+++ b/research/cv/ssd_resnet_34/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ from xml.etree import ElementTree
 import cv2
 import numpy as np
 from mindspore import dataset as ds
-from mindspore.dataset.vision import c_transforms as C
+from mindspore.dataset.vision import transforms as C
 from mindspore.mindrecord import FileWriter
 
 from .box_utils import jaccard_numpy
diff --git a/research/cv/stpm/src/dataset.py b/research/cv/stpm/src/dataset.py
index e8e337855..6bb77547c 100644
--- a/research/cv/stpm/src/dataset.py
+++ b/research/cv/stpm/src/dataset.py
@@ -21,8 +21,8 @@ import numpy as np
 from PIL import Image
 import mindspore.dataset as ds
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 
 
 class MVTecDataset():
@@ -113,15 +113,15 @@ def createDataset(dataset_path, category, save_sample=False, out_size=256, train
     std = [0.229, 0.224, 0.225]
 
     data_transforms = Compose([
-        py_vision.Resize((out_size, out_size), interpolation=Inter.ANTIALIAS),
-        py_vision.CenterCrop(out_size),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=mean, std=std)
+        vision.Resize((out_size, out_size), interpolation=Inter.ANTIALIAS),
+        vision.CenterCrop(out_size),
+        vision.ToTensor(),
+        vision.Normalize(mean=mean, std=std, is_hwc=False)
     ])
     gt_transforms = Compose([
-        py_vision.Resize((out_size, out_size)),
-        py_vision.CenterCrop(out_size),
-        py_vision.ToTensor()
+        vision.Resize((out_size, out_size)),
+        vision.CenterCrop(out_size),
+        vision.ToTensor()
     ])
 
     train_data = MVTecDataset(root=os.path.join(dataset_path, category),
diff --git a/research/cv/swin_transformer/src/data/imagenet.py b/research/cv/swin_transformer/src/data/imagenet.py
index 522159871..f1883aef7 100644
--- a/research/cv/swin_transformer/src/data/imagenet.py
+++ b/research/cv/swin_transformer/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import _pil_interp, rand_augment_transform
@@ -94,12 +93,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.BICUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
@@ -111,14 +110,14 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
                 vision.Decode(),
                 vision.Resize(int(256 / 224 * image_size), interpolation=Inter.BICUBIC),
                 vision.CenterCrop(image_size),
-                vision.Normalize(mean=mean, std=std),
+                vision.Normalize(mean=mean, std=std, is_hwc=True),
                 vision.HWC2CHW()
             ]
         else:
             transform_img = [
                 vision.Decode(),
                 vision.Resize(int(image_size), interpolation=Inter.BICUBIC),
-                vision.Normalize(mean=mean, std=std),
+                vision.Normalize(mean=mean, std=std, is_hwc=True),
                 vision.HWC2CHW()
             ]
 
diff --git a/research/cv/textfusenet/src/dataset.py b/research/cv/textfusenet/src/dataset.py
index 47479f06b..a406408b3 100755
--- a/research/cv/textfusenet/src/dataset.py
+++ b/research/cv/textfusenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ from numpy import random
 import cv2
 import mmcv
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 from .model_utils.config import config
diff --git a/research/cv/tinynet/src/dataset.py b/research/cv/tinynet/src/dataset.py
index cf49192e7..b2c001d54 100644
--- a/research/cv/tinynet/src/dataset.py
+++ b/research/cv/tinynet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,9 +17,8 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
@@ -53,24 +52,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -119,16 +118,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
diff --git a/research/cv/tracktor/src/dataset.py b/research/cv/tracktor/src/dataset.py
index 0e2afa850..d63c6b1f9 100644
--- a/research/cv/tracktor/src/dataset.py
+++ b/research/cv/tracktor/src/dataset.py
@@ -24,7 +24,7 @@ import os.path as osp
 import cv2
 import mindspore as ms
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 import numpy as np
 from numpy import random
diff --git a/research/cv/u2net/src/data_loader.py b/research/cv/u2net/src/data_loader.py
index 66eb91711..0f0c8e05e 100644
--- a/research/cv/u2net/src/data_loader.py
+++ b/research/cv/u2net/src/data_loader.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ from skimage import io, transform, color
 from mindspore import context
 from mindspore import dataset as ds
 from mindspore.common import dtype as mstype
-import mindspore.dataset.transforms.c_transforms as CC
+import mindspore.dataset.transforms as CC
 from mindspore.context import ParallelMode
 from mindspore.communication.management import get_rank, get_group_size
 
diff --git a/research/cv/vgg19/src/dataset.py b/research/cv/vgg19/src/dataset.py
index 93772b806..d6af839cb 100644
--- a/research/cv/vgg19/src/dataset.py
+++ b/research/cv/vgg19/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.utils.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/research/cv/vit_base/src/dataset.py b/research/cv/vit_base/src/dataset.py
index 645c594c5..75d969c7f 100644
--- a/research/cv/vit_base/src/dataset.py
+++ b/research/cv/vit_base/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
 
diff --git a/research/cv/wave_mlp/src/dataset.py b/research/cv/wave_mlp/src/dataset.py
index 89c44b541..9449268f6 100644
--- a/research/cv/wave_mlp/src/dataset.py
+++ b/research/cv/wave_mlp/src/dataset.py
@@ -17,15 +17,14 @@ import os
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as pytrans
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.transforms as transforms
 
-from mindspore.dataset.transforms.py_transforms import Compose
-import mindspore.dataset.vision.c_transforms as C
+from mindspore.dataset.transforms.transforms import Compose
+import mindspore.dataset.vision as C
 
 
-class ToNumpy(py_transforms.PyTensorOperation):
+class ToNumpy(transforms.PyTensorOperation):
 
     def __init__(self, output_type=np.float32):
         self.output_type = output_type
@@ -81,13 +80,13 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=128):
         ]
     else:
         trans = [
-            pytrans.Decode(),
-            pytrans.Resize(235),
-            pytrans.CenterCrop(224)
+            C.Decode(True),
+            C.Resize(235),
+            C.CenterCrop(224)
         ]
     trans += [
-        pytrans.ToTensor(),
-        pytrans.Normalize(mean=mean, std=std),
+        C.ToTensor(),
+        C.Normalize(mean=mean, std=std, is_hwc=False),
     ]
     trans = Compose(trans)
 
diff --git a/research/cv/wgan/src/dataset.py b/research/cv/wgan/src/dataset.py
index 6168e0752..de2c14e7b 100644
--- a/research/cv/wgan/src/dataset.py
+++ b/research/cv/wgan/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 
 """ dataset """
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as c
+import mindspore.dataset.transforms as C2
 import mindspore.common.dtype as mstype
 
 
diff --git a/research/cv/wideresnet/src/dataset.py b/research/cv/wideresnet/src/dataset.py
index 2617a656b..ea4fcef9f 100644
--- a/research/cv/wideresnet/src/dataset.py
+++ b/research/cv/wideresnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ Data operations, will be used in train.py and eval.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/yolov3_tiny/src/transforms.py b/research/cv/yolov3_tiny/src/transforms.py
index 8a483f822..3417ba8cc 100644
--- a/research/cv/yolov3_tiny/src/transforms.py
+++ b/research/cv/yolov3_tiny/src/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import copy
 import random
 
 import cv2
-import mindspore.dataset.vision.py_transforms as PV
+import mindspore.dataset.vision as vision
 import numpy as np
 from PIL import Image
 
@@ -566,6 +566,6 @@ class MultiScaleTrans:
 
     def __call__(self, img, anno, input_size, mosaic_flag):
         if mosaic_flag[0] == 0:
-            img = PV.Decode()(img)
+            img = vision.Decode(True)(img)
         img, anno = preprocess_fn(img, anno, self.config, input_size, self.device_num)
         return img, anno, np.array(img.shape[0:2])
diff --git a/research/cv/yolov3_tiny/src/yolo_dataset.py b/research/cv/yolov3_tiny/src/yolo_dataset.py
index 1678f9d7e..fd0a25f28 100644
--- a/research/cv/yolov3_tiny/src/yolo_dataset.py
+++ b/research/cv/yolov3_tiny/src/yolo_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import random
 
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 import numpy as np
 from PIL import Image
 from pycocotools.coco import COCO
diff --git a/research/mm/wukong/src/dataset/dataset.py b/research/mm/wukong/src/dataset/dataset.py
index 4fef4bd98..a15635b4c 100644
--- a/research/mm/wukong/src/dataset/dataset.py
+++ b/research/mm/wukong/src/dataset/dataset.py
@@ -15,8 +15,8 @@
 from mindspore import dtype as mstype
 import mindspore.dataset as ds
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 
 def get_wukong_dataset(dataset_path, columns_list, num_parallel_workers, shuffle, num_shards, shard_id, batch_size):
diff --git a/research/nlp/DYR/src/dataset.py b/research/nlp/DYR/src/dataset.py
index 9ec0212c7..3e35e7cd1 100644
--- a/research/nlp/DYR/src/dataset.py
+++ b/research/nlp/DYR/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import random
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 # samples in one block
 POS_SIZE = 1
diff --git a/research/nlp/albert/src/dataset.py b/research/nlp/albert/src/dataset.py
index dec0680de..37549dae2 100644
--- a/research/nlp/albert/src/dataset.py
+++ b/research/nlp/albert/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import math
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 
 
diff --git a/research/nlp/gpt2/src/dataset.py b/research/nlp/gpt2/src/dataset.py
index 7435c52b6..44984954d 100644
--- a/research/nlp/gpt2/src/dataset.py
+++ b/research/nlp/gpt2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Data operations"""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 from .finetune_eval_config import gpt2_net_cfg
 
diff --git a/research/nlp/hypertext/src/dataset.py b/research/nlp/hypertext/src/dataset.py
index a25135520..4479d240b 100644
--- a/research/nlp/hypertext/src/dataset.py
+++ b/research/nlp/hypertext/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import random
 from datetime import timedelta
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 MAX_VOCAB_SIZE = 5000000
diff --git a/research/nlp/ktnet/src/dataset.py b/research/nlp/ktnet/src/dataset.py
index b2f739e59..66ee89773 100644
--- a/research/nlp/ktnet/src/dataset.py
+++ b/research/nlp/ktnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 
 def create_train_dataset(data_file=None, do_shuffle=True, device_num=1, rank=0, batch_size=1,
diff --git a/research/nlp/luke/src/reading_comprehension/dataLoader.py b/research/nlp/luke/src/reading_comprehension/dataLoader.py
index 95494be55..73a3fc345 100644
--- a/research/nlp/luke/src/reading_comprehension/dataLoader.py
+++ b/research/nlp/luke/src/reading_comprehension/dataLoader.py
@@ -32,7 +32,7 @@ def create_dataset(data_file=None, do_shuffle=True, device_num=1, rank=0, batch_
                                            "start_positions", "end_positions"],
                              shuffle=do_shuffle, num_shards=device_num, shard_id=rank,
                              num_samples=num, num_parallel_workers=num_parallel_workers)
-    type_int32 = C.c_transforms.TypeCast(mstype.int32)
+    type_int32 = C.TypeCast(mstype.int32)
     dataset = dataset.map(operations=type_int32, input_columns="word_ids")
     dataset = dataset.map(operations=type_int32, input_columns="word_segment_ids")
     dataset = dataset.map(operations=type_int32, input_columns="word_attention_mask")
@@ -57,7 +57,7 @@ def create_eval_dataset(data_file=None, do_shuffle=True, device_num=1, rank=0, b
                                            "example_indices"],
                              shuffle=do_shuffle, num_shards=device_num, shard_id=rank,
                              num_samples=num, num_parallel_workers=num_parallel_workers)
-    type_int32 = C.c_transforms.TypeCast(mstype.int32)
+    type_int32 = C.TypeCast(mstype.int32)
     dataset = dataset.map(operations=type_int32, input_columns="word_ids")
     dataset = dataset.map(operations=type_int32, input_columns="word_segment_ids")
     dataset = dataset.map(operations=type_int32, input_columns="word_attention_mask")
diff --git a/research/nlp/seq2seq/src/dataset/load_dataset.py b/research/nlp/seq2seq/src/dataset/load_dataset.py
index 5843e0a31..5ada4bd0b 100644
--- a/research/nlp/seq2seq/src/dataset/load_dataset.py
+++ b/research/nlp/seq2seq/src/dataset/load_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Dataset loader to feed into model."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 
 def _load_dataset(input_files, batch_size, sink_mode=False,
diff --git a/research/recommend/mmoe/src/load_dataset.py b/research/recommend/mmoe/src/load_dataset.py
index 602a26d72..650df81c1 100644
--- a/research/recommend/mmoe/src/load_dataset.py
+++ b/research/recommend/mmoe/src/load_dataset.py
@@ -16,7 +16,7 @@
 import os
 import mindspore.dataset as de
 from mindspore.communication.management import get_rank, get_group_size
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 
diff --git a/utils/model_scaffolding/example/src/dataset.py b/utils/model_scaffolding/example/src/dataset.py
index f659d158a..83d20a7be 100644
--- a/utils/model_scaffolding/example/src/dataset.py
+++ b/utils/model_scaffolding/example/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 
 """Dataset"""
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as CV
+import mindspore.dataset.transforms as C
 from mindspore.dataset.vision import Inter
 from mindspore.common import dtype as mstype
 
-- 
GitLab