diff --git a/.jenkins/check/config/filter_linklint.txt b/.jenkins/check/config/filter_linklint.txt
index bbd5911cd0e8ee2f07f021bc1eb3d14c872b6bd4..e5a98919a968d255fb35243b300b9815247ab0a6 100644
--- a/.jenkins/check/config/filter_linklint.txt
+++ b/.jenkins/check/config/filter_linklint.txt
@@ -1,2 +1,3 @@
 http://www.vision.caltech.edu/visipedia/CUB-200-2011.html
-http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
\ No newline at end of file
+http://dl.yf.io/dla/models/imagenet/dla34-ba72cf86.pth
+https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py_key
diff --git a/.jenkins/check/config/whitelizard.txt b/.jenkins/check/config/whitelizard.txt
index 5f42aed7c1fda63e39354e1615f10d6338beb3e7..a571f39f3f1561273618439722f250b731dbd806 100644
--- a/.jenkins/check/config/whitelizard.txt
+++ b/.jenkins/check/config/whitelizard.txt
@@ -55,4 +55,5 @@ models/research/cvtmodel/resnet_ipl/src/resnet26t.py:__init__
 models/research/cvtmodel/resnet_ipl/src/resnet101d.py:__init__
 models/research/cvtmodel/resnet_ipl/src/resnetrs50.py:__init__
 models/official/audio/lpcnet/ascend310_infer/src/main.cc:main
+models/official/nlp/bert/src/finetune_data_preprocess.py:process_msra
 
diff --git a/benchmark/ascend/bert/src/dataset.py b/benchmark/ascend/bert/src/dataset.py
index a611c1ef5629d2d2b8f87375422474cbc9ef855d..433ebe2cc5d92dce6b133c9125212b5a7a09ee5b 100644
--- a/benchmark/ascend/bert/src/dataset.py
+++ b/benchmark/ascend/bert/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import math
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 
 
diff --git a/benchmark/ascend/resnet/src/dataset.py b/benchmark/ascend/resnet/src/dataset.py
index 77809beb7135f291b2b735bda1dcc1426b32e26c..d96526cb3fcc98b7b43563cca64041d9256674e8 100644
--- a/benchmark/ascend/resnet/src/dataset.py
+++ b/benchmark/ascend/resnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -49,18 +49,18 @@ def create_dataset1(dataset_path, do_train, batch_size=32, train_image_size=224,
     trans = []
     if do_train:
         trans += [
-            ds.vision.c_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
 
     trans += [
-        ds.vision.c_transforms.Resize((train_image_size, train_image_size)),
-        ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-        ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-        ds.vision.c_transforms.HWC2CHW()
+        ds.vision.Resize((train_image_size, train_image_size)),
+        ds.vision.Rescale(1.0 / 255.0, 0.0),
+        ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+        ds.vision.HWC2CHW()
     ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=type_cast_op, input_columns="label",
                             num_parallel_workers=get_num_parallel_workers(8))
@@ -115,18 +115,18 @@ def create_dataset2(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size)
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size)
         ]
-    trans_norm = [ds.vision.c_transforms.Normalize(mean=mean, std=std), ds.vision.c_transforms.HWC2CHW()]
+    trans_norm = [ds.vision.Normalize(mean=mean, std=std), ds.vision.HWC2CHW()]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     if device_num == 1:
         trans_work_num = 24
     else:
@@ -187,21 +187,21 @@ def create_dataset_pynative(dataset_path, do_train, batch_size=32, train_image_s
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=4)
     # only enable cache for eval
@@ -253,21 +253,21 @@ def create_dataset3(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(rank_id / (rank_id + 1)),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(rank_id / (rank_id + 1)),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=get_num_parallel_workers(8))
     # only enable cache for eval
@@ -321,21 +321,21 @@ def create_dataset4(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(292),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(292),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=get_num_parallel_workers(12))
     # only enable cache for eval
     if do_train:
diff --git a/benchmark/ascend/resnet/src/dataset_infer.py b/benchmark/ascend/resnet/src/dataset_infer.py
index 5d0a655e88ecfda28ad455515ba4fc17976b3b6d..ce032b1db63c91a8d622b41889b32dc65d5d1ed8 100644
--- a/benchmark/ascend/resnet/src/dataset_infer.py
+++ b/benchmark/ascend/resnet/src/dataset_infer.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -130,21 +130,21 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -202,21 +202,21 @@ def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(rank_id / (rank_id + 1)),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(rank_id / (rank_id + 1)),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -271,21 +271,21 @@ def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(292),
-            ds.vision.c_transforms.CenterCrop(256),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(292),
+            ds.vision.CenterCrop(256),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=12)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)
     if do_train:
diff --git a/official/cv/Deepsort/modelarts/start_train.py b/official/cv/Deepsort/modelarts/start_train.py
index 879d8f232d46213a7bc60851c630ca60ff5e8474..bed1034504e136b59068b5e654aa68bab2c38231 100644
--- a/official/cv/Deepsort/modelarts/start_train.py
+++ b/official/cv/Deepsort/modelarts/start_train.py
@@ -21,7 +21,7 @@ import numpy as np
 import moxing as mox
 import mindspore.nn as nn
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 from mindspore.common import set_seed
 from mindspore.common import dtype as mstype
diff --git a/official/cv/Deepsort/src/deep/train.py b/official/cv/Deepsort/src/deep/train.py
index 3d5e0a2278b5be94ce1ab5efcbfa4dc1ea704971..9488a1f227a7f33c2fe1f9b72c45dd98d916f468 100644
--- a/official/cv/Deepsort/src/deep/train.py
+++ b/official/cv/Deepsort/src/deep/train.py
@@ -16,7 +16,7 @@ import argparse
 import os
 import ast
 import numpy as np
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as ds
 import mindspore.nn as nn
 from mindspore import Tensor, context
@@ -24,7 +24,7 @@ from mindspore.communication.management import init, get_rank
 from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor
 from mindspore.train.model import Model
 from mindspore.context import ParallelMode
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 from mindspore.common import set_seed
 import mindspore.common.dtype as mstype
 from original_model import Net
diff --git a/official/cv/alexnet/src/dataset.py b/official/cv/alexnet/src/dataset.py
index 149ffdb05a403a83c3cb329d37d68a24855ee56e..6fcc88205e7cbd604e5b1c236c52e5f7c105531e 100644
--- a/official/cv/alexnet/src/dataset.py
+++ b/official/cv/alexnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ Produce the dataset
 import os
 from multiprocessing import cpu_count
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 from mindspore.common import dtype as mstype
 from mindspore.communication.management import get_rank, get_group_size
 
diff --git a/official/cv/brdnet/src/dataset.py b/official/cv/brdnet/src/dataset.py
index 9e870625fa77b7a077af525e7a23b43f209a53b0..dbdde6da5fbff6850aa14af597ed0495bf6c3a93 100644
--- a/official/cv/brdnet/src/dataset.py
+++ b/official/cv/brdnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import glob
 import numpy as np
 import PIL.Image as Image
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 class BRDNetDataset:
     """ BRDNetDataset.
diff --git a/official/cv/cnn_direction_model/src/dataset.py b/official/cv/cnn_direction_model/src/dataset.py
index ec1bbdefe0d257ccfa89e15da7b41ba201a2f127..b91ec59855333ce5d9e3bc01671d702d5134e739 100644
--- a/official/cv/cnn_direction_model/src/dataset.py
+++ b/official/cv/cnn_direction_model/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import cv2
 import numpy as np
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.dataset_utils import lucky, noise_blur, noise_speckle, noise_gamma, noise_gaussian, noise_salt_pepper, \
     shift_color, enhance_brightness, enhance_sharpness, enhance_contrast, enhance_color, gaussian_blur, \
     randcrop, resize, rdistort, rgeometry, rotate_about_center, whole_rdistort, warp_perspective, random_contrast, \
diff --git a/official/cv/crnn/src/dataset.py b/official/cv/crnn/src/dataset.py
index 1d07a34f9ad7aab1642f573b2ee114915f78f449..2f07b734ab9b8b2bf343e77e7289be572cc84819 100644
--- a/official/cv/crnn/src/dataset.py
+++ b/official/cv/crnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import numpy as np
 from PIL import Image, ImageFile
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vc
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vc
 from src.model_utils.config import config as config1
 from src.ic03_dataset import IC03Dataset
 from src.ic13_dataset import IC13Dataset
diff --git a/official/cv/crnn_seq2seq_ocr/src/dataset.py b/official/cv/crnn_seq2seq_ocr/src/dataset.py
index 40abc60fd46213668daf82805d98137dd89b3526..9d12f36c2e069254ba937ee17a4c968eb020f134 100644
--- a/official/cv/crnn_seq2seq_ocr/src/dataset.py
+++ b/official/cv/crnn_seq2seq_ocr/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import numpy as np
 from PIL import Image
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.transforms.c_transforms as ops
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as ops
 import mindspore.common.dtype as mstype
 
 from src.model_utils.config import config
@@ -36,7 +35,7 @@ class AugmentationOps():
         self.min_area_ratio = min_area_ratio
         self.aspect_ratio_range = aspect_ratio_range
         self.img_tile_shape = img_tile_shape
-        self.random_image_distortion_ops = P.RandomColorAdjust(brightness=brightness,
+        self.random_image_distortion_ops = C.RandomColorAdjust(brightness=brightness,
                                                                contrast=contrast,
                                                                saturation=saturation,
                                                                hue=hue)
diff --git a/official/cv/cspdarknet53/src/dataset.py b/official/cv/cspdarknet53/src/dataset.py
index 9025cffdd2d7d52bedce4cf1398760f5ee469e61..e1c3c8e8593d2e1afc918175a2dd02c22a782f46 100644
--- a/official/cv/cspdarknet53/src/dataset.py
+++ b/official/cv/cspdarknet53/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from PIL import Image, ImageFile
 from .utils.sampler import DistributedSampler
 
diff --git a/official/cv/ctpn/src/dataset.py b/official/cv/ctpn/src/dataset.py
index a7936800e265af0f7b9c70efc292e0190f9e8dac..dfdf942a4186c66d55f62d6fba20877045d63578 100644
--- a/official/cv/ctpn/src/dataset.py
+++ b/official/cv/ctpn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import numpy as np
 from numpy import random
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as CC
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as CC
 import mindspore.common.dtype as mstype
 from src.model_utils.config import config
 
diff --git a/official/cv/darknet53/src/dataset.py b/official/cv/darknet53/src/dataset.py
index d5bf8dde25bd1357589c92b1a18316c19c61842b..8984090c9585b594bf420eb65fdaf61bc5562063 100644
--- a/official/cv/darknet53/src/dataset.py
+++ b/official/cv/darknet53/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="GPU", distribute=False):
diff --git a/official/cv/deeptext/src/dataset.py b/official/cv/deeptext/src/dataset.py
index 7198f4d8d5e56b0cc3b75161e401a03c98b2e3cb..7e598b96a7625aee96ec0efe37f0b9152f64327b 100644
--- a/official/cv/deeptext/src/dataset.py
+++ b/official/cv/deeptext/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,8 +22,8 @@ from numpy import random
 
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as CC
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as CC
 import mindspore.common.dtype as mstype
 from mindspore.mindrecord import FileWriter
 from model_utils.config import config
diff --git a/official/cv/densenet/src/datasets/classification.py b/official/cv/densenet/src/datasets/classification.py
index 4386899926d9379bc74aa7ec16c149969d4e2b17..84444a6dc51640ad69ee1163b2ddc4b6c4dec7e9 100644
--- a/official/cv/densenet/src/datasets/classification.py
+++ b/official/cv/densenet/src/datasets/classification.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,8 +21,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as vision_C
-import mindspore.dataset.transforms.c_transforms as normal_C
+import mindspore.dataset.vision as vision_C
+import mindspore.dataset.transforms as normal_C
 from src.datasets.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/depthnet/src/data_loader.py b/official/cv/depthnet/src/data_loader.py
index 41ae8f41f82dfe52a8e5be95a116a2ff1dd38346..f5a87c89632b54516698c67b685e6690c59877d4 100644
--- a/official/cv/depthnet/src/data_loader.py
+++ b/official/cv/depthnet/src/data_loader.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ import os
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 import mindspore.dataset as ds
 from mindspore import dtype as mstype
 
diff --git a/official/cv/depthnet/train.py b/official/cv/depthnet/train.py
index 4308d7337dd8e46e94115307a8ff609a1f420f51..aaa80921405843c3f04eeb5d9797d2b5cbfb30c2 100644
--- a/official/cv/depthnet/train.py
+++ b/official/cv/depthnet/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import time
 
 import mindspore.numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 import mindspore as ms
 from mindspore import nn, Tensor, Model
 from mindspore import dtype as mstype
diff --git a/official/cv/dncnn/eval.py b/official/cv/dncnn/eval.py
index 14683113fbfe0f3f255e4e75305111886b4409b4..dca93f8a5ea229e287c0d97091958c5a528dec08 100644
--- a/official/cv/dncnn/eval.py
+++ b/official/cv/dncnn/eval.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@ import mindspore
 import mindspore.dataset as ds
 from mindspore import context
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from src.model import DnCNN
 
 class DnCNN_eval_Dataset():
diff --git a/official/cv/dncnn/infer/data/preprocess/export_bin_file.py b/official/cv/dncnn/infer/data/preprocess/export_bin_file.py
index b4b5462ec9637e9fcf5840cd4d4aa78597df2f3e..14c0cb59612d02afe5b1a5a1f55aa8dfa82cd8e3 100644
--- a/official/cv/dncnn/infer/data/preprocess/export_bin_file.py
+++ b/official/cv/dncnn/infer/data/preprocess/export_bin_file.py
@@ -23,7 +23,7 @@ import numpy as np
 import cv2
 import mindspore
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 
 def ResziePadding(img, fixed_side=256):
diff --git a/official/cv/dncnn/src/dataset.py b/official/cv/dncnn/src/dataset.py
index b8240c79ea7e7e3f85720125dd14986a2dda240d..69b6565c776a4dca7dcc990921f28809c316b9c9 100644
--- a/official/cv/dncnn/src/dataset.py
+++ b/official/cv/dncnn/src/dataset.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import cv2
 import PIL
 import mindspore
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 def create_train_dataset(data_path, model_type, noise_level=25, batch_size=128):
     # define dataset
diff --git a/official/cv/dpn/src/imagenet_dataset.py b/official/cv/dpn/src/imagenet_dataset.py
index 42ad7f9b972ebc9f2b184a5061700c63d1bea80a..cd73134f45e21baf8dfbc14a5a30880aec86cca8 100644
--- a/official/cv/dpn/src/imagenet_dataset.py
+++ b/official/cv/dpn/src/imagenet_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import cv2
 from PIL import ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/east/detect.py b/official/cv/east/detect.py
index 48f82516895ed43da657860ed628f149b9b1d5d0..eb807cd62e1663c284fdac0a0d651613086b80ac 100644
--- a/official/cv/east/detect.py
+++ b/official/cv/east/detect.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import numpy as np
 
 import mindspore.ops as P
 from mindspore import Tensor
-import mindspore.dataset.vision.py_transforms as V
+import mindspore.dataset.vision as V
 from src.dataset import get_rotate_mat
 
 import lanms
@@ -44,7 +44,7 @@ def load_pil(img):
     """convert PIL Image to Tensor
     """
     img = V.ToTensor()(img)
-    img = V.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(img)
+    img = V.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)(img)
     img = Tensor(img)
     img = P.ExpandDims()(img, 0)
     return img
diff --git a/official/cv/east/src/dataset.py b/official/cv/east/src/dataset.py
index 79c6ddfbcfea755e579640e9826ebda1d1fbde7b..2f0da352004de618cc6d68e5c58fac5daec0fd84 100644
--- a/official/cv/east/src/dataset.py
+++ b/official/cv/east/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import cv2
 from PIL import Image
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 from src.distributed_sampler import DistributedSampler
 
 
diff --git a/official/cv/efficientnet/src/dataset.py b/official/cv/efficientnet/src/dataset.py
index 76a67f1494c79f5203dee8509ea0b70cca9f2498..7b8bd31087295adfbedda965d011e6f1861c1283 100644
--- a/official/cv/efficientnet/src/dataset.py
+++ b/official/cv/efficientnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 from mindspore.communication.management import get_group_size, get_rank
 from mindspore.dataset.vision import Inter
 
diff --git a/official/cv/efficientnet/src/transform.py b/official/cv/efficientnet/src/transform.py
index c34a8fe9441c1e78b8044d903f5787b4329b023d..39a1eebcb8c3bc226893924b00afe22de8d730a7 100644
--- a/official/cv/efficientnet/src/transform.py
+++ b/official/cv/efficientnet/src/transform.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 random augment class
 """
 import numpy as np
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as vision
 from src import transform_utils
 
 
@@ -35,9 +35,9 @@ class RandAugment:
         # assert the imgs object are pil_images
         ret_imgs = []
         ret_labels = []
-        py_to_pil_op = P.ToPIL()
-        to_tensor = P.ToTensor()
-        normalize_op = P.Normalize(self.mean, self.std)
+        py_to_pil_op = vision.ToPIL()
+        to_tensor = vision.ToTensor()
+        normalize_op = vision.Normalize(self.mean, self.std, is_hwc=False)
         rand_augment_ops = transform_utils.rand_augment_transform(self.config_str, self.hparams)
         for i, image in enumerate(imgs):
             img_pil = py_to_pil_op(image)
diff --git a/official/cv/faster_rcnn/src/dataset.py b/official/cv/faster_rcnn/src/dataset.py
index 64c955a4c3c0b623b54814cbbe27948bf37335a0..783f81f8c8d26f3f321a7e429f7540cc479aacca 100644
--- a/official/cv/faster_rcnn/src/dataset.py
+++ b/official/cv/faster_rcnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -552,7 +552,7 @@ def create_fasterrcnn_dataset(config, mindrecord_file, batch_size=2, device_num=
     de.config.set_prefetch_size(8)
     ds = de.MindDataset(mindrecord_file, columns_list=["image", "annotation"], num_shards=device_num, shard_id=rank_id,
                         num_parallel_workers=4, shuffle=is_training)
-    decode = ms.dataset.vision.c_transforms.Decode()
+    decode = ms.dataset.vision.Decode()
     ds = ds.map(input_columns=["image"], operations=decode)
     compose_map_func = (lambda image, annotation: preprocess_fn(image, annotation, is_training, config=config))
 
diff --git a/official/cv/fastscnn/eval.py b/official/cv/fastscnn/eval.py
index 86b94b43026c48df342ac809cf62e32461c818c8..9e6993bc215384a81b4c2a263719b9a7ab9059be 100644
--- a/official/cv/fastscnn/eval.py
+++ b/official/cv/fastscnn/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,8 +23,8 @@ import mindspore.ops as ops
 from mindspore.context import ParallelMode
 from mindspore import load_checkpoint, load_param_into_net
 from mindspore.communication.management import init, get_rank, get_group_size
-from mindspore.dataset.transforms.py_transforms import Compose
-from mindspore.dataset.vision.py_transforms import ToTensor, Normalize
+from mindspore.dataset.transforms.transforms import Compose
+from mindspore.dataset.vision import ToTensor, Normalize
 
 from src.dataloader import create_CitySegmentation
 from src.fast_scnn import FastSCNN
@@ -140,7 +140,7 @@ def validation():
     # image transform
     input_transform = Compose([
         ToTensor(),
-        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], is_hwc=False),
     ])
     if args.use_modelarts:
         import moxing as mox
diff --git a/official/cv/fastscnn/modelarts/start_train.py b/official/cv/fastscnn/modelarts/start_train.py
index 665956f5e899d82e7e3732096a60ed3333d2b712..8a17519e926b82d8d1ea77905dee864f6177414c 100644
--- a/official/cv/fastscnn/modelarts/start_train.py
+++ b/official/cv/fastscnn/modelarts/start_train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,8 +30,8 @@ from mindspore.common.tensor import Tensor
 from mindspore.context import ParallelMode
 from mindspore import FixedLossScaleManager
 from mindspore import load_checkpoint, load_param_into_net
-from mindspore.dataset.transforms.py_transforms import Compose
-from mindspore.dataset.vision.py_transforms import ToTensor, Normalize
+from mindspore.dataset.transforms.transforms import Compose
+from mindspore.dataset.vision import ToTensor, Normalize
 from mindspore.communication.management import init, get_rank, get_group_size
 from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
 
@@ -138,7 +138,7 @@ def train():
     # image transform
     input_transform = Compose([
         ToTensor(),
-        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], is_hwc=False),
     ])
 
     train_dataset, args.steps_per_epoch = create_CitySegmentation(args, data_path=args.dataset, \
diff --git a/official/cv/fastscnn/src/dataloader.py b/official/cv/fastscnn/src/dataloader.py
index bdaae9a5098bee904fdc73134b428817e2d23540..b6a770a6309fd4a3878827082266e1c241a4df5d 100644
--- a/official/cv/fastscnn/src/dataloader.py
+++ b/official/cv/fastscnn/src/dataloader.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import numpy as np
 from PIL import Image
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 from src.seg_data_base import SegmentationDataset
 
diff --git a/official/cv/fastscnn/train.py b/official/cv/fastscnn/train.py
index 8ba9af1a40af158f24d71e27bd63331bf17b97a9..17b98e5e09f57bf3e07192e5696a7b010863ab8e 100644
--- a/official/cv/fastscnn/train.py
+++ b/official/cv/fastscnn/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,8 +27,8 @@ from mindspore.common.tensor import Tensor
 from mindspore.context import ParallelMode
 from mindspore import FixedLossScaleManager
 from mindspore import load_checkpoint, load_param_into_net
-from mindspore.dataset.transforms.py_transforms import Compose
-from mindspore.dataset.vision.py_transforms import ToTensor, Normalize
+from mindspore.dataset.transforms.transforms import Compose
+from mindspore.dataset.vision import ToTensor, Normalize
 from mindspore.communication.management import init, get_rank, get_group_size
 from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
 
@@ -130,7 +130,7 @@ def train():
     # image transform
     input_transform = Compose([
         ToTensor(),
-        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+        Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], is_hwc=False),
     ])
 
     if args.use_modelarts:
diff --git a/official/cv/googlenet/src/dataset.py b/official/cv/googlenet/src/dataset.py
index e7a82f05d16dac7253adee09af1d8b4e84abfb1c..bd6bb2b5215cf9e48299201fff1cb0a5979dcbdb 100644
--- a/official/cv/googlenet/src/dataset.py
+++ b/official/cv/googlenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=None):
     """Data operations."""
diff --git a/official/cv/inceptionv3/src/dataset.py b/official/cv/inceptionv3/src/dataset.py
index e7fb6076df445393ecd0be7a330159dca524ba06..f248974fbcb427557e311178a4267ec8dbffc54a 100644
--- a/official/cv/inceptionv3/src/dataset.py
+++ b/official/cv/inceptionv3/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset_imagenet(dataset_path, do_train, cfg, repeat_num=1):
diff --git a/official/cv/inceptionv4/src/dataset.py b/official/cv/inceptionv4/src/dataset.py
index a27e4f1503566b2967b3191fb1f77b51cbc8e07a..3939cafba3132d618f674a36339db9fd43b91ead 100644
--- a/official/cv/inceptionv4/src/dataset.py
+++ b/official/cv/inceptionv4/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset_imagenet(dataset_path, do_train, cfg, repeat_num=1):
diff --git a/official/cv/lenet/src/dataset.py b/official/cv/lenet/src/dataset.py
index b3801105b22db4a6c4a1ff9247fd391696520e29..623ee7b73ec87e7e89fc69130089a6e2a9314fde 100644
--- a/official/cv/lenet/src/dataset.py
+++ b/official/cv/lenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Produce the dataset
 """
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as CV
+import mindspore.dataset.transforms as C
 from mindspore.dataset.vision import Inter
 from mindspore.common import dtype as mstype
 
diff --git a/official/cv/maskrcnn/src/dataset.py b/official/cv/maskrcnn/src/dataset.py
index bc05b98c7536fbb8ff252854d04a7cd21c1bf7c0..e6178203f7ec966ec8b932eca0ee557ba689726e 100644
--- a/official/cv/maskrcnn/src/dataset.py
+++ b/official/cv/maskrcnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import numpy as np
 from numpy import random
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 from .model_utils.config import config
diff --git a/official/cv/maskrcnn_mobilenetv1/src/dataset.py b/official/cv/maskrcnn_mobilenetv1/src/dataset.py
index 5acba3a77a4f7c2848e92de268c032b3db9608ef..19e6e1c301dac4f4321e916a55426922e6a9f9d0 100644
--- a/official/cv/maskrcnn_mobilenetv1/src/dataset.py
+++ b/official/cv/maskrcnn_mobilenetv1/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-21 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import numpy as np
 from numpy import random
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from mindspore import context
 
diff --git a/official/cv/mobilenetv1/src/dataset.py b/official/cv/mobilenetv1/src/dataset.py
index 3d7c671508462dad5fed4f5541ae0076e123ccc1..3b62fd343b3812be9d60ae81c30b21f03ee8b60f 100644
--- a/official/cv/mobilenetv1/src/dataset.py
+++ b/official/cv/mobilenetv1/src/dataset.py
@@ -47,18 +47,18 @@ def create_dataset1(dataset_path, do_train, device_num=1, batch_size=32, target=
     trans = []
     if do_train:
         trans += [
-            ds.vision.c_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
 
     trans += [
-        ds.vision.c_transforms.Resize((224, 224)),
-        ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-        ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-        ds.vision.c_transforms.HWC2CHW()
+        ds.vision.Resize((224, 224)),
+        ds.vision.Rescale(1.0 / 255.0, 0.0),
+        ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+        ds.vision.HWC2CHW()
     ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=THREAD_NUM)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=THREAD_NUM)
@@ -97,21 +97,21 @@ def create_dataset2(dataset_path, do_train, device_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=THREAD_NUM)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=THREAD_NUM)
diff --git a/official/cv/mobilenetv2/src/dataset.py b/official/cv/mobilenetv2/src/dataset.py
index 6149458d669bfadd82a67f734383a8b0491b577a..8d09a7e8c557b134164201b170d0904c11f249e4 100644
--- a/official/cv/mobilenetv2/src/dataset.py
+++ b/official/cv/mobilenetv2/src/dataset.py
@@ -52,24 +52,24 @@ def create_dataset(dataset_path, do_train, config, enable_cache=False, cache_ses
     buffer_size = 1000
 
     # define map operations
-    decode_op = ds.vision.c_transforms.Decode()
-    resize_crop_op = ds.vision.c_transforms.RandomCropDecodeResize(resize_height,
-                                                                   scale=(0.08, 1.0), ratio=(0.75, 1.333))
-    horizontal_flip_op = ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
-
-    resize_op = ds.vision.c_transforms.Resize((256, 256))
-    center_crop = ds.vision.c_transforms.CenterCrop(resize_width)
-    rescale_op = ds.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
-    normalize_op = ds.vision.c_transforms.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
-                                                    std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
-    change_swap_op = ds.vision.c_transforms.HWC2CHW()
+    decode_op = ds.vision.Decode()
+    resize_crop_op = ds.vision.RandomCropDecodeResize(resize_height,
+                                                      scale=(0.08, 1.0), ratio=(0.75, 1.333))
+    horizontal_flip_op = ds.vision.RandomHorizontalFlip(prob=0.5)
+
+    resize_op = ds.vision.Resize((256, 256))
+    center_crop = ds.vision.CenterCrop(resize_width)
+    rescale_op = ds.vision.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
+    normalize_op = ds.vision.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                       std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
+    change_swap_op = ds.vision.HWC2CHW()
 
     if do_train:
         trans = [resize_crop_op, horizontal_flip_op, rescale_op, normalize_op, change_swap_op]
     else:
         trans = [decode_op, resize_op, center_crop, normalize_op, change_swap_op]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=num_workers)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_workers)
diff --git a/official/cv/mobilenetv3/src/dataset.py b/official/cv/mobilenetv3/src/dataset.py
index 8061ca3eb778e05ba789893eb6293a5de98409fc..e29ab6767ed280d08bb27fcf1a49f6f64dba913f 100644
--- a/official/cv/mobilenetv3/src/dataset.py
+++ b/official/cv/mobilenetv3/src/dataset.py
@@ -49,24 +49,24 @@ def create_dataset(dataset_path, do_train, config, device_target, batch_size=32,
     buffer_size = 1000
 
     # define map operations
-    decode_op = ds.vision.c_transforms.Decode()
-    resize_crop_op = ds.vision.c_transforms.RandomCropDecodeResize(resize_height,
-                                                                   scale=(0.08, 1.0), ratio=(0.75, 1.333))
-    horizontal_flip_op = ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
-
-    resize_op = ds.vision.c_transforms.Resize(256)
-    center_crop = ds.vision.c_transforms.CenterCrop(resize_width)
-    rescale_op = ds.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
-    normalize_op = ds.vision.c_transforms.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
-                                                    std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
-    change_swap_op = ds.vision.c_transforms.HWC2CHW()
+    decode_op = ds.vision.Decode()
+    resize_crop_op = ds.vision.RandomCropDecodeResize(resize_height,
+                                                      scale=(0.08, 1.0), ratio=(0.75, 1.333))
+    horizontal_flip_op = ds.vision.RandomHorizontalFlip(prob=0.5)
+
+    resize_op = ds.vision.Resize(256)
+    center_crop = ds.vision.CenterCrop(resize_width)
+    rescale_op = ds.vision.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
+    normalize_op = ds.vision.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                       std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
+    change_swap_op = ds.vision.HWC2CHW()
 
     if do_train:
         trans = [resize_crop_op, horizontal_flip_op, rescale_op, normalize_op, change_swap_op]
     else:
         trans = [decode_op, resize_op, center_crop, normalize_op, change_swap_op]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -99,24 +99,24 @@ def create_dataset_cifar(dataset_path,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
-            ds.vision.c_transforms.Resize((224, 224)),
-            ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-            ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-            ds.vision.c_transforms.CutOut(112),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
+            ds.vision.Resize((224, 224)),
+            ds.vision.Rescale(1.0 / 255.0, 0.0),
+            ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+            ds.vision.CutOut(112),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Resize((224, 224)),
-            ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-            ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Resize((224, 224)),
+            ds.vision.Rescale(1.0 / 255.0, 0.0),
+            ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=type_cast_op,
                             input_columns="label",
diff --git a/official/cv/nasnet/src/dataset.py b/official/cv/nasnet/src/dataset.py
index efa36ab28215ea013ac6051a1d88f6181020d3f1..63b32b9f65f7f6039381da5a6c25a029a7fd0d28 100644
--- a/official/cv/nasnet/src/dataset.py
+++ b/official/cv/nasnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ Data operations, will be used in train.py and eval.py
 import mindspore
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 def create_dataset(dataset_path, do_train, rank, group_size,
                    num_parallel_workers=8, batch_size=128,
diff --git a/official/cv/nima/src/dataset.py b/official/cv/nima/src/dataset.py
index be1be6b7f9418a2fda175c166dc914023cac60a8..35c480c19c650c86a865b8e1652adc3fe395633b 100644
--- a/official/cv/nima/src/dataset.py
+++ b/official/cv/nima/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import numpy as np
 import mindspore
 import mindspore.dataset as ds
 from mindspore.dataset.vision import Inter
-from mindspore.dataset.vision import c_transforms as v_ct
-from mindspore.dataset.transforms import c_transforms as t_ct
+from mindspore.dataset.vision import transforms as v_ct
+from mindspore.dataset.transforms import transforms as t_ct
 
 
 class Dataset:
diff --git a/official/cv/patchcore/preprocess.py b/official/cv/patchcore/preprocess.py
index a4f35c46f1343b39c8adb3cdf7b906f8fd442005..03586d845d2d3e5ab21a223d7451eb72d0b6956c 100644
--- a/official/cv/patchcore/preprocess.py
+++ b/official/cv/patchcore/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,10 +20,10 @@ from pathlib import Path
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as vision
 from mindspore.common import set_seed
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.dataset.vision import Inter
 
 from src.config import _C as cfg
@@ -89,15 +89,15 @@ def createDataset(dataset_path, category):
     std = cfg.std_dft
 
     data_transforms = Compose([
-        py_vision.Resize((256, 256), interpolation=Inter.ANTIALIAS),
-        py_vision.CenterCrop(224),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=mean, std=std)
+        vision.Resize((256, 256), interpolation=Inter.ANTIALIAS),
+        vision.CenterCrop(224),
+        vision.ToTensor(),
+        vision.Normalize(mean=mean, std=std, is_hwc=False)
     ])
     gt_transforms = Compose([
-        py_vision.Resize((256, 256)),
-        py_vision.CenterCrop(224),
-        py_vision.ToTensor()
+        vision.Resize((256, 256)),
+        vision.CenterCrop(224),
+        vision.ToTensor()
     ])
 
     train_json_path, test_json_path = createDatasetJson(dataset_path, category, data_transforms, gt_transforms)
diff --git a/official/cv/patchcore/src/dataset.py b/official/cv/patchcore/src/dataset.py
index c8c4b48a94f2051bb844cb21cf991d47ebfd10d2..29a05d788cc6c2b62fe41cd1e9f89b011d7d1739 100644
--- a/official/cv/patchcore/src/dataset.py
+++ b/official/cv/patchcore/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,9 +20,9 @@ from pathlib import Path
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.dataset.vision import Inter
 from PIL import Image
 
@@ -137,15 +137,15 @@ def createDataset(dataset_path, category):
     std = [0.229, 0.224, 0.225]
 
     data_transforms = Compose([
-        py_vision.Resize((256, 256), interpolation=Inter.ANTIALIAS),
-        py_vision.CenterCrop(224),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=mean, std=std)
+        vision.Resize((256, 256), interpolation=Inter.ANTIALIAS),
+        vision.CenterCrop(224),
+        vision.ToTensor(),
+        vision.Normalize(mean=mean, std=std, is_hwc=False)
     ])
     gt_transforms = Compose([
-        py_vision.Resize((256, 256)),
-        py_vision.CenterCrop(224),
-        py_vision.ToTensor()
+        vision.Resize((256, 256)),
+        vision.CenterCrop(224),
+        vision.ToTensor()
     ])
 
     train_json_path, test_json_path = createDatasetJson(dataset_path, category, data_transforms, gt_transforms)
diff --git a/official/cv/posenet/src/dataset.py b/official/cv/posenet/src/dataset.py
index c9454fadc4150a5313c21f6b328fc06a0abe8704..573d189925fc329c249fe9e3cd8f8d6db3700db3 100644
--- a/official/cv/posenet/src/dataset.py
+++ b/official/cv/posenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import numpy as np
 from mindspore.mindrecord import FileWriter
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 class Dataset:
     """dataset read"""
diff --git a/official/cv/psenet/src/dataset.py b/official/cv/psenet/src/dataset.py
index 8c88120b714618b7eba47be3da9b769733a2858e..e0e0f94e7f1bc96aad912f5d451cd9485df5826d 100644
--- a/official/cv/psenet/src/dataset.py
+++ b/official/cv/psenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import numpy as np
 import Polygon as plg
 import pyclipper
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_transforms
+import mindspore.dataset.vision as vision
 from src.model_utils.config import config
 
 __all__ = ['train_dataset_creator', 'test_dataset_creator']
@@ -255,13 +255,13 @@ class TrainDataset:
         if self.is_transform:
             img = Image.fromarray(img)
             img = img.convert('RGB')
-            img = py_transforms.RandomColorAdjust(brightness=32.0 / 255, saturation=0.5)(img)
+            img = vision.RandomColorAdjust(brightness=32.0 / 255, saturation=0.5)(img)
         else:
             img = Image.fromarray(img)
             img = img.convert('RGB')
 
-        img = py_transforms.ToTensor()(img)
-        img = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)
+        img = vision.ToTensor()(img)
+        img = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)(img)
 
         gt_text = gt_text.astype(np.float32)
         gt_kernels = gt_kernels.astype(np.float32)
@@ -306,8 +306,8 @@ def IC15_TEST_Generator():
 
         img_resized = Image.fromarray(img_resized)
         img_resized = img_resized.convert('RGB')
-        img_resized = py_transforms.ToTensor()(img_resized)
-        img_resized = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img_resized)
+        img_resized = vision.ToTensor()(img_resized)
+        img_resized = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)(img_resized)
 
         yield img, img_resized, img_name
 
diff --git a/official/cv/pvnet/eval.py b/official/cv/pvnet/eval.py
index b687eee4a58a2b0575bed71819108437ac47eeae..52c273dc3212fa1e5153aadb9e724d144a144343 100644
--- a/official/cv/pvnet/eval.py
+++ b/official/cv/pvnet/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import time
 
 import numpy as np
 import mindspore
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V
 from mindspore import context
 
 from model_utils.config import config as cfg
@@ -82,9 +82,9 @@ def test(args):
         pose = test_db[idx]['RT'].copy()
 
         rgb = read_rgb_np(rgb_path)
-        rgb = P.ToTensor()(rgb)
+        rgb = V.ToTensor()(rgb)
         rgb = C.TypeCast(mindspore.dtype.float32)(rgb)
-        rgb = P.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(rgb)
+        rgb = V.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)(rgb)
         rgb = np.expand_dims(rgb, axis=0)
         rgb = mindspore.Tensor(rgb)
 
diff --git a/official/cv/pvnet/src/dataset.py b/official/cv/pvnet/src/dataset.py
index 4a3d59580d75461863fa80eec47d728eb544a761..29ee73acd58de4bd3f4175a7027ad3b55b8865a3 100644
--- a/official/cv/pvnet/src/dataset.py
+++ b/official/cv/pvnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,9 +18,8 @@ import os
 import cv2
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 import numpy as np
 
 from model_utils.config import config as cfg
@@ -242,9 +241,9 @@ def create_dataset(cls_list, batch_size=16, workers=16, devices=1, rank=0, multi
         CV.RandomColorAdjust(
             cfg.brightness, cfg.contrast,
             cfg.saturation, cfg.hue),
-        P.ToTensor(),  # 0~255 HWC to 0~1 CHW
+        CV.ToTensor(),  # 0~255 HWC to 0~1 CHW
         C.TypeCast(mstype.float32),
-        P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+        CV.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False),
     ])
 
     mask_transforms = [
diff --git a/official/cv/pwcnet/src/flyingchairs.py b/official/cv/pwcnet/src/flyingchairs.py
index c1d68d031b75c5f3f22723204d05b1374e57a793..fdcefc667a3d1581ea8458355bff78b3d3ea8762 100644
--- a/official/cv/pwcnet/src/flyingchairs.py
+++ b/official/cv/pwcnet/src/flyingchairs.py
@@ -18,8 +18,8 @@ from glob import glob
 
 import mindspore.dataset as de
 import mindspore
-import mindspore.dataset.vision.py_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as V
+import mindspore.dataset.transforms as T
 
 import src.common as common
 import src.transforms as transforms
@@ -131,16 +131,16 @@ class FlyingChairs():
         # photometric_augmentations
         if augmentations:
             self._photometric_transform = transforms.ConcatTransformSplitChainer([
-                CV.ToPIL(),
-                CV.RandomColorAdjust(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
-                CV.ToTensor(),
+                V.ToPIL(),
+                V.RandomColorAdjust(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
+                V.ToTensor(),
                 transforms.RandomGamma(min_gamma=0.7, max_gamma=1.5, clip_image=True)
                 ])
 
         else:
             self._photometric_transform = transforms.ConcatTransformSplitChainer([
-                CV.ToPIL(),
-                CV.ToTensor(),
+                V.ToPIL(),
+                V.ToTensor(),
                 ])
 
     def __getitem__(self, index):
@@ -176,9 +176,9 @@ def FlyingChairsTrain(dir_root, augmentations, dstype, batchsize, num_parallel_w
                                      shuffle=True, num_shards=world_size, shard_id=local_rank)
 
     # apply map operations on images
-    de_dataset = de_dataset.map(input_columns="im1", operations=C.TypeCast(mindspore.float32))
-    de_dataset = de_dataset.map(input_columns="im2", operations=C.TypeCast(mindspore.float32))
-    de_dataset = de_dataset.map(input_columns="flo", operations=C.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="im1", operations=T.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="im2", operations=T.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="flo", operations=T.TypeCast(mindspore.float32))
 
     de_dataset = de_dataset.batch(batchsize, drop_remainder=True)
     return de_dataset, dataset_len
diff --git a/official/cv/pwcnet/src/sintel.py b/official/cv/pwcnet/src/sintel.py
index 0c7b6caeec579877cdafea49221144cbdef908e0..cce038c8629162916b7165e0111ce78507d80bc7 100644
--- a/official/cv/pwcnet/src/sintel.py
+++ b/official/cv/pwcnet/src/sintel.py
@@ -19,8 +19,8 @@ import numpy as np
 
 import mindspore.dataset as de
 import mindspore
-import mindspore.dataset.vision.py_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as V
+import mindspore.dataset.transforms as T
 
 import src.common as common
 import src.transforms as transforms
@@ -129,16 +129,16 @@ class Sintel():
         # photometric_augmentations
         if augmentations:
             self._photometric_transform = transforms.ConcatTransformSplitChainer([
-                CV.ToPIL(),
-                CV.RandomColorAdjust(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
-                CV.ToTensor(),
+                V.ToPIL(),
+                V.RandomColorAdjust(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
+                V.ToTensor(),
                 transforms.RandomGamma(min_gamma=0.7, max_gamma=1.5, clip_image=True)
                 ])
 
         else:
             self._photometric_transform = transforms.ConcatTransformSplitChainer([
-                CV.ToPIL(),
-                CV.ToTensor(),
+                V.ToPIL(),
+                V.ToTensor(),
                 ])
 
         self._size = len(self._image_list)
@@ -182,8 +182,8 @@ def SintelTraining(dir_root, augmentations, imgtype, dstype, batchsize, num_para
                                      shuffle=True, num_shards=world_size, shard_id=local_rank)
 
     # apply map operations on images
-    de_dataset = de_dataset.map(input_columns="im1", operations=C.TypeCast(mindspore.float32))
-    de_dataset = de_dataset.map(input_columns="im2", operations=C.TypeCast(mindspore.float32))
-    de_dataset = de_dataset.map(input_columns="flo", operations=C.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="im1", operations=T.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="im2", operations=T.TypeCast(mindspore.float32))
+    de_dataset = de_dataset.map(input_columns="flo", operations=T.TypeCast(mindspore.float32))
     de_dataset = de_dataset.batch(batchsize, drop_remainder=True)
     return de_dataset, dataset_len
diff --git a/official/cv/resnet/gpu_resnet_benchmark.py b/official/cv/resnet/gpu_resnet_benchmark.py
index 01e3be0452f3c4a69bf69c26cfeccca887260722..094301e024b401a385f148cc565f4a6e04b02df2 100644
--- a/official/cv/resnet/gpu_resnet_benchmark.py
+++ b/official/cv/resnet/gpu_resnet_benchmark.py
@@ -90,28 +90,28 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="
     std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
 
     # define map operations
-    normalize_op = ds.vision.c_transforms.Normalize(mean=mean, std=std)
+    normalize_op = ds.vision.Normalize(mean=mean, std=std)
     if dtype == "fp16":
         if config.eval:
             x_dtype = "float32"
         else:
             x_dtype = "float16"
-        normalize_op = ds.vision.c_transforms.NormalizePad(mean=mean, std=std, dtype=x_dtype)
+        normalize_op = ds.vision.NormalizePad(mean=mean, std=std, dtype=x_dtype)
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
             normalize_op,
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
             normalize_op,
         ]
     if dtype == "fp32":
-        trans.append(ds.vision.c_transforms.HWC2CHW())
+        trans.append(ds.vision.HWC2CHW())
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=map_num_parallel_worker)
     # apply batch operations
     data_set = data_set.batch(batch_size, drop_remainder=True, num_parallel_workers=batch_num_parallel_worker)
diff --git a/official/cv/resnet/src/dataset.py b/official/cv/resnet/src/dataset.py
index a13626f188db0d7ea5da443f8bb902f31e670c91..7cf8bc978523e1bb876c872cb71577dc1d5a6c41 100644
--- a/official/cv/resnet/src/dataset.py
+++ b/official/cv/resnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -50,18 +50,18 @@ def create_dataset1(dataset_path, do_train, batch_size=32, train_image_size=224,
     trans = []
     if do_train:
         trans += [
-            ds.vision.c_transforms.RandomCrop((32, 32), (4, 4, 4, 4)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCrop((32, 32), (4, 4, 4, 4)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
 
     trans += [
-        ds.vision.c_transforms.Resize((train_image_size, train_image_size)),
-        ds.vision.c_transforms.Rescale(1.0 / 255.0, 0.0),
-        ds.vision.c_transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
-        ds.vision.c_transforms.HWC2CHW()
+        ds.vision.Resize((train_image_size, train_image_size)),
+        ds.vision.Rescale(1.0 / 255.0, 0.0),
+        ds.vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
+        ds.vision.HWC2CHW()
     ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=type_cast_op, input_columns="label",
                             num_parallel_workers=get_num_parallel_workers(8))
@@ -117,18 +117,18 @@ def create_dataset2(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5)
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5)
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size)
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size)
         ]
-    trans_norm = [ds.vision.c_transforms.Normalize(mean=mean, std=std), ds.vision.c_transforms.HWC2CHW()]
+    trans_norm = [ds.vision.Normalize(mean=mean, std=std), ds.vision.HWC2CHW()]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     if device_num == 1:
         trans_work_num = 24
     else:
@@ -190,21 +190,21 @@ def create_dataset_pynative(dataset_path, do_train, batch_size=32, train_image_s
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=4)
     # only enable cache for eval
@@ -257,21 +257,21 @@ def create_dataset3(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(rank_id / (rank_id + 1)),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(rank_id / (rank_id + 1)),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=get_num_parallel_workers(8))
     # only enable cache for eval
@@ -326,21 +326,21 @@ def create_dataset4(dataset_path, do_train, batch_size=32, train_image_size=224,
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(292),
-            ds.vision.c_transforms.CenterCrop(eval_image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(292),
+            ds.vision.CenterCrop(eval_image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=get_num_parallel_workers(12))
     # only enable cache for eval
     if do_train:
diff --git a/official/cv/resnet/src/dataset_infer.py b/official/cv/resnet/src/dataset_infer.py
index 5d0a655e88ecfda28ad455515ba4fc17976b3b6d..ce032b1db63c91a8d622b41889b32dc65d5d1ed8 100644
--- a/official/cv/resnet/src/dataset_infer.py
+++ b/official/cv/resnet/src/dataset_infer.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -130,21 +130,21 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -202,21 +202,21 @@ def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(rank_id / (rank_id + 1)),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(rank_id / (rank_id + 1)),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(256),
-            ds.vision.c_transforms.CenterCrop(image_size),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(256),
+            ds.vision.CenterCrop(image_size),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
 
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)
@@ -271,21 +271,21 @@ def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     # define map operations
     if do_train:
         trans = [
-            ds.vision.c_transforms.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
-            ds.vision.c_transforms.RandomHorizontalFlip(prob=0.5),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+            ds.vision.RandomHorizontalFlip(prob=0.5),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
     else:
         trans = [
-            ds.vision.c_transforms.Decode(),
-            ds.vision.c_transforms.Resize(292),
-            ds.vision.c_transforms.CenterCrop(256),
-            ds.vision.c_transforms.Normalize(mean=mean, std=std),
-            ds.vision.c_transforms.HWC2CHW()
+            ds.vision.Decode(),
+            ds.vision.Resize(292),
+            ds.vision.CenterCrop(256),
+            ds.vision.Normalize(mean=mean, std=std),
+            ds.vision.HWC2CHW()
         ]
 
-    type_cast_op = ds.transforms.c_transforms.TypeCast(ms.int32)
+    type_cast_op = ds.transforms.transforms.TypeCast(ms.int32)
     data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=12)
     data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12)
     if do_train:
diff --git a/official/cv/resnet_thor/src/dataset.py b/official/cv/resnet_thor/src/dataset.py
index 443817150b07a9eb7e4adc81af33c41b3d648735..ae1dbee0a06db4d75b87465a731c51ebd0196ed7 100644
--- a/official/cv/resnet_thor/src/dataset.py
+++ b/official/cv/resnet_thor/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/official/cv/resnext/src/dataset.py b/official/cv/resnext/src/dataset.py
index d7f5b4678e84b39b84da100de96bed1a9988d0a7..a3aba86c3925f366f296340b6bf1e3ce7345f805 100644
--- a/official/cv/resnext/src/dataset.py
+++ b/official/cv/resnext/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from src.utils.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/retinanet/src/dataset.py b/official/cv/retinanet/src/dataset.py
index c5105e9c4ba047ec2780deebd4c6a2ef2180c0b1..312c35d89340670471e1b192944e9a515399c523 100644
--- a/official/cv/retinanet/src/dataset.py
+++ b/official/cv/retinanet/src/dataset.py
@@ -23,7 +23,7 @@ import xml.etree.ElementTree as et
 import numpy as np
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.model_utils.config import config
 from .box_utils import jaccard_numpy, retinanet_bboxes_encode
diff --git a/official/cv/se_resnext50/src/dataset.py b/official/cv/se_resnext50/src/dataset.py
index 4ce7b43c00efac7641a909d4ec4bdeb2feb72a76..9fc23bc1d62f32aaf66f7cf0602f1b6376876ba2 100644
--- a/official/cv/se_resnext50/src/dataset.py
+++ b/official/cv/se_resnext50/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from src.utils.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/semantic_human_matting/src/dataset.py b/official/cv/semantic_human_matting/src/dataset.py
index 31e97d1faff85c9358ff3e948873b6237554075e..ac7202e9d2b5f63ce891f0b9786d8a00377bbf38 100644
--- a/official/cv/semantic_human_matting/src/dataset.py
+++ b/official/cv/semantic_human_matting/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import cv2
 import numpy as np
 
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import dtype as mstype
 
 
diff --git a/official/cv/shufflenetv1/src/dataset.py b/official/cv/shufflenetv1/src/dataset.py
index 48588b5abb147dcc6a2bee02d2dc4d2d20460ee0..6656d3503df49813bbdad5b2de1669389be60b7f 100644
--- a/official/cv/shufflenetv1/src/dataset.py
+++ b/official/cv/shufflenetv1/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 from src.model_utils.config import config
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, device_num=1, rank=0):
diff --git a/official/cv/shufflenetv2/src/dataset.py b/official/cv/shufflenetv2/src/dataset.py
index 0fbe28daaa97077239dd5744d935848db5ec88b4..96b8835a93f5443ce75ac3238ba385173e457484 100644
--- a/official/cv/shufflenetv2/src/dataset.py
+++ b/official/cv/shufflenetv2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import numpy as np
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 class toBGR():
     def __call__(self, img):
diff --git a/official/cv/simclr/src/dataset.py b/official/cv/simclr/src/dataset.py
index 1b14f0a5eaca509b42adcf218779e3a655ad561d..ef913335924ee3facfa73fc08a1c2a1b5f2c5c58 100644
--- a/official/cv/simclr/src/dataset.py
+++ b/official/cv/simclr/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,9 +17,8 @@ create train or eval dataset.
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision import Inter
 import cv2
 import numpy as np
@@ -70,8 +69,8 @@ def create_dataset(args, dataset_mode, repeat_num=1):
             color_jitter = C.RandomColorAdjust(0.8 * scale, 0.8 * scale, 0.8 * scale, 0.2 * scale)
             trans += [C2.RandomApply([color_jitter], prob=0.8)]
         if args.use_color_gray:
-            trans += [py_vision.ToPIL(),
-                      py_vision.RandomGrayscale(prob=0.2),
+            trans += [C.ToPIL(),
+                      C.RandomGrayscale(prob=0.2),
                       np.array]  # need to convert PIL image to a NumPy array to pass it to C++ operation
         if args.use_blur:
             trans += [C2.RandomApply([gaussian_blur], prob=0.8)]
diff --git a/official/cv/simple_pose/src/dataset.py b/official/cv/simple_pose/src/dataset.py
index 35b85ba240ea82cce6d415ae1891ad5564072210..9e8bbc74198cbd244c5b0cf89b86fa506f9118c2 100644
--- a/official/cv/simple_pose/src/dataset.py
+++ b/official/cv/simple_pose/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.vision as V_C
 
 from src.utils.transform import fliplr_joints, get_affine_transform, affine_transform
 
diff --git a/official/cv/sphereface/src/datasets/classification.py b/official/cv/sphereface/src/datasets/classification.py
index c5bc2f984822de5bcd0ac3db1541100d7f042dc4..3d9c04eaba70e79e715949e04125ce485d78e6c9 100644
--- a/official/cv/sphereface/src/datasets/classification.py
+++ b/official/cv/sphereface/src/datasets/classification.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,8 +22,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as vision_C
-import mindspore.dataset.transforms.c_transforms as normal_C
+import mindspore.dataset.vision as vision_C
+import mindspore.dataset.transforms as normal_C
 from src.datasets.sampler import DistributedSampler
 from src.model_utils.matlab_cp2tform import get_similarity_transform_for_cv2
 import cv2
diff --git a/official/cv/squeezenet/src/dataset.py b/official/cv/squeezenet/src/dataset.py
index ac70267a35382f17210c1dd30efc28619e1968ef..407091415d7651ff17df719872a7d03b6571c26d 100644
--- a/official/cv/squeezenet/src/dataset.py
+++ b/official/cv/squeezenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import multiprocessing
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/official/cv/ssd/src/dataset.py b/official/cv/ssd/src/dataset.py
index 11a1945e2c8990629af65852fb351ec2195db910..a79f605f59d21d3877511c82eddb27b15ee7173d 100644
--- a/official/cv/ssd/src/dataset.py
+++ b/official/cv/ssd/src/dataset.py
@@ -398,12 +398,12 @@ def create_ssd_dataset(mindrecord_file, batch_size=32, device_num=1, rank=0,
         num_parallel_workers = cores
     ds = de.MindDataset(mindrecord_file, columns_list=["img_id", "image", "annotation"], num_shards=device_num,
                         shard_id=rank, num_parallel_workers=num_parallel_workers, shuffle=is_training)
-    decode = de.vision.c_transforms.Decode()
+    decode = de.vision.Decode()
     ds = ds.map(operations=decode, input_columns=["image"])
-    change_swap_op = de.vision.c_transforms.HWC2CHW()
-    normalize_op = de.vision.c_transforms.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
-                                                    std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
-    color_adjust_op = de.vision.c_transforms.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
+    change_swap_op = de.vision.HWC2CHW()
+    normalize_op = de.vision.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                       std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
+    color_adjust_op = de.vision.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)
     compose_map_func = (lambda img_id, image, annotation: preprocess_fn(img_id, image, annotation, is_training))
     if is_training:
         output_columns = ["image", "box", "label", "num_match"]
diff --git a/official/cv/ssim-ae/src/dataset.py b/official/cv/ssim-ae/src/dataset.py
index 54a415dc727bb42932c5afad8dd31b506f3b5223..aa783b2b645cfed369ca808234e7df078cedb7a1 100644
--- a/official/cv/ssim-ae/src/dataset.py
+++ b/official/cv/ssim-ae/src/dataset.py
@@ -20,7 +20,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c_trans
+import mindspore.dataset.vision as c_trans
 
 from model_utils.config import config as cfg
 from src.utils import read_img, get_file_list
diff --git a/official/cv/tinydarknet/src/dataset.py b/official/cv/tinydarknet/src/dataset.py
index cadeb0834d6f43495074c7304183a02b5ead2659..48cf893684a9be30c328d2ff6d83e765e7348c0b 100644
--- a/official/cv/tinydarknet/src/dataset.py
+++ b/official/cv/tinydarknet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from mindspore.communication.management import init, get_rank
 from src.model_utils.config import config as imagenet_cfg
diff --git a/official/cv/unet/src/data_loader.py b/official/cv/unet/src/data_loader.py
index 494c40643951d83272c0c11e3ba59bf241365681..16cd33e04c309011d06bfdc502a6668ab606b7ef 100644
--- a/official/cv/unet/src/data_loader.py
+++ b/official/cv/unet/src/data_loader.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import cv2
 import numpy as np
 from PIL import Image, ImageSequence
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c_vision
+import mindspore.dataset.vision as c_vision
 from mindspore.dataset.vision.utils import Inter
 from mindspore.communication.management import get_rank, get_group_size
 
diff --git a/official/cv/unet3d/src/dataset.py b/official/cv/unet3d/src/dataset.py
index b3b828e3c04fe16bb5db08f2472af49e39959d48..d2165808b59f44d92ab8e5ffbcdba6c0f24702f0 100644
--- a/official/cv/unet3d/src/dataset.py
+++ b/official/cv/unet3d/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import glob
 import numpy as np
 import mindspore.dataset as ds
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from src.model_utils.config import config
 from src.transform import Dataset, ExpandChannel, LoadData, Orientation, ScaleIntensityRange, RandomCropSamples, OneHot
 
diff --git a/official/cv/vgg16/src/dataset.py b/official/cv/vgg16/src/dataset.py
index 6fb95f9ac1080cfee23bf3d36df9fbbd38cfcef0..013205eb948d16297e1aeb75e8473386b872702e 100644
--- a/official/cv/vgg16/src/dataset.py
+++ b/official/cv/vgg16/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.utils.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/official/cv/vit/src/dataset.py b/official/cv/vit/src/dataset.py
index 305faebc2483ae1b917ecdb57c98a87b73c55929..cd63da9b100c91dcac3b7cce7a10d31df7247778 100644
--- a/official/cv/vit/src/dataset.py
+++ b/official/cv/vit/src/dataset.py
@@ -22,9 +22,8 @@ import numpy as np
 
 import mindspore as ms
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision.utils import Inter
 
 from .autoaugment import ImageNetPolicy
@@ -107,7 +106,7 @@ def create_dataset(dataset_path,
         ]
         if autoaugment:
             trans += [
-                P.ToPIL(),
+                C.ToPIL(),
                 ImageNetPolicy(),
                 ToNumpy(),
             ]
diff --git a/official/cv/warpctc/src/dataset.py b/official/cv/warpctc/src/dataset.py
index c5834880545fd75109e206f4fbe1396c4eae5ebd..08eb865907facef35841683a31156a381f74037b 100644
--- a/official/cv/warpctc/src/dataset.py
+++ b/official/cv/warpctc/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import numpy as np
 from PIL import Image
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as c
-import mindspore.dataset.vision.c_transforms as vc
+import mindspore.dataset.transforms as c
+import mindspore.dataset.vision as vc
 from src.model_utils.config import config
 
 
diff --git a/official/cv/xception/src/dataset.py b/official/cv/xception/src/dataset.py
index adcc10f872043439af6510a0ee6c5c5568159879..d5b76f8e1c49a197cd61fad3a73be3709a116c64 100644
--- a/official/cv/xception/src/dataset.py
+++ b/official/cv/xception/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/official/cv/yolov3_darknet53/src/yolo_dataset.py b/official/cv/yolov3_darknet53/src/yolo_dataset.py
index 684b1b6eb86d317ef700c91786f3744b0f689c6c..82f06c7f4166b91c9f6c50b4e991f1b004bb533e 100644
--- a/official/cv/yolov3_darknet53/src/yolo_dataset.py
+++ b/official/cv/yolov3_darknet53/src/yolo_dataset.py
@@ -156,7 +156,7 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank,
 
     yolo_dataset = COCOYoloDataset(root=image_dir, ann_file=anno_path, filter_crowd_anno=filter_crowd,
                                    remove_images_without_annotations=remove_empty_anno, is_training=is_training)
-    hwc_to_chw = ds.vision.c_transforms.HWC2CHW()
+    hwc_to_chw = ds.vision.HWC2CHW()
 
     config.dataset_size = len(yolo_dataset)
     cores = multiprocessing.cpu_count()
@@ -168,12 +168,12 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank,
                                 "gt_box1", "gt_box2", "gt_box3"]
         if device_num != 8:
             dataset = ds.GeneratorDataset(yolo_dataset, column_names=dataset_column_names, sampler=distributed_sampler)
-            dataset = dataset.map(operations=ds.vision.c_transforms.Decode(), input_columns=["image"])
+            dataset = dataset.map(operations=ds.vision.Decode(), input_columns=["image"])
             dataset = dataset.batch(batch_size, per_batch_map=multi_scale_trans, input_columns=dataset_column_names,
                                     num_parallel_workers=min(32, num_parallel_workers), drop_remainder=True)
         else:
             dataset = ds.GeneratorDataset(yolo_dataset, column_names=dataset_column_names, sampler=distributed_sampler)
-            dataset = dataset.map(operations=ds.vision.c_transforms.Decode(), input_columns=["image"])
+            dataset = dataset.map(operations=ds.vision.Decode(), input_columns=["image"])
             dataset = dataset.batch(batch_size, per_batch_map=multi_scale_trans, input_columns=dataset_column_names,
                                     num_parallel_workers=min(8, num_parallel_workers), drop_remainder=True)
     else:
diff --git a/official/cv/yolov3_resnet18/src/dataset.py b/official/cv/yolov3_resnet18/src/dataset.py
index 4a2651957a34fe8f8f4c453104b8270818a266ee..e5d3f391d2f5a2d4ee6e7e84eae28edd51f66695 100644
--- a/official/cv/yolov3_resnet18/src/dataset.py
+++ b/official/cv/yolov3_resnet18/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ import numpy as np
 from PIL import Image
 import mindspore.dataset as de
 from mindspore.mindrecord import FileWriter
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.config import ConfigYOLOV3ResNet18
 
 iter_cnt = 0
diff --git a/official/cv/yolov4/src/yolo_dataset.py b/official/cv/yolov4/src/yolo_dataset.py
index 1ddca778206713e47084a9ac7ad1a7e93857d417..f01e55021f6f4f233ff6da1bd9ff28c8692ad927 100644
--- a/official/cv/yolov4/src/yolo_dataset.py
+++ b/official/cv/yolov4/src/yolo_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import numpy as np
 from PIL import Image
 from pycocotools.coco import COCO
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 from model_utils.config import config
 from src.distributed_sampler import DistributedSampler
 from src.transforms import reshape_fn, MultiScaleTrans
diff --git a/official/cv/yolov5/src/transforms.py b/official/cv/yolov5/src/transforms.py
index 928dbab2c74a5fcbedee958ab43ad1b0b618badb..ac2352f2401202be26f71fb53699d5960af60698 100644
--- a/official/cv/yolov5/src/transforms.py
+++ b/official/cv/yolov5/src/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import copy
 import numpy as np
 from PIL import Image
 import cv2
-import mindspore.dataset as ds
+import mindspore.dataset.vision as vision
 
 
 def _rand(a=0., b=1.):
@@ -524,7 +524,7 @@ class MultiScaleTrans:
 
     def __call__(self, img, anno, input_size, mosaic_flag):
         if mosaic_flag[0] == 0:
-            img = ds.vision.py_transforms.Decode()(img)
+            img = vision.Decode(True)(img)
         img, anno = preprocess_fn(img, anno, self.config, input_size, self.device_num)
         return img, anno, np.array(img.shape[0:2])
 
diff --git a/official/cv/yolov5/src/yolo_dataset.py b/official/cv/yolov5/src/yolo_dataset.py
index f4c602c5123df740f27ed7fd12b81dbb2b309286..a0b8dbf112a6111398641933dcf37c5a521979dc 100644
--- a/official/cv/yolov5/src/yolo_dataset.py
+++ b/official/cv/yolov5/src/yolo_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -240,7 +240,7 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank,
                                    remove_images_without_annotations=remove_empty_anno, is_training=is_training)
     distributed_sampler = DistributedSampler(len(yolo_dataset), device_num, rank, shuffle=shuffle)
     yolo_dataset.size = len(distributed_sampler)
-    hwc_to_chw = ds.vision.c_transforms.HWC2CHW()
+    hwc_to_chw = ds.vision.HWC2CHW()
 
     config.dataset_size = len(yolo_dataset)
     cores = multiprocessing.cpu_count()
@@ -267,7 +267,7 @@ def create_yolo_dataset(image_dir, anno_path, batch_size, device_num, rank,
                               num_parallel_workers=min(4, num_parallel_workers), python_multiprocessing=False)
         mean = [m * 255 for m in [0.485, 0.456, 0.406]]
         std = [s * 255 for s in [0.229, 0.224, 0.225]]
-        dataset = dataset.map([ds.vision.c_transforms.Normalize(mean, std), hwc_to_chw],
+        dataset = dataset.map([ds.vision.Normalize(mean, std), hwc_to_chw],
                               num_parallel_workers=min(4, num_parallel_workers))
 
         def concatenate(images):
diff --git a/official/nlp/bert/src/dataset.py b/official/nlp/bert/src/dataset.py
index 2864d3e8c62a63ff911e0bbe540c80de9945471e..ece989012fe18432f826742ab3a2840ea22c89ec 100644
--- a/official/nlp/bert/src/dataset.py
+++ b/official/nlp/bert/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import math
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 
 
diff --git a/official/nlp/bert/src/finetune_data_preprocess.py b/official/nlp/bert/src/finetune_data_preprocess.py
index 44cd375f7ad072990bc1d199ef8251ee7561a161..3f9f682b56b2077fbb2ed8e749a22fb0578b6009 100644
--- a/official/nlp/bert/src/finetune_data_preprocess.py
+++ b/official/nlp/bert/src/finetune_data_preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ from lxml import etree
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 import mindspore.dataset.text as text
-import mindspore.dataset.transforms.c_transforms as ops
+import mindspore.dataset.transforms as ops
 from utils import convert_labels_to_index
 
 
diff --git a/official/nlp/bert_thor/pretrain_eval.py b/official/nlp/bert_thor/pretrain_eval.py
index 73c2369053a3ec2cfd02ab778fc3596df67f28d0..a4f824d7308e0572a80763522da8a11e310b67b9 100644
--- a/official/nlp/bert_thor/pretrain_eval.py
+++ b/official/nlp/bert_thor/pretrain_eval.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ from src.evaluation_config import cfg, bert_net_cfg
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.nn as nn
 from mindspore import context
 from mindspore.common.parameter import Parameter
diff --git a/official/nlp/bert_thor/src/dataset.py b/official/nlp/bert_thor/src/dataset.py
index 8e6dccec081678863437bdb94e1c46fd1054d09d..620b668078ac249a4746990af11a05b7a233c356 100644
--- a/official/nlp/bert_thor/src/dataset.py
+++ b/official/nlp/bert_thor/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ Data operations, will be used in run_pretrain.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 from .config import cfg
 
diff --git a/official/nlp/cpm/train.py b/official/nlp/cpm/train.py
index 8ce5edc27698585eade1eb08e5414f4d3b676efb..ca9b4d249dd81673703c4758edefd8befc4e287e 100644
--- a/official/nlp/cpm/train.py
+++ b/official/nlp/cpm/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ from mindspore.train.callback import TimeMonitor, ModelCheckpoint, CheckpointCon
 from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore.parallel import set_algo_parameters
 
 from src.cpm_train import CPMWithLoss, CPMTrainOneStepWithLossScaleCell, VirtualDatasetOneInputCell, \
diff --git a/official/nlp/dgu/src/utils.py b/official/nlp/dgu/src/utils.py
index 474bd2b7e2e5b74e33275f3032f00fefd3b88f64..0d8174f4e8fd5488693c9476c567a829111da0a5 100644
--- a/official/nlp/dgu/src/utils.py
+++ b/official/nlp/dgu/src/utils.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import os
 import numpy as np
 
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.nn as nn
 import mindspore.ops as P
 
diff --git a/official/nlp/duconv/src/dataset.py b/official/nlp/duconv/src/dataset.py
index be6752663dc190a2378ffdafabe77b964cefa7f9..4d8e69c99240aa1066d197b118e84ce08a73a4ec 100644
--- a/official/nlp/duconv/src/dataset.py
+++ b/official/nlp/duconv/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ Data loader
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 
 def create_dataset(batch_size, device_num=1, rank=0, num_workers=8, do_shuffle=True,
diff --git a/official/nlp/emotect/src/dataset.py b/official/nlp/emotect/src/dataset.py
index 7adeac9f32b6f1c4894f4db46c0622fd3e649f80..e8be1e68fc3f93f0e384659adb00290ee1bef94d 100644
--- a/official/nlp/emotect/src/dataset.py
+++ b/official/nlp/emotect/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ Data operations, will be used in run_pretrain.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 def create_classification_dataset(batch_size=1,
                                   repeat_count=1,
diff --git a/official/nlp/ernie/src/dataset.py b/official/nlp/ernie/src/dataset.py
index 15b6c61dc68c322d8ce191c6adbe84d57ef2c305..d672304a316e6ab75dfc1a10c2e8ab69ef8ec27d 100644
--- a/official/nlp/ernie/src/dataset.py
+++ b/official/nlp/ernie/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ Data operations, will be used in run_pretrain.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 from .config import cfg
 
diff --git a/official/nlp/fasttext/eval.py b/official/nlp/fasttext/eval.py
index d4ca9d23a407bccfcd03d09adb1ef3fcd4953430..92f28e027f3c646ab731352c3cea4443ac9c60ad 100644
--- a/official/nlp/fasttext/eval.py
+++ b/official/nlp/fasttext/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ from mindspore.common.tensor import Tensor
 from mindspore.train.model import Model
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 from mindspore import context
 from src.fasttext_model import FastText
 
diff --git a/official/nlp/gnmt_v2/src/dataset/load_dataset.py b/official/nlp/gnmt_v2/src/dataset/load_dataset.py
index 8af9fe84cce0c77fe6013ee8c84cf421e758af14..79a690dafcfec83c9899629e2c675d77582801ca 100644
--- a/official/nlp/gnmt_v2/src/dataset/load_dataset.py
+++ b/official/nlp/gnmt_v2/src/dataset/load_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Dataset loader to feed into model."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 
 def _load_dataset(input_files, batch_size, sink_mode=False,
diff --git a/official/nlp/gpt/src/dataset.py b/official/nlp/gpt/src/dataset.py
index a2b3ec6389de00a69f06d98611795147e1b44c9c..29c9fbbadc0e27990650ba18988a26213db91c9e 100644
--- a/official/nlp/gpt/src/dataset.py
+++ b/official/nlp/gpt/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ Create dataset for training and evaluating
 
 import os
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 
diff --git a/official/nlp/gru/src/dataset.py b/official/nlp/gru/src/dataset.py
index d63d5c2ccc643a876bff7f17f13ded95fda93a81..6de44d1c9ee31924588d8d15af57576a6ade6ffa 100644
--- a/official/nlp/gru/src/dataset.py
+++ b/official/nlp/gru/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 from model_utils.config import config
 
 de.config.set_seed(1)
diff --git a/official/nlp/mass/src/dataset/load_dataset.py b/official/nlp/mass/src/dataset/load_dataset.py
index 879ccf41c9f88e33910bf041d494e305f8a094a1..377b6123b1655bbe5950ad829a0ed89f7b574262 100644
--- a/official/nlp/mass/src/dataset/load_dataset.py
+++ b/official/nlp/mass/src/dataset/load_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Dataset loader to feed into model."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 
 def _load_dataset(input_files, batch_size, epoch_count=1,
diff --git a/official/nlp/pangu_alpha/src/dataset.py b/official/nlp/pangu_alpha/src/dataset.py
index b18fd397f21ab8296f8ee30b56ee1fe1122e6b12..8e803d82d11195347f00cfd440650a152b052f39 100644
--- a/official/nlp/pangu_alpha/src/dataset.py
+++ b/official/nlp/pangu_alpha/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ Create dataset for training and evaluating
 import os
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 from mindspore import context
diff --git a/official/nlp/prophetnet/src/dataset/load_dataset.py b/official/nlp/prophetnet/src/dataset/load_dataset.py
index e585f50b99907a016da55c31126f8a1cbf8f2cbe..84aaf94a760c3eeba081f4ac54b3499c4ba1f6cd 100644
--- a/official/nlp/prophetnet/src/dataset/load_dataset.py
+++ b/official/nlp/prophetnet/src/dataset/load_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Dataset loader to feed into model."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 
 def _load_dataset(input_files, batch_size, epoch_count=1,
diff --git a/official/nlp/tinybert/src/dataset.py b/official/nlp/tinybert/src/dataset.py
index 2b023f6990e0dcb84974cdd05dfee140626dcd32..62a0523c4296ba945a3d462c809a4c35ab339896 100644
--- a/official/nlp/tinybert/src/dataset.py
+++ b/official/nlp/tinybert/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import os
 from enum import Enum
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 
 class DataType(Enum):
diff --git a/official/nlp/transformer/eval.py b/official/nlp/transformer/eval.py
index 07a30c5d6623d2ebfb21eadf75a2d329d9bb9fb1..e3e6f367f7f688c9b9d3e9c97d8117ed455054d9 100644
--- a/official/nlp/transformer/eval.py
+++ b/official/nlp/transformer/eval.py
@@ -23,7 +23,7 @@ from mindspore.common.parameter import Parameter
 from mindspore.common.tensor import Tensor
 from mindspore.train.model import Model
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 from src.transformer_model import TransformerModel
 from src.model_utils.config import config
diff --git a/official/nlp/transformer/src/dataset.py b/official/nlp/transformer/src/dataset.py
index 551639072e394269c5cfb42ead58dee6785cdd77..4728db94e0737ead8e0b69107864b167db4ab11c 100644
--- a/official/nlp/transformer/src/dataset.py
+++ b/official/nlp/transformer/src/dataset.py
@@ -33,7 +33,7 @@ def create_transformer_dataset(rank_size=1, rank_id=0, do_shuffle="true", datase
                                           "target_sos_ids", "target_sos_mask",
                                           "target_eos_ids", "target_eos_mask"],
                             shuffle=(do_shuffle == "true"), num_shards=rank_size, shard_id=rank_id)
-        type_cast_op = de.transforms.c_transforms.TypeCast(ms.int32)
+        type_cast_op = de.transforms.transforms.TypeCast(ms.int32)
         ds = ds.map(operations=type_cast_op, input_columns="source_eos_ids")
         ds = ds.map(operations=type_cast_op, input_columns="source_eos_mask")
         ds = ds.map(operations=type_cast_op, input_columns="target_sos_ids")
diff --git a/research/audio/ctcmodel/src/dataset.py b/research/audio/ctcmodel/src/dataset.py
index e0cc5592624ae40ccf5b019fefd79ba5ceabaff9..117a3cc9da703191b5e7b903cf188f108701fdc9 100644
--- a/research/audio/ctcmodel/src/dataset.py
+++ b/research/audio/ctcmodel/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 """Dataset preprocessing."""
 
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 
diff --git a/research/audio/speech_transformer/src/dataset.py b/research/audio/speech_transformer/src/dataset.py
index 1ae5a5ee610eb1cd86983662da90362c721e3179..5354e39f6fe0b02469b204d4570078a2cb658608 100644
--- a/research/audio/speech_transformer/src/dataset.py
+++ b/research/audio/speech_transformer/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from pathlib import Path
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 import numpy as np
 
 from .model_utils.config import config
diff --git a/research/cv/3dcnn/src/dataset.py b/research/cv/3dcnn/src/dataset.py
index 0610c9b06818f4b471e4f5d53360822e93756ff8..1879601b01a2afb19c048c07feeaa26f32c38a73 100644
--- a/research/cv/3dcnn/src/dataset.py
+++ b/research/cv/3dcnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from nibabel import load as load_nii
 
 import mindspore.dataset as ds
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 
 
 def norm(image):
diff --git a/research/cv/APDrawingGAN/src/data/aligned_dataset.py b/research/cv/APDrawingGAN/src/data/aligned_dataset.py
index 894c6c9b755277149b389b85372671430148b323..5e912e722f19ebacae0150f69d5ba40d13a57d0d 100644
--- a/research/cv/APDrawingGAN/src/data/aligned_dataset.py
+++ b/research/cv/APDrawingGAN/src/data/aligned_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import os.path
 import random
 import csv
 import cv2
-import mindspore.dataset.vision.py_transforms as P_VISION
+import mindspore.dataset.vision as vision
 import mindspore.ops as ops
 from mindspore import Tensor
 from mindspore import dtype as mstype
@@ -139,8 +139,8 @@ def init_AB(opt, AB_path):
         (opt.loadSize, opt.loadSize), Image.BICUBIC)
     B = AB.crop((w2, 0, w, h)).resize(
         (opt.loadSize, opt.loadSize), Image.BICUBIC)
-    A = P_VISION.ToTensor()(A)
-    B = P_VISION.ToTensor()(B)
+    A = vision.ToTensor()(A)
+    B = vision.ToTensor()(B)
     w_offset = random.randint(
         0, max(0, opt.loadSize - opt.fineSize - 1))
     h_offset = random.randint(
@@ -151,8 +151,8 @@ def init_AB(opt, AB_path):
     B = B[:, h_offset:h_offset + opt.fineSize,
           w_offset:w_offset + opt.fineSize]
 
-    A = P_VISION.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A)
-    B = P_VISION.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(B)
+    A = vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)(A)
+    B = vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)(B)
     return A, B
 
 def regions_process(opt, regions, feats, item, A, B, input_nc, output_nc):
@@ -270,7 +270,7 @@ class AlignedDataset(BaseDataset):
             bgdir = self.opt.bg_dir
             bgpath = os.path.join(bgdir, basen[:-4] + '.png')
             im_bg = Image.open(bgpath)
-            mask2 = P_VISION.ToTensor()(im_bg)  # mask out background
+            mask2 = vision.ToTensor()(im_bg)  # mask out background
 
             if flipped:
                 mask2 = np.take(mask2, idx, axis=2)
diff --git a/research/cv/APDrawingGAN/src/data/base_dataset.py b/research/cv/APDrawingGAN/src/data/base_dataset.py
index cd0c4d8f4813257a50ee0acfe8acd4aeca6a38e6..f782c1cd25c3861b6227798d8c2796b4095144c4 100644
--- a/research/cv/APDrawingGAN/src/data/base_dataset.py
+++ b/research/cv/APDrawingGAN/src/data/base_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 """base dataset"""
 
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as py_trans
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.dataset.vision import Inter
 
 
@@ -45,17 +45,17 @@ def get_transform(opt):
     transform_list = []
     if opt.resize_or_crop == 'resize_and_crop':
         osize = [opt.loadSize, opt.fineSize]
-        transform_list.append(py_trans.Resize(osize, Inter.BICUBIC))  # PIL
-        transform_list.append(py_trans.RandomCrop(opt.fineSize))  # PIL
+        transform_list.append(vision.Resize(osize, Inter.BICUBIC))  # PIL
+        transform_list.append(vision.RandomCrop(opt.fineSize))  # PIL
     elif opt.resize_or_crop == 'crop':
-        transform_list.append(py_trans.RandomCrop(opt.fineSize))
+        transform_list.append(vision.RandomCrop(opt.fineSize))
     elif opt.resize_or_crop == 'scale_width':
         transform_list.append(
             lambda img: __scale_width(img, opt.fineSize))
     elif opt.resize_or_crop == 'scale_width_and_crop':
         transform_list.append(
             lambda img: __scale_width(img, opt.loadSize))
-        transform_list.append(py_trans.RandomCrop(opt.fineSize))
+        transform_list.append(vision.RandomCrop(opt.fineSize))
     elif opt.resize_or_crop == 'none':
         transform_list.append(
             lambda img: __adjust(img))
@@ -63,11 +63,11 @@ def get_transform(opt):
         raise ValueError('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop)
 
     if opt.isTrain and not opt.no_flip:
-        transform_list.append(py_trans.RandomHorizontalFlip())
+        transform_list.append(vision.RandomHorizontalFlip())
 
-    transform_list += [py_trans.ToTensor(),
-                       py_trans.Normalize((0.5, 0.5, 0.5),
-                                          (0.5, 0.5, 0.5))]
+    transform_list += [vision.ToTensor(),
+                       vision.Normalize((0.5, 0.5, 0.5),
+                                        (0.5, 0.5, 0.5), is_hwc=False)]
     return Compose(transform_list)
 
 # just modify the width and height to be multiple of 4
diff --git a/research/cv/AVA_cifar/src/datasets.py b/research/cv/AVA_cifar/src/datasets.py
index 6761f90902695c3bb25f112e0f0c599acc670d4c..df2d7a6d39ffe00acfbdb75acbb43f090e21f930 100644
--- a/research/cv/AVA_cifar/src/datasets.py
+++ b/research/cv/AVA_cifar/src/datasets.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,9 +16,9 @@
 
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as transforms
-import mindspore.dataset.transforms.c_transforms as C
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.transforms as data_trans
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.common import dtype as mstype
 from src.RandAugment import RandAugment
 from src.autoaugment import CIFAR10Policy
@@ -32,39 +32,39 @@ class CIFAR10Dataset():
 
         if not training:
             trsfm = Compose([
-                transforms.ToTensor(),
-                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+                vision.ToTensor(),
+                vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
             ])
         else:
             if not use_third_trsfm:
                 trsfm = Compose([
-                    transforms.ToPIL(),
-                    transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
-                    transforms.RandomColorAdjust(0.4, 0.4, 0.4, 0.4),
-                    transforms.RandomGrayscale(prob=0.2),
-                    transforms.RandomHorizontalFlip(),
-                    transforms.ToTensor(),
-                    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+                    vision.ToPIL(),
+                    vision.RandomResizedCrop(size=32, scale=(0.2, 1.)),
+                    vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.4),
+                    vision.RandomGrayscale(prob=0.2),
+                    vision.RandomHorizontalFlip(),
+                    vision.ToTensor(),
+                    vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
                 ])
             else:
                 if use_auto_augment:
                     trsfm = Compose([
-                        transforms.ToPIL(),
-                        transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
-                        transforms.RandomHorizontalFlip(),
+                        vision.ToPIL(),
+                        vision.RandomResizedCrop(size=32, scale=(0.2, 1.)),
+                        vision.RandomHorizontalFlip(),
                         CIFAR10Policy(),
-                        transforms.ToTensor(),
-                        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+                        vision.ToTensor(),
+                        vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
                     ])
                 else:
                     rand_augment = RandAugment(n=2, m=10)
                     trsfm = Compose([
-                        transforms.ToPIL(),
-                        transforms.RandomResizedCrop(size=32, scale=(0.2, 1.)),
-                        transforms.RandomHorizontalFlip(),
+                        vision.ToPIL(),
+                        vision.RandomResizedCrop(size=32, scale=(0.2, 1.)),
+                        vision.RandomHorizontalFlip(),
                         rand_augment,
-                        transforms.ToTensor(),
-                        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+                        vision.ToTensor(),
+                        vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
                     ])
 
         self.trsfm = trsfm
@@ -83,7 +83,7 @@ class CIFAR10Dataset():
                                     num_shards=self.device_num, shard_id=self.device_id)
 
         ds_ = ds_.map(input_columns=["image"], operations=self.trsfm)
-        typecast_op = C.TypeCast(mstype.int32)
+        typecast_op = data_trans.TypeCast(mstype.int32)
         ds_ = ds_.map(input_columns=["label"], operations=typecast_op)
         return ds_
 
diff --git a/research/cv/AVA_hpa/src/datasets.py b/research/cv/AVA_hpa/src/datasets.py
index 7225593e759602eb58c71c38c69155ff9dcc5374..9394785d15cac48d8f33df0f3dcf9ec37e5e3de7 100644
--- a/research/cv/AVA_hpa/src/datasets.py
+++ b/research/cv/AVA_hpa/src/datasets.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,9 @@ from collections import Counter
 from PIL import Image
 import numpy as np
 import pandas as pd
-import mindspore.dataset.vision.py_transforms as transforms
+import mindspore.dataset.vision as vision
 from mindspore.dataset import GeneratorDataset
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from src.RandAugment import RandAugment
 
 # split train val test = 4:1:5
@@ -40,35 +40,35 @@ class TransformOnImg:
         self.mode = mode
         rand_augment = RandAugment(n=2, m=10)
         self.trsfm_basic = Compose([
-            transforms.ToPIL(),
-            transforms.Resize(256),
-            transforms.RandomResizedCrop(size=224, scale=(0.2, 1.)),
-            transforms.RandomColorAdjust(0.4, 0.4, 0.4, 0),
-            transforms.RandomHorizontalFlip(),
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+            vision.ToPIL(),
+            vision.Resize(256),
+            vision.RandomResizedCrop(size=224, scale=(0.2, 1.)),
+            vision.RandomColorAdjust(0.4, 0.4, 0.4, 0),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
         ])
         self.trsfm_aux = Compose([
-            transforms.ToPIL(),
-            transforms.Resize(256),
-            transforms.RandomResizedCrop(size=224, scale=(0.2, 1.)),
-            transforms.RandomHorizontalFlip(),
+            vision.ToPIL(),
+            vision.Resize(256),
+            vision.RandomResizedCrop(size=224, scale=(0.2, 1.)),
+            vision.RandomHorizontalFlip(),
             rand_augment,
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+            vision.ToTensor(),
+            vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
         ])
         self.trsfm_train = Compose([
-            transforms.ToPIL(),
-            transforms.Resize(256),
-            transforms.RandomResizedCrop(size=224, scale=(0.2, 1.)),
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+            vision.ToPIL(),
+            vision.Resize(256),
+            vision.RandomResizedCrop(size=224, scale=(0.2, 1.)),
+            vision.ToTensor(),
+            vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
         ])
         self.trsfm = Compose([
-            transforms.ToPIL(),
-            transforms.Resize(224),
-            transforms.ToTensor(),
-            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+            vision.ToPIL(),
+            vision.Resize(224),
+            vision.ToTensor(),
+            vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=False),
         ])
 
     def __call__(self, img, use_aux=False):
diff --git a/research/cv/AlignedReID++/src/dataset_loader.py b/research/cv/AlignedReID++/src/dataset_loader.py
index a3dba42f76418590eae7a05e0396aef03927e7d0..34ac5bbc3ac75ec8f65aff5c87a30b628d549b32 100644
--- a/research/cv/AlignedReID++/src/dataset_loader.py
+++ b/research/cv/AlignedReID++/src/dataset_loader.py
@@ -1,5 +1,5 @@
 """get the dataset"""
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,8 +21,8 @@ import os.path as osp
 from PIL import Image
 
 from mindspore.dataset import GeneratorDataset
-from mindspore.dataset.transforms.py_transforms import Compose
-import mindspore.dataset.vision.py_transforms as P1
+from mindspore.dataset.transforms.transforms import Compose
+import mindspore.dataset.vision as vision
 
 from .import data_manager
 from .import samplers
@@ -159,9 +159,9 @@ def create_train_dataset(real_path, args, rank_id, rank_size):
     transform_train = [
         decode,
         Random2DTranslation(args.height, args.width),
-        P1.RandomHorizontalFlip(0.5),
-        P1.ToTensor(),
-        P1.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+        vision.RandomHorizontalFlip(0.5),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
         RandomErasing()
     ]
     transform_train = Compose(transform_train)
@@ -186,9 +186,9 @@ def create_test_dataset(real_path, args):
 
     transform_test = [
         decode,
-        P1.Resize((args.height, args.width)),
-        P1.ToTensor(),
-        P1.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        vision.Resize((args.height, args.width)),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
     ]
     transform_test = Compose(transform_test)
 
@@ -212,4 +212,4 @@ def create_test_dataset(real_path, args):
     galleryloader = galleryloader.batch(batch_size=32, drop_remainder=True)
 
     return queryloader, galleryloader, dataset.num_train_pids
-          
\ No newline at end of file
+
diff --git a/research/cv/AlignedReID/src/dataset.py b/research/cv/AlignedReID/src/dataset.py
index 18afc481d8f7cac8395e914efc065db125a313a0..9014b2467e379ca7c810279860b4c72eb292e4ab 100644
--- a/research/cv/AlignedReID/src/dataset.py
+++ b/research/cv/AlignedReID/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import pickle
 from collections import defaultdict
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import numpy as np
 from PIL import Image
 
diff --git a/research/cv/AlphaPose/infer/sdk/postprocess/src/dataset.py b/research/cv/AlphaPose/infer/sdk/postprocess/src/dataset.py
index 7265dfebad0c25a8b2e8d0b0800ee76aa33286a0..baa3310d4cfac57f7ddeea5029a72eeeb0826963 100644
--- a/research/cv/AlphaPose/infer/sdk/postprocess/src/dataset.py
+++ b/research/cv/AlphaPose/infer/sdk/postprocess/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.utils.transforms import fliplr_joints, get_affine_transform, affine_transform
 from src.config import config
 
diff --git a/research/cv/AlphaPose/src/dataset.py b/research/cv/AlphaPose/src/dataset.py
index 7265dfebad0c25a8b2e8d0b0800ee76aa33286a0..baa3310d4cfac57f7ddeea5029a72eeeb0826963 100644
--- a/research/cv/AlphaPose/src/dataset.py
+++ b/research/cv/AlphaPose/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.utils.transforms import fliplr_joints, get_affine_transform, affine_transform
 from src.config import config
 
diff --git a/research/cv/AttGAN/src/data.py b/research/cv/AttGAN/src/data.py
index 72cb898c27e024ccc39bfc0abcd1093250cccc77..75f6a68811dea2cb8b604cf965d606253e6f633a 100644
--- a/research/cv/AttGAN/src/data.py
+++ b/research/cv/AttGAN/src/data.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import numpy as np
 from PIL import Image
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms import py_transforms
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 
 from src.utils import DistributedSampler
 
@@ -40,11 +40,11 @@ class Custom:
 
         mean = [0.5, 0.5, 0.5]
         std = [0.5, 0.5, 0.5]
-        transform = [py_vision.ToPIL()]
-        transform.append(py_vision.Resize([128, 128]))
-        transform.append(py_vision.ToTensor())
-        transform.append(py_vision.Normalize(mean=mean, std=std))
-        transform = py_transforms.Compose(transform)
+        transform = [vision.ToPIL()]
+        transform.append(vision.Resize([128, 128]))
+        transform.append(vision.ToTensor())
+        transform.append(vision.Normalize(mean=mean, std=std, is_hwc=False))
+        transform = Compose(transform)
         self.transform = transform
         self.images = np.array([images]) if images.size == 1 else images[0:]
         self.labels = np.array([labels]) if images.size == 1 else labels[0:]
@@ -108,12 +108,12 @@ def get_loader(data_root, attr_path, selected_attrs, crop_size=170, image_size=1
 
     mean = [0.5, 0.5, 0.5]
     std = [0.5, 0.5, 0.5]
-    transform = [py_vision.ToPIL()]
-    transform.append(py_vision.CenterCrop((crop_size, crop_size)))
-    transform.append(py_vision.Resize([image_size, image_size]))
-    transform.append(py_vision.ToTensor())
-    transform.append(py_vision.Normalize(mean=mean, std=std))
-    transform = py_transforms.Compose(transform)
+    transform = [vision.ToPIL()]
+    transform.append(vision.CenterCrop((crop_size, crop_size)))
+    transform.append(vision.Resize([image_size, image_size]))
+    transform.append(vision.ToTensor())
+    transform.append(vision.Normalize(mean=mean, std=std), is_hwc=False)
+    transform = Compose(transform)
 
     dataset = CelebA(data_root, attr_path, image_size, mode, selected_attrs, transform, split_point=split_point)
 
diff --git a/research/cv/AttentionCluster/make_dataset.py b/research/cv/AttentionCluster/make_dataset.py
index af3b921409b9bea7254c061873a9d7000681477f..d8e864ca707515e276a64aed14ce01063af6dddc 100644
--- a/research/cv/AttentionCluster/make_dataset.py
+++ b/research/cv/AttentionCluster/make_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import numpy as np
 import mindspore
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c_trans
+import mindspore.dataset.vision as c_trans
 import mindspore.nn as nn
 import mindspore.context as context
 import mindspore.common as common
diff --git a/research/cv/AutoSlim/src/dataset.py b/research/cv/AutoSlim/src/dataset.py
index 82d921f7b02c861f96a7a00054abb431a1c65777..6f8d7e3d883c8b0c85b7014f2e175f646d7436b2 100644
--- a/research/cv/AutoSlim/src/dataset.py
+++ b/research/cv/AutoSlim/src/dataset.py
@@ -18,8 +18,7 @@ Produce the dataset
 import os
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c_vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 
 def data_transforms(args):
     """get transform of dataset"""
@@ -40,18 +39,18 @@ def data_transforms(args):
         std = [0.229, 0.224, 0.225]
         crop_scale = 0.25
         jitter_param = 0.4
-    train_transforms = [c_vision.RandomCropDecodeResize(224, scale=(crop_scale, 1.0)),
-                        c_vision.RandomColorAdjust(brightness=jitter_param,
-                                                   contrast=jitter_param,
-                                                   saturation=jitter_param),
-                        c_vision.RandomHorizontalFlip(),
-                        c_vision.HWC2CHW(),
+    train_transforms = [vision.RandomCropDecodeResize(224, scale=(crop_scale, 1.0)),
+                        vision.RandomColorAdjust(brightness=jitter_param,
+                                                 contrast=jitter_param,
+                                                 saturation=jitter_param),
+                        vision.RandomHorizontalFlip(),
+                        vision.HWC2CHW(),
                         ]
-    val_transforms = [py_vision.Decode(),
-                      py_vision.Resize(256),
-                      py_vision.CenterCrop(224),
-                      py_vision.ToTensor(),
-                      py_vision.Normalize(mean=mean, std=std)
+    val_transforms = [vision.Decode(True),
+                      vision.Resize(256),
+                      vision.CenterCrop(224),
+                      vision.ToTensor(),
+                      vision.Normalize(mean=mean, std=std, is_hwc=False)
                       ]
     return train_transforms, val_transforms
 
diff --git a/research/cv/CBAM/src/data.py b/research/cv/CBAM/src/data.py
index 960b517259f2833f5ff45ebc33f622ea539d8531..7a425b1b3df191f834729b96a22756d0a9e50e00 100644
--- a/research/cv/CBAM/src/data.py
+++ b/research/cv/CBAM/src/data.py
@@ -22,7 +22,7 @@ import numpy as np
 from mindspore.communication.management import get_rank, get_group_size
 import mindspore.dataset as de
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 
 def _get_rank_info(run_distribute):
diff --git a/research/cv/CGAN/src/dataset.py b/research/cv/CGAN/src/dataset.py
index 00e80558783a6a23214ca92c35a301a67dfc0b72..e2b687cb326513d4f04edf479ba2a8e5cc6c17a3 100644
--- a/research/cv/CGAN/src/dataset.py
+++ b/research/cv/CGAN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import numpy as np
 import mindspore.dataset as ds
 from mindspore.common import dtype as mstype
-import mindspore.dataset.transforms.c_transforms as CT
+import mindspore.dataset.transforms as CT
 from mindspore.communication.management import get_rank, get_group_size
 
 
diff --git a/research/cv/CMT/src/dataset.py b/research/cv/CMT/src/dataset.py
index a27c6ecb2d2ece99900b800efad9b7879ec0b80b..3bc5b667cef889d8469f74b56f3aac4ae0b57ad2 100644
--- a/research/cv/CMT/src/dataset.py
+++ b/research/cv/CMT/src/dataset.py
@@ -16,11 +16,10 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as pytrans
+import mindspore.dataset.transforms as C2
 
-from mindspore.dataset.transforms.py_transforms import Compose
-import mindspore.dataset.vision.c_transforms as C
+from mindspore.dataset.transforms.transforms import Compose
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_id=0, batch_size=128):
@@ -59,13 +58,13 @@ def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_
         ]
     else:
         trans = [
-            pytrans.Decode(),
-            pytrans.Resize(235),
-            pytrans.CenterCrop(224)
+            C.Decode(True),
+            C.Resize(235),
+            C.CenterCrop(224)
         ]
     trans += [
-        pytrans.ToTensor(),
-        pytrans.Normalize(mean=mean, std=std),
+        C.ToTensor(),
+        C.Normalize(mean=mean, std=std, is_hwc=False),
     ]
     trans = Compose(trans)
 
diff --git a/research/cv/CascadeRCNN/src/dataset.py b/research/cv/CascadeRCNN/src/dataset.py
index 657c9619ddd4e116898467114ada2748264a7b03..8fd59ebd1aca423a05286dabc1e96e458f9d0e29 100644
--- a/research/cv/CascadeRCNN/src/dataset.py
+++ b/research/cv/CascadeRCNN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ from numpy import random
 import cv2
 import mmcv
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.config import config
 
diff --git a/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py b/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
index ec2a889bad0d8337179b3329a1e5ff7c3b05117c..2371995e0f1f6749b2276a5a94ad859e8626d586 100644
--- a/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
+++ b/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import multiprocessing
 import numpy as np
 from PIL import Image
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from .distributed_sampler import DistributedSampler
 
 IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.tif', '.tiff']
diff --git a/research/cv/DBPN/src/dataset/dataset.py b/research/cv/DBPN/src/dataset/dataset.py
index a9c57d85023d663c0c7690974f5b8c64814a576e..a7f426930f29273d3c3da647056afb0f31208225 100644
--- a/research/cv/DBPN/src/dataset/dataset.py
+++ b/research/cv/DBPN/src/dataset/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import os
 import random
 import numpy as np
 from PIL import Image, ImageOps
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as V
 from mindspore import dataset as de, context
 from mindspore.context import ParallelMode
 from mindspore.communication import get_rank, get_group_size
@@ -173,10 +173,10 @@ def create_train_dataset(dataset, args):
         mean = [0.5, 0.5, 0.5]
         std = [0.5, 0.5, 0.5]
     trans = [
-        P.ToTensor(),
+        V.ToTensor(),
     ]
     if args.isgan:
-        trans.append(P.Normalize(mean=mean, std=std))
+        trans.append(V.Normalize(mean=mean, std=std, is_hwc=False))
     train_ds = train_ds.map(operations=trans, input_columns=['target_image'])
     train_ds = train_ds.map(operations=trans, input_columns=['input_image'])
     train_ds = train_ds.map(operations=trans, input_columns=['bicubic_image'])
@@ -215,9 +215,9 @@ def create_val_dataset(dataset, args):
     if not args.vgg:
         mean = [0.5, 0.5, 0.5]
         std = [0.5, 0.5, 0.5]
-    trans = [P.ToTensor()]
+    trans = [V.ToTensor()]
     if args.isgan:
-        trans.append(P.Normalize(mean=mean, std=std))
+        trans.append(V.Normalize(mean=mean, std=std, is_hwc=False))
     val_ds = val_ds.map(operations=trans, input_columns=["target_image"])
     val_ds = val_ds.map(operations=trans, input_columns=["input_image"])
     val_ds = val_ds.map(operations=trans, input_columns=["bicubic_image"])
diff --git a/research/cv/DDAG/eval.py b/research/cv/DDAG/eval.py
index cc907a049dc0da5a63d462be5a6855d9a33bdb6d..9a37a8e7c3edeb9d4d8ddf1b514695bed604930a 100644
--- a/research/cv/DDAG/eval.py
+++ b/research/cv/DDAG/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,12 +21,12 @@ import argparse
 import psutil
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from mindspore import context, load_checkpoint, load_param_into_net, DatasetHelper
 from mindspore.context import ParallelMode
 from mindspore.communication.management import init, get_group_size
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from src.dataset import SYSUDatasetGenerator, RegDBDatasetGenerator, TestData
 from src.dataset import process_gallery_sysu, process_query_sysu, process_test_regdb
 from src.evalfunc import test
@@ -246,9 +246,9 @@ if __name__ == "__main__":
     transform_test = Compose(
         [
             decode,
-            py_trans.Resize((args.img_h, args.img_w)),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            vision.Resize((args.img_h, args.img_w)),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         ]
     )
 
diff --git a/research/cv/DDAG/train.py b/research/cv/DDAG/train.py
index 7c35b73576c1af8fac5143b4c2743435b70ac9f1..ef393d6194dffb7a74b1f984689e68e3c90932f1 100644
--- a/research/cv/DDAG/train.py
+++ b/research/cv/DDAG/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,12 +24,12 @@ import numpy as np
 import mindspore as ms
 import mindspore.nn as nn
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from mindspore import context, load_checkpoint, load_param_into_net, save_checkpoint, DatasetHelper, Tensor
 from mindspore.context import ParallelMode
 from mindspore.communication import init, get_group_size, get_rank
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.nn import SGD, Adam
 
 
@@ -375,35 +375,35 @@ if __name__ == "__main__":
     transform_train_rgb = Compose(
         [
             decode,
-            py_trans.Pad(10),
-            py_trans.RandomCrop((args.img_h, args.img_w)),
-            py_trans.RandomGrayscale(prob=0.5),
-            py_trans.RandomHorizontalFlip(),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-            py_trans.RandomErasing(prob=0.5)
+            vision.Pad(10),
+            vision.RandomCrop((args.img_h, args.img_w)),
+            vision.RandomGrayscale(prob=0.5),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
+            vision.RandomErasing(prob=0.5)
         ]
     )
 
     transform_train_ir = Compose(
         [
             decode,
-            py_trans.Pad(10),
-            py_trans.RandomCrop((args.img_h, args.img_w)),
-            py_trans.RandomGrayscale(prob=0.5),
-            py_trans.RandomHorizontalFlip(),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-            py_trans.RandomErasing(prob=0.5)
+            vision.Pad(10),
+            vision.RandomCrop((args.img_h, args.img_w)),
+            vision.RandomGrayscale(prob=0.5),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
+            vision.RandomErasing(prob=0.5)
         ]
     )
 
     transform_test = Compose(
         [
             decode,
-            py_trans.Resize((args.img_h, args.img_w)),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            vision.Resize((args.img_h, args.img_w)),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         ]
     )
 
diff --git a/research/cv/DDRNet/src/data/imagenet.py b/research/cv/DDRNet/src/data/imagenet.py
index b4d01c858dbbd6c5a2ac935221cb6219bacc4f05..3f176dc0513be6786374e99001428f6414a2b780 100644
--- a/research/cv/DDRNet/src/data/imagenet.py
+++ b/research/cv/DDRNet/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.data.augment.auto_augment import rand_augment_transform
 from src.data.augment.mixup import Mixup
@@ -90,29 +89,29 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             aa_params["interpolation"] = _pil_interp(interpolation)
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             RandomResizedCropAndInterpolation(size=args.image_size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
                                               interpolation=interpolation),
-            py_vision.RandomHorizontalFlip(prob=0.5),
+            vision.RandomHorizontalFlip(prob=0.5),
         ]
         if isinstance(auto_augment, str) and auto_augment.startswith('rand'):
             transform_img += [rand_augment_transform(auto_augment, aa_params)]
         else:
-            transform_img += [py_vision.RandomColorAdjust(args.color_jitter, args.color_jitter, args.color_jitter)]
+            transform_img += [vision.RandomColorAdjust(args.color_jitter, args.color_jitter, args.color_jitter)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)]
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)]
         if args.re_prob > 0.:
             transform_img += [RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)]
     else:
         # test transform complete
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             Resize(int(args.image_size / args.crop_pct), interpolation="bicubic"),
-            py_vision.CenterCrop(image_size),
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)
+            vision.CenterCrop(image_size),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)
         ]
 
     transform_label = C.TypeCast(mstype.int32)
diff --git a/research/cv/DRNet/src/dataset.py b/research/cv/DRNet/src/dataset.py
index 1da4599948d9701c0983f29715ef61095ba9b514..04802ca69e60a7b096fbe4321cec2265952ae257 100644
--- a/research/cv/DRNet/src/dataset.py
+++ b/research/cv/DRNet/src/dataset.py
@@ -16,9 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision import Inter
 
 def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_id=0, batch_size=128):
@@ -55,16 +54,16 @@ def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_
         ds = de.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                    num_shards=device_num, shard_id=rank_id)
 
-    decode_p = py_vision.Decode()
-    resize_p = py_vision.Resize(int(256), interpolation=Inter.BILINEAR)
-    center_crop_p = py_vision.CenterCrop(224)
-    totensor = py_vision.ToTensor()
-    normalize_p = py_vision.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
-    trans = py_transforms.Compose([decode_p, resize_p, center_crop_p, totensor, normalize_p])
+    decode_p = vision.Decode(True)
+    resize_p = vision.Resize(int(256), interpolation=Inter.BILINEAR)
+    center_crop_p = vision.CenterCrop(224)
+    totensor = vision.ToTensor()
+    normalize_p = vision.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], is_hwc=False)
+    trans = C2.Compose([decode_p, resize_p, center_crop_p, totensor, normalize_p])
     type_cast_op = C2.TypeCast(mstype.int32)
     ds = ds.map(input_columns="image", operations=trans, num_parallel_workers=8)
     ds = ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=8)
 
     ds = ds.batch(batch_size, drop_remainder=True)
     return ds
-    
\ No newline at end of file
+
diff --git a/research/cv/DeepID/src/dataset.py b/research/cv/DeepID/src/dataset.py
index 7541a7ff4b5bee3233fb365eb6145c21a472dce4..328410d7ac8e88c800ceb31d2e95591f1af28abb 100644
--- a/research/cv/DeepID/src/dataset.py
+++ b/research/cv/DeepID/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,8 +20,8 @@ import csv
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.dataset as de
 
 
@@ -112,12 +112,12 @@ def get_loader(data_root, mode='train'):
     """Build and return a data loader."""
     mean = [0.5, 0.5, 0.5]
     std = [0.5, 0.5, 0.5]
-    transform = [py_vision.ToPIL()]
+    transform = [vision.ToPIL()]
     if mode == 'train':
-        transform.append(py_vision.RandomHorizontalFlip())
-    transform.append(py_vision.ToTensor())
-    transform.append(py_vision.Normalize(mean=mean, std=std))
-    transform = py_transforms.Compose(transform)
+        transform.append(vision.RandomHorizontalFlip())
+    transform.append(vision.ToTensor())
+    transform.append(vision.Normalize(mean=mean, std=std, is_hwc=False))
+    transform = data_trans.Compose(transform)
 
     dataset = Youtube(data_root, mode, transform=transform)
 
diff --git a/research/cv/EfficientDet_d0/src/dataset.py b/research/cv/EfficientDet_d0/src/dataset.py
index 537857e526c771d9d8b8587db2864288bb165c81..5c5f87644c56548a8c8c7c1a46550e7b5057da57 100644
--- a/research/cv/EfficientDet_d0/src/dataset.py
+++ b/research/cv/EfficientDet_d0/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import os
 import numpy as np
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from pycocotools.coco import COCO
 from src.config import config
diff --git a/research/cv/FDA-BNN/src/dataset.py b/research/cv/FDA-BNN/src/dataset.py
index 84177de83a97f63f69d0fcfa38020117a95225d8..c8f22b5aa390b2f6b578c58526852d417e65bab6 100755
--- a/research/cv/FDA-BNN/src/dataset.py
+++ b/research/cv/FDA-BNN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,12 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as vision
 
 
 # values that should remain constant
@@ -55,24 +53,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -122,16 +120,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
@@ -180,9 +178,9 @@ def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=Non
     random_horizontal_op = vision.RandomHorizontalFlip()
     resize_op = vision.Resize((resize_height, resize_width))  # interpolation default BILINEAR
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
-    normalize_op = vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+    normalize_op = vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=True)
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if training:
diff --git a/research/cv/FaceAttribute/preprocess.py b/research/cv/FaceAttribute/preprocess.py
index cbe80e3242d0442e57916ba82eaf090604676577..1a2ebb578c428d96af0c619d1043f3d14913ec5a 100644
--- a/research/cv/FaceAttribute/preprocess.py
+++ b/research/cv/FaceAttribute/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as F
-import mindspore.dataset.transforms.py_transforms as F2
+import mindspore.dataset.vision as F
+import mindspore.dataset.transforms as F2
 
 from model_utils.config import config
 
@@ -28,10 +28,10 @@ def eval_data_generator(args):
     dst_h = args.dst_h
     batch_size = 1
     #attri_num = args.attri_num
-    transform_img = F2.Compose([F.Decode(),
+    transform_img = F2.Compose([F.Decode(True),
                                 F.Resize((dst_w, dst_h)),
                                 F.ToTensor(),
-                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)])
 
     de_dataset = de.MindDataset(mindrecord_path + "0", columns_list=["image", "label"])
     de_dataset = de_dataset.map(input_columns="image", operations=transform_img, num_parallel_workers=args.workers,
diff --git a/research/cv/FaceAttribute/src/dataset_eval.py b/research/cv/FaceAttribute/src/dataset_eval.py
index 2167a4b2f548a3029bc7798ee5c0e90091e167b5..f4ed3cd9f235b428197a2fcde9c760aff0d7795f 100644
--- a/research/cv/FaceAttribute/src/dataset_eval.py
+++ b/research/cv/FaceAttribute/src/dataset_eval.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
 # ============================================================================
 """Face attribute dataset for eval"""
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as F
-import mindspore.dataset.transforms.py_transforms as F2
+import mindspore.dataset.vision as F
+import mindspore.dataset.transforms as F2
 
 __all__ = ['data_generator_eval']
 
@@ -27,10 +27,10 @@ def data_generator_eval(args):
     dst_h = args.dst_h
     batch_size = 1
     attri_num = args.attri_num
-    transform_img = F2.Compose([F.Decode(),
+    transform_img = F2.Compose([F.Decode(True),
                                 F.Resize((dst_w, dst_h)),
                                 F.ToTensor(),
-                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)])
 
     de_dataset = de.MindDataset(mindrecord_path + "0", columns_list=["image", "label"])
     de_dataset = de_dataset.map(input_columns="image", operations=transform_img, num_parallel_workers=args.workers,
diff --git a/research/cv/FaceAttribute/src/dataset_train.py b/research/cv/FaceAttribute/src/dataset_train.py
index bbd210a353f34ae88b2fe020fc11e770467aef12..79617bdb77873b106e46b836eab5957e4f645273 100644
--- a/research/cv/FaceAttribute/src/dataset_train.py
+++ b/research/cv/FaceAttribute/src/dataset_train.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
 # ============================================================================
 """Face attribute dataset for train"""
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as F
-import mindspore.dataset.transforms.py_transforms as F2
+import mindspore.dataset.vision as F
+import mindspore.dataset.transforms as F2
 
 __all__ = ['data_generator']
 
@@ -28,11 +28,11 @@ def data_generator(args):
     batch_size = args.per_batch_size
     attri_num = args.attri_num
     max_epoch = args.max_epoch
-    transform_img = F2.Compose([F.Decode(),
+    transform_img = F2.Compose([F.Decode(True),
                                 F.Resize((dst_w, dst_h)),
                                 F.RandomHorizontalFlip(prob=0.5),
                                 F.ToTensor(),
-                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+                                F.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)])
 
     de_dataset = de.MindDataset(mindrecord_path + "0", columns_list=["image", "label"], num_shards=args.world_size,
                                 shard_id=args.local_rank)
diff --git a/research/cv/FaceDetection/preprocess.py b/research/cv/FaceDetection/preprocess.py
index 8f9961b26e655ddcb53ed8e066cdbce5125c910a..205d315310f3852dbb7fbcbbb7d22addf13e0daf 100644
--- a/research/cv/FaceDetection/preprocess.py
+++ b/research/cv/FaceDetection/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 import os
 import numpy as np
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as V
 import mindspore.dataset as de
 from model_utils.config import config
 
@@ -30,7 +30,7 @@ class SingleScaleTrans_Infer:
 
     def __call__(self, imgs, ann, image_names, image_size, batch_info):
 
-        decode = P.Decode()
+        decode = V.Decode(True)
         ret_imgs = []
         ret_anno = []
 
diff --git a/research/cv/FaceDetection/src/data_preprocess.py b/research/cv/FaceDetection/src/data_preprocess.py
index 1eba9175890a25e63258fc9c1d2e2901716c994a..8c1e7ebaae726b0fce08dfebe12add3fd95420a2 100644
--- a/research/cv/FaceDetection/src/data_preprocess.py
+++ b/research/cv/FaceDetection/src/data_preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Face detection yolov3 data pre-process."""
 import multiprocessing
 import numpy as np
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as V
 import mindspore.dataset as de
 
 from src.transforms import RandomCropLetterbox, RandomFlip, HSVShift, ResizeLetterbox
@@ -31,10 +31,10 @@ class SingleScaleTrans:
     def __call__(self, imgs, ann, image_names, image_size, batch_info):
 
         size = self.resize
-        decode = P.Decode()
+        decode = V.Decode(True)
         resize_letter_box_op = ResizeLetterbox(input_dim=size)
 
-        to_tensor = P.ToTensor()
+        to_tensor = V.ToTensor()
         ret_imgs = []
         ret_anno = []
 
@@ -204,7 +204,7 @@ def preprocess_fn(image, annotation):
     anchors = config.anchors
     anchors_mask = config.anchors_mask
 
-    decode = P.Decode()
+    decode = V.Decode(True)
     random_crop_letter_box_op = RandomCropLetterbox(jitter=jitter, input_dim=size)
     random_flip_op = RandomFlip(flip)
     hsv_shift_op = HSVShift(hue, sat, val)
diff --git a/research/cv/FaceNet/src/LFWDataset.py b/research/cv/FaceNet/src/LFWDataset.py
index cda486727c534bf29c89177d24dcc4dddcd74ae5..7cc5444a1363cf18b531dd0e317f93a6f53f0f0a 100644
--- a/research/cv/FaceNet/src/LFWDataset.py
+++ b/research/cv/FaceNet/src/LFWDataset.py
@@ -17,8 +17,7 @@
 import os
 import numpy as np
 from PIL import Image
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as C
 import mindspore.dataset as de
 from mindspore.common import set_seed
 set_seed(0)
@@ -94,9 +93,9 @@ class LFWDataset:
 def get_lfw_dataloader(eval_root_dir, eval_pairs_path, eval_batch_size):
 
     data_transforms = [C.RandomResize(size=(224, 224)),
-                       P.ToPIL(),
-                       P.ToTensor(),
-                       P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
+                       C.ToPIL(),
+                       C.ToTensor(),
+                       C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)]
 
 
     face_dataset = LFWDataset(data_dir=eval_root_dir, pairs_path=eval_pairs_path)
diff --git a/research/cv/FaceNet/src/data_loader.py b/research/cv/FaceNet/src/data_loader.py
index 41968e5d5bdddf6178675737e005f47d09aac35e..abe10b2d304abeef134a3be7dab7fe5f188c811c 100644
--- a/research/cv/FaceNet/src/data_loader.py
+++ b/research/cv/FaceNet/src/data_loader.py
@@ -18,8 +18,7 @@ import os
 import csv
 import numpy as np
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as de
 
 
@@ -68,17 +67,17 @@ def get_dataloader(train_root_dir, valid_root_dir,
         'train': [
             C.RandomResize(size=(224, 224)),
             C.RandomHorizontalFlip(),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)],
         'train_valid': [
             C.RandomResize(size=(224, 224)),
             C.RandomHorizontalFlip(),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)],
         'valid': [
             C.RandomResize(size=(224, 224)),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]}
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)]}
 
 
     dataset_column_names = ["anc_img", "pos_img", "neg_img", "pos_class", "neg_class"]
diff --git a/research/cv/FaceNet/src/data_loader_generate_triplets_online.py b/research/cv/FaceNet/src/data_loader_generate_triplets_online.py
index b12537e8970d500dacc70844e82b92ffe122d505..337b200ab29f4d212c0a86a49dd3b18f36fda6f6 100644
--- a/research/cv/FaceNet/src/data_loader_generate_triplets_online.py
+++ b/research/cv/FaceNet/src/data_loader_generate_triplets_online.py
@@ -18,8 +18,7 @@ import os
 import numpy as np
 import pandas as pd
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as de
 
 
@@ -107,17 +106,17 @@ def get_dataloader(train_root_dir, valid_root_dir,
         'train': [
             C.RandomResize(size=(224, 224)),
             C.RandomHorizontalFlip(),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)],
         'train_valid': [
             C.RandomResize(size=(224, 224)),
             C.RandomHorizontalFlip(),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])],
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)],
         'valid': [
             C.RandomResize(size=(224, 224)),
-            P.ToTensor(),
-            P.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]}
+            C.ToTensor(),
+            C.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False)]}
 
     dataset_column_names = ["anc_img", "pos_img", "neg_img", "pos_class", "neg_class"]
 
diff --git a/research/cv/FaceQualityAssessment/src/dataset.py b/research/cv/FaceQualityAssessment/src/dataset.py
index 8ccf54460f927ebe5aaa3c064214c487f5fcac1d..149ae3410e7e6af1c409dd93de673923fb89b9e2 100644
--- a/research/cv/FaceQualityAssessment/src/dataset.py
+++ b/research/cv/FaceQualityAssessment/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import numpy as np
 from PIL import Image, ImageFile
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as F
+import mindspore.dataset.vision as F
 
 warnings.filterwarnings('ignore')
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/research/cv/FaceRecognition/eval.py b/research/cv/FaceRecognition/eval.py
index 6c5dba1399601e3a96d936005d11e7fa74f38435..0ca97bfce9eaf0f0c4dbb59e0ff3b7136c1a7dd5 100644
--- a/research/cv/FaceRecognition/eval.py
+++ b/research/cv/FaceRecognition/eval.py
@@ -21,8 +21,8 @@ import numpy as np
 import cv2
 
 from mindspore.common import dtype as mstype
-import mindspore.dataset.transforms.py_transforms as transforms
-import mindspore.dataset.vision.py_transforms as vision
+import mindspore.dataset.transforms as transforms
+import mindspore.dataset.vision as vision
 import mindspore.dataset as de
 from mindspore import Tensor, context
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
@@ -266,9 +266,8 @@ def run_eval(args):
     args.logger.info('INFO, graph compile finished, time used:{:.2f}s, start calculate img embedding'.
                      format(compile_time_used))
 
-    img_transforms = transforms.Compose([vision.ToTensor(), vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
-
-
+    img_transforms = transforms.Compose([vision.ToTensor(),
+        vision.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), is_hwc=False)])
 
     #for test images
     args.logger.info('INFO, start step1, calculate test img embedding, weight file = {}'.format(args.weight))
diff --git a/research/cv/FaceRecognition/src/dataset_factory.py b/research/cv/FaceRecognition/src/dataset_factory.py
index 0cf1ad0d39d88842b616551bc70488d8a98bef7b..64b0fb49f1988d53b50e79fd2245437bdd34ab9b 100644
--- a/research/cv/FaceRecognition/src/dataset_factory.py
+++ b/research/cv/FaceRecognition/src/dataset_factory.py
@@ -18,8 +18,8 @@ import math
 import numpy as np
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.py_transforms as F
-import mindspore.dataset.transforms.py_transforms as F2
+import mindspore.dataset.vision as F
+import mindspore.dataset.transforms as F2
 
 from src.custom_dataset import DistributedCustomSampler, CustomDataset
 
@@ -27,7 +27,7 @@ __all__ = ['get_de_dataset']
 
 def get_de_dataset(args):
     '''get_de_dataset'''
-    lbl_transforms = [F.ToType(np.int32)]
+    lbl_transforms = [F2.TypeCast(np.int32)]
     transform_label = F2.Compose(lbl_transforms)
 
     drop_remainder = True
@@ -35,7 +35,7 @@ def get_de_dataset(args):
     transforms = [F.ToPIL(),
                   F.RandomHorizontalFlip(),
                   F.ToTensor(),
-                  F.Normalize(mean=[0.5], std=[0.5])]
+                  F.Normalize(mean=[0.5], std=[0.5], is_hwc=False)]
     transform = F2.Compose(transforms)
     cache_path = os.path.join('cache', os.path.basename(args.data_dir), 'data_cache.pkl')
     if args.device_target == 'GPU' and args.local_rank != 0:
diff --git a/research/cv/FaceRecognitionForTracking/eval.py b/research/cv/FaceRecognitionForTracking/eval.py
index 110e37c509ad8b3de7e7987e7180c702ca96d3ec..aba67c8183b1a5ccf50ae1fc959f70e1525a2259 100644
--- a/research/cv/FaceRecognitionForTracking/eval.py
+++ b/research/cv/FaceRecognitionForTracking/eval.py
@@ -21,8 +21,8 @@ import numpy as np
 from PIL import Image
 from tqdm import tqdm
 
-import mindspore.dataset.vision.py_transforms as V
-import mindspore.dataset.transforms.py_transforms as T
+import mindspore.dataset.vision as V
+import mindspore.dataset.transforms as T
 from mindspore import context, Tensor
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 
@@ -98,7 +98,7 @@ def load_images(paths, batch_size=128):
     resize = V.Resize((96, 64))
     transform = T.Compose([
         V.ToTensor(),
-        V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
+        V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], is_hwc=False)])
     for i, _ in enumerate(paths):
         im = Image.open(paths[i])
         im = resize(im)
diff --git a/research/cv/FaceRecognitionForTracking/preprocess.py b/research/cv/FaceRecognitionForTracking/preprocess.py
index dad3c2795b19c6f91458d1864c02e39b11765d94..1401348119e1e7c2abf30e99a5ae466e045164f0 100644
--- a/research/cv/FaceRecognitionForTracking/preprocess.py
+++ b/research/cv/FaceRecognitionForTracking/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import argparse
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.vision.py_transforms as V
-import mindspore.dataset.transforms.py_transforms as T
+import mindspore.dataset.vision as V
+import mindspore.dataset.transforms as T
 
 
 def load_images(paths, batch_size=1):
@@ -28,7 +28,7 @@ def load_images(paths, batch_size=1):
     resize = V.Resize((96, 64))
     transform = T.Compose([
         V.ToTensor(),
-        V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
+        V.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5], is_hwc=False)])
     for i, _ in enumerate(paths):
         im = Image.open(paths[i])
         im = resize(im)
diff --git a/research/cv/FaceRecognitionForTracking/src/dataset.py b/research/cv/FaceRecognitionForTracking/src/dataset.py
index 5cea075028f7e3469aaad47ac9f71e30ad800955..3ffd357958f20889cfbc61d3fddf65792092e882 100644
--- a/research/cv/FaceRecognitionForTracking/src/dataset.py
+++ b/research/cv/FaceRecognitionForTracking/src/dataset.py
@@ -19,8 +19,8 @@ from PIL import ImageFile
 
 from mindspore import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as VC
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as VC
+import mindspore.dataset.transforms as C
 
 sys.path.append('./')
 sys.path.append('../data/')
diff --git a/research/cv/GENet_Res50/src/dataset.py b/research/cv/GENet_Res50/src/dataset.py
index 3f032c27b046c609146fc3876e4bd0915ed99a73..3396298b7cccce7594f6656326b8023f0c4da7c0 100644
--- a/research/cv/GENet_Res50/src/dataset.py
+++ b/research/cv/GENet_Res50/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/HRNetW48_cls/src/dataset.py b/research/cv/HRNetW48_cls/src/dataset.py
index 9d417f5455c6ace0e4986dc86c665a614286cf0b..43f0d27e804094cca90fe82d7d9abef53ee49c33 100644
--- a/research/cv/HRNetW48_cls/src/dataset.py
+++ b/research/cv/HRNetW48_cls/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/HireMLP/src/dataset.py b/research/cv/HireMLP/src/dataset.py
index a27c6ecb2d2ece99900b800efad9b7879ec0b80b..3bc5b667cef889d8469f74b56f3aac4ae0b57ad2 100644
--- a/research/cv/HireMLP/src/dataset.py
+++ b/research/cv/HireMLP/src/dataset.py
@@ -16,11 +16,10 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as pytrans
+import mindspore.dataset.transforms as C2
 
-from mindspore.dataset.transforms.py_transforms import Compose
-import mindspore.dataset.vision.c_transforms as C
+from mindspore.dataset.transforms.transforms import Compose
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_id=0, batch_size=128):
@@ -59,13 +58,13 @@ def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_
         ]
     else:
         trans = [
-            pytrans.Decode(),
-            pytrans.Resize(235),
-            pytrans.CenterCrop(224)
+            C.Decode(True),
+            C.Resize(235),
+            C.CenterCrop(224)
         ]
     trans += [
-        pytrans.ToTensor(),
-        pytrans.Normalize(mean=mean, std=std),
+        C.ToTensor(),
+        C.Normalize(mean=mean, std=std, is_hwc=False),
     ]
     trans = Compose(trans)
 
diff --git a/research/cv/HourNAS/src/dataset.py b/research/cv/HourNAS/src/dataset.py
index 663fa35f42b9db69c4170e4a9e4631f72de61fef..1ec1f68ffa8fe155ef9429822b456ca902e36644 100644
--- a/research/cv/HourNAS/src/dataset.py
+++ b/research/cv/HourNAS/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,12 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as vision
 
 # values that should remain constant
 DEFAULT_CROP_PCT = 0.875
@@ -54,24 +52,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -120,16 +118,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
@@ -175,10 +173,10 @@ def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=Non
     random_horizontal_op = vision.RandomHorizontalFlip()
     resize_op = vision.Resize((resize_height, resize_width))  # interpolation default BILINEAR
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
-    #normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
-    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.24703233, 0.24348505, 0.26158768))
+    #normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=True)
+    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.24703233, 0.24348505, 0.26158768), is_hwc=True)
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if training:
diff --git a/research/cv/ICNet/Res50V1_PRE/src/dataset.py b/research/cv/ICNet/Res50V1_PRE/src/dataset.py
index 3f032c27b046c609146fc3876e4bd0915ed99a73..3396298b7cccce7594f6656326b8023f0c4da7c0 100644
--- a/research/cv/ICNet/Res50V1_PRE/src/dataset.py
+++ b/research/cv/ICNet/Res50V1_PRE/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/ICNet/eval.py b/research/cv/ICNet/eval.py
index c48be4d8e01cce5dbc4b5bbf23f1e70a8af24d41..21d0a21e0c357df24ec86ea6aa4334b80b35fbd6 100644
--- a/research/cv/ICNet/eval.py
+++ b/research/cv/ICNet/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import mindspore.ops as ops
 from mindspore import load_param_into_net
 from mindspore import load_checkpoint
 from mindspore import Tensor
-import mindspore.dataset.vision.py_transforms as transforms
+import mindspore.dataset.vision as vision
 
 parser = argparse.ArgumentParser(description="ICNet Evaluation")
 parser.add_argument("--dataset_path", type=str, default="/data/cityscapes/", help="dataset path")
@@ -98,8 +98,8 @@ class Evaluator:
 
     def _img_transform(self, image):
         """img_transform"""
-        to_tensor = transforms.ToTensor()
-        normalize = transforms.Normalize([.485, .456, .406], [.229, .224, .225])
+        to_tensor = vision.ToTensor()
+        normalize = vision.Normalize([.485, .456, .406], [.229, .224, .225], is_hwc=False)
         image = to_tensor(image)
         image = normalize(image)
         return image
diff --git a/research/cv/ICNet/src/cityscapes_mindrecord.py b/research/cv/ICNet/src/cityscapes_mindrecord.py
index 0ccc783ddfa95b3c10746301bac5eb5161e63395..a3acc652e3f3bb4bba46cd397bf6896da8500cdf 100644
--- a/research/cv/ICNet/src/cityscapes_mindrecord.py
+++ b/research/cv/ICNet/src/cityscapes_mindrecord.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,8 +22,8 @@ from PIL import ImageOps
 from PIL import ImageFilter
 import mindspore.dataset as de
 from mindspore.mindrecord import FileWriter
-import mindspore.dataset.vision.py_transforms as transforms
-import mindspore.dataset.transforms.py_transforms as tc
+import mindspore.dataset.vision as transforms
+import mindspore.dataset.transforms as tc
 
 
 def _get_city_pairs(folder, split='train'):
@@ -103,7 +103,8 @@ def _sync_transform(img, mask):
 
 def _class_to_index(mask):
     """class to index"""
-    # reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
+    # Reference:
+    # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py
     _key = np.array([-1, -1, -1, -1, -1, -1,
                      -1, -1, 0, 1, -1, -1,
                      2, 3, 4, -1, -1, -1,
@@ -136,7 +137,7 @@ def _img_mask_transform(img, mask):
     """img and mask transform"""
     input_transform = tc.Compose([
         transforms.ToTensor(),
-        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
+        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)])
     img = _img_transform(img)
     mask = _mask_transform(mask)
     img = input_transform(img)
diff --git a/research/cv/ICNet/src/visualize.py b/research/cv/ICNet/src/visualize.py
index 5748181e990959c4cac8a9abd492ab5c184a8732..61adc9d70b2714c979485ed4b0fd4246d4c582ee 100644
--- a/research/cv/ICNet/src/visualize.py
+++ b/research/cv/ICNet/src/visualize.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import mindspore.ops as ops
 from mindspore import Tensor
 from mindspore import load_param_into_net
 from mindspore import load_checkpoint
-import mindspore.dataset.vision.py_transforms as transforms
+import mindspore.dataset.vision as vision
 from models.icnet import ICNet
 
 __all__ = ['get_color_palette', 'set_img_color',
@@ -30,8 +30,8 @@ __all__ = ['get_color_palette', 'set_img_color',
 
 def _img_transform(img):
     """img_transform"""
-    totensor = transforms.ToTensor()
-    normalize = transforms.Normalize([.485, .456, .406], [.229, .224, .225])
+    totensor = vision.ToTensor()
+    normalize = vision.Normalize([.485, .456, .406], [.229, .224, .225], is_hwc=False)
     img = totensor(img)
     img = normalize(img)
     return img
diff --git a/research/cv/ISyNet/src/dataset.py b/research/cv/ISyNet/src/dataset.py
index 06d86d1c943fe35b8636f39a937b68cf8cee2445..940c0acc4df1341eea65da51f39bd7a6dea6a742 100644
--- a/research/cv/ISyNet/src/dataset.py
+++ b/research/cv/ISyNet/src/dataset.py
@@ -19,9 +19,8 @@ import os
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision import Inter
 from mindspore.communication.management import init, get_rank, get_group_size
 from src.model_utils.config import config
@@ -83,7 +82,7 @@ def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32, target=
     ]
     if autoaugment:
         trans += [
-            P.ToPIL(),
+            C.ToPIL(),
             ImageNetPolicy(),
             ToNumpy(),
         ]
@@ -171,7 +170,7 @@ def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target=
                 ]
             if autoaugment:
                 trans += [
-                    P.ToPIL(),
+                    C.ToPIL(),
                     ImageNetPolicy(),
                     ToNumpy(),
                     ]
@@ -267,7 +266,7 @@ def create_dataset_pynative(dataset_path, do_train, repeat_num=1, batch_size=32,
             ]
         if autoaugment:
             trans += [
-                P.ToPIL(),
+                C.ToPIL(),
                 ImageNetPolicy(),
                 ToNumpy(),
                 ]
@@ -351,7 +350,7 @@ def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target=
             ]
         if autoaugment:
             trans += [
-                P.ToPIL(),
+                C.ToPIL(),
                 ImageNetPolicy(),
                 ToNumpy(),
                 ]
@@ -437,7 +436,7 @@ def create_dataset4(dataset_path, do_train, repeat_num=1, batch_size=32, target=
                 ]
         if autoaugment:
             trans += [
-                P.ToPIL(),
+                C.ToPIL(),
                 ImageNetPolicy(),
                 ToNumpy(),
                 ]
diff --git a/research/cv/ISyNet/src/transform.py b/research/cv/ISyNet/src/transform.py
index 400e8b2116ed851a08eaaaabe697c9ab75a4a7da..d3ea07319c73ac1b217579ec44de7e1621cc2a28 100644
--- a/research/cv/ISyNet/src/transform.py
+++ b/research/cv/ISyNet/src/transform.py
@@ -16,7 +16,7 @@
 random augment class
 """
 import numpy as np
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as vision
 from src import transform_utils
 
 IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
@@ -37,9 +37,9 @@ class RandAugment:
         # assert the imgs object are pil_images
         ret_imgs = []
         ret_labels = []
-        py_to_pil_op = P.ToPIL()
-        to_tensor = P.ToTensor()
-        normalize_op = P.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+        py_to_pil_op = vision.ToPIL()
+        to_tensor = vision.ToTensor()
+        normalize_op = vision.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
         rand_augment_ops = transform_utils.rand_augment_transform(self.config_str, self.hparams)
         for i, image in enumerate(imgs):
             img_pil = py_to_pil_op(image)
diff --git a/research/cv/ISyNet/utils/preprocess_310.py b/research/cv/ISyNet/utils/preprocess_310.py
index 38c855977cc44f40e8c1bdae1d67b56b6041513a..897d2e5d4b713236b5da2b109bd68ce702bf42c3 100644
--- a/research/cv/ISyNet/utils/preprocess_310.py
+++ b/research/cv/ISyNet/utils/preprocess_310.py
@@ -18,8 +18,8 @@ import argparse
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 PARSER = argparse.ArgumentParser(description="ISyNet preprocess")
 PARSER.add_argument("--data_path", type=str, required=True, help="data path.")
diff --git a/research/cv/Inception-v2/src/dataset.py b/research/cv/Inception-v2/src/dataset.py
index 59a03b8e7f62a2b7a9c562b709f5836cd8945e4b..1ebd2a9f58c3fbac647d5068752fde4545eecddc 100644
--- a/research/cv/Inception-v2/src/dataset.py
+++ b/research/cv/Inception-v2/src/dataset.py
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 
 def create_dataset_cifar10(dataset_path, cfg, training, repeat_num=1):
diff --git a/research/cv/JDE/eval_detect.py b/research/cv/JDE/eval_detect.py
index 9425f78caca267f6a7615263f74993239195b6d7..f4487ec2de663a276102a7d7c08be08d2efad908 100644
--- a/research/cv/JDE/eval_detect.py
+++ b/research/cv/JDE/eval_detect.py
@@ -23,7 +23,7 @@ from mindspore import dataset as ds
 from mindspore.common import set_seed
 from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
-from mindspore.dataset.vision import py_transforms as PY
+from mindspore.dataset.vision import transforms as vision
 from mindspore.train.serialization import load_checkpoint
 
 from cfg.config import config as default_config
@@ -69,7 +69,7 @@ def main(
         opt.dataset_root,
         test_paths,
         augment=False,
-        transforms=PY.ToTensor(),
+        transforms=vision.ToTensor(),
         config=opt,
     )
 
diff --git a/research/cv/JDE/train.py b/research/cv/JDE/train.py
index da70b37187a1fb6573c6841ec91f1d96cd4560d6..dbce644f2c8beba27017d8bdd7a77386d8749d6d 100644
--- a/research/cv/JDE/train.py
+++ b/research/cv/JDE/train.py
@@ -25,7 +25,7 @@ from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
 from mindspore.communication.management import init
 from mindspore.context import ParallelMode
-from mindspore.dataset.vision import py_transforms as PY
+from mindspore.dataset.vision import transforms as vision
 from mindspore.train.callback import CheckpointConfig
 from mindspore.train.callback import LossMonitor
 from mindspore.train.callback import ModelCheckpoint
@@ -177,7 +177,7 @@ if __name__ == "__main__":
         trainset_paths,
         k_max=config.k_max,
         augment=True,
-        transforms=PY.ToTensor(),
+        transforms=vision.ToTensor(),
         config=config,
     )
 
diff --git a/research/cv/LightCNN/src/dataset.py b/research/cv/LightCNN/src/dataset.py
index 46192a0a060a5a8e552083e4afea8896d50b7f7e..110d14dd0ad500686b57132f74bbd413e9638c1d 100644
--- a/research/cv/LightCNN/src/dataset.py
+++ b/research/cv/LightCNN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import cv2
 import numpy as np
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 
 
 def img_loader(path):
@@ -67,19 +67,19 @@ def create_dataset(mode, data_url, data_list, batch_size, resize_size=144,
     if mode == 'Train':
         shuffle = True
         drop_last = True
-        image_ops = Compose([py_vision.ToPIL(),
-                             py_vision.Resize(resize_size),
-                             py_vision.RandomCrop(input_size),
-                             py_vision.RandomHorizontalFlip(),
-                             py_vision.ToTensor()])
+        image_ops = Compose([vision.ToPIL(),
+                             vision.Resize(resize_size),
+                             vision.RandomCrop(input_size),
+                             vision.RandomHorizontalFlip(),
+                             vision.ToTensor()])
 
     elif mode == 'Val':
         shuffle = False
         drop_last = False
-        image_ops = Compose([py_vision.ToPIL(),
-                             py_vision.Resize(resize_size),
-                             py_vision.CenterCrop(input_size),
-                             py_vision.ToTensor()])
+        image_ops = Compose([vision.ToPIL(),
+                             vision.Resize(resize_size),
+                             vision.CenterCrop(input_size),
+                             vision.ToTensor()])
 
     dataset_generator = ImageList(root=data_url, fileList=data_list)
 
diff --git a/research/cv/MGN/src/dataset.py b/research/cv/MGN/src/dataset.py
index 2d4e8696fa2a675d8e7571e03f99ef5060081aaa..f4ce073a52334349fa18ee8739e421ae2ee97976 100644
--- a/research/cv/MGN/src/dataset.py
+++ b/research/cv/MGN/src/dataset.py
@@ -20,7 +20,7 @@ import random
 import re
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import numpy as np
 from PIL import Image
 
diff --git a/research/cv/MVD/eval.py b/research/cv/MVD/eval.py
index 8151b8e6c2da15874c55c86a5d802f4ac4cbd3ce..af9c2a75da42d6223af994d16a2facdad966adb9 100644
--- a/research/cv/MVD/eval.py
+++ b/research/cv/MVD/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@ import argparse
 import numpy as np
 import psutil
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from mindspore import context, load_checkpoint, load_param_into_net, DatasetHelper
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 
 from PIL import Image
 
@@ -216,9 +216,9 @@ if __name__ == "__main__":
     transform_test = Compose(
         [
             decode,
-            py_trans.Resize((args.img_h, args.img_w)),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            vision.Resize((args.img_h, args.img_w)),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         ]
     )
 
diff --git a/research/cv/MVD/train.py b/research/cv/MVD/train.py
index ea0d5f1c4dc8e0606d3402b9e4aff8adebb129b9..c6ad55320a7e975a5ba8071146ae10d3ce4040a6 100644
--- a/research/cv/MVD/train.py
+++ b/research/cv/MVD/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,13 +24,13 @@ from tqdm import tqdm
 import mindspore as ms
 import mindspore.ops as P
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from mindspore import context, load_checkpoint, \
     load_param_into_net, save_checkpoint, DatasetHelper
 from mindspore.context import ParallelMode
 from mindspore.communication.management import init, get_group_size
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.nn import SGD, Adam
 from mindspore import nn
 
@@ -289,33 +289,33 @@ if __name__ == "__main__":
     transform_train_rgb = Compose(
         [
             decode,
-            py_trans.RandomCrop((args.img_h, args.img_w)),
-            py_trans.RandomGrayscale(prob=0.5),
-            py_trans.RandomHorizontalFlip(),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-            py_trans.RandomErasing(prob=0.5)
+            vision.RandomCrop((args.img_h, args.img_w)),
+            vision.RandomGrayscale(prob=0.5),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
+            vision.RandomErasing(prob=0.5)
         ]
     )
 
     transform_train_ir = Compose(
         [
             decode,
-            py_trans.RandomCrop((args.img_h, args.img_w)),
-            # py_trans.RandomGrayscale(prob=0.5),
-            py_trans.RandomHorizontalFlip(),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-            py_trans.RandomErasing(prob=0.5)
+            vision.RandomCrop((args.img_h, args.img_w)),
+            # vision.RandomGrayscale(prob=0.5),
+            vision.RandomHorizontalFlip(),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False),
+            vision.RandomErasing(prob=0.5)
         ]
     )
 
     transform_test = Compose(
         [
             decode,
-            py_trans.Resize((args.img_h, args.img_w)),
-            py_trans.ToTensor(),
-            py_trans.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+            vision.Resize((args.img_h, args.img_w)),
+            vision.ToTensor(),
+            vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         ]
     )
 
diff --git a/research/cv/ManiDP/src/dataset.py b/research/cv/ManiDP/src/dataset.py
index 220fd0147b0b7b15aed46db0c75c529a88e11648..58aa330bd7a67a17b2cdca639311804088c343ed 100644
--- a/research/cv/ManiDP/src/dataset.py
+++ b/research/cv/ManiDP/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,12 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as vision
 
 
 # values that should remain constant
@@ -55,24 +53,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -121,16 +119,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
@@ -178,7 +176,7 @@ def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=Non
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
     normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2471, 0.2435, 0.2616))
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if training:
diff --git a/research/cv/MaskedFaceRecognition/test_dataset.py b/research/cv/MaskedFaceRecognition/test_dataset.py
index 33720e01cb79d70af5dce434ab26edef59901d67..f35c23b7e4799e3ae5a217c11c6dd767ef93fedf 100644
--- a/research/cv/MaskedFaceRecognition/test_dataset.py
+++ b/research/cv/MaskedFaceRecognition/test_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ create train or eval dataset.
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as CV
+import mindspore.dataset.transforms as C
 from config import config
 from dataset.Dataset import Dataset
 
diff --git a/research/cv/MaskedFaceRecognition/train_dataset.py b/research/cv/MaskedFaceRecognition/train_dataset.py
index 0d9d9664f79b12f3387727a5e6560fbffb90b301..c3f8e5d926f8342ee294d7e6f8d8337cfce62895 100644
--- a/research/cv/MaskedFaceRecognition/train_dataset.py
+++ b/research/cv/MaskedFaceRecognition/train_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ create train or eval dataset.
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as CV
+import mindspore.dataset.transforms as C
 from config import config
 from dataset.MGDataset import DistributedPKSampler, MGDataset
 
diff --git a/research/cv/NFNet/src/data/imagenet.py b/research/cv/NFNet/src/data/imagenet.py
index 8edae82ebb5236c4bd3309eaca972ef32582628b..213425e7e21ad4c30e80d7e15dac6641cad3532f 100644
--- a/research/cv/NFNet/src/data/imagenet.py
+++ b/research/cv/NFNet/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import _pil_interp, rand_augment_transform
@@ -95,12 +94,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(input_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.BICUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/Neighbor2Neighbor/src/dataset.py b/research/cv/Neighbor2Neighbor/src/dataset.py
index cf438509a29799b34b8d5cf088a141c936cf958d..5cda4531b6eb216e07030f036067b216b41233f1 100644
--- a/research/cv/Neighbor2Neighbor/src/dataset.py
+++ b/research/cv/Neighbor2Neighbor/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import glob
 import numpy as np
 import PIL.Image as Image
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 class DataLoader_Imagenet_val:
     '''DataLoader_Imagenet_val'''
diff --git a/research/cv/PAMTRI/MultiTaskNet/preprocess.py b/research/cv/PAMTRI/MultiTaskNet/preprocess.py
index 26bd80b310585d45ba16e8454790510d180484eb..dc872e1379feb32d4690ad9d9fb02b7a3589cbaa 100644
--- a/research/cv/PAMTRI/MultiTaskNet/preprocess.py
+++ b/research/cv/PAMTRI/MultiTaskNet/preprocess.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import argparse
 from pathlib import Path
 import mindspore.dataset as ds
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 from src.dataset.data_manager import DatasetManager
 from src.dataset.data_loader import ImageDataset
 from src.dataset.transforms import Compose_Keypt, Resize_Keypt, ToTensor_Keypt, Normalize_Keypt
diff --git a/research/cv/PAMTRI/MultiTaskNet/src/dataset/dataset.py b/research/cv/PAMTRI/MultiTaskNet/src/dataset/dataset.py
index ef92874e68141e6db1416cdf636e384124ca1f48..e34b6bbf88a3552ce63db5c34482c8e674583077 100644
--- a/research/cv/PAMTRI/MultiTaskNet/src/dataset/dataset.py
+++ b/research/cv/PAMTRI/MultiTaskNet/src/dataset/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 import os
 import mindspore.dataset as ds
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 
 from .data_manager import DatasetManager
 from .data_loader import ImageDataset
diff --git a/research/cv/PAMTRI/MultiTaskNet/src/dataset/transforms.py b/research/cv/PAMTRI/MultiTaskNet/src/dataset/transforms.py
index e57f28f2a48d668a2e0fc603811f944153a3f3c5..303fb431454094a403a855d8939ceed0ac2558f2 100644
--- a/research/cv/PAMTRI/MultiTaskNet/src/dataset/transforms.py
+++ b/research/cv/PAMTRI/MultiTaskNet/src/dataset/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import random
 import collections
 import cv2
 
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 
 if sys.version_info < (3, 3):
     Iterable = collections.Iterable
@@ -54,7 +54,7 @@ class ToTensor_Keypt():
     In the other cases, tensors are returned without scaling.
     """
     def __init__(self):
-        self.to_tensor = py_vision.ToTensor()
+        self.to_tensor = vision.ToTensor()
 
     def __call__(self, img, vkeypt):
         """
@@ -104,7 +104,7 @@ class Normalize_Keypt():
             self.mean.extend([mean_avg] * (channels_new - channels_orig))
             self.std.extend([std_avg] * (channels_new - channels_orig))
 
-        normalize = py_vision.Normalize(self.mean, self.std)
+        normalize = vision.Normalize(self.mean, self.std, is_hwc=False)
         return normalize(tensor)
 
 class Resize_Keypt():
diff --git a/research/cv/PAMTRI/PoseEstNet/src/dataset/dataset.py b/research/cv/PAMTRI/PoseEstNet/src/dataset/dataset.py
index 75a78a144868d550ba72a84f00eb67c8cd7e0f38..26031cd9460e9557284afc29afe637d7e90be427 100644
--- a/research/cv/PAMTRI/PoseEstNet/src/dataset/dataset.py
+++ b/research/cv/PAMTRI/PoseEstNet/src/dataset/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import copy
 import json
 from pathlib import Path
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 
 from .veri import VeRiDataset
 
@@ -42,8 +42,8 @@ def create_dataset(cfg, data_dir, is_train=True):
                 "joints", "joints_vis"], num_parallel_workers=1, shuffle=False, num_shards=1, shard_id=0)
 
     trans = Compose([
-        py_vision.ToTensor(),
-        py_vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+        vision.ToTensor(),
+        vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
     ])
 
     dataset = dataset.map(operations=trans, input_columns="input", num_parallel_workers=8)
diff --git a/research/cv/PAMTRI/PoseEstNet/trans.py b/research/cv/PAMTRI/PoseEstNet/trans.py
index 5f6c377c8fa8884766ce5b4d0b34b2ddcc3529cb..cb5a4b4abfa052ec6a9318f83ece9c796355b330 100644
--- a/research/cv/PAMTRI/PoseEstNet/trans.py
+++ b/research/cv/PAMTRI/PoseEstNet/trans.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@ python trans.py --cfg config.yaml --ckpt_path Your.ckpt --data_dir datapath
 import os
 import argparse
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 
 from mindspore import context
-from mindspore.dataset.transforms.py_transforms import Compose
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 
 from src.model import get_pose_net
@@ -62,8 +62,8 @@ if __name__ == '__main__':
                                            num_parallel_workers=1, shuffle=False, num_shards=1, shard_id=0)
 
     trans = Compose([
-        py_vision.ToTensor(),
-        py_vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
+        vision.ToTensor(),
+        vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
     ])
 
     test_dataloader = test_dataloader.map(operations=trans, input_columns="input", num_parallel_workers=1)
diff --git a/research/cv/PDarts/src/dataset.py b/research/cv/PDarts/src/dataset.py
index e08fd37c9ec0ecbfb3505b60aa95e50e7a2b9c18..2309590cd344b959f4b8ad68f4975152b64983f1 100644
--- a/research/cv/PDarts/src/dataset.py
+++ b/research/cv/PDarts/src/dataset.py
@@ -15,8 +15,8 @@
 """Read train and eval data"""
 import mindspore.dataset as ds
 from mindspore.common import dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 
diff --git a/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py b/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
index 2e4e2111c79fa80e92e17f7955746ff680710664..66efd4c181cd418f8584f63d55fa9a91fcc0e1b6 100644
--- a/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
+++ b/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
@@ -22,7 +22,7 @@ import numpy as np
 from PIL import Image
 import mindspore
 from mindspore import dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.utils.config import config
 
 class pix2pixDataset():
diff --git a/research/cv/ReIDStrongBaseline/src/dataset.py b/research/cv/ReIDStrongBaseline/src/dataset.py
index 25fd91b513d9b327fc9503b7a441cc4ee60cedae..6955853c0844210dcbda48e1f118db222d907b79 100644
--- a/research/cv/ReIDStrongBaseline/src/dataset.py
+++ b/research/cv/ReIDStrongBaseline/src/dataset.py
@@ -18,7 +18,7 @@ import math
 import random
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import numpy as np
 from PIL import Image
 
diff --git a/research/cv/RefineDet/src/dataset.py b/research/cv/RefineDet/src/dataset.py
index 9a400aa3f9ac24198c8d6c4ecd2c90950b93791b..bac268b496c7c03a2796c267089d6100eea9853a 100644
--- a/research/cv/RefineDet/src/dataset.py
+++ b/research/cv/RefineDet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .box_utils import jaccard_numpy, refinedet_bboxes_encode, box_init
 
diff --git a/research/cv/RefineNet/src/dataset.py b/research/cv/RefineNet/src/dataset.py
index 50f68bf46a4c4413c7dce2146ff5bdf0c183e5d0..22b40d20451a6b18e66ac201f91edebb976cc333 100644
--- a/research/cv/RefineNet/src/dataset.py
+++ b/research/cv/RefineNet/src/dataset.py
@@ -15,7 +15,7 @@
 """ dataset """
 import numpy as np
 import cv2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as ds
 from mindspore.common import set_seed
 cv2.setNumThreads(0)
diff --git a/research/cv/ResNeSt50/src/datasets/autoaug.py b/research/cv/ResNeSt50/src/datasets/autoaug.py
index 1eb4c5a1b7de8b304249c5b13a305256dd35a129..bfc1acd66e32a7b6c4c67c11c3bf8a12a1712a3c 100644
--- a/research/cv/ResNeSt50/src/datasets/autoaug.py
+++ b/research/cv/ResNeSt50/src/datasets/autoaug.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 import random
 import numpy as np
 import PIL
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 RESAMPLE_MODE = PIL.Image.BICUBIC
 
@@ -175,8 +175,8 @@ class RandAugment:
         self.n = n
         self.m = m
         self.augment_list = rand_augment_list()
-        self.to_pil = py_trans.ToPIL()
-        self.to_tensor = py_trans.ToTensor()
+        self.to_pil = vision.ToPIL()
+        self.to_tensor = vision.ToTensor()
         self.from_pil = from_pil
         self.as_pil = as_pil
 
diff --git a/research/cv/ResNeSt50/src/datasets/dataset.py b/research/cv/ResNeSt50/src/datasets/dataset.py
index 3fbea04094807c90086146bb50bf54075a4d0af5..3aaa5e8e825e7279d0abb616d7f0d633e02722ed 100644
--- a/research/cv/ResNeSt50/src/datasets/dataset.py
+++ b/research/cv/ResNeSt50/src/datasets/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,9 +16,8 @@
 import os
 
 import mindspore.dataset as dataset
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
-import mindspore.dataset.vision.py_transforms as P_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from mindspore.common import dtype as mstype
 
 from src.datasets.autoaug import RandAugment
@@ -44,10 +43,10 @@ def ImageNet(root, mode,
                 V_C.RandomResizedCrop(crop_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
                 V_C.RandomHorizontalFlip(prob=0.5),
                 V_C.RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4),
-                P_C.ToPIL(),
+                V_C.ToPIL(),
                 RandAugment(2, 12, True, True),
-                P_C.ToTensor(),
-                P_C.Normalize(mean=mean, std=std)]
+                V_C.ToTensor(),
+                V_C.Normalize(mean=mean, std=std, is_hwc=False)]
         else:
             mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
             std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
@@ -55,7 +54,7 @@ def ImageNet(root, mode,
                 V_C.Decode(),
                 V_C.Resize((320, 320)),
                 V_C.CenterCrop(256),
-                V_C.Normalize(mean=mean, std=std),
+                V_C.Normalize(mean=mean, std=std, is_hwc=True),
                 V_C.HWC2CHW()]
     else:
         transform_img = transform
diff --git a/research/cv/SE-Net/src/dataset.py b/research/cv/SE-Net/src/dataset.py
index 0b8671e2771d9d6874c86d6ebfe3859e29ef3666..22750915f3c8211dab9b2c96fb983e3a00f90a17 100644
--- a/research/cv/SE-Net/src/dataset.py
+++ b/research/cv/SE-Net/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
diff --git a/research/cv/SE_ResNeXt50/src/dataset.py b/research/cv/SE_ResNeXt50/src/dataset.py
index 85f97ed70934b2d2f9de999bb2bdc983d477930d..9e12fefc0b82f332ee998c848984def6f9d4369b 100644
--- a/research/cv/SE_ResNeXt50/src/dataset.py
+++ b/research/cv/SE_ResNeXt50/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.config import imagenet_cfg
 
 
diff --git a/research/cv/SPPNet/src/dataset.py b/research/cv/SPPNet/src/dataset.py
index 108114c962ccd51851f5e0b29d9b5eedec04a744..36aad47259815cf706cfaf0ff61ef2b66d25cf91 100644
--- a/research/cv/SPPNet/src/dataset.py
+++ b/research/cv/SPPNet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ Produce the dataset
 
 import os
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 from mindspore.communication.management import get_rank, get_group_size
 
 
diff --git a/research/cv/STGAN/modelarts/dataset/celeba.py b/research/cv/STGAN/modelarts/dataset/celeba.py
index bf65edaab934ba1f047d63a19fee225b5b9df0fd..c342793e1111a839cb3d91ec45d2f6315e62a1ec 100644
--- a/research/cv/STGAN/modelarts/dataset/celeba.py
+++ b/research/cv/STGAN/modelarts/dataset/celeba.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import multiprocessing
 import numpy as np
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 from mindspore import context
 from mindspore.context import ParallelMode
diff --git a/research/cv/STGAN/src/dataset/celeba.py b/research/cv/STGAN/src/dataset/celeba.py
index bf65edaab934ba1f047d63a19fee225b5b9df0fd..c342793e1111a839cb3d91ec45d2f6315e62a1ec 100644
--- a/research/cv/STGAN/src/dataset/celeba.py
+++ b/research/cv/STGAN/src/dataset/celeba.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import os
 import multiprocessing
 import numpy as np
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 from mindspore import context
 from mindspore.context import ParallelMode
diff --git a/research/cv/SiamFC/ModelArts/start_train.py b/research/cv/SiamFC/ModelArts/start_train.py
index 303e8df7c16d4663eebffa05087fe72354e7c71f..99f0074c451d47ab0d2802aa07cb517ca62d6c24 100644
--- a/research/cv/SiamFC/ModelArts/start_train.py
+++ b/research/cv/SiamFC/ModelArts/start_train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,7 +30,7 @@ from mindspore import nn
 from mindspore.train import Model
 from mindspore import Tensor
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.transforms as data_trans
 from mindspore.train.serialization import load_checkpoint, export, load_param_into_net
 from src.config import config
 from src.create_lmdb import create_lmdb
@@ -104,12 +104,12 @@ def train(args):
 
     set_seed(1234)
     random_crop_size = config.instance_size - 2 * config.total_stride
-    train_z_transforms = py_transforms.Compose([
+    train_z_transforms = data_trans.Compose([
         RandomStretch(),
         CenterCrop((config.exemplar_size, config.exemplar_size)),
         ToTensor()
     ])
-    train_x_transforms = py_transforms.Compose([
+    train_x_transforms = data_trans.Compose([
         RandomStretch(),
         RandomCrop((random_crop_size, random_crop_size),
                    config.max_translate),
diff --git a/research/cv/SiamFC/train.py b/research/cv/SiamFC/train.py
index c38b288d278907dca7a49f0661107d888c4ae7cd..f4f2a63380e4921fbbafdcaebf1c9b6b23f4f3a0 100644
--- a/research/cv/SiamFC/train.py
+++ b/research/cv/SiamFC/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import mindspore.dataset as ds
 from mindspore import nn
 from mindspore.train import Model
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.transforms as data_trans
 from src.config import config
 from src.alexnet import SiameseAlexNet
 from src.dataset import ImagnetVIDDataset
@@ -44,12 +44,12 @@ def train(data_dir):
 
     set_seed(1234)
     random_crop_size = config.instance_size - 2 * config.total_stride
-    train_z_transforms = py_transforms.Compose([
+    train_z_transforms = data_trans.Compose([
         RandomStretch(),
         CenterCrop((config.exemplar_size, config.exemplar_size)),
         ToTensor()
     ])
-    train_x_transforms = py_transforms.Compose([
+    train_x_transforms = data_trans.Compose([
         RandomStretch(),
         RandomCrop((random_crop_size, random_crop_size),
                    config.max_translate),
diff --git a/research/cv/StarGAN/src/dataset.py b/research/cv/StarGAN/src/dataset.py
index 22813c290c5752116b129298cb9fe85e2607fdc7..0d31e45642a5ab95546de3ba5dcbd815faa6a533 100644
--- a/research/cv/StarGAN/src/dataset.py
+++ b/research/cv/StarGAN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import multiprocessing
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.dataset as de
 
 from src.utils import DistributedSampler
@@ -146,14 +146,14 @@ def get_loader(data_root, attr_path, selected_attrs, crop_size=178, image_size=1
     """Build and return a data loader."""
     mean = [0.5, 0.5, 0.5]
     std = [0.5, 0.5, 0.5]
-    transform = [py_vision.ToPIL()]
+    transform = [vision.ToPIL()]
     if mode == 'train':
-        transform.append(py_vision.RandomHorizontalFlip())
-        transform.append(py_vision.CenterCrop(crop_size))
-    transform.append(py_vision.Resize([image_size, image_size]))
-    transform.append(py_vision.ToTensor())
-    transform.append(py_vision.Normalize(mean=mean, std=std))
-    transform = py_transforms.Compose(transform)
+        transform.append(vision.RandomHorizontalFlip())
+        transform.append(vision.CenterCrop(crop_size))
+    transform.append(vision.Resize([image_size, image_size]))
+    transform.append(vision.ToTensor())
+    transform.append(vision.Normalize(mean=mean, std=std, is_hwc=False))
+    transform = data_trans.Compose(transform)
 
     if dataset == 'CelebA':
         dataset = CelebA(data_root, attr_path, selected_attrs, transform, mode)
diff --git a/research/cv/TCN/src/dataset.py b/research/cv/TCN/src/dataset.py
index f68fe3e6db672c622c2fe4e58b20dc6a794efce6..413923a1abfdd0361ca51fb16d50ee71462ee891 100644
--- a/research/cv/TCN/src/dataset.py
+++ b/research/cv/TCN/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@
 """
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as CV
 from mindspore import dtype as mstype
 
 np.random.seed(0)
diff --git a/research/cv/TNT/src/data/imagenet.py b/research/cv/TNT/src/data/imagenet.py
index c0fb4832e8538ad5ca776154acbbfd50a4d06069..95cb688a720db352887f3cf5a83221a64533a9f5 100644
--- a/research/cv/TNT/src/data/imagenet.py
+++ b/research/cv/TNT/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import _pil_interp, rand_augment_transform
@@ -94,12 +93,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.BICUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/U-GAT-IT/src/dataset/dataset.py b/research/cv/U-GAT-IT/src/dataset/dataset.py
index 9a12bb4ab69393f6c577a0e7bf6f27f12dd047f4..fade60c4c0b981b780449bc304f8ed8fb6fecd01 100644
--- a/research/cv/U-GAT-IT/src/dataset/dataset.py
+++ b/research/cv/U-GAT-IT/src/dataset/dataset.py
@@ -22,7 +22,7 @@ import math
 
 import numpy as np
 from PIL import Image
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 
@@ -30,18 +30,18 @@ from mindspore.communication.management import get_rank, get_group_size
 def TrainDataLoader(img_size, data_path, dataset, batch_size, distributed):
     """ DataLoader """
     train_transform = [
-        py_vision.ToPIL(),
-        py_vision.RandomHorizontalFlip(),
-        py_vision.Resize((img_size + 30, img_size + 30)),
-        py_vision.RandomCrop(img_size),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        vision.ToPIL(),
+        vision.RandomHorizontalFlip(),
+        vision.Resize((img_size + 30, img_size + 30)),
+        vision.RandomCrop(img_size),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False),
     ]
     test_transform = [
-        py_vision.ToPIL(),
-        py_vision.Resize((img_size, img_size)),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        vision.ToPIL(),
+        vision.Resize((img_size, img_size)),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False),
     ]
     rank_size = 1
     if distributed:
@@ -76,10 +76,10 @@ def TrainDataLoader(img_size, data_path, dataset, batch_size, distributed):
 def TestDataLoader(img_size, data_path, dataset):
     """ DataLoader """
     test_transform = [
-        py_vision.ToPIL(),
-        py_vision.Resize((img_size, img_size)),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+        vision.ToPIL(),
+        vision.Resize((img_size, img_size)),
+        vision.ToTensor(),
+        vision.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], is_hwc=False),
     ]
     testA_generator = GetDatasetGenerator(os.path.join(data_path, dataset), 'test')
     testA = ds.GeneratorDataset(testA_generator, ["image_A", "image_B"], shuffle=False, num_parallel_workers=12)
diff --git a/research/cv/UNet3+/src/dataset.py b/research/cv/UNet3+/src/dataset.py
index 2771788217043bfb9acefbf152e11f654aa1bf65..eb8ccb3df182e78797906b8c19c294424d509602 100644
--- a/research/cv/UNet3+/src/dataset.py
+++ b/research/cv/UNet3+/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from skimage.io import imread
 from skimage import color
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 class Dataset:
     '''Dataset'''
diff --git a/research/cv/VehicleNet/src/dataset.py b/research/cv/VehicleNet/src/dataset.py
index 58fc5ba3805ad5167e090a7bd7d0a5dd37816b5d..04bd4ea81a4fa155f1924223786ec5973867da53 100644
--- a/research/cv/VehicleNet/src/dataset.py
+++ b/research/cv/VehicleNet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,9 +22,8 @@ from mindspore.mindrecord import FileWriter
 import mindspore.dataset as ds
 from mindspore.dataset.vision import Inter
 import mindspore.common.dtype as mstype
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as P_C
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 class Dataset:
     """Dataset"""
@@ -151,9 +150,9 @@ def create_vehiclenet_dataset(mindrecord_file, batch_size=1, device_num=1, is_tr
 
     if is_training:
         if use_aug:
-            py_to_pil_op = P_C.ToPIL()
+            py_to_pil_op = C.ToPIL()
             autoaugment_op = ImageNetPolicy()
-            to_tensor_op = P_C.ToTensor()
+            to_tensor_op = C.ToTensor()
             transforms_list += [py_to_pil_op, autoaugment_op, to_tensor_op]
 
         resized_op = C.Resize([train_inputsize, train_inputsize], interpolation=Inter.BICUBIC)
diff --git a/research/cv/ViG/src/data/imagenet.py b/research/cv/ViG/src/data/imagenet.py
index 0e4ad3790da0789afa2272b7ae551f3ad33292c0..8fb4e3cb15fadcf760da93e71ba384033f8c9e11 100644
--- a/research/cv/ViG/src/data/imagenet.py
+++ b/research/cv/ViG/src/data/imagenet.py
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import _pil_interp, rand_augment_transform
@@ -94,12 +93,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.BICUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/Yolact++/src/dataset.py b/research/cv/Yolact++/src/dataset.py
index 6566291c441212ee30f44bd228ae0f1d66f1f0ef..e7b277e3b1aa6ef9fd8b91ba4c445c8e6903ff43 100644
--- a/research/cv/Yolact++/src/dataset.py
+++ b/research/cv/Yolact++/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import mmcv
 import numpy as np
 from numpy import random
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.config import MEANS
 from src.config import yolact_plus_resnet50_config as cfg
diff --git a/research/cv/advanced_east/src/dataset.py b/research/cv/advanced_east/src/dataset.py
index ea040089c0be9d9f4713df1d7ea7509a75ea8c60..74949f617be925dc22024200ac3bbfd47912b1ee 100644
--- a/research/cv/advanced_east/src/dataset.py
+++ b/research/cv/advanced_east/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ dataset.
 import os
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.vision as vision
 from mindspore.mindrecord import FileWriter
 import numpy as np
 from PIL import Image, ImageFile
diff --git a/research/cv/arcface/src/dataset.py b/research/cv/arcface/src/dataset.py
index 5cef4e2f3a99b64dd2d2b6378b96f498887ec507..b7ce783e364d4d0e9730d6a3c558e4764a6b31b7 100644
--- a/research/cv/arcface/src/dataset.py
+++ b/research/cv/arcface/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ python dataset.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/augvit/src/c10_dataset.py b/research/cv/augvit/src/c10_dataset.py
index b6042682f190e5006524a9bf4cf0aaf0aada339d..ad36d6e6661a9c62182f07389367f1c57c761271 100644
--- a/research/cv/augvit/src/c10_dataset.py
+++ b/research/cv/augvit/src/c10_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as c_transforms
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as data_trans
+import mindspore.dataset.vision as vision
 
 def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch_size=1):
     """
@@ -63,7 +63,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
     normalize_op = vision.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if do_train:
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/aug.py b/research/cv/autoaugment/src/dataset/autoaugment/aug.py
index 2c6a33b5e678fe03b4249d40e5ab20df19d486f0..83bda35cb76d789f69bca822890f8dcb87b9f0a3 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/aug.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/aug.py
@@ -18,7 +18,7 @@ The Augment operator.
 
 import random
 
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 from .third_party.policies import good_policies
 from .third_party.policies import svhn_good_policies
@@ -61,8 +61,8 @@ class Augment:
             self.policies = policies
 
         self.oc = OperatorClasses()
-        self.to_pil = py_trans.ToPIL()
-        self.to_tensor = py_trans.ToTensor()
+        self.to_pil = vision.ToPIL()
+        self.to_tensor = vision.ToTensor()
 
         self.enable_basic = enable_basic
         self.random_crop = self.oc.RandomCrop(None)
@@ -73,7 +73,7 @@ class Augment:
         self.as_pil = as_pil
         self.normalize = None
         if mean is not None and std is not None:
-            self.normalize = py_trans.Normalize(mean, std)
+            self.normalize = vision.Normalize(mean, std, is_hwc=False)
 
     def _apply(self, name, prob, level, img):
         if random.random() > prob:
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/__init__.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/__init__.py
index f001d2520791fac998e76c481dc8666f2fe678cd..fba0aa2e95e761b9ef03952664bb15971873ff9b 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/__init__.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/__init__.py
@@ -16,7 +16,7 @@
 Package initialization for custom PIL operators.
 """
 
-from mindspore.dataset.vision import py_transforms
+from mindspore.dataset.vision import transforms
 
 from .crop import RandomCrop
 from .cutout import RandomCutout
@@ -41,9 +41,9 @@ from .transform import (
 
 class OperatorClasses:
     """OperatorClasses gathers all unary-image transformations listed in the
-    Table 6 of https://arxiv.org/abs/1805.09501 and uses discrte levels for
-    these transformations (The Sample Pairing transformation is an
-    exception, which involes multiple images from a single mini-batch and
+    Table 6 of https://arxiv.org/abs/1805.09501 and uses discrete levels for
+    these transformations. (The Sample Pairing transformation is an
+    exception, which involves multiple images from a single mini-batch and
     is not exploited in this implementation.)
 
     Additionally, there are RandomHorizontalFlip and RandomCrop.
@@ -56,9 +56,9 @@ class OperatorClasses:
         self.TranslateX = self.decorate(TranslateX, max_val=10, rounding=True)
         self.TranslateY = self.decorate(TranslateY, max_val=10, rounding=True)
 
-        self.AutoContrast = self.decorate(py_transforms.AutoContrast)
-        self.Invert = self.decorate(py_transforms.Invert)
-        self.Equalize = self.decorate(py_transforms.Equalize)
+        self.AutoContrast = self.decorate(transforms.AutoContrast)
+        self.Invert = self.decorate(transforms.Invert)
+        self.Equalize = self.decorate(transforms.Equalize)
 
         self.Solarize = self.decorate(
             Solarize, max_val=256, rounding=True, post=lambda x: 256 - x)
@@ -76,7 +76,7 @@ class OperatorClasses:
         self.Cutout = self.decorate(RandomCutout, max_val=20, rounding=True)
 
         self.RandomHorizontalFlip = self.decorate(
-            py_transforms.RandomHorizontalFlip)
+            transforms.RandomHorizontalFlip)
         self.RandomCrop = self.decorate(RandomCrop)
 
     def vars(self):
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
index dd42ad5d3e990aaeea56e7e5dd3dddece1bfbaa7..137236beaaf1474945322d0dea12759be4ad2670 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/crop.py
@@ -16,17 +16,17 @@
 RandomCrop operator.
 """
 
-from mindspore.dataset.vision import py_transforms
+from mindspore.dataset.vision import transforms
 from mindspore.dataset.vision import py_transforms_util
 from mindspore.dataset.vision import utils
 
 
-class RandomCrop(py_transforms.RandomCrop):
+class RandomCrop(transforms.RandomCrop):
     """
-    RandomCrop inherits from py_transforms.RandomCrop but derives/uses the
+    RandomCrop inherits from transforms.RandomCrop but derives/uses the
     original image size as the output size.
 
-    Please refer to py_transforms.RandomCrop for argument specifications.
+    Please refer to transforms.RandomCrop for argument specifications.
     """
 
     def __init__(self, padding=4, pad_if_needed=False,
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/cutout.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/cutout.py
index 8d3e2d594fa4b5faf8e6b4f2d53bb1d7d7adf57f..7286d24a354408b4c02df01f283ba43bdc01ccc1 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/cutout.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/cutout.py
@@ -21,7 +21,7 @@ import random
 
 class RandomCutout:
     """
-    RandomCutout is similar to py_transforms.Cutout but is simplified and
+    RandomCutout is similar to transforms.CutOut but is simplified and
     crafted for PIL images.
 
     Args:
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/ops_test.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/ops_test.py
index ebe85e6322e2c1756be3ad530c49f67d081555cd..b4575363ad7ff0e50fc1275b822191cf889e652f 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/ops_test.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/ops_test.py
@@ -19,7 +19,7 @@ Visualization for testing purposes.
 import matplotlib.pyplot as plt
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 from mindspore import context
 context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU')
 
@@ -36,7 +36,7 @@ def compare(data_path, trans, output_path='./ops_test.png'):
 
     # Apply transformations
     dataset_augmented = dataset_orig.map(
-        operations=[py_trans.ToPIL()] + trans + [py_trans.ToTensor()],
+        operations=[vision.ToPIL()] + trans + [vision.ToTensor()],
         input_columns=['image'],
     )
 
diff --git a/research/cv/autoaugment/src/dataset/autoaugment/ops/transform.py b/research/cv/autoaugment/src/dataset/autoaugment/ops/transform.py
index c69a7401c88ba9125a9f7b8db5c6af16cb02364e..7974af71100fcecddc6db1c5890bef7b58635915 100644
--- a/research/cv/autoaugment/src/dataset/autoaugment/ops/transform.py
+++ b/research/cv/autoaugment/src/dataset/autoaugment/ops/transform.py
@@ -21,7 +21,6 @@ import random
 
 from PIL import Image, __version__
 
-from mindspore.dataset.vision.py_transforms import DE_PY_INTER_MODE
 from mindspore.dataset.vision.py_transforms_util import (
     augment_error_message,
     is_pil,
@@ -46,7 +45,7 @@ class ShearX:
             raise TypeError('shear must be a single number.')
 
         self.shear = shear
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.fill_value = fill_value
 
     def __call__(self, img):
@@ -91,7 +90,7 @@ class ShearY:
             raise TypeError('shear must be a single number.')
 
         self.shear = shear
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.fill_value = fill_value
 
     def __call__(self, img):
@@ -136,7 +135,7 @@ class TranslateX:
             raise TypeError('translate must be a single number.')
 
         self.translate = translate
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.fill_value = fill_value
 
     def __call__(self, img):
@@ -181,7 +180,7 @@ class TranslateY:
             raise TypeError('Translate must be a single number.')
 
         self.translate = translate
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.fill_value = fill_value
 
     def __call__(self, img):
@@ -212,12 +211,12 @@ class TranslateY:
 
 class Rotate:
     """
-    Rotate is similar to py_vision.RandomRotation but uses a fixed degree.
+    Rotate is similar to mindspore.dataset.vision.RandomRotation but uses a fixed degree.
 
     Args:
         degree (int): the degree to rotate.
 
-    Please refer to py_transforms.RandomRotation for more argument
+    Please refer to mindspore.dataset.vision.RandomRotation for more argument
     specifications.
     """
 
@@ -229,7 +228,7 @@ class Rotate:
             raise TypeError('degree must be a single number.')
 
         self.degree = degree
-        self.resample = DE_PY_INTER_MODE[resample]
+        self.resample = resample
         self.expand = expand
         self.center = center
         self.fill_value = fill_value
diff --git a/research/cv/autoaugment/src/dataset/cifar10.py b/research/cv/autoaugment/src/dataset/cifar10.py
index 25724a9cfea66c5f63d35dcd6913767341e25550..166c78659f0f4d69d7b573042601a46ea29c4105 100644
--- a/research/cv/autoaugment/src/dataset/cifar10.py
+++ b/research/cv/autoaugment/src/dataset/cifar10.py
@@ -20,8 +20,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
 from mindspore.communication.management import init
diff --git a/research/cv/autoaugment/src/dataset/svhn_dataset.py b/research/cv/autoaugment/src/dataset/svhn_dataset.py
index fa95bf1699e33499f571e2f46a0e8723601b5481..070b877ef278878eea70a57a3eff61a9337f93ae 100644
--- a/research/cv/autoaugment/src/dataset/svhn_dataset.py
+++ b/research/cv/autoaugment/src/dataset/svhn_dataset.py
@@ -20,8 +20,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
 from mindspore.communication.management import init
diff --git a/research/cv/cait/src/data/imagenet.py b/research/cv/cait/src/data/imagenet.py
index 1539daa2c63b26d34e290c4abf9f9a5606c5602f..2eb8354c86c681c37e1a2e95f7d78f83546ffc98 100644
--- a/research/cv/cait/src/data/imagenet.py
+++ b/research/cv/cait/src/data/imagenet.py
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.data.augment.auto_augment import rand_augment_transform
 from src.data.augment.mixup import Mixup
@@ -91,26 +90,26 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
         assert auto_augment.startswith('rand')
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             RandomResizedCropAndInterpolation(size=args.image_size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
                                               interpolation=interpolation),
-            py_vision.RandomHorizontalFlip(prob=0.5),
+            vision.RandomHorizontalFlip(prob=0.5),
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)]
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)]
         if args.re_prob > 0.:
             transform_img += [RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)]
     else:
         # test transform complete
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             Resize(int(args.image_size / args.crop_pct), interpolation="bicubic"),
-            py_vision.CenterCrop(image_size),
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)
+            vision.CenterCrop(image_size),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)
         ]
 
     transform_label = C.TypeCast(mstype.int32)
diff --git a/research/cv/cct/src/data/cifar10.py b/research/cv/cct/src/data/cifar10.py
index dceac7f861c8212e24cfcfa3ddd78b7c6f2d512c..dbfe0dcdb5fc734a18de42fc371984c82ebc4a0b 100644
--- a/research/cv/cct/src/data/cifar10.py
+++ b/research/cv/cct/src/data/cifar10.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.data.augment.auto_augment import rand_augment_transform
 from src.data.augment.mixup import Mixup
@@ -89,15 +89,15 @@ def create_dataset_cifar10(dataset_dir, args, repeat_num=1, training=True):
         auto_augment = args.auto_augment
         assert auto_augment.startswith('rand')
         transform_img = [
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             RandomResizedCropAndInterpolation(size=args.image_size, scale=(0.8, 1.0), ratio=(3. / 4., 4. / 3.),
                                               interpolation=interpolation),
-            py_vision.RandomHorizontalFlip(prob=0.5),
+            vision.RandomHorizontalFlip(prob=0.5),
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)]
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)]
         if args.re_prob > 0.:
             transform_img += [RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)]
     else:
@@ -105,10 +105,10 @@ def create_dataset_cifar10(dataset_dir, args, repeat_num=1, training=True):
         std = [0.2470, 0.2435, 0.2616]
         # test transform complete
         transform_img = [
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             Resize(int(image_size), interpolation="bicubic"),
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)
         ]
 
     transform_label = C.TypeCast(mstype.int32)
diff --git a/research/cv/cct/src/data/imagenet.py b/research/cv/cct/src/data/imagenet.py
index e512c685c4d76d6eabdcdaa5cec4a4b682bfdfb4..7d1f0a2f396ebbb1017cb122508318d7789fee14 100644
--- a/research/cv/cct/src/data/imagenet.py
+++ b/research/cv/cct/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.data.augment.auto_augment import rand_augment_transform
 from src.data.augment.mixup import Mixup
@@ -92,26 +91,26 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
         assert auto_augment.startswith('rand')
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             RandomResizedCropAndInterpolation(size=args.image_size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
                                               interpolation=interpolation),
-            py_vision.RandomHorizontalFlip(prob=0.5),
+            vision.RandomHorizontalFlip(prob=0.5),
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)]
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)]
         if args.re_prob > 0.:
             transform_img += [RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)]
     else:
         # test transform complete
         transform_img = [
             vision.Decode(),
-            py_vision.ToPIL(),
+            vision.ToPIL(),
             Resize(int(args.image_size / args.crop_pct), interpolation="bicubic"),
-            py_vision.CenterCrop(image_size),
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std)
+            vision.CenterCrop(image_size),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False)
         ]
 
     transform_label = C.TypeCast(mstype.int32)
diff --git a/research/cv/convnext/src/data/imagenet.py b/research/cv/convnext/src/data/imagenet.py
index 7aa5c2c638e1998b973d21e0e336b8c2bcb99773..5a0cdcffe1bcfb0b7d9173f871eedefe050a0d21 100644
--- a/research/cv/convnext/src/data/imagenet.py
+++ b/research/cv/convnext/src/data/imagenet.py
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import pil_interp, rand_augment_transform
@@ -94,12 +93,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.PILCUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/dcgan/src/dataset.py b/research/cv/dcgan/src/dataset.py
index 74a467a759608237ed7ffebafee56470ae7c0c59..4e0c06772b3a1d9f2c809fb14b362057e866fb1d 100644
--- a/research/cv/dcgan/src/dataset.py
+++ b/research/cv/dcgan/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import numpy as np
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.config import dcgan_imagenet_cfg, dcgan_cifar10_cfg
 
 
diff --git a/research/cv/delf/src/data_augmentation_parallel.py b/research/cv/delf/src/data_augmentation_parallel.py
index 1428b0727f0bfa51c5448fb013e226aa3587c04b..fe55712999a7abd17af652121df9de6aa9e10bd6 100755
--- a/research/cv/delf/src/data_augmentation_parallel.py
+++ b/research/cv/delf/src/data_augmentation_parallel.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ import os
 
 from mindspore import dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 def create_dataset(data_path, image_size=321, batch_size=32, seed=0, augmentation=True, repeat=True):
     """create dataset"""
diff --git a/research/cv/ecolite/src/transforms.py b/research/cv/ecolite/src/transforms.py
index 06e4d14be2a271d98c1029b5cdb3681986eb9d1f..c72ee01003c5075311f47089fef67b544a2b48d0 100644
--- a/research/cv/ecolite/src/transforms.py
+++ b/research/cv/ecolite/src/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import numbers
 import math
 from PIL import Image, ImageOps
 import numpy as np
-from mindspore.dataset.vision import py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 
 class GroupRandomCrop:
@@ -55,7 +55,7 @@ class GroupCenterCrop:
     """GroupCenterCrop"""
 
     def __init__(self, size):
-        self.worker = py_trans.CenterCrop(size)
+        self.worker = vision.CenterCrop(size)
 
     def __call__(self, img_group):
         return [self.worker(img) for img in img_group]
diff --git a/research/cv/efficientnet-b0/src/dataset.py b/research/cv/efficientnet-b0/src/dataset.py
index 56602a36c8ad62e5eba51255eb288d623a738f80..64906ba88a337ded3d2746cc62f607f7824d9c0b 100644
--- a/research/cv/efficientnet-b0/src/dataset.py
+++ b/research/cv/efficientnet-b0/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/efficientnet-b1/src/dataset.py b/research/cv/efficientnet-b1/src/dataset.py
index 1c87e997310a4de56fe9e245fac0b8d3dc438ddb..373f0995fcbf71f217ac5932b2cb132b8e0997c9 100644
--- a/research/cv/efficientnet-b1/src/dataset.py
+++ b/research/cv/efficientnet-b1/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/efficientnet-b2/src/dataset.py b/research/cv/efficientnet-b2/src/dataset.py
index c72f7fd3bbd4146f25dd53c97e5cb6680ca42050..6964008568c863e56b0cb29084b8fafcfa84dda7 100644
--- a/research/cv/efficientnet-b2/src/dataset.py
+++ b/research/cv/efficientnet-b2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/efficientnet-b3/src/dataset.py b/research/cv/efficientnet-b3/src/dataset.py
index 6c1347d020784f93a907bbf73371f72f1d7690ee..5c571624e99d3415ce98728b59d9cf902a1beb52 100644
--- a/research/cv/efficientnet-b3/src/dataset.py
+++ b/research/cv/efficientnet-b3/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/efficientnetv2/src/data/imagenet_finetune.py b/research/cv/efficientnetv2/src/data/imagenet_finetune.py
index 600615183093ba58ee21d224399db27fdd8dd257..1029f0081a97169891f0f6c33276db6d189d4686 100644
--- a/research/cv/efficientnetv2/src/data/imagenet_finetune.py
+++ b/research/cv/efficientnetv2/src/data/imagenet_finetune.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 import numpy as np
 
 from .data_utils.moxing_adapter import sync_data
diff --git a/research/cv/eppmvsnet/src/blendedmvs.py b/research/cv/eppmvsnet/src/blendedmvs.py
index 21acd1e04b51bbe9eb61dddda4ebd510db942920..123a6ba0c7a241e33e40d45ee4ed63c988ed6dd2 100644
--- a/research/cv/eppmvsnet/src/blendedmvs.py
+++ b/research/cv/eppmvsnet/src/blendedmvs.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ import cv2
 import numpy as np
 from PIL import Image
 
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.vision as vision
 
 from src.utils import read_pfm
 
@@ -202,15 +202,15 @@ class BlendedMVSDataset:
     def define_transforms(self):
         if self.training_tag and self.split == 'train':  # you can add augmentation here
             self.transform = Compose([
-                py_vision.ToTensor(),
-                py_vision.Normalize(mean=[0.485, 0.456, 0.406],
-                                    std=[0.229, 0.224, 0.225]),
+                vision.ToTensor(),
+                vision.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225], is_hwc=False),
             ])
         else:
             self.transform = Compose([
-                py_vision.ToTensor(),
-                py_vision.Normalize(mean=[0.485, 0.456, 0.406],
-                                    std=[0.229, 0.224, 0.225]),
+                vision.ToTensor(),
+                vision.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225], is_hwc=False),
             ])
 
     def __len__(self):
diff --git a/research/cv/faster_rcnn_dcn/src/dataset.py b/research/cv/faster_rcnn_dcn/src/dataset.py
index 1f104189dd7d9f1263eda6f5de8ce08c0cb3cef1..23697df6a9bc7fb887443e92a49b1838ee4b50a4 100644
--- a/research/cv/faster_rcnn_dcn/src/dataset.py
+++ b/research/cv/faster_rcnn_dcn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ from numpy import random
 
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 
diff --git a/research/cv/fishnet99/src/dataset.py b/research/cv/fishnet99/src/dataset.py
index a5e72f198082f82bab894992f228029ea7d84ec1..8be01542b01f144adaf70dc94b44511033dd6fdd 100644
--- a/research/cv/fishnet99/src/dataset.py
+++ b/research/cv/fishnet99/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.config import imagenet_cfg
 
 
diff --git a/research/cv/ghostnet/src/dataset.py b/research/cv/ghostnet/src/dataset.py
index 05acf36e1d5c5695a04260c92c2e3a334f3b5bca..28a272e3fe53354f59ef6e7cc1b0f5871ebfcf0b 100644
--- a/research/cv/ghostnet/src/dataset.py
+++ b/research/cv/ghostnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, repeat_num=1, infer_910=True, device_id=0, batch_size=128):
diff --git a/research/cv/ghostnet_quant/src/dataset.py b/research/cv/ghostnet_quant/src/dataset.py
index edee462b4e5cfa70fedd7f3f9a4b91a8d0b846a4..6c56662cb4ba8d5cce3f8c653a6362651ee068ef 100644
--- a/research/cv/ghostnet_quant/src/dataset.py
+++ b/research/cv/ghostnet_quant/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,9 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.vision.c_transforms as C
-import mindspore.dataset.transforms.vision.py_transforms as P
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.transforms.vision import Inter
 
 
@@ -73,18 +72,18 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
     change_swap_op = C.HWC2CHW()
 
     # define python operations
-    decode_p = P.Decode()
+    decode_p = C.Decode(True)
     if model == 'ghostnet-600':
         s = 274
         c = 240
     else:
         s = 256
         c = 224
-    resize_p = P.Resize(s, interpolation=Inter.BICUBIC)
-    center_crop_p = P.CenterCrop(c)
-    totensor = P.ToTensor()
-    normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
-    composeop = P.ComposeOp(
+    resize_p = C.Resize(s, interpolation=Inter.BICUBIC)
+    center_crop_p = C.CenterCrop(c)
+    totensor = C.ToTensor()
+    normalize_p = C.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
+    composeop = C.ComposeOp(
         [decode_p, resize_p, center_crop_p, totensor, normalize_p])
     if do_train:
         trans = [resize_crop_op, horizontal_flip_op, color_op,
diff --git a/research/cv/glore_res/src/autoaugment.py b/research/cv/glore_res/src/autoaugment.py
index 35ef907c1884d1d331e87cb2bbd20feecbb3c321..0cc9e65690a13ea3f8a51d07fef4b426ee94c509 100644
--- a/research/cv/glore_res/src/autoaugment.py
+++ b/research/cv/glore_res/src/autoaugment.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 """define autoaugment"""
 import os
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as c_transforms
-import mindspore.dataset.vision.c_transforms as c_vision
+import mindspore.dataset.transforms as data_trans
+import mindspore.dataset.vision as vision
 from mindspore import dtype as mstype
 from mindspore.communication.management import init, get_rank, get_group_size
 
@@ -34,101 +34,101 @@ def int_parameter(level, maxval):
 
 def shear_x(level):
     v = float_parameter(level, 0.3)
-    return c_transforms.RandomChoice(
-        [c_vision.RandomAffine(degrees=0, shear=(-v, -v)), c_vision.RandomAffine(degrees=0, shear=(v, v))])
+    return data_trans.RandomChoice(
+        [vision.RandomAffine(degrees=0, shear=(-v, -v)), vision.RandomAffine(degrees=0, shear=(v, v))])
 
 
 def shear_y(level):
     v = float_parameter(level, 0.3)
-    return c_transforms.RandomChoice(
-        [c_vision.RandomAffine(degrees=0, shear=(0, 0, -v, -v)), c_vision.RandomAffine(degrees=0, shear=(0, 0, v, v))])
+    return data_trans.RandomChoice(
+        [vision.RandomAffine(degrees=0, shear=(0, 0, -v, -v)), vision.RandomAffine(degrees=0, shear=(0, 0, v, v))])
 
 
 def translate_x(level):
     v = float_parameter(level, 150 / 331)
-    return c_transforms.RandomChoice(
-        [c_vision.RandomAffine(degrees=0, translate=(-v, -v)), c_vision.RandomAffine(degrees=0, translate=(v, v))])
+    return data_trans.RandomChoice(
+        [vision.RandomAffine(degrees=0, translate=(-v, -v)), vision.RandomAffine(degrees=0, translate=(v, v))])
 
 
 def translate_y(level):
     v = float_parameter(level, 150 / 331)
-    return c_transforms.RandomChoice([c_vision.RandomAffine(degrees=0, translate=(0, 0, -v, -v)),
-                                      c_vision.RandomAffine(degrees=0, translate=(0, 0, v, v))])
+    return data_trans.RandomChoice([vision.RandomAffine(degrees=0, translate=(0, 0, -v, -v)),
+                                    vision.RandomAffine(degrees=0, translate=(0, 0, v, v))])
 
 
 def color_impl(level):
     v = float_parameter(level, 1.8) + 0.1
-    return c_vision.RandomColor(degrees=(v, v))
+    return vision.RandomColor(degrees=(v, v))
 
 
 def rotate_impl(level):
     v = int_parameter(level, 30)
-    return c_transforms.RandomChoice(
-        [c_vision.RandomRotation(degrees=(-v, -v)), c_vision.RandomRotation(degrees=(v, v))])
+    return data_trans.RandomChoice(
+        [vision.RandomRotation(degrees=(-v, -v)), vision.RandomRotation(degrees=(v, v))])
 
 
 def solarize_impl(level):
     level = int_parameter(level, 256)
     v = 256 - level
-    return c_vision.RandomSolarize(threshold=(0, v))
+    return vision.RandomSolarize(threshold=(0, v))
 
 
 def posterize_impl(level):
     level = int_parameter(level, 4)
     v = 4 - level
-    return c_vision.RandomPosterize(bits=(v, v))
+    return vision.RandomPosterize(bits=(v, v))
 
 
 def contrast_impl(level):
     v = float_parameter(level, 1.8) + 0.1
-    return c_vision.RandomColorAdjust(contrast=(v, v))
+    return vision.RandomColorAdjust(contrast=(v, v))
 
 
 def autocontrast_impl(level):
-    return c_vision.AutoContrast()
+    return vision.AutoContrast()
 
 
 def sharpness_impl(level):
     v = float_parameter(level, 1.8) + 0.1
-    return c_vision.RandomSharpness(degrees=(v, v))
+    return vision.RandomSharpness(degrees=(v, v))
 
 
 def brightness_impl(level):
     v = float_parameter(level, 1.8) + 0.1
-    return c_vision.RandomColorAdjust(brightness=(v, v))
+    return vision.RandomColorAdjust(brightness=(v, v))
 
 
 # define the Auto Augmentation policy
 imagenet_policy = [
     [(posterize_impl(8), 0.4), (rotate_impl(9), 0.6)],
     [(solarize_impl(5), 0.6), (autocontrast_impl(5), 0.6)],
-    [(c_vision.Equalize(), 0.8), (c_vision.Equalize(), 0.6)],
+    [(vision.Equalize(), 0.8), (vision.Equalize(), 0.6)],
     [(posterize_impl(7), 0.6), (posterize_impl(6), 0.6)],
-    [(c_vision.Equalize(), 0.4), (solarize_impl(4), 0.2)],
+    [(vision.Equalize(), 0.4), (solarize_impl(4), 0.2)],
 
-    [(c_vision.Equalize(), 0.4), (rotate_impl(8), 0.8)],
-    [(solarize_impl(3), 0.6), (c_vision.Equalize(), 0.6)],
-    [(posterize_impl(5), 0.8), (c_vision.Equalize(), 1.0)],
+    [(vision.Equalize(), 0.4), (rotate_impl(8), 0.8)],
+    [(solarize_impl(3), 0.6), (vision.Equalize(), 0.6)],
+    [(posterize_impl(5), 0.8), (vision.Equalize(), 1.0)],
     [(rotate_impl(3), 0.2), (solarize_impl(8), 0.6)],
-    [(c_vision.Equalize(), 0.6), (posterize_impl(6), 0.4)],
+    [(vision.Equalize(), 0.6), (posterize_impl(6), 0.4)],
 
     [(rotate_impl(8), 0.8), (color_impl(0), 0.4)],
-    [(rotate_impl(9), 0.4), (c_vision.Equalize(), 0.6)],
-    [(c_vision.Equalize(), 0.0), (c_vision.Equalize(), 0.8)],
-    [(c_vision.Invert(), 0.6), (c_vision.Equalize(), 1.0)],
+    [(rotate_impl(9), 0.4), (vision.Equalize(), 0.6)],
+    [(vision.Equalize(), 0.0), (vision.Equalize(), 0.8)],
+    [(vision.Invert(), 0.6), (vision.Equalize(), 1.0)],
     [(color_impl(4), 0.6), (contrast_impl(8), 1.0)],
 
     [(rotate_impl(8), 0.8), (color_impl(2), 1.0)],
     [(color_impl(8), 0.8), (solarize_impl(7), 0.8)],
-    [(sharpness_impl(7), 0.4), (c_vision.Invert(), 0.6)],
-    [(shear_x(5), 0.6), (c_vision.Equalize(), 1.0)],
-    [(color_impl(0), 0.4), (c_vision.Equalize(), 0.6)],
+    [(sharpness_impl(7), 0.4), (vision.Invert(), 0.6)],
+    [(shear_x(5), 0.6), (vision.Equalize(), 1.0)],
+    [(color_impl(0), 0.4), (vision.Equalize(), 0.6)],
 
-    [(c_vision.Equalize(), 0.4), (solarize_impl(4), 0.2)],
+    [(vision.Equalize(), 0.4), (solarize_impl(4), 0.2)],
     [(solarize_impl(5), 0.6), (autocontrast_impl(5), 0.6)],
-    [(c_vision.Invert(), 0.6), (c_vision.Equalize(), 1.0)],
+    [(vision.Invert(), 0.6), (vision.Equalize(), 1.0)],
     [(color_impl(4), 0.6), (contrast_impl(8), 1.0)],
-    [(c_vision.Equalize(), 0.8), (c_vision.Equalize(), 0.6)],
+    [(vision.Equalize(), 0.8), (vision.Equalize(), 0.6)],
 ]
 
 
@@ -153,19 +153,19 @@ def autoaugment(dataset_path, repeat_num=1, batch_size=32, target="Ascend"):
     mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
     std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
     trans = [
-        c_vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
+        vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
     ]
 
     post_trans = [
-        c_vision.RandomHorizontalFlip(prob=0.5),
-        c_vision.Normalize(mean=mean, std=std),
-        c_vision.HWC2CHW()
+        vision.RandomHorizontalFlip(prob=0.5),
+        vision.Normalize(mean=mean, std=std),
+        vision.HWC2CHW()
     ]
     dataset = ds.map(operations=trans, input_columns="image")
-    dataset = dataset.map(operations=c_vision.RandomSelectSubpolicy(imagenet_policy), input_columns=["image"])
+    dataset = dataset.map(operations=vision.RandomSelectSubpolicy(imagenet_policy), input_columns=["image"])
     dataset = dataset.map(operations=post_trans, input_columns="image")
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
     dataset = dataset.map(operations=type_cast_op, input_columns="label")
     # apply the batch operation
     dataset = dataset.batch(batch_size, drop_remainder=True)
diff --git a/research/cv/glore_res/src/dataset.py b/research/cv/glore_res/src/dataset.py
index b7a5fdc9cd2e7db8f4c211d7521ee15a5c00a9fd..9a6ecc0d0e588ca9d97dfed114b7f3ecc4a5169e 100644
--- a/research/cv/glore_res/src/dataset.py
+++ b/research/cv/glore_res/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,9 +18,9 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 from src.transform import RandAugment
 from src.config import config
diff --git a/research/cv/glore_res/src/transform.py b/research/cv/glore_res/src/transform.py
index cba6ea73a65fe5d5e86318557250ffb140476c1f..83939be14781fa1a5b732c56402a7b6f3131d683 100644
--- a/research/cv/glore_res/src/transform.py
+++ b/research/cv/glore_res/src/transform.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 random augment class
 """
 import numpy as np
-import mindspore.dataset.vision.py_transforms as P
+import mindspore.dataset.vision as V
 from src import transform_utils
 
 IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
@@ -37,9 +37,9 @@ class RandAugment:
         # assert the imgs object are pil_images
         ret_imgs = []
         ret_labels = []
-        py_to_pil_op = P.ToPIL()
-        to_tensor = P.ToTensor()
-        normalize_op = P.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+        py_to_pil_op = V.ToPIL()
+        to_tensor = V.ToTensor()
+        normalize_op = V.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
         rand_augment_ops = transform_utils.rand_augment_transform(self.config_str, self.hparams)
         for i, image in enumerate(imgs):
             img_pil = py_to_pil_op(image)
diff --git a/research/cv/hardnet/src/dataset.py b/research/cv/hardnet/src/dataset.py
index 1955ca545d75e0966ae857f2ad35cdc649079b87..2270337531998e20c58d822f8b6aac5a1e135c69 100644
--- a/research/cv/hardnet/src/dataset.py
+++ b/research/cv/hardnet/src/dataset.py
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset_ImageNet(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend"):
diff --git a/research/cv/hed/src/dataset.py b/research/cv/hed/src/dataset.py
index a74551d2aab600bfbebc6357f094e538e1639ff2..842d1438d851621426976f7cf95dd54931f6727d 100644
--- a/research/cv/hed/src/dataset.py
+++ b/research/cv/hed/src/dataset.py
@@ -19,8 +19,8 @@ import cv2
 import numpy as np
 import mindspore
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C2
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as C2
+import mindspore.dataset.transforms as C
 mindspore.set_seed(1)
 
 def prepare_image_cv2(im):
diff --git a/research/cv/ibnnet/src/dataset.py b/research/cv/ibnnet/src/dataset.py
index fef7aae68e50e69fa0cd13ec5b54ac530a96dfc3..62c6c43086cf387b4bd4df7c0971b0462c95d0ba 100644
--- a/research/cv/ibnnet/src/dataset.py
+++ b/research/cv/ibnnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ python dataset.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/inception_resnet_v2/src/dataset.py b/research/cv/inception_resnet_v2/src/dataset.py
index 81912007bd471b663a28440263224fd9aac8139e..0316efc28c76ad304aa131de728577d51c4c4ee8 100644
--- a/research/cv/inception_resnet_v2/src/dataset.py
+++ b/research/cv/inception_resnet_v2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, config=None):
diff --git a/research/cv/lresnet100e_ir/src/dataset.py b/research/cv/lresnet100e_ir/src/dataset.py
index d0ddade90076aba1303ec1d89e707e554f01579a..a03dadfb5b2741fd22c27bbb09ef7700095e5364 100644
--- a/research/cv/lresnet100e_ir/src/dataset.py
+++ b/research/cv/lresnet100e_ir/src/dataset.py
@@ -15,8 +15,8 @@
 """Create train or eval dataset."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 
 def create_dataset(dataset_path, do_train, img_shape, repeat_num=1, batch_size=32, run_distribute=False):
diff --git a/research/cv/mae/src/datasets/dataset.py b/research/cv/mae/src/datasets/dataset.py
index d1bcd1664367603182f55a53c46607db525495cd..bb9ab3112f5f5067c017d601042f8adea794fc7d 100644
--- a/research/cv/mae/src/datasets/dataset.py
+++ b/research/cv/mae/src/datasets/dataset.py
@@ -23,9 +23,8 @@ import numpy as np
 import mindspore.dataset as de
 import mindspore.common.dtype as mstype
 from mindspore.dataset.vision.utils import Inter
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 from src.datasets.mixup import Mixup
 from src.datasets.random_erasing import RandomErasing
@@ -111,12 +110,12 @@ def create_dataset(dataset_path,
             C.RandomCropDecodeResize(image_size, scale=(crop_min, 1.0), ratio=(3 / 4, 4 / 3),
                                      interpolation=interpolation),
             C.RandomHorizontalFlip(prob=hflip),
-            P.ToPIL()
+            C.ToPIL()
         ]
         trans += [rand_augment_transform(auto_augment, aa_params)]
         trans += [
-            P.ToTensor(),
-            P.Normalize(mean=mean, std=std),
+            C.ToTensor(),
+            C.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(probability=re_prop, mode=re_mode, max_count=re_count)
         ]
 
@@ -127,7 +126,7 @@ def create_dataset(dataset_path,
             C.Decode(),
             C.Resize(int(256 / 224 * image_size), interpolation=interpolation),
             C.CenterCrop(image_size),
-            C.Normalize(mean=mean, std=std),
+            C.Normalize(mean=mean, std=std, is_hwc=True),
             C.HWC2CHW()
         ]
 
diff --git a/research/cv/mae/src/datasets/imagenet.py b/research/cv/mae/src/datasets/imagenet.py
index 3c6894daa3ae7e4d7f784bd41e189c8938024653..e39fc40c60d62f76cf4fe91ef142b8c177fa2fd1 100644
--- a/research/cv/mae/src/datasets/imagenet.py
+++ b/research/cv/mae/src/datasets/imagenet.py
@@ -21,7 +21,7 @@ from PIL import Image
 
 import mindspore.dataset as de
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 
 
 class DataLoader:
diff --git a/research/cv/meta-baseline/src/data/mini_Imagenet.py b/research/cv/meta-baseline/src/data/mini_Imagenet.py
index 23275afed98641009ece17460e7decff67f4bd27..da52ceda55b08f6f6a7fb82cc1bf837330a978bf 100644
--- a/research/cv/meta-baseline/src/data/mini_Imagenet.py
+++ b/research/cv/meta-baseline/src/data/mini_Imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ MiniImageNet
 import os
 import pickle
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_transforms
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from PIL import Image
 
 
@@ -45,19 +45,19 @@ class MiniImageNet:
         label = [x - min_label for x in label]
 
         image_size = 84
-        normalize = py_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        normalize = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
         if split == 'train':
             self.transforms = Compose([
                 decode,
-                py_transforms.RandomCrop(image_size, padding=4),
-                py_transforms.ToTensor(),
+                vision.RandomCrop(image_size, padding=4),
+                vision.ToTensor(),
                 normalize
             ])
         else:
             self.transforms = Compose([
                 decode,
-                py_transforms.Resize(image_size),
-                py_transforms.ToTensor(),
+                vision.Resize(image_size),
+                vision.ToTensor(),
                 normalize
             ])
         data = [self.transforms(x)[0] for x in data]
diff --git a/research/cv/metric_learn/src/dataset.py b/research/cv/metric_learn/src/dataset.py
index 882363fd01e93aeb2e5a8d558c78c3b492311149..f2d4b75d48467d6036099d529a59fe3b62189038 100644
--- a/research/cv/metric_learn/src/dataset.py
+++ b/research/cv/metric_learn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ create train or eval dataset.
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset as dss
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision import Inter
 
 def create_dataset0(dataset_generator, do_train, batch_size=80, device_num=1, rank_id=0):
diff --git a/research/cv/mnasnet/src/dataset.py b/research/cv/mnasnet/src/dataset.py
index e9797e9aedd901fe6a54710a8d5b0375e0ec2c8a..191a15a4d3f03c941749557d795976123638a408 100644
--- a/research/cv/mnasnet/src/dataset.py
+++ b/research/cv/mnasnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@ Data operations, will be used in train.py and eval.py
 """
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/mobilenetV3_small_x1_0/src/dataset.py b/research/cv/mobilenetV3_small_x1_0/src/dataset.py
index 5d3dd274e503138c5018acb3b539996858f76c5c..62665403953b26a8264024c65cb4f3eb1ea536e0 100644
--- a/research/cv/mobilenetV3_small_x1_0/src/dataset.py
+++ b/research/cv/mobilenetV3_small_x1_0/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import multiprocessing
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 
 def create_dataset(dataset_path, do_train, batch_size=16, device_num=1, rank=0):
diff --git a/research/cv/mobilenetv3_large/src/dataset.py b/research/cv/mobilenetv3_large/src/dataset.py
index 49ebdacc5adce53891d73e564aacfea6254947a1..eb28ad6ea1f9cf05e49b72e9e8d6d333338426e6 100644
--- a/research/cv/mobilenetv3_large/src/dataset.py
+++ b/research/cv/mobilenetv3_large/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 """Create train or eval dataset."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import get_rank, get_group_size
 
 def create_dataset(dataset_path, do_train, config, repeat_num=1, batch_size=32, run_distribute=True):
diff --git a/research/cv/nas-fpn/src/dataset.py b/research/cv/nas-fpn/src/dataset.py
index 36c018347bc367a002be96df7259cc415c4fe37d..239d7de558faff4287c2304cef90894dc9d6f1b4 100644
--- a/research/cv/nas-fpn/src/dataset.py
+++ b/research/cv/nas-fpn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import os
 import numpy as np
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.model_utils.config import config
 from src.box_utils import jaccard_numpy, retinanet_bboxes_encode
diff --git a/research/cv/nima_vgg16/src/MyDataset.py b/research/cv/nima_vgg16/src/MyDataset.py
index 36b8e9b80a52e9ee9477fc7dd1d16f9a55a9a8bc..39f7bb1d56e95ce55b05b3eb71dfdf583e476946 100644
--- a/research/cv/nima_vgg16/src/MyDataset.py
+++ b/research/cv/nima_vgg16/src/MyDataset.py
@@ -20,9 +20,9 @@ import cv2
 import numpy as np
 from mindspore import dataset as ds
 from mindspore import dtype as mstype
-from mindspore.dataset.transforms import c_transforms as t_ct
+from mindspore.dataset.transforms import transforms as t_ct
 from mindspore.dataset.vision import Inter
-from mindspore.dataset.vision import c_transforms as v_ct
+from mindspore.dataset.vision import transforms as v_ct
 
 
 class Dataset:
diff --git a/research/cv/ntsnet/src/dataset.py b/research/cv/ntsnet/src/dataset.py
index 9e0d068d010fd17575313aacc106aa16ac502ae1..6b1d0d0170ce37ea8640c7a2feeb45ed3120b42e 100644
--- a/research/cv/ntsnet/src/dataset.py
+++ b/research/cv/ntsnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 
 """ntsnet dataset"""
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision import Inter
 
 
diff --git a/research/cv/ntsnet/src/dataset_gpu.py b/research/cv/ntsnet/src/dataset_gpu.py
index a33d39d761a84d024095d7af8f95b02111e2655b..daab745cbdec86576bb535d8f5bbb6bee38fad2b 100644
--- a/research/cv/ntsnet/src/dataset_gpu.py
+++ b/research/cv/ntsnet/src/dataset_gpu.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 import os
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision import Inter
 
 from src.config_gpu import config
diff --git a/research/cv/osnet/model_utils/transforms.py b/research/cv/osnet/model_utils/transforms.py
index 779d5e89f21ff81f237d3a2a9f9c6d3c872a5271..d1fd32e60ece5f9368096d4013e4292fcb4c3643 100644
--- a/research/cv/osnet/model_utils/transforms.py
+++ b/research/cv/osnet/model_utils/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License Version 2.0(the "License");
 # you may not use this file except in compliance with the License.
@@ -17,8 +17,8 @@
 
 import math
 import random
-from mindspore.dataset.vision.c_transforms import Resize, Rescale, Normalize, HWC2CHW, RandomHorizontalFlip
-from mindspore.dataset.transforms.c_transforms import Compose
+from mindspore.dataset.vision import Resize, Rescale, Normalize, HWC2CHW, RandomHorizontalFlip
+from mindspore.dataset.transforms import Compose
 
 
 class RandomErasing():
diff --git a/research/cv/pcb_rpp/src/dataset.py b/research/cv/pcb_rpp/src/dataset.py
index 6cc9119771af38ddcc1ad8c82c80ec797bf846f3..04214054ae7902338d74fd0accab6185bcdfa2b8 100644
--- a/research/cv/pcb_rpp/src/dataset.py
+++ b/research/cv/pcb_rpp/src/dataset.py
@@ -24,9 +24,9 @@ import numpy as np
 from mindspore import dataset as ds
 from mindspore.common import dtype as mstype
 from mindspore.communication.management import init, get_rank, get_group_size
-from mindspore.dataset.transforms import c_transforms as C2
+from mindspore.dataset.transforms import transforms as C2
 from mindspore.dataset.vision import Inter
-from mindspore.dataset.vision import c_transforms as C
+from mindspore.dataset.vision import transforms as C
 from mindspore.mindrecord import FileWriter
 
 from src import datasets
diff --git a/research/cv/pnasnet/src/dataset.py b/research/cv/pnasnet/src/dataset.py
index e991ae49528c2f7324ed947d859fe59925147862..e3c8adc1a768c10d2945c0778dae75b42343ffdc 100644
--- a/research/cv/pnasnet/src/dataset.py
+++ b/research/cv/pnasnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 def create_dataset(dataset_path, do_train, rank, group_size,
                    num_parallel_workers=8, batch_size=128,
diff --git a/research/cv/proxylessnas/src/dataset.py b/research/cv/proxylessnas/src/dataset.py
index f8e335f714d48912d2c9be38435c9d50572aa0cf..1bab7079f378c03f48764f5d4bf9cd493802ac60 100644
--- a/research/cv/proxylessnas/src/dataset.py
+++ b/research/cv/proxylessnas/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 def create_dataset(dataset_path, do_train, rank, group_size,
                    num_parallel_workers=8, batch_size=128,
diff --git a/research/cv/ras/src/dataset_test.py b/research/cv/ras/src/dataset_test.py
index 4a6b641385552a7b1e3621c1c20c6929fbbe49af..53e0d2462f277f29bbd2d56b5a7df9a4193f343b 100644
--- a/research/cv/ras/src/dataset_test.py
+++ b/research/cv/ras/src/dataset_test.py
@@ -1,5 +1,5 @@
 """
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 
 import os
 import numpy as np
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as ds
 from PIL import Image
 
diff --git a/research/cv/ras/src/dataset_train.py b/research/cv/ras/src/dataset_train.py
index 9e6770e539010c65471f9791c8b99ae3f71b024c..73ae0983037f97951e6730d3879479889ba94c92 100644
--- a/research/cv/ras/src/dataset_train.py
+++ b/research/cv/ras/src/dataset_train.py
@@ -1,5 +1,5 @@
 """
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 
 import os
 import numpy as np
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 import mindspore.dataset as ds
 from mindspore.communication import get_rank, get_group_size
 from PIL import Image
diff --git a/research/cv/rcnn/eval.py b/research/cv/rcnn/eval.py
index d1632783169f4ef237931a8fe869de57b4850456..cc34f042deb64ad1398afc5776c3c7fded796466 100644
--- a/research/cv/rcnn/eval.py
+++ b/research/cv/rcnn/eval.py
@@ -25,7 +25,7 @@ from operator import itemgetter
 import cv2
 import mindspore
 import mindspore.dataset
-import mindspore.dataset.vision.c_transforms as c_trans
+import mindspore.dataset.vision as c_trans
 from mindspore import load_param_into_net, load_checkpoint, ops
 import numpy as np
 from tqdm import tqdm
diff --git a/research/cv/relationnet/src/dataset.py b/research/cv/relationnet/src/dataset.py
index 02c86b0a3b228516d94a42ec0ee7a4fcf305b7d7..516f3c3b37881dc623d5a723ca2e32f5928e1687 100644
--- a/research/cv/relationnet/src/dataset.py
+++ b/research/cv/relationnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ import random
 import os
 from PIL import Image
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 from mindspore import Tensor
 
 
@@ -176,8 +176,8 @@ class ClassBalancedSampler():
 def get_data_loader(task, num_per_class=1, split='train', shuffle=True, rotation=0, flip=None):
     '''get dataloader'''
     mean, std = [0.92206], [0.08426]
-    transform = Compose([py_vision.ToTensor(),  # numpy HWC-> Tensor CHW
-                         py_vision.Normalize(mean=mean, std=std)])
+    transform = Compose([vision.ToTensor(),  # numpy HWC-> Tensor CHW
+                         vision.Normalize(mean=mean, std=std, is_hwc=False)])
 
     dataset = Omniglot(task, split=split, transform=transform, rotation=rotation, flip=flip)
     if split == 'train':
diff --git a/research/cv/renas/src/dataset.py b/research/cv/renas/src/dataset.py
index c411d5b707688bee9da8dc53c1891611ee205fd8..899927a4c8cacca4160fca6ef6a33637164c608e 100644
--- a/research/cv/renas/src/dataset.py
+++ b/research/cv/renas/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,14 +17,12 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as vision
 
 
 # values that should remain constant
@@ -55,24 +53,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -121,16 +119,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
@@ -176,9 +174,9 @@ def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=Non
     random_horizontal_op = vision.RandomHorizontalFlip()
     resize_op = vision.Resize((resize_height, resize_width))  # interpolation default BILINEAR
     rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
-    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
+    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010), is_hwc=True)
     changeswap_op = vision.HWC2CHW()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
     c_trans = []
     if training:
diff --git a/research/cv/repvgg/src/data/imagenet.py b/research/cv/repvgg/src/data/imagenet.py
index fabaef2597743796f2e2c9259ec7602733fbb4c8..c8a6762f2c9da1b5191db146596be9572f92b202 100644
--- a/research/cv/repvgg/src/data/imagenet.py
+++ b/research/cv/repvgg/src/data/imagenet.py
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import pil_interp, rand_augment_transform
@@ -93,13 +92,13 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.PILCUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         if auto_augment != "None":
             transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
diff --git a/research/cv/res2net/src/dataset.py b/research/cv/res2net/src/dataset.py
index 3959fd2667c77995768061383611c6cc66dcc38b..8b4ee28f5b5ffa2069903c67e2232012d54b521a 100644
--- a/research/cv/res2net/src/dataset.py
+++ b/research/cv/res2net/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import multiprocessing
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32, train_image_size=224, eval_image_size=224,
diff --git a/research/cv/res2net/src/dataset_infer.py b/research/cv/res2net/src/dataset_infer.py
index 98114b7c77192f15fa3ecc15f9740df544581ed6..1b894af182479286c80b2f543673bad5f0cf2ba3 100644
--- a/research/cv/res2net/src/dataset_infer.py
+++ b/research/cv/res2net/src/dataset_infer.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 from src.model_utils.config import config
 
diff --git a/research/cv/res2net_faster_rcnn/src/dataset.py b/research/cv/res2net_faster_rcnn/src/dataset.py
index 09045245818d5fdf29ecd2f62b7d054e97b97b5a..7e7c421b707066d2a7782407cb12f54ed36a3afc 100644
--- a/research/cv/res2net_faster_rcnn/src/dataset.py
+++ b/research/cv/res2net_faster_rcnn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ from numpy import random
 
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 
diff --git a/research/cv/res2net_yolov3/src/yolo_dataset.py b/research/cv/res2net_yolov3/src/yolo_dataset.py
index 6dac2bad3c32ce104083896af4ff28c6e21c292c..b97fd5e83f9697c2eb5d2c385c36584150005c2f 100644
--- a/research/cv/res2net_yolov3/src/yolo_dataset.py
+++ b/research/cv/res2net_yolov3/src/yolo_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ from PIL import Image
 import numpy as np
 from pycocotools.coco import COCO
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 
 from src.distributed_sampler import DistributedSampler
 from src.transforms import reshape_fn, MultiScaleTrans
diff --git a/research/cv/resnet3d/src/dataset.py b/research/cv/resnet3d/src/dataset.py
index 4bfffda6be2dfa8e22bf0bdf2af08b064050c28f..f5034cee227087c1b75357bc45917f8c7628559c 100644
--- a/research/cv/resnet3d/src/dataset.py
+++ b/research/cv/resnet3d/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import os
 
 import mindspore.dataset as ds
 import mindspore.common.dtype as mstype
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import get_rank, get_group_size
 
 from .videodataset import DatasetGenerator
diff --git a/research/cv/resnet3d/src/pil_transforms.py b/research/cv/resnet3d/src/pil_transforms.py
index 270a3e3fba53e69c0e9c10fa1457b7b0aa7515f2..cfd0f0fa7166370eef969ea9fc8bc816d4f4c0dd 100644
--- a/research/cv/resnet3d/src/pil_transforms.py
+++ b/research/cv/resnet3d/src/pil_transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ transforms by PIL.
 """
 import numpy as np
 
-import mindspore.dataset.vision.py_transforms as py_trans
+import mindspore.dataset.vision as vision
 
 
 class PILTrans:
@@ -27,16 +27,16 @@ class PILTrans:
 
     def __init__(self, opt, mean, std):
         super(PILTrans).__init__()
-        self.to_pil = py_trans.ToPIL()
+        self.to_pil = vision.ToPIL()
         self.random_resized_crop = \
-            py_trans.RandomResizedCrop(opt.sample_size, scale=(opt.train_crop_min_scale, 1.0),
-                                       ratio=(opt.train_crop_min_ratio, 1.0 / opt.train_crop_min_ratio))
-        self.random_horizontal_flip = py_trans.RandomHorizontalFlip(prob=0.5)
-        self.color = py_trans.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)
-        self.normalize = py_trans.Normalize(mean=mean, std=std)
-        self.to_tensor = py_trans.ToTensor()
-        self.resize = py_trans.Resize(opt.sample_size)
-        self.center_crop = py_trans.CenterCrop(opt.sample_size)
+            vision.RandomResizedCrop(opt.sample_size, scale=(opt.train_crop_min_scale, 1.0),
+                                     ratio=(opt.train_crop_min_ratio, 1.0 / opt.train_crop_min_ratio))
+        self.random_horizontal_flip = vision.RandomHorizontalFlip(prob=0.5)
+        self.color = vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)
+        self.normalize = vision.Normalize(mean=mean, std=std, is_hwc=False)
+        self.to_tensor = vision.ToTensor()
+        self.resize = vision.Resize(opt.sample_size)
+        self.center_crop = vision.CenterCrop(opt.sample_size)
         self.opt = opt
 
     def __call__(self, data, labels, batchInfo):
@@ -72,11 +72,11 @@ class EvalPILTrans:
 
     def __init__(self, opt, mean, std):
         super(EvalPILTrans).__init__()
-        self.to_pil = py_trans.ToPIL()
-        self.resize = py_trans.Resize(opt.sample_size)
-        self.center_crop = py_trans.CenterCrop(opt.sample_size)
-        self.normalize = py_trans.Normalize(mean=mean, std=std)
-        self.to_tensor = py_trans.ToTensor()
+        self.to_pil = vision.ToPIL()
+        self.resize = vision.Resize(opt.sample_size)
+        self.center_crop = vision.CenterCrop(opt.sample_size)
+        self.normalize = vision.Normalize(mean=mean, std=std, is_hwc=False)
+        self.to_tensor = vision.ToTensor()
 
     def __call__(self, data, labels, batchInfo):
         data = data[0]
diff --git a/research/cv/resnet50_adv_pruning/src/pet_dataset.py b/research/cv/resnet50_adv_pruning/src/pet_dataset.py
index 8eb9eaecc7513db7c401cd0c52f644c7d2c9ff39..de6adced75425be8b04a333af8825b9d0805a95c 100644
--- a/research/cv/resnet50_adv_pruning/src/pet_dataset.py
+++ b/research/cv/resnet50_adv_pruning/src/pet_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,10 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.vision.py_transforms as P
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.transforms.py_transforms as P2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.dataset.vision import Inter
 
 
@@ -74,12 +72,12 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
     change_swap_op = C.HWC2CHW()
 
     # define python operations
-    decode_p = P.Decode()
-    resize_p = P.Resize(256, interpolation=Inter.BILINEAR)
-    center_crop_p = P.CenterCrop(224)
-    totensor = P.ToTensor()
-    normalize_p = P.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
-    composeop = P2.Compose(
+    decode_p = C.Decode(True)
+    resize_p = C.Resize(256, interpolation=Inter.BILINEAR)
+    center_crop_p = C.CenterCrop(224)
+    totensor = C.ToTensor()
+    normalize_p = C.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225), is_hwc=False)
+    composeop = C2.Compose(
         [decode_p, resize_p, center_crop_p, totensor, normalize_p])
     if do_train:
         trans = [resize_crop_op, horizontal_flip_op, color_op,
diff --git a/research/cv/resnet50_bam/src/dataset.py b/research/cv/resnet50_bam/src/dataset.py
index db662754fa1173ef90c1530519e24211b70ce106..6ec6428d7e365615c51dd49d0035c0b843caf062 100644
--- a/research/cv/resnet50_bam/src/dataset.py
+++ b/research/cv/resnet50_bam/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.config import imagenet_cfg
 
 
diff --git a/research/cv/resnetv2/src/dataset.py b/research/cv/resnetv2/src/dataset.py
index 784e564e904db05ac81a47410b13aeacfdbe36b7..cb3ff1d68a47fbdb27991d5909d70251f05d5021 100644
--- a/research/cv/resnetv2/src/dataset.py
+++ b/research/cv/resnetv2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,8 +16,8 @@
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 def create_dataset1(dataset_path, do_train=True, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
diff --git a/research/cv/resnetv2_50_frn/src/dataset.py b/research/cv/resnetv2_50_frn/src/dataset.py
index 97221b5b5a95d134543663bfc76c4d118f694cbc..b1166b11c3482f3ff5a0a8e69a1b67363002e7d7 100644
--- a/research/cv/resnetv2_50_frn/src/dataset.py
+++ b/research/cv/resnetv2_50_frn/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 
 def create_dataset(dataset_path, do_train, rank, group_size,
                    num_parallel_workers=8, batch_size=128,
diff --git a/research/cv/resnext152_64x4d/src/dataset.py b/research/cv/resnext152_64x4d/src/dataset.py
index 712bd775a4224bbd4de38e66513116439c5b89c2..b7f1534e65f25ce38331e2e28d67260bd5995aa4 100644
--- a/research/cv/resnext152_64x4d/src/dataset.py
+++ b/research/cv/resnext152_64x4d/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ dataset processing.
 import os
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as V_C
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as V_C
 from PIL import Image, ImageFile
 from src.utils.sampler import DistributedSampler
 
diff --git a/research/cv/retinanet_resnet101/src/dataset.py b/research/cv/retinanet_resnet101/src/dataset.py
index 375f5337b2b02f0dcecccbb1172e03fcbb0d0cf8..d86f6d8fdc180fb0dd4e8503d7a261818128ed4e 100644
--- a/research/cv/retinanet_resnet101/src/dataset.py
+++ b/research/cv/retinanet_resnet101/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .model_utils.config import config
 from .box_utils import jaccard_numpy, retinanet_bboxes_encode
diff --git a/research/cv/retinanet_resnet152/src/dataset.py b/research/cv/retinanet_resnet152/src/dataset.py
index f1dad5f446e0c7273ae567cb09e2080c2b3c9006..f9f186612bb290ddf1d739b8101dde24963c641a 100644
--- a/research/cv/retinanet_resnet152/src/dataset.py
+++ b/research/cv/retinanet_resnet152/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .model_utils.config import config
 from .box_utils import jaccard_numpy, retinanet_bboxes_encode
diff --git a/research/cv/rfcn/src/dataset.py b/research/cv/rfcn/src/dataset.py
index a148b7fd3def37488b26ab4e9b7e869d1dd1d602..dcc009b196ad9b45bb3f049e3e1c10ae550c20d1 100644
--- a/research/cv/rfcn/src/dataset.py
+++ b/research/cv/rfcn/src/dataset.py
@@ -23,7 +23,7 @@ from numpy import random
 import cv2
 from PIL import Image
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
diff --git a/research/cv/simple_baselines/src/dataset.py b/research/cv/simple_baselines/src/dataset.py
index 84796c054dd469228b9c908f171e2c42609f1fae..af06907cb88e89bc378c0c32b8e76e56fff3d352 100644
--- a/research/cv/simple_baselines/src/dataset.py
+++ b/research/cv/simple_baselines/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from src.utils.transforms import fliplr_joints, get_affine_transform, affine_transform
 
 ds.config.set_seed(1) # Set Random Seed
diff --git a/research/cv/single_path_nas/src/dataset.py b/research/cv/single_path_nas/src/dataset.py
index ac51ad69e82410668aef968c4396d816be056806..eac12de2437640d1d3b8fd26ae1c31cd8527b493 100644
--- a/research/cv/single_path_nas/src/dataset.py
+++ b/research/cv/single_path_nas/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ Data operations, will be used in train.py and eval.py
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 
 from src.config import imagenet_cfg
 
diff --git a/research/cv/sknet/src/dataset.py b/research/cv/sknet/src/dataset.py
index 7a67fb74ad1e0e7a9aca3d8cd79ab67a9118c98b..611a937369723682f2ba4312b5f715b8c0f15c05 100644
--- a/research/cv/sknet/src/dataset.py
+++ b/research/cv/sknet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.vision as C
 from mindspore.communication.management import get_group_size, get_rank, init
 
 
diff --git a/research/cv/squeezenet/src/dataset.py b/research/cv/squeezenet/src/dataset.py
index 38eaef30f308abc0c3c1ba6d1a4e7144751bcfaf..e778661b4519b989aa1e83f0b92d5e9893b205b0 100644
--- a/research/cv/squeezenet/src/dataset.py
+++ b/research/cv/squeezenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/squeezenet1_1/src/dataset.py b/research/cv/squeezenet1_1/src/dataset.py
index 81bf48ced9b8329d2ec735917480ea0f6c68b05d..033f8a699d73d62823988d2f4c33d39bd4354dbf 100644
--- a/research/cv/squeezenet1_1/src/dataset.py
+++ b/research/cv/squeezenet1_1/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,8 +18,8 @@ create train or eval dataset of imagenet and cifar10.
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 def create_dataset_imagenet(dataset_path,
                             do_train,
diff --git a/research/cv/ssc_resnet50/src/dataset.py b/research/cv/ssc_resnet50/src/dataset.py
index 3ef167ceb465767f1f4f7f41ea846daab0dcf00b..b714dbee4a3ae85e55a3a42aff4dbff56462167a 100644
--- a/research/cv/ssc_resnet50/src/dataset.py
+++ b/research/cv/ssc_resnet50/src/dataset.py
@@ -23,8 +23,8 @@ import logging
 import numpy as np
 from PIL import Image
 from PIL import ImageFile
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_trans
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.ops
 import mindspore.dataset as de
 
@@ -91,12 +91,12 @@ class CoMatchDatasetImageNet:
         self.samples = samples
         logging.info("sample len: %d", len(self.samples))
 
-        self.random_resize_crop = py_vision.RandomResizedCrop(224, scale=(0.2, 1.))
-        self.random_horizontal_flip = py_vision.RandomHorizontalFlip()
-        self.to_tensor = py_vision.ToTensor()
-        self.normalize = py_vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
-        self.random_apply = py_trans.RandomApply([py_vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)], prob=0.8)
-        self.random_grayscale = py_vision.RandomGrayscale(prob=0.2)
+        self.random_resize_crop = vision.RandomResizedCrop(224, scale=(0.2, 1.))
+        self.random_horizontal_flip = vision.RandomHorizontalFlip()
+        self.to_tensor = vision.ToTensor()
+        self.normalize = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
+        self.random_apply = data_trans.RandomApply([vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)], prob=0.8)
+        self.random_grayscale = vision.RandomGrayscale(prob=0.2)
 
         self.unlable_randomaugmentMC = RandAugmentMC(int(args.unlabel_randomaug_count),
                                                      int(args.unlabel_randomaug_intensity))
@@ -297,10 +297,10 @@ class CoMatchDatasetImageNetTest:
         logging.info("sample len: %d", len(self.samples))
 
         # for test
-        self.resize = py_vision.Resize(256)
-        self.center_crop = py_vision.CenterCrop(224)
-        self.to_tensor = py_vision.ToTensor()
-        self.normalize = py_vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        self.resize = vision.Resize(256)
+        self.center_crop = vision.CenterCrop(224)
+        self.to_tensor = vision.ToTensor()
+        self.normalize = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
 
     def __getitem__(self, index):
         """
@@ -362,11 +362,11 @@ class CoMatchSelectSample:
         self.samples = samples
 
         # for test
-        self.random_resize_crop = py_vision.RandomResizedCrop(224, scale=(0.2, 1.))
-        self.random_horizontal_flip = py_vision.RandomHorizontalFlip()
+        self.random_resize_crop = vision.RandomResizedCrop(224, scale=(0.2, 1.))
+        self.random_horizontal_flip = vision.RandomHorizontalFlip()
 
-        self.to_tensor = py_vision.ToTensor()
-        self.normalize = py_vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+        self.to_tensor = vision.ToTensor()
+        self.normalize = vision.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], is_hwc=False)
 
     def __getitem__(self, index):
         """
diff --git a/research/cv/ssd_ghostnet/src/dataset.py b/research/cv/ssd_ghostnet/src/dataset.py
index 0350fe910a3970b873466a5e5c543391181b8a9a..8eb7fa4dcfb311ade7a185c87b0de78d96d66a77 100644
--- a/research/cv/ssd_ghostnet/src/dataset.py
+++ b/research/cv/ssd_ghostnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C2
+import mindspore.dataset.vision as C2
 from mindspore.mindrecord import FileWriter
 from src.model_utils.config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_inception_v2/src/dataset.py b/research/cv/ssd_inception_v2/src/dataset.py
index 0c08dbae5701b6bed776d521ba6ed1591780ea3c..ebeaf3774edd9e416b1a0249965e4e2d629b237b 100644
--- a/research/cv/ssd_inception_v2/src/dataset.py
+++ b/research/cv/ssd_inception_v2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ import xml.etree.ElementTree as et
 import cv2
 import numpy as np
 from mindspore import dataset as de
-from mindspore.dataset.vision import c_transforms as C
+from mindspore.dataset.vision import transforms as C
 from mindspore.mindrecord import FileWriter
 
 from src.model_utils.config import config
diff --git a/research/cv/ssd_inceptionv2/src/dataset.py b/research/cv/ssd_inceptionv2/src/dataset.py
index d6aa4ee1bdcbe762e0a3adf8bd939a40f7d53b3d..1a0d2cb717d18532465ee3855e537abffe65f8ad 100644
--- a/research/cv/ssd_inceptionv2/src/dataset.py
+++ b/research/cv/ssd_inceptionv2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_mobilenetV2/src/dataset.py b/research/cv/ssd_mobilenetV2/src/dataset.py
index 9b665fa0670aff7175820132c33b254b0c011abc..c3ab4b7fa63acd4b2e044c2a05bbf56a0cc38142 100644
--- a/research/cv/ssd_mobilenetV2/src/dataset.py
+++ b/research/cv/ssd_mobilenetV2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_mobilenetV2_FPNlite/src/dataset.py b/research/cv/ssd_mobilenetV2_FPNlite/src/dataset.py
index 1e55f901061cf970c0ba2dbf224030b5cb3dcbdd..f0a96bb844b3198370fca7783063c29b039b7beb 100644
--- a/research/cv/ssd_mobilenetV2_FPNlite/src/dataset.py
+++ b/research/cv/ssd_mobilenetV2_FPNlite/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ import cv2
 from tqdm import tqdm
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from src.model_utils.config import config as cfg
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_resnet34/src/dataset.py b/research/cv/ssd_resnet34/src/dataset.py
index 5b2a642557be69456ec1993967ee3148e8e267e8..26d042dec3379057509bc374d7817bda74fb52f6 100644
--- a/research/cv/ssd_resnet34/src/dataset.py
+++ b/research/cv/ssd_resnet34/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_resnet50/src/dataset.py b/research/cv/ssd_resnet50/src/dataset.py
index a102b4729b1c6d3d6513700467892337975ef0f4..eaed11ad0e4751ed1487c8a664094af01ab76103 100644
--- a/research/cv/ssd_resnet50/src/dataset.py
+++ b/research/cv/ssd_resnet50/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import numpy as np
 import cv2
 
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 from .config import config
 from .box_utils import jaccard_numpy, ssd_bboxes_encode
diff --git a/research/cv/ssd_resnet_34/src/dataset.py b/research/cv/ssd_resnet_34/src/dataset.py
index 5df5d78dff0c2d3730835d2e13f703907b0ce3d2..d6f8787052aaeadc26e1eb40b3bacf5d856620d5 100644
--- a/research/cv/ssd_resnet_34/src/dataset.py
+++ b/research/cv/ssd_resnet_34/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ from xml.etree import ElementTree
 import cv2
 import numpy as np
 from mindspore import dataset as ds
-from mindspore.dataset.vision import c_transforms as C
+from mindspore.dataset.vision import transforms as C
 from mindspore.mindrecord import FileWriter
 
 from .box_utils import jaccard_numpy
diff --git a/research/cv/stpm/src/dataset.py b/research/cv/stpm/src/dataset.py
index e8e337855a9605a99ec479497307b8d134c6c115..6bb77547ce3240d79288db852b737e85424d280a 100644
--- a/research/cv/stpm/src/dataset.py
+++ b/research/cv/stpm/src/dataset.py
@@ -21,8 +21,8 @@ import numpy as np
 from PIL import Image
 import mindspore.dataset as ds
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.py_transforms as py_vision
-from mindspore.dataset.transforms.py_transforms import Compose
+import mindspore.dataset.vision as vision
+from mindspore.dataset.transforms.transforms import Compose
 
 
 class MVTecDataset():
@@ -113,15 +113,15 @@ def createDataset(dataset_path, category, save_sample=False, out_size=256, train
     std = [0.229, 0.224, 0.225]
 
     data_transforms = Compose([
-        py_vision.Resize((out_size, out_size), interpolation=Inter.ANTIALIAS),
-        py_vision.CenterCrop(out_size),
-        py_vision.ToTensor(),
-        py_vision.Normalize(mean=mean, std=std)
+        vision.Resize((out_size, out_size), interpolation=Inter.ANTIALIAS),
+        vision.CenterCrop(out_size),
+        vision.ToTensor(),
+        vision.Normalize(mean=mean, std=std, is_hwc=False)
     ])
     gt_transforms = Compose([
-        py_vision.Resize((out_size, out_size)),
-        py_vision.CenterCrop(out_size),
-        py_vision.ToTensor()
+        vision.Resize((out_size, out_size)),
+        vision.CenterCrop(out_size),
+        vision.ToTensor()
     ])
 
     train_data = MVTecDataset(root=os.path.join(dataset_path, category),
diff --git a/research/cv/swin_transformer/src/data/imagenet.py b/research/cv/swin_transformer/src/data/imagenet.py
index 522159871905995bfbfca9ec301569df263b868c..f1883aef7f9f9ef819e68ef6b960a3e5e99e919a 100644
--- a/research/cv/swin_transformer/src/data/imagenet.py
+++ b/research/cv/swin_transformer/src/data/imagenet.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,9 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
-import mindspore.dataset.vision.py_transforms as py_vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.dataset.vision.utils import Inter
 
 from src.data.augment.auto_augment import _pil_interp, rand_augment_transform
@@ -94,12 +93,12 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
             vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3),
                                           interpolation=Inter.BICUBIC),
             vision.RandomHorizontalFlip(prob=0.5),
-            py_vision.ToPIL()
+            vision.ToPIL()
         ]
         transform_img += [rand_augment_transform(auto_augment, aa_params)]
         transform_img += [
-            py_vision.ToTensor(),
-            py_vision.Normalize(mean=mean, std=std),
+            vision.ToTensor(),
+            vision.Normalize(mean=mean, std=std, is_hwc=False),
             RandomErasing(args.re_prob, mode=args.re_mode, max_count=args.re_count)
         ]
     else:
@@ -111,14 +110,14 @@ def create_dataset_imagenet(dataset_dir, args, repeat_num=1, training=True):
                 vision.Decode(),
                 vision.Resize(int(256 / 224 * image_size), interpolation=Inter.BICUBIC),
                 vision.CenterCrop(image_size),
-                vision.Normalize(mean=mean, std=std),
+                vision.Normalize(mean=mean, std=std, is_hwc=True),
                 vision.HWC2CHW()
             ]
         else:
             transform_img = [
                 vision.Decode(),
                 vision.Resize(int(image_size), interpolation=Inter.BICUBIC),
-                vision.Normalize(mean=mean, std=std),
+                vision.Normalize(mean=mean, std=std, is_hwc=True),
                 vision.HWC2CHW()
             ]
 
diff --git a/research/cv/textfusenet/src/dataset.py b/research/cv/textfusenet/src/dataset.py
index 47479f06b6ece7b30bfc858303705676718e14cb..a406408b328cdc5a73a42e6e25dca606bdf6ca53 100755
--- a/research/cv/textfusenet/src/dataset.py
+++ b/research/cv/textfusenet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ from numpy import random
 import cv2
 import mmcv
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 
 from .model_utils.config import config
diff --git a/research/cv/tinynet/src/dataset.py b/research/cv/tinynet/src/dataset.py
index cf49192e7300729b67668a93a54e88a56c515295..b2c001d542c7491ce26a8054f9198785a13217ad 100644
--- a/research/cv/tinynet/src/dataset.py
+++ b/research/cv/tinynet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,9 +17,8 @@ import math
 import os
 
 import numpy as np
-import mindspore.dataset.vision.py_transforms as py_vision
-import mindspore.dataset.transforms.py_transforms as py_transforms
-import mindspore.dataset.transforms.c_transforms as c_transforms
+import mindspore.dataset.vision as vision
+import mindspore.dataset.transforms as data_trans
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
 from mindspore.communication.management import get_rank, get_group_size
@@ -53,24 +52,24 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
     """Create ImageNet training dataset"""
     if not os.path.exists(train_data_url):
         raise ValueError('Path not exists')
-    decode_op = py_vision.Decode()
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
 
-    random_resize_crop_bicubic = py_vision.RandomResizedCrop(size=(input_size, input_size),
-                                                             scale=SCALE, ratio=RATIO,
-                                                             interpolation=Inter.BICUBIC)
-    random_horizontal_flip_op = py_vision.RandomHorizontalFlip(0.5)
+    random_resize_crop_bicubic = vision.RandomResizedCrop(size=(input_size, input_size),
+                                                          scale=SCALE, ratio=RATIO,
+                                                          interpolation=Inter.BICUBIC)
+    random_horizontal_flip_op = vision.RandomHorizontalFlip(0.5)
     adjust_range = (max(0, 1 - color_jitter), 1 + color_jitter)
-    random_color_jitter_op = py_vision.RandomColorAdjust(brightness=adjust_range,
-                                                         contrast=adjust_range,
-                                                         saturation=adjust_range)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    random_color_jitter_op = vision.RandomColorAdjust(brightness=adjust_range,
+                                                      contrast=adjust_range,
+                                                      saturation=adjust_range)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
     # assemble all the transforms
-    image_ops = py_transforms.Compose([decode_op, random_resize_crop_bicubic,
-                                       random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, random_resize_crop_bicubic,
+                                    random_horizontal_flip_op, random_color_jitter_op, to_tensor, normalize_op])
 
     rank_id = get_rank() if distributed else 0
     rank_size = get_group_size() if distributed else 1
@@ -119,16 +118,16 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
     else:
         scale_size = int(math.floor(input_size / DEFAULT_CROP_PCT))
 
-    type_cast_op = c_transforms.TypeCast(mstype.int32)
-    decode_op = py_vision.Decode()
-    resize_op = py_vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
-    center_crop = py_vision.CenterCrop(size=input_size)
-    to_tensor = py_vision.ToTensor()
-    normalize_op = py_vision.Normalize(
-        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)
+    type_cast_op = data_trans.TypeCast(mstype.int32)
+    decode_op = vision.Decode(True)
+    resize_op = vision.Resize(size=scale_size, interpolation=Inter.BICUBIC)
+    center_crop = vision.CenterCrop(size=input_size)
+    to_tensor = vision.ToTensor()
+    normalize_op = vision.Normalize(
+        IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, is_hwc=False)
 
-    image_ops = py_transforms.Compose([decode_op, resize_op, center_crop,
-                                       to_tensor, normalize_op])
+    image_ops = data_trans.Compose([decode_op, resize_op, center_crop,
+                                    to_tensor, normalize_op])
 
     dataset = dataset.map(input_columns=["label"], operations=type_cast_op,
                           num_parallel_workers=workers)
diff --git a/research/cv/tracktor/src/dataset.py b/research/cv/tracktor/src/dataset.py
index 0e2afa850f90d9f44a74137709dac0a7a9144aba..d63c6b1f9d49b3f421b4337f91a8c36670ed1353 100644
--- a/research/cv/tracktor/src/dataset.py
+++ b/research/cv/tracktor/src/dataset.py
@@ -24,7 +24,7 @@ import os.path as osp
 import cv2
 import mindspore as ms
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.vision as C
 from mindspore.mindrecord import FileWriter
 import numpy as np
 from numpy import random
diff --git a/research/cv/u2net/src/data_loader.py b/research/cv/u2net/src/data_loader.py
index 66eb91711aea3067d5c40570c1d678226140bc1d..0f0c8e05ebf5a8b404c96ef6e8e8cfc3e36a739c 100644
--- a/research/cv/u2net/src/data_loader.py
+++ b/research/cv/u2net/src/data_loader.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,7 +25,7 @@ from skimage import io, transform, color
 from mindspore import context
 from mindspore import dataset as ds
 from mindspore.common import dtype as mstype
-import mindspore.dataset.transforms.c_transforms as CC
+import mindspore.dataset.transforms as CC
 from mindspore.context import ParallelMode
 from mindspore.communication.management import get_rank, get_group_size
 
diff --git a/research/cv/vgg19/src/dataset.py b/research/cv/vgg19/src/dataset.py
index 93772b806f5ba6e685f29578bbd16b2b10139e92..d6af839cb262a0bd8b3a47bbcc6d17019af59094 100644
--- a/research/cv/vgg19/src/dataset.py
+++ b/research/cv/vgg19/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 from PIL import Image, ImageFile
 from mindspore.common import dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from src.utils.sampler import DistributedSampler
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
diff --git a/research/cv/vit_base/src/dataset.py b/research/cv/vit_base/src/dataset.py
index 645c594c556f3284a87f6e6eecc3361babc80dd1..75d969c7f590ba7c8c9e0635b1b578f73319a552 100644
--- a/research/cv/vit_base/src/dataset.py
+++ b/research/cv/vit_base/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ import os
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
-import mindspore.dataset.vision.c_transforms as vision
+import mindspore.dataset.transforms as C
+import mindspore.dataset.vision as vision
 from mindspore.communication.management import get_group_size
 from mindspore.communication.management import get_rank
 
diff --git a/research/cv/wave_mlp/src/dataset.py b/research/cv/wave_mlp/src/dataset.py
index 89c44b54105c8b8a1f3d18ed50c5418f90a3be30..9449268f6e2abc064714bd630de85661850ad67e 100644
--- a/research/cv/wave_mlp/src/dataset.py
+++ b/research/cv/wave_mlp/src/dataset.py
@@ -17,15 +17,14 @@ import os
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.transforms.c_transforms as C2
-import mindspore.dataset.vision.py_transforms as pytrans
-import mindspore.dataset.transforms.py_transforms as py_transforms
+import mindspore.dataset.transforms as C2
+import mindspore.dataset.transforms as transforms
 
-from mindspore.dataset.transforms.py_transforms import Compose
-import mindspore.dataset.vision.c_transforms as C
+from mindspore.dataset.transforms.transforms import Compose
+import mindspore.dataset.vision as C
 
 
-class ToNumpy(py_transforms.PyTensorOperation):
+class ToNumpy(transforms.PyTensorOperation):
 
     def __init__(self, output_type=np.float32):
         self.output_type = output_type
@@ -81,13 +80,13 @@ def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=128):
         ]
     else:
         trans = [
-            pytrans.Decode(),
-            pytrans.Resize(235),
-            pytrans.CenterCrop(224)
+            C.Decode(True),
+            C.Resize(235),
+            C.CenterCrop(224)
         ]
     trans += [
-        pytrans.ToTensor(),
-        pytrans.Normalize(mean=mean, std=std),
+        C.ToTensor(),
+        C.Normalize(mean=mean, std=std, is_hwc=False),
     ]
     trans = Compose(trans)
 
diff --git a/research/cv/wgan/src/dataset.py b/research/cv/wgan/src/dataset.py
index 6168e0752108b79c4a2a4cb2e9764ec410be7741..de2c14e7b27534db5c6c595b420d0e8794874c17 100644
--- a/research/cv/wgan/src/dataset.py
+++ b/research/cv/wgan/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 
 """ dataset """
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as c
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as c
+import mindspore.dataset.transforms as C2
 import mindspore.common.dtype as mstype
 
 
diff --git a/research/cv/wideresnet/src/dataset.py b/research/cv/wideresnet/src/dataset.py
index 2617a656b27d22911fad7a02dcc458b46b42ba87..ea4fcef9fd1a919efaa97bda186dfc3d48ab1cb6 100644
--- a/research/cv/wideresnet/src/dataset.py
+++ b/research/cv/wideresnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,8 +19,8 @@ Data operations, will be used in train.py and eval.py
 import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 from mindspore.communication.management import init, get_rank, get_group_size
 
 
diff --git a/research/cv/yolov3_tiny/src/transforms.py b/research/cv/yolov3_tiny/src/transforms.py
index 8a483f82269815c2383b45b8b266827d3ac300d6..3417ba8cc8eedefb583c971e52500d0c29201877 100644
--- a/research/cv/yolov3_tiny/src/transforms.py
+++ b/research/cv/yolov3_tiny/src/transforms.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@ import copy
 import random
 
 import cv2
-import mindspore.dataset.vision.py_transforms as PV
+import mindspore.dataset.vision as vision
 import numpy as np
 from PIL import Image
 
@@ -566,6 +566,6 @@ class MultiScaleTrans:
 
     def __call__(self, img, anno, input_size, mosaic_flag):
         if mosaic_flag[0] == 0:
-            img = PV.Decode()(img)
+            img = vision.Decode(True)(img)
         img, anno = preprocess_fn(img, anno, self.config, input_size, self.device_num)
         return img, anno, np.array(img.shape[0:2])
diff --git a/research/cv/yolov3_tiny/src/yolo_dataset.py b/research/cv/yolov3_tiny/src/yolo_dataset.py
index 1678f9d7e317692281b91599bbcf9f1b1f10db2d..fd0a25f2825e899beb7a8f0d80e1ae6dfcaf95e2 100644
--- a/research/cv/yolov3_tiny/src/yolo_dataset.py
+++ b/research/cv/yolov3_tiny/src/yolo_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@ import random
 
 import cv2
 import mindspore.dataset as de
-import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.vision as CV
 import numpy as np
 from PIL import Image
 from pycocotools.coco import COCO
diff --git a/research/mm/wukong/src/dataset/dataset.py b/research/mm/wukong/src/dataset/dataset.py
index 4fef4bd98c6b5dafd6c3d4534ecb0e4544401fd7..a15635b4c9ef4cc4566e1b461797412a2852bc14 100644
--- a/research/mm/wukong/src/dataset/dataset.py
+++ b/research/mm/wukong/src/dataset/dataset.py
@@ -15,8 +15,8 @@
 from mindspore import dtype as mstype
 import mindspore.dataset as ds
 from mindspore.dataset.vision import Inter
-import mindspore.dataset.vision.c_transforms as C
-import mindspore.dataset.transforms.c_transforms as C2
+import mindspore.dataset.vision as C
+import mindspore.dataset.transforms as C2
 
 
 def get_wukong_dataset(dataset_path, columns_list, num_parallel_workers, shuffle, num_shards, shard_id, batch_size):
diff --git a/research/nlp/DYR/src/dataset.py b/research/nlp/DYR/src/dataset.py
index 9ec0212c73bdbb8263139f7c693ce69c8f5bb41e..3e35e7cd11a45c7cba9365aa472fe037cd1cda0a 100644
--- a/research/nlp/DYR/src/dataset.py
+++ b/research/nlp/DYR/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import random
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 # samples in one block
 POS_SIZE = 1
diff --git a/research/nlp/albert/src/dataset.py b/research/nlp/albert/src/dataset.py
index dec0680de3d3ce028d7c290d8766e75e4d5cd087..37549dae2408cb6b0017e0d8625e889486df73d6 100644
--- a/research/nlp/albert/src/dataset.py
+++ b/research/nlp/albert/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import math
 import numpy as np
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 from mindspore import log as logger
 
 
diff --git a/research/nlp/gpt2/src/dataset.py b/research/nlp/gpt2/src/dataset.py
index 7435c52b6350ae291bc5ca87a45a7658609a7856..44984954d313e7300d917347f0b59147fad8a669 100644
--- a/research/nlp/gpt2/src/dataset.py
+++ b/research/nlp/gpt2/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Data operations"""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as de
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 from .finetune_eval_config import gpt2_net_cfg
 
diff --git a/research/nlp/hypertext/src/dataset.py b/research/nlp/hypertext/src/dataset.py
index a2513552054309f08fcf6eabcaaca689805ff698..4479d240b4e64d1aa29d5368229f634408987965 100644
--- a/research/nlp/hypertext/src/dataset.py
+++ b/research/nlp/hypertext/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@ import random
 from datetime import timedelta
 import numpy as np
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 MAX_VOCAB_SIZE = 5000000
diff --git a/research/nlp/ktnet/src/dataset.py b/research/nlp/ktnet/src/dataset.py
index b2f739e591848d17e0a25b44e07c9bddbaf8d8cc..66ee897730e00f032dfe3e22306d0f1cb4a4a3b2 100644
--- a/research/nlp/ktnet/src/dataset.py
+++ b/research/nlp/ktnet/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@
 
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 
 
 def create_train_dataset(data_file=None, do_shuffle=True, device_num=1, rank=0, batch_size=1,
diff --git a/research/nlp/luke/src/reading_comprehension/dataLoader.py b/research/nlp/luke/src/reading_comprehension/dataLoader.py
index 95494be5581f83ba526772521031dc3257bcde50..73a3fc3456559c3e429759558fc25e60e7617b9d 100644
--- a/research/nlp/luke/src/reading_comprehension/dataLoader.py
+++ b/research/nlp/luke/src/reading_comprehension/dataLoader.py
@@ -32,7 +32,7 @@ def create_dataset(data_file=None, do_shuffle=True, device_num=1, rank=0, batch_
                                            "start_positions", "end_positions"],
                              shuffle=do_shuffle, num_shards=device_num, shard_id=rank,
                              num_samples=num, num_parallel_workers=num_parallel_workers)
-    type_int32 = C.c_transforms.TypeCast(mstype.int32)
+    type_int32 = C.TypeCast(mstype.int32)
     dataset = dataset.map(operations=type_int32, input_columns="word_ids")
     dataset = dataset.map(operations=type_int32, input_columns="word_segment_ids")
     dataset = dataset.map(operations=type_int32, input_columns="word_attention_mask")
@@ -57,7 +57,7 @@ def create_eval_dataset(data_file=None, do_shuffle=True, device_num=1, rank=0, b
                                            "example_indices"],
                              shuffle=do_shuffle, num_shards=device_num, shard_id=rank,
                              num_samples=num, num_parallel_workers=num_parallel_workers)
-    type_int32 = C.c_transforms.TypeCast(mstype.int32)
+    type_int32 = C.TypeCast(mstype.int32)
     dataset = dataset.map(operations=type_int32, input_columns="word_ids")
     dataset = dataset.map(operations=type_int32, input_columns="word_segment_ids")
     dataset = dataset.map(operations=type_int32, input_columns="word_attention_mask")
diff --git a/research/nlp/seq2seq/src/dataset/load_dataset.py b/research/nlp/seq2seq/src/dataset/load_dataset.py
index 5843e0a31c16d43d829c9b00909462740f03dcc6..5ada4bd0b33d354fb8c284545fb39c1f2baa68d6 100644
--- a/research/nlp/seq2seq/src/dataset/load_dataset.py
+++ b/research/nlp/seq2seq/src/dataset/load_dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 """Dataset loader to feed into model."""
 import mindspore.common.dtype as mstype
 import mindspore.dataset as ds
-import mindspore.dataset.transforms.c_transforms as deC
+import mindspore.dataset.transforms as deC
 
 
 def _load_dataset(input_files, batch_size, sink_mode=False,
diff --git a/research/recommend/mmoe/src/load_dataset.py b/research/recommend/mmoe/src/load_dataset.py
index 602a26d726788630bb207dfd5387edbba7b7b7d3..650df81c19570d51061ade82d6155a991e1200de 100644
--- a/research/recommend/mmoe/src/load_dataset.py
+++ b/research/recommend/mmoe/src/load_dataset.py
@@ -16,7 +16,7 @@
 import os
 import mindspore.dataset as de
 from mindspore.communication.management import get_rank, get_group_size
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.transforms as C
 import mindspore.common.dtype as mstype
 
 
diff --git a/utils/model_scaffolding/example/src/dataset.py b/utils/model_scaffolding/example/src/dataset.py
index f659d158a26a3ec7f53e170f74cf6f26a907e11e..83d20a7be2dbb2a86fd4a855c98ae07f4c86c1ef 100644
--- a/utils/model_scaffolding/example/src/dataset.py
+++ b/utils/model_scaffolding/example/src/dataset.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
 
 """Dataset"""
 import mindspore.dataset as ds
-import mindspore.dataset.vision.c_transforms as CV
-import mindspore.dataset.transforms.c_transforms as C
+import mindspore.dataset.vision as CV
+import mindspore.dataset.transforms as C
 from mindspore.dataset.vision import Inter
 from mindspore.common import dtype as mstype