diff --git a/official/cv/maskrcnn/src/dataset.py b/official/cv/maskrcnn/src/dataset.py
index b3b1b9c13be7c5f32dc292ad1e4191a401a5d19d..1529ce9a7713c7dabfe6791bca045dcc61edf752 100644
--- a/official/cv/maskrcnn/src/dataset.py
+++ b/official/cv/maskrcnn/src/dataset.py
@@ -550,7 +550,8 @@ def create_maskrcnn_dataset(mindrecord_file, batch_size=2, device_num=1, rank_id
                     column_order=["image", "image_shape", "box", "label", "valid_num", "mask"],
                     python_multiprocessing=False,
                     num_parallel_workers=num_parallel_workers)
-        ds = ds.batch(batch_size, drop_remainder=True, pad_info={"mask": ([config.max_instance_count, None, None], 0)})
+        ds = ds.padded_batch(batch_size, drop_remainder=True,
+                             pad_info={"mask": ([config.max_instance_count, None, None], 0)})
 
     else:
         ds = ds.map(operations=compose_map_func,
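
The change above (repeated verbatim for maskrcnn_mobilenetv1, Yolact++, and textfusenet below) tracks the MindSpore 2.0 dataset API, where `pad_info` was removed from `Dataset.batch` and padding moved to the dedicated `Dataset.padded_batch`. A minimal sketch of the pattern on a toy dataset; the column name and shapes are illustrative, not taken from this repository:

```python
import numpy as np
import mindspore.dataset as de

def gen():
    # Variable number of instance masks per sample, as in Mask R-CNN.
    for n in (2, 5, 3):
        yield (np.ones((n, 4, 4), np.float32),)

ds = de.GeneratorDataset(gen, column_names=["mask"], shuffle=False)

# 1.x: ds.batch(2, drop_remainder=True, pad_info=...)
# 2.x: padding is a dedicated operation.
ds = ds.padded_batch(2, drop_remainder=True,
                     pad_info={"mask": ([8, None, None], 0)})

for row in ds.create_dict_iterator(output_numpy=True):
    print(row["mask"].shape)  # (2, 8, 4, 4): first dim padded to 8 with zeros
```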
diff --git a/official/cv/maskrcnn_mobilenetv1/src/dataset.py b/official/cv/maskrcnn_mobilenetv1/src/dataset.py
index 0d05c8c1023b49836aba4f8b81e2ea226c500858..eb065636f6dae21838d8ba9209cf6e75f6088c0b 100644
--- a/official/cv/maskrcnn_mobilenetv1/src/dataset.py
+++ b/official/cv/maskrcnn_mobilenetv1/src/dataset.py
@@ -565,7 +565,8 @@ def create_maskrcnn_dataset(mindrecord_file, batch_size=2, device_num=1, rank_id
                     column_order=["image", "image_shape", "box", "label", "valid_num", "mask"],
                     python_multiprocessing=False,
                     num_parallel_workers=num_parallel_workers)
-        ds = ds.batch(batch_size, drop_remainder=True, pad_info={"mask": ([config.max_instance_count, None, None], 0)})
+        ds = ds.padded_batch(batch_size, drop_remainder=True,
+                             pad_info={"mask": ([config.max_instance_count, None, None], 0)})
 
     else:
         ds = ds.map(operations=compose_map_func,
diff --git a/official/nlp/cpm/train.py b/official/nlp/cpm/train.py
index ca9b4d249dd81673703c4758edefd8befc4e287e..881ac3c4c1e62cf53fc9f93f657af44a6ba7bb19 100644
--- a/official/nlp/cpm/train.py
+++ b/official/nlp/cpm/train.py
@@ -89,17 +89,17 @@ def _load_dataset(dataset_path, batch_size, rank_size=None, rank_id=None, shuffl
                           per_batch_map=collate,
                           input_columns=["truth", "input_ids"],
                           output_columns=["input_ids", "attention_mask", "position_ids", "truth"],
-                          column_order=["input_ids", "attention_mask", "position_ids", "loss_mask", "labels"],
                           num_parallel_workers=4,
                           drop_remainder=drop_remainder)
+        data = data.project(["input_ids", "attention_mask", "position_ids", "loss_mask", "labels"])
     else:
         data = data.batch(batch_size,
                           per_batch_map=collate,
                           input_columns=["truth", "input_ids"],
                           output_columns=["input_ids", "attention_mask", "position_ids", "truth"],
-                          column_order=["input_ids", "attention_mask", "position_ids", "loss_mask", "labels", "truth"],
                           num_parallel_workers=4,
                           drop_remainder=drop_remainder)
+        data = data.project(["input_ids", "attention_mask", "position_ids", "loss_mask", "labels", "truth"])
 
     return data
 
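The `column_order` argument was likewise dropped from `Dataset.batch` in MindSpore 2.0; selecting and ordering columns is now done with `Dataset.project` after batching, exactly as the hunk above does. A minimal sketch of the same contract with toy data (the collate logic is a placeholder, not the CPM one):

```python
import numpy as np
import mindspore.dataset as de

def gen():
    for i in range(4):
        yield (np.array(i, np.int32), np.array(i * 10, np.int32))

data = de.GeneratorDataset(gen, column_names=["truth", "input_ids"], shuffle=False)

def collate(truth, input_ids, batch_info):
    # per_batch_map receives one list per input column plus BatchInfo
    # and must return one list per output column.
    loss_mask = [np.ones_like(x) for x in input_ids]
    return input_ids, loss_mask, truth

data = data.batch(2,
                  per_batch_map=collate,
                  input_columns=["truth", "input_ids"],
                  output_columns=["input_ids", "loss_mask", "labels"])
# Replaces the removed column_order= keyword:
data = data.project(["input_ids", "loss_mask", "labels"])
```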
diff --git a/research/cv/IPT/train.py b/research/cv/IPT/train.py
index db6b66bfdb43e5e920b89cb67b937f9cc900edb8..c6424fe9681c7f4069ed25987e5f7535790a952a 100644
--- a/research/cv/IPT/train.py
+++ b/research/cv/IPT/train.py
@@ -131,9 +131,10 @@ def train_net(distribute, imagenet, epochs):
     resize_fuc = bicubic()
     train_de_dataset = train_de_dataset.project(columns=["HR", "Rain", "LRx2", "LRx3", "LRx4", "filename"])
     train_de_dataset = train_de_dataset.batch(args.batch_size,
+                                              drop_remainder=True,
+                                              per_batch_map=resize_fuc.forward,
                                               input_columns=["HR", "Rain", "LRx2", "LRx3", "LRx4", "filename"],
-                                              output_columns=["LR", "HR", "idx", "filename"],
-                                              drop_remainder=True, per_batch_map=resize_fuc.forward)
+                                              output_columns=["LR", "HR", "idx", "filename"])
     train_loader = train_de_dataset.create_dict_iterator(output_numpy=True)
 
     net_work = IPT(args)
diff --git a/research/cv/IPT/train_finetune.py b/research/cv/IPT/train_finetune.py
index 4861726119fa0896593e3123e19f7c1c17af8169..11b184c0d44ea2c8f526f7a13e8ab9027878d80d 100644
--- a/research/cv/IPT/train_finetune.py
+++ b/research/cv/IPT/train_finetune.py
@@ -68,9 +68,10 @@ def train_net(distribute, imagenet):
         resize_fuc = bicubic()
         train_de_dataset = train_de_dataset.batch(
             args.batch_size,
+            drop_remainder=True,
+            per_batch_map=resize_fuc.forward,
             input_columns=["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"],
-            output_columns=["LR", "HR", "idx", "filename"], drop_remainder=True,
-            per_batch_map=resize_fuc.forward)
+            output_columns=["LR", "HR", "idx", "filename"])
     else:
         train_de_dataset = train_de_dataset.batch(args.batch_size, drop_remainder=True)
 
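The two IPT hunks only reorder keyword arguments so that `per_batch_map` sits next to `drop_remainder`; behaviour is unchanged. The contract they rely on is that `per_batch_map` may consume one column set and emit a different one, named by `output_columns`. A rough sketch with hypothetical columns and a trivial map in place of `bicubic().forward`:

```python
import numpy as np
import mindspore.dataset as de

def gen():
    for i in range(4):
        yield (np.full((8, 8), i, np.float32), np.full((4, 4), i, np.float32))

ds = de.GeneratorDataset(gen, column_names=["HR", "LRx2"], shuffle=False)

def resize_batch(hr, lrx2, batch_info):
    # Emit (LR, HR, idx) and drop LRx2, mirroring how resize_fuc.forward
    # maps the multi-scale inputs to a single (LR, HR, idx, filename) set.
    idx = [np.array(0, np.int32)] * len(hr)
    return lrx2, hr, idx

ds = ds.batch(2,
              drop_remainder=True,
              per_batch_map=resize_batch,
              input_columns=["HR", "LRx2"],
              output_columns=["LR", "HR", "idx"])
```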
diff --git a/research/cv/Yolact++/src/dataset.py b/research/cv/Yolact++/src/dataset.py
index e7b277e3b1aa6ef9fd8b91ba4c445c8e6903ff43..02c95b56689dea7a26c1423f52f5d16f5c4fa644 100644
--- a/research/cv/Yolact++/src/dataset.py
+++ b/research/cv/Yolact++/src/dataset.py
@@ -483,7 +483,8 @@ def create_yolact_dataset(mindrecord_file, batch_size=2, device_num=1, rank_id=0
                     python_multiprocessing=False,
                     num_parallel_workers=8)
 
-        ds = ds.batch(batch_size, drop_remainder=True, pad_info={"mask": ([cfg['max_instance_count'], None, None], 0)})
+        ds = ds.padded_batch(batch_size, drop_remainder=True,
+                             pad_info={"mask": ([cfg['max_instance_count'], None, None], 0)})
 
     else:
         ds = ds.map(operations=compose_map_func,
diff --git a/research/cv/pointnet2/train.py b/research/cv/pointnet2/train.py
index f0a1d95a4b094281fb107c5fa05ad44c94cf93f8..16558007ec7b75d07315e4a456cc1d3f6fbde15a 100644
--- a/research/cv/pointnet2/train.py
+++ b/research/cv/pointnet2/train.py
@@ -170,9 +170,9 @@ def run_train():
     random_input_dropout = RandomInputDropout()
 
     train_ds = train_ds.batch(batch_size=args.batch_size,
+                              drop_remainder=True,
                               per_batch_map=random_input_dropout,
                               input_columns=["data", "label"],
-                              drop_remainder=True,
                               num_parallel_workers=num_workers,
                               python_multiprocessing=True)
 
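Here `per_batch_map` is a callable object with no `output_columns`, so the column set is unchanged and the batch is only augmented in place. An illustrative stand-in for `RandomInputDropout` (the augmentation itself is invented for the sketch):

```python
import numpy as np

class RandomBatchAugment:
    """Whole-batch augmentation: the signature RandomInputDropout must satisfy."""

    def __call__(self, data, label, batch_info):
        # data and label are lists holding one numpy array per sample.
        data = [d + np.random.normal(0, 0.01, d.shape).astype(d.dtype) for d in data]
        return data, label

# Usage mirrors the hunk above:
# train_ds = train_ds.batch(batch_size=16, drop_remainder=True,
#                           per_batch_map=RandomBatchAugment(),
#                           input_columns=["data", "label"])
```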
diff --git a/research/cv/textfusenet/src/dataset.py b/research/cv/textfusenet/src/dataset.py
index a406408b328cdc5a73a42e6e25dca606bdf6ca53..70b9cd602963cfd6bd03e6133f50324f4b40d9f5 100755
--- a/research/cv/textfusenet/src/dataset.py
+++ b/research/cv/textfusenet/src/dataset.py
@@ -519,7 +519,8 @@ def create_textfusenet_dataset(mindrecord_file, batch_size=2, device_num=1, rank
                     column_order=["image", "image_shape", "box", "label", "valid_num", "mask"],
                     python_multiprocessing=False,
                     num_parallel_workers=num_parallel_workers)
-        ds = ds.batch(batch_size, drop_remainder=True, pad_info={"mask": ([config.max_instance_count, None, None], 0)})
+        ds = ds.padded_batch(batch_size, drop_remainder=True,
+                             pad_info={"mask": ([config.max_instance_count, None, None], 0)})
 
     else:
         ds = ds.map(operations=compose_map_func,
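
With four models switching to `padded_batch`, a quick smoke test is to pull one batch and confirm the mask column really is padded to `max_instance_count`. A hypothetical check (the MindRecord path is a placeholder):

```python
# Inside any of the touched repos, after building the dataset:
ds = create_maskrcnn_dataset("/path/to/train.mindrecord", batch_size=2)
row = next(ds.create_dict_iterator(output_numpy=True))
# pad_info={"mask": ([config.max_instance_count, None, None], 0)} fixes
# the per-sample instance dimension, so after batching:
assert row["mask"].shape[1] == config.max_instance_count
```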