diff --git a/official/cv/cspdarknet53/src/dataset.py b/official/cv/cspdarknet53/src/dataset.py
index 762f0a332054bed8c4e497978c559dfa0503a839..9025cffdd2d7d52bedce4cf1398760f5ee469e61 100644
--- a/official/cv/cspdarknet53/src/dataset.py
+++ b/official/cv/cspdarknet53/src/dataset.py
@@ -114,6 +114,5 @@ def create_dataset(data_dir, image_size, per_batch_size, rank, group_size,
     de_dataset = de_dataset.project(columns=columns_to_project)
 
     de_dataset = de_dataset.batch(per_batch_size, drop_remainder=drop_remainder)
-    de_dataset = de_dataset.repeat(1)
 
     return de_dataset
diff --git a/official/cv/dncnn/src/dataset.py b/official/cv/dncnn/src/dataset.py
index 4f08d7eecbf93cfc335536425e2d2a363c7a8845..b8240c79ea7e7e3f85720125dd14986a2dda240d 100644
--- a/official/cv/dncnn/src/dataset.py
+++ b/official/cv/dncnn/src/dataset.py
@@ -42,7 +42,6 @@ def create_train_dataset(data_path, model_type, noise_level=25, batch_size=128):
     # apply DatasetOps
     dataloader = dataloader.shuffle(buffer_size=10000)
     dataloader = dataloader.batch(batch_size, drop_remainder=True)
-    dataloader = dataloader.repeat(1) #here 400 images as an epoch , on the paper 128x1600 patches as a epoch
     return dataloader
 
 class DnCNN_train_Dataset():
diff --git a/official/cv/openpose/src/dataset.py b/official/cv/openpose/src/dataset.py
index 1cfc35820af345cd655337f0a662bdc8f25a89cd..03b2705da30f5df7918b7c79de957dd3407bb895 100644
--- a/official/cv/openpose/src/dataset.py
+++ b/official/cv/openpose/src/dataset.py
@@ -547,7 +547,6 @@ def valdata(jsonpath, imgpath, rank, group_size, mode='val', maskpath=''):
     dataset = txtdataset(val, imgpath, maskpath, config.insize, mode=mode)
     sampler = DistributedSampler(dataset, rank, group_size)
     ds = de.GeneratorDataset(dataset, ['img', 'img_id'], num_parallel_workers=8, sampler=sampler)
-    ds = ds.repeat(1)
     return ds
 
 
diff --git a/official/cv/psenet/src/dataset.py b/official/cv/psenet/src/dataset.py
index 201690bc73b1b6aae7edfc1c56f93d13721c5882..8c88120b714618b7eba47be3da9b769733a2858e 100644
--- a/official/cv/psenet/src/dataset.py
+++ b/official/cv/psenet/src/dataset.py
@@ -345,7 +345,6 @@ def train_dataset_creator(rank, group_size, shuffle=True):
     sampler = DistributedSampler(dataset, rank, group_size, shuffle)
     data_set = ds.GeneratorDataset(dataset, ['img', 'gt_text', 'gt_kernels', 'training_mask'], num_parallel_workers=8,
                                    sampler=sampler)
-    data_set = data_set.repeat(1)
     data_set = data_set.batch(config.TRAIN_BATCH_SIZE, drop_remainder=config.TRAIN_DROP_REMAINDER)
     return data_set
 
diff --git a/official/cv/sphereface/src/datasets/classification.py b/official/cv/sphereface/src/datasets/classification.py
index c5bc2f984822de5bcd0ac3db1541100d7f042dc4..03cf57c1b3ce28236cce7423a02e05b15858457b 100644
--- a/official/cv/sphereface/src/datasets/classification.py
+++ b/official/cv/sphereface/src/datasets/classification.py
@@ -126,5 +126,4 @@ def classification_dataset_imagenet(data_dir, image_size, per_batch_size, max_ep
     de_dataset = de_dataset.project(columns=columns_to_project)
 
     de_dataset = de_dataset.batch(per_batch_size, drop_remainder=drop_remainder)
-    de_dataset = de_dataset.repeat(1)
     return de_dataset
diff --git a/official/cv/unet/src/data_loader.py b/official/cv/unet/src/data_loader.py
index 4bc345597b06520b08dacc9e86ab90026e64328f..90ad3a20b86b2d091619ecdc5c406beaca1582ab 100644
--- a/official/cv/unet/src/data_loader.py
+++ b/official/cv/unet/src/data_loader.py
@@ -260,5 +260,4 @@ def create_multi_class_dataset(data_dir, img_size, repeat, batch_size, num_class
                           python_multiprocessing=python_multiprocessing,
                           num_parallel_workers=num_parallel_workers)
     dataset = dataset.batch(batch_size, drop_remainder=is_train)
-    dataset = dataset.repeat(1)
     return dataset
diff --git a/official/nlp/mass/default_config.yaml b/official/nlp/mass/default_config.yaml
index 86cdbc6236fe67d2b84f52664dd699aea4de0ed4..343b08fc6d7ab35d01af2463763951090b747be9 100644
--- a/official/nlp/mass/default_config.yaml
+++ b/official/nlp/mass/default_config.yaml
@@ -68,7 +68,7 @@ output: ""
 device_id: 0
 ckpt_file: ""
 file_name: "mass"
-file_format: "AIR"
+file_format: "MINDIR"
 vocab_file: ""
 result_path: "./preprocess_Result/"
 source_id_folder: "./preprocess_Result/00_source_eos_ids"
diff --git a/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py b/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
index 130e7e9ac15d0d94a98bb1931f2ad733e1e7fa9a..ec2a889bad0d8337179b3329a1e5ff7c3b05117c 100644
--- a/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
+++ b/research/cv/CycleGAN/src/dataset/cyclegan_dataset.py
@@ -164,7 +164,6 @@ def create_dataset(args):
         ds = ds.map(operations=trans, input_columns=["image_A"], num_parallel_workers=num_parallel_workers)
         ds = ds.map(operations=trans, input_columns=["image_B"], num_parallel_workers=num_parallel_workers)
         ds = ds.batch(batch_size, drop_remainder=True)
-        ds = ds.repeat(1)
     else:
         datadir = os.path.join(dataroot, args.data_dir)
         dataset = ImageFolderDataset(datadir, max_dataset_size=max_dataset_size)
@@ -177,7 +176,6 @@ def create_dataset(args):
         ]
         ds = ds.map(operations=trans, input_columns=["image"], num_parallel_workers=num_parallel_workers)
         ds = ds.batch(1, drop_remainder=True)
-        ds = ds.repeat(1)
     args.dataset_size = len(dataset)
     return ds
     
\ No newline at end of file
diff --git a/research/cv/FaceDetection/eval.py b/research/cv/FaceDetection/eval.py
index 48b4f2e2b2f1ef211ac656336fdb88408e1b7ba3..e367653c7960a7eebe0a56c68e0cbf58800167eb 100644
--- a/research/cv/FaceDetection/eval.py
+++ b/research/cv/FaceDetection/eval.py
@@ -151,7 +151,6 @@ def run_eval():
     network = backbone_HwYolov3(num_classes, num_anchors_list, config)
     network = load_pretrain(network, config)
 
-    ds = ds.repeat(1)
 
     det = {}
     img_size = {}
diff --git a/research/cv/FaceDetection/preprocess.py b/research/cv/FaceDetection/preprocess.py
index 3255a9de057bde222bb9dd0803678a46ef5ac645..8f9961b26e655ddcb53ed8e066cdbce5125c910a 100644
--- a/research/cv/FaceDetection/preprocess.py
+++ b/research/cv/FaceDetection/preprocess.py
@@ -74,7 +74,6 @@ def preprocess():
     single_scale_trans = SingleScaleTrans_Infer(resize=config.input_shape)
     ds = ds.batch(config.batch_size, per_batch_map=single_scale_trans,
                   input_columns=["image", "annotation", "image_name", "image_size"], num_parallel_workers=8)
-    ds = ds.repeat(1)
     for data in ds.create_tuple_iterator(output_numpy=True):
         images, labels, image_name, image_size = data[0:4]
         images = Image.fromarray(images[0].astype('uint8')).convert('RGB')
diff --git a/research/cv/FaceRecognition/eval.py b/research/cv/FaceRecognition/eval.py
index 6bfedde5864ca4d82e08c9a87e86afb3d58c914c..ebff9e37d6bbe88dbebb10472e2ae97a7019e183 100644
--- a/research/cv/FaceRecognition/eval.py
+++ b/research/cv/FaceRecognition/eval.py
@@ -87,7 +87,6 @@ def get_dataloader(img_predix_all, img_list_all, batch_size, img_transforms):
     ds = de.GeneratorDataset(dataset, column_names=dataset_column_names, sampler=sampler)
     ds = ds.map(input_columns=["image"], operations=img_transforms)
     ds = ds.batch(batch_size, num_parallel_workers=8, drop_remainder=False)
-    ds = ds.repeat(1)
 
     return ds, len(dataset), dataset.get_all_labels()
 
diff --git a/research/cv/FaceRecognition/postprocess.py b/research/cv/FaceRecognition/postprocess.py
index 0f680aaf20ad89764e8f77bbf0db9fdd21399f97..7495890a93029d2994be54a69d901855bb4c7d65 100644
--- a/research/cv/FaceRecognition/postprocess.py
+++ b/research/cv/FaceRecognition/postprocess.py
@@ -79,7 +79,6 @@ def get_dataloader(img_predix_all, img_list_all, batch_size):
     dataset_column_names = ["image", "path", "index"]
     ds = de.GeneratorDataset(dataset, column_names=dataset_column_names, sampler=sampler)
     ds = ds.batch(batch_size, num_parallel_workers=8, drop_remainder=False)
-    ds = ds.repeat(1)
 
     return ds, len(dataset), dataset.get_all_labels()
 
diff --git a/research/cv/FaceRecognition/preprocess.py b/research/cv/FaceRecognition/preprocess.py
index ae5b0e50fb8c137598c9e630029a4490b2b33fd0..69f788911fa82566915df1f276698d1d157e2e3c 100644
--- a/research/cv/FaceRecognition/preprocess.py
+++ b/research/cv/FaceRecognition/preprocess.py
@@ -78,7 +78,6 @@ def get_dataloader(img_predix_all, img_list_all):
     dataset_column_names = ["image", "path", "index"]
     ds = de.GeneratorDataset(dataset, column_names=dataset_column_names, sampler=sampler)
     ds = ds.batch(1, num_parallel_workers=8, drop_remainder=False)
-    ds = ds.repeat(1)
 
     return ds, len(dataset), dataset.get_all_labels()
 
diff --git a/research/cv/HourNAS/src/dataset.py b/research/cv/HourNAS/src/dataset.py
index 0ac34747d8f8fbfe77e55e5ed2f426116768d755..663fa35f42b9db69c4170e4a9e4631f72de61fef 100644
--- a/research/cv/HourNAS/src/dataset.py
+++ b/research/cv/HourNAS/src/dataset.py
@@ -97,7 +97,6 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
                                    num_parallel_workers=2,
                                    drop_remainder=True)
 
-    ds_train = ds_train.repeat(1)
     return ds_train
 
 
@@ -140,7 +139,6 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
                             input_columns=["image", "label"],
                             num_parallel_workers=2,
                             drop_remainder=True)
-    dataset = dataset.repeat(1)
     return dataset
 
 def _get_rank_info():
diff --git a/research/cv/LightCNN/src/dataset.py b/research/cv/LightCNN/src/dataset.py
index a8eb2866b199902fb0cfb96f0360b2b4d81b32e6..46192a0a060a5a8e552083e4afea8896d50b7f7e 100644
--- a/research/cv/LightCNN/src/dataset.py
+++ b/research/cv/LightCNN/src/dataset.py
@@ -97,7 +97,6 @@ def create_dataset(mode, data_url, data_list, batch_size, resize_size=144,
                           num_parallel_workers=num_of_workers)
 
     dataset = dataset.batch(batch_size, num_parallel_workers=num_of_workers, drop_remainder=drop_last)
-    dataset = dataset.repeat(1)
 
     return dataset
 
diff --git a/research/cv/ManiDP/src/dataset.py b/research/cv/ManiDP/src/dataset.py
index 3475f1e9594cd16d1a5163539a6727c570ece08a..220fd0147b0b7b15aed46db0c75c529a88e11648 100644
--- a/research/cv/ManiDP/src/dataset.py
+++ b/research/cv/ManiDP/src/dataset.py
@@ -98,7 +98,6 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
                                    num_parallel_workers=2,
                                    drop_remainder=True)
 
-    ds_train = ds_train.repeat(1)
     return ds_train
 
 
@@ -141,7 +140,6 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
                             input_columns=["image", "label"],
                             num_parallel_workers=2,
                             drop_remainder=True)
-    dataset = dataset.repeat(1)
     return dataset
 
 def _get_rank_info():
diff --git a/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py b/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
index 382fc114f038b3e9d1508d5bdee793688ba4b1d2..73c56ad4021c7897f9b9919bba85ad38991002da 100644
--- a/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
+++ b/research/cv/Pix2Pix/src/dataset/pix2pix_dataset.py
@@ -119,7 +119,6 @@ def create_train_dataset(dataset):
     train_ds = train_ds.map(operations=trans, input_columns=["target_images"])
 
     train_ds = train_ds.batch(1, drop_remainder=True)
-    train_ds = train_ds.repeat(1)
 
     return train_ds
 
@@ -168,6 +167,5 @@ def create_val_dataset(dataset):
     val_ds = val_ds.map(operations=trans, input_columns=["input_images"])
     val_ds = val_ds.map(operations=trans, input_columns=["target_images"])
     val_ds = val_ds.batch(1, drop_remainder=True)
-    val_ds = val_ds.repeat(1)
 
     return val_ds
diff --git a/research/cv/STGAN/modelarts/dataset/celeba.py b/research/cv/STGAN/modelarts/dataset/celeba.py
index db7491a20cadc846aa2aec891224bbca8f4b32ab..bf65edaab934ba1f047d63a19fee225b5b9df0fd 100644
--- a/research/cv/STGAN/modelarts/dataset/celeba.py
+++ b/research/cv/STGAN/modelarts/dataset/celeba.py
@@ -149,7 +149,6 @@ class CelebADataLoader:
                                             num_parallel_workers=min(
                                                 32, num_parallel_workers))
             test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
-            test_dataset = test_dataset.repeat(1)
 
             self.test_loader = test_dataset.create_dict_iterator()
 
diff --git a/research/cv/STGAN/src/dataset/celeba.py b/research/cv/STGAN/src/dataset/celeba.py
index db7491a20cadc846aa2aec891224bbca8f4b32ab..bf65edaab934ba1f047d63a19fee225b5b9df0fd 100644
--- a/research/cv/STGAN/src/dataset/celeba.py
+++ b/research/cv/STGAN/src/dataset/celeba.py
@@ -149,7 +149,6 @@ class CelebADataLoader:
                                             num_parallel_workers=min(
                                                 32, num_parallel_workers))
             test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
-            test_dataset = test_dataset.repeat(1)
 
             self.test_loader = test_dataset.create_dict_iterator()
 
diff --git a/research/cv/renas/src/dataset.py b/research/cv/renas/src/dataset.py
index 0a997e4098f253e0763df61fc6daab19e65489b3..c411d5b707688bee9da8dc53c1891611ee205fd8 100644
--- a/research/cv/renas/src/dataset.py
+++ b/research/cv/renas/src/dataset.py
@@ -98,7 +98,6 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
                                    num_parallel_workers=2,
                                    drop_remainder=True)
 
-    ds_train = ds_train.repeat(1)
     return ds_train
 
 
@@ -141,7 +140,6 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
                             input_columns=["image", "label"],
                             num_parallel_workers=2,
                             drop_remainder=True)
-    dataset = dataset.repeat(1)
     return dataset
 
 def _get_rank_info():
diff --git a/research/cv/tinynet/src/dataset.py b/research/cv/tinynet/src/dataset.py
index 2ecf01b64ef410370ac93b7a333000936070fce8..cf49192e7300729b67668a93a54e88a56c515295 100644
--- a/research/cv/tinynet/src/dataset.py
+++ b/research/cv/tinynet/src/dataset.py
@@ -96,7 +96,6 @@ def create_dataset(batch_size, train_data_url='', workers=8, distributed=False,
                                    num_parallel_workers=2,
                                    drop_remainder=True)
 
-    ds_train = ds_train.repeat(1)
     return ds_train
 
 
@@ -139,5 +138,4 @@ def create_dataset_val(batch_size=128, val_data_url='', workers=8, distributed=F
                             input_columns=["image", "label"],
                             num_parallel_workers=2,
                             drop_remainder=True)
-    dataset = dataset.repeat(1)
     return dataset