diff --git a/research/hpc/pinns/src/NavierStokes/dataset.py b/research/hpc/pinns/src/NavierStokes/dataset.py
index d2093ec8bae0518ec920195a9d89b8d271fa405f..d2583e9d0f3aac471c994597abe1f79619ef1d03 100644
--- a/research/hpc/pinns/src/NavierStokes/dataset.py
+++ b/research/hpc/pinns/src/NavierStokes/dataset.py
@@ -28,10 +28,12 @@ class data_set_navier_stokes:
         noise (float): noise intensity, 0 for noiseless training data
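+        batch_size (int): number of samples packed into each item returned by __getitem__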
         train (bool): True for training set, False for evaluation set
     """
-    def __init__(self, n_train, path, noise, train=True):
+    def __init__(self, n_train, path, noise, batch_size, train=True):
         data = scio.loadmat(path)
         self.n_train = n_train
         self.noise = noise
+        self.batch_size = batch_size
 
         # load data
         X_star = data['X_star'].astype(np.float32)
@@ -64,8 +65,10 @@ class data_set_navier_stokes:
             self.t = t[idx, :]
             u_train = u[idx, :]
             self.u = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1])
+            self.u = self.u.astype(np.float32)
             v_train = v[idx, :]
             self.v = v_train + noise*np.std(v_train)*np.random.randn(v_train.shape[0], v_train.shape[1])
+            self.v = self.v.astype(np.float32)
         else:
             self.x = x
             self.y = y
@@ -78,17 +81,17 @@ class data_set_navier_stokes:
             self.p = PP.flatten()[:, None]
 
     def __getitem__(self, index):
-        ans_x = self.x[index]
-        ans_y = self.y[index]
-        ans_t = self.t[index]
-        ans_u = self.u[index]
-        ans_v = self.v[index]
-        input_data = np.hstack((ans_x, ans_y, ans_t)).astype(np.float32)
-        label = np.hstack((ans_u, ans_v, np.array([0.]))).astype(np.float32)  #
+        ans_x = self.x[index * self.batch_size : (index + 1) * self.batch_size]
+        ans_y = self.y[index * self.batch_size : (index + 1) * self.batch_size]
+        ans_t = self.t[index * self.batch_size : (index + 1) * self.batch_size]
+        ans_u = self.u[index * self.batch_size : (index + 1) * self.batch_size]
+        ans_v = self.v[index * self.batch_size : (index + 1) * self.batch_size]
+        input_data = np.hstack((ans_x, ans_y, ans_t))
+        label = np.hstack((ans_u, ans_v, np.zeros([self.batch_size, 1], dtype=np.float32)))
         return input_data, label
 
     def __len__(self):
-        return self.n_train
+        return self.n_train // self.batch_size
 
 
 def generate_training_set_navier_stokes(batch_size, n_train, path, noise):
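Note on the hunk above: batching now happens inside the dataset source itself. Each call to __getitem__ returns one full mini-batch, and __len__ reports the number of batches rather than samples, so the trailing n_train % batch_size samples are simply never visited. Below is a minimal NumPy-only sketch of the same indexing scheme; the class name and array arguments are placeholders, not the module's actual interface.

    import numpy as np

    class PreBatchedSource:
        """Yields one full (data, label) batch per index, mirroring the patched __getitem__."""
        def __init__(self, x, y, t, u, v, batch_size):
            # each argument is an (n, 1) float32 array
            self.x, self.y, self.t, self.u, self.v = x, y, t, u, v
            self.batch_size = batch_size

        def __getitem__(self, index):
            s = slice(index * self.batch_size, (index + 1) * self.batch_size)
            data = np.hstack((self.x[s], self.y[s], self.t[s]))                # (batch_size, 3)
            label = np.hstack((self.u[s], self.v[s],
                               np.zeros([self.batch_size, 1], np.float32)))    # (batch_size, 3)
            return data, label

        def __len__(self):
            # batches, not samples: the remainder past the last full batch is dropped
            return self.x.shape[0] // self.batch_size

One side effect worth keeping in mind: because samples are bound to a batch by position once the random training subset is drawn in __init__, shuffle=True in the generator now permutes whole batches rather than re-mixing individual samples each epoch.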
@@ -101,9 +104,9 @@ def generate_training_set_navier_stokes(batch_size, n_train, path, noise):
         path (str): path of dataset
         noise (float): noise intensity, 0 for noiseless training data
     """
-    s = data_set_navier_stokes(n_train, path, noise, True)
+    s = data_set_navier_stokes(n_train, path, noise, batch_size, True)
     lb = s.lb
     ub = s.ub
-    dataset = ds.GeneratorDataset(source=s, column_names=['data', 'label'], shuffle=True)
-    dataset = dataset.batch(batch_size)
+    dataset = ds.GeneratorDataset(source=s, column_names=['data', 'label'], shuffle=True,
+                                  num_parallel_workers=2, python_multiprocessing=True)
     return dataset, lb, ub
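With the source already yielding whole batches, the dataset.batch(batch_size) step is dropped and two parallel Python workers feed the pipeline instead, so the per-batch slicing in __getitem__ runs concurrently across worker processes. An illustrative usage sketch follows; the batch_size, n_train, noise values and the .mat path are placeholders, not the repository's defaults.

    # Illustrative only: sizes and path below are placeholders.
    from src.NavierStokes.dataset import generate_training_set_navier_stokes

    dataset, lb, ub = generate_training_set_navier_stokes(
        batch_size=500, n_train=5000, path="./Data/cylinder_nektar_wake.mat", noise=0.0)

    # Each generator item is already a batch, so no .batch() call is needed and
    # the dataset size equals n_train // batch_size.
    print(dataset.get_dataset_size())
    for data, label in dataset.create_tuple_iterator():
        print(data.shape, label.shape)   # (batch_size, 3) and (batch_size, 3)
        break

If python_multiprocessing causes trouble on a given platform (or when debugging), falling back to num_parallel_workers=1 is a reasonable first step.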
diff --git a/research/hpc/pinns/src/NavierStokes/train_ns.py b/research/hpc/pinns/src/NavierStokes/train_ns.py
index 0b5c646c205f041d7511e0137da82cc6b2506bb6..a8a5638b23db278b3f678d1b93c13d698f02e5d2 100644
--- a/research/hpc/pinns/src/NavierStokes/train_ns.py
+++ b/research/hpc/pinns/src/NavierStokes/train_ns.py
@@ -29,7 +29,7 @@ from src.NavierStokes.net import PINNs_navier
 
 class EvalCallback(Callback):
     """eval callback."""
-    def __init__(self, data_path, ckpt_dir, per_eval_epoch, num_neuron=20):
+    def __init__(self, data_path, ckpt_dir, per_eval_epoch, num_neuron=20, eval_begin_epoch=10000):
         super(EvalCallback, self).__init__()
         if not isinstance(per_eval_epoch, int) or per_eval_epoch <= 0:
             raise ValueError("per_eval_epoch must be int and > 0")
@@ -39,6 +39,7 @@ class EvalCallback(Callback):
         self.network = PINNs_navier(layers, lb, ub)
         self.ckpt_dir = ckpt_dir
         self.per_eval_epoch = per_eval_epoch
+        self.eval_begin_epoch = eval_begin_epoch
         self.best_result = None
 
     def epoch_end(self, run_context):
@@ -46,7 +47,7 @@ class EvalCallback(Callback):
         cb_params = run_context.original_args()
         cur_epoch = cb_params.cur_epoch_num
         batch_num = cb_params.batch_num
-        if cur_epoch % self.per_eval_epoch == 0:
+        if cur_epoch % self.per_eval_epoch == 0 and cur_epoch >= self.eval_begin_epoch:
             ckpt_format = os.path.join(self.ckpt_dir,
                                        "checkpoint_PINNs_NavierStokes*-{}_{}.ckpt".format(cur_epoch, batch_num))
             ckpt_list = glob.glob(ckpt_format)
@@ -98,9 +99,9 @@ def train_navier(epoch, lr, batch_size, n_train, path, noise, num_neuron, ck_pat
     #call back configuration
     loss_print_num = 1 # print loss per loss_print_num epochs
     # save model
-    config_ck = CheckpointConfig(save_checkpoint_steps=1000, keep_checkpoint_max=20)
+    config_ck = CheckpointConfig(save_checkpoint_steps=200, keep_checkpoint_max=20)
     ckpoint = ModelCheckpoint(prefix="checkpoint_PINNs_NavierStokes", directory=ck_path, config=config_ck)
-    eval_cb = EvalCallback(data_path=path, ckpt_dir=ck_path, per_eval_epoch=100)
+    eval_cb = EvalCallback(data_path=path, ckpt_dir=ck_path, per_eval_epoch=20)
 
     model = Model(network=n, loss_fn=loss, optimizer=opt)
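Taken together, the callback changes gate evaluation on two conditions: the epoch must be a multiple of per_eval_epoch (now 20) and must have reached eval_begin_epoch (default 10000), so no time is spent evaluating early, far-from-converged checkpoints. Because the callback globs for the checkpoint written at the end of the current epoch, the tighter save_checkpoint_steps=200 only helps if a save lands on those epoch boundaries, which depends on the number of batches per epoch (e.g. with 10 batches per epoch, a checkpoint appears every 20 epochs, matching per_eval_epoch). A minimal sketch of the gate in isolation, as a hypothetical standalone helper rather than the Callback API:

    def should_evaluate(cur_epoch, per_eval_epoch=20, eval_begin_epoch=10000):
        """Mirrors the condition in EvalCallback.epoch_end: evaluate only on
        multiples of per_eval_epoch once eval_begin_epoch has been reached."""
        return cur_epoch % per_eval_epoch == 0 and cur_epoch >= eval_begin_epoch

    # should_evaluate(9980)  -> False (multiple of 20, but before epoch 10000)
    # should_evaluate(10000) -> True
    # should_evaluate(10010) -> False (past 10000, but not a multiple of 20)
    # should_evaluate(10020) -> True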