diff --git a/research/cv/FaceDetection/README.md b/research/cv/FaceDetection/README.md
index 330739f9e6b908bf656798b0fc757ea465fb6b20..c9389e1fe4d87446f5e009ca6796b94fa631063a 100644
--- a/research/cv/FaceDetection/README.md
+++ b/research/cv/FaceDetection/README.md
@@ -406,6 +406,8 @@ Saving ../../results/0-2441_61000/.._.._results_0-2441_61000_face_AP_0.7575.png
 | Accuracy            | 8pcs: 76.0%                 | 4pcs: 77.8%                 |
 | Model for inference | 37M (.ckpt file)            | --                          |
 
+Note: the accuracy may vary by about ±2%.
+
 ### Inference Performance
 
 | Parameters          | Ascend                      |
diff --git a/research/cv/FaceDetection/eval.py b/research/cv/FaceDetection/eval.py
index e367653c7960a7eebe0a56c68e0cbf58800167eb..3ded3b96e41e740253688fb3cbfb0a04eebac6c8 100644
--- a/research/cv/FaceDetection/eval.py
+++ b/research/cv/FaceDetection/eval.py
@@ -1,4 +1,4 @@
-# Copyright 2020-2021 Huawei Technologies Co., Ltd
+# Copyright 2020-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -37,10 +37,16 @@ from model_utils.device_adapter import get_device_id, get_device_num, get_rank_i
 
 plt.switch_backend('agg')
 
+
 def load_pretrain(net, cfg):
     '''load pretrain model'''
-    if os.path.isfile(cfg.pretrained):
-        param_dict = load_checkpoint(cfg.pretrained)
+    return load_ckpt(net, cfg.pretrained)
+
+
+def load_ckpt(net, ckpt):
+    '''load parameters from a checkpoint file into the network'''
+    if os.path.isfile(ckpt):
+        param_dict = load_checkpoint(ckpt)
         param_dict_new = {}
         for key, values in param_dict.items():
             if key.startswith('moments.'):
@@ -50,13 +56,33 @@ def load_pretrain(net, cfg):
             else:
                 param_dict_new[key] = values
         load_param_into_net(net, param_dict_new)
-        print('load model {} success'.format(cfg.pretrained))
+        print('load model {} success'.format(ckpt))
     else:
-        print('load model {} failed, please check the path of model, evaluating end'.format(cfg.pretrained))
+        print('load model {} failed, please check the model path; evaluation ends'.format(ckpt))
         exit(0)
 
     return net
 
+
+def get_ckpt_list(cfg, max_ckpt_nums):
+    '''return up to max_ckpt_nums checkpoint paths, newest first'''
+    ckpt_url_list = []
+    if os.path.isfile(cfg.pretrained):
+        ckpt_url_list.append(cfg.pretrained)
+    elif os.path.isdir(cfg.pretrained):
+        lists = os.listdir(cfg.pretrained)
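+        # sort candidate files by modification time, newest first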
+        lists.sort(key=lambda fn: os.path.getmtime(
+            os.path.join(cfg.pretrained, fn)), reverse=True)
+        for ckpt_name in lists:
+            if "ckpt" in ckpt_name:
+                ckpt_url = os.path.join(cfg.pretrained, ckpt_name)
+                ckpt_url_list.append(ckpt_url)
+            if len(ckpt_url_list) >= max_ckpt_nums:
+                break
+    return ckpt_url_list
+
+
 def modelarts_pre_process():
     '''modelarts pre process function.'''
     def unzip(zip_file, save_dir):
@@ -73,7 +99,8 @@ def modelarts_pre_process():
                 i = 0
                 for file in fz.namelist():
                     if i % data_print == 0:
-                        print("unzip percent: {}%".format(int(i * 100 / data_num)), flush=True)
+                        print("unzip percent: {}%".format(
+                            int(i * 100 / data_num)), flush=True)
                     i += 1
                     fz.extract(file, save_dir)
                 print("cost time: {}min:{}s.".format(int((time.time() - s_time) / 60),
@@ -85,7 +112,8 @@ def modelarts_pre_process():
             print("Zip has been extracted.")
 
     if config.need_modelarts_dataset_unzip:
-        zip_file_1 = os.path.join(config.data_path, config.modelarts_dataset_unzip_name + ".zip")
+        zip_file_1 = os.path.join(
+            config.data_path, config.modelarts_dataset_unzip_name + ".zip")
         save_dir_1 = os.path.join(config.data_path)
 
         sync_lock = "/tmp/unzip_sync.lock"
@@ -106,18 +134,20 @@ def modelarts_pre_process():
                 break
             time.sleep(1)
 
-        print("Device: {}, Finish sync unzip data from {} to {}.".format(get_device_id(), zip_file_1, save_dir_1))
+        print("Device: {}, Finish sync unzip data from {} to {}.".format(
+            get_device_id(), zip_file_1, save_dir_1))
 
     config.result_path = os.path.join(config.output_path, "results")
 
 
 @moxing_wrapper(pre_process=modelarts_pre_process)
 def run_eval():
     '''run eval'''
     config.world_size = get_device_num()
     config.local_rank = get_rank_id()
     devid = get_device_id() if config.run_platform != 'CPU' else 0
-    context.set_context(mode=context.GRAPH_MODE, device_target=config.run_platform, save_graphs=False, device_id=devid)
+    context.set_context(mode=context.GRAPH_MODE,
+                        device_target=config.run_platform, save_graphs=False, device_id=devid)
     print('=============yolov3 start evaluating==================')
 
     context.set_auto_parallel_context(parallel_mode=ParallelMode.STAND_ALONE, device_num=config.world_size,
@@ -139,7 +169,8 @@ def run_eval():
 
     # dataloader
     print('Loading data from {}'.format(config.mindrecord_path))
-    ds = de.MindDataset(config.mindrecord_path + "0", columns_list=["image", "annotation", "image_name", "image_size"])
+    ds = de.MindDataset(config.mindrecord_path + "0",
+                        columns_list=["image", "annotation", "image_name", "image_size"])
 
     single_scale_trans = SingleScaleTrans(resize=config.input_shape)
     ds = ds.batch(config.batch_size, per_batch_map=single_scale_trans,
@@ -147,99 +178,105 @@ def run_eval():
 
     config.steps_per_epoch = ds.get_dataset_size()
 
-    # backbone
-    network = backbone_HwYolov3(num_classes, num_anchors_list, config)
-    network = load_pretrain(network, config)
-
-
-    det = {}
-    img_size = {}
-    img_anno = {}
-
-    model_name = config.pretrained.split('/')[-1].replace('.ckpt', '')
-    result_path = os.path.join(config.result_path, model_name)
-    if os.path.exists(result_path):
-        pass
-    if not os.path.isdir(result_path):
-        os.makedirs(result_path, exist_ok=True)
-
-    # result file
-    ret_files_set = {'face': os.path.join(result_path, 'comp4_det_test_face_rm5050.txt'),}
-
-    test_net = BuildTestNetwork(network, reduction_0, reduction_1, reduction_2, anchors, anchors_mask, num_classes,
-                                config)
-
-    print('conf_thresh:', config.conf_thresh)
-
-    eval_times = 0
-
-    for data in ds.create_tuple_iterator(output_numpy=True):
-        batch_images, batch_labels, batch_image_name, batch_image_size = data[0:4]
-        eval_times += 1
-
-        img_tensor = Tensor(batch_images, mstype.float32)
-
-        dets = []
-        tdets = []
-
-        coords_0, cls_scores_0, coords_1, cls_scores_1, coords_2, cls_scores_2 = test_net(img_tensor)
-
-        boxes_0, boxes_1, boxes_2 = get_bounding_boxes(coords_0, cls_scores_0, coords_1, cls_scores_1, coords_2,
-                                                       cls_scores_2, config.conf_thresh, config.input_shape,
-                                                       num_classes)
-
-        converted_boxes_0, converted_boxes_1, converted_boxes_2 = tensor_to_brambox(boxes_0, boxes_1, boxes_2,
-                                                                                    config.input_shape, labels)
-
-        tdets.append(converted_boxes_0)
-        tdets.append(converted_boxes_1)
-        tdets.append(converted_boxes_2)
-
-        batch = len(tdets[0])
-        for b in range(batch):
-            single_dets = []
-            for op in range(3):
-                single_dets.extend(tdets[op][b])
-            dets.append(single_dets)
-
-        det.update({batch_image_name[k].decode('UTF-8'): v for k, v in enumerate(dets)})
-        img_size.update({batch_image_name[k].decode('UTF-8'): v for k, v in enumerate(batch_image_size)})
-        img_anno.update({batch_image_name[k].decode('UTF-8'): v for k, v in enumerate(batch_labels)})
-
-    print('eval times:', eval_times)
-    print('batch size: ', config.batch_size)
-
-    netw, neth = config.input_shape
-    reorg_dets = voc_wrapper.reorg_detection(det, netw, neth, img_size)
-    voc_wrapper.gen_results(reorg_dets, result_path, img_size, config.nms_thresh)
-
-    # compute mAP
-    ground_truth = parse_gt_from_anno(img_anno, classes)
-
-    ret_list = parse_rets(ret_files_set)
-    iou_thr = 0.5
-    evaluate = calc_recall_precision_ap(ground_truth, ret_list, iou_thr)
-
-    aps_str = ''
-    for cls in evaluate:
-        per_line, = plt.plot(evaluate[cls]['recall'], evaluate[cls]['precision'], 'b-')
-        per_line.set_label('%s:AP=%.3f' % (cls, evaluate[cls]['ap']))
-        aps_str += '_%s_AP_%.3f' % (cls, evaluate[cls]['ap'])
-        plt.plot([i / 1000.0 for i in range(1, 1001)], [i / 1000.0 for i in range(1, 1001)], 'y--')
-        plt.axis([0, 1.2, 0, 1.2])
-        plt.xlabel('recall')
-        plt.ylabel('precision')
-        plt.grid()
-
-        plt.legend()
-        plt.title('PR')
-
-    # save mAP
-    ap_save_path = os.path.join(result_path, result_path.replace('/', '_') + aps_str + '.png')
-    print('Saving {}'.format(ap_save_path))
-    plt.savefig(ap_save_path)
+    # evaluate up to the 10 most recent checkpoints, saving the PR plot of the best one
+    ckpt_url_list = get_ckpt_list(config, 10)
+    max_aps = 0
+    for ckpt_url in ckpt_url_list:
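+        # per-checkpoint accumulators: detections, image sizes and annotations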
+        det = {}
+        img_size = {}
+        img_anno = {}
+        # backbone
+        network = backbone_HwYolov3(num_classes, num_anchors_list, config)
+        network = load_ckpt(network, ckpt_url)
+        model_name = ckpt_url.split('/')[-1].replace('.ckpt', '')
+        result_path = os.path.join(config.result_path, model_name)
+        os.makedirs(result_path, exist_ok=True)
+
+        # result file
+        ret_files_set = {'face': os.path.join(result_path, 'comp4_det_test_face_rm5050.txt')}
+
+        test_net = BuildTestNetwork(network, reduction_0, reduction_1, reduction_2, anchors, anchors_mask, num_classes,
+                                    config)
+
+        print('conf_thresh:', config.conf_thresh)
+
+        eval_times = 0
+
+        for data in ds.create_tuple_iterator(output_numpy=True):
+            batch_images, batch_labels, batch_image_name, batch_image_size = data[0:4]
+            eval_times += 1
+
+            img_tensor = Tensor(batch_images, mstype.float32)
+
+            dets = []
+            tdets = []
+
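+            # forward pass: box coordinates and class scores for each of the three detection scales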
+            coords_0, cls_scores_0, coords_1, cls_scores_1, coords_2, cls_scores_2 = test_net(img_tensor)
+
+            boxes_0, boxes_1, boxes_2 = get_bounding_boxes(coords_0, cls_scores_0, coords_1, cls_scores_1, coords_2,
+                                                           cls_scores_2, config.conf_thresh, config.input_shape,
+                                                           num_classes)
+
+            converted_boxes_0, converted_boxes_1, converted_boxes_2 = tensor_to_brambox(boxes_0, boxes_1, boxes_2,
+                                                                                        config.input_shape, labels)
+
+            tdets.append(converted_boxes_0)
+            tdets.append(converted_boxes_1)
+            tdets.append(converted_boxes_2)
+
+            batch = len(tdets[0])
+            for b in range(batch):
+                single_dets = []
+                for op in range(3):
+                    single_dets.extend(tdets[op][b])
+                dets.append(single_dets)
+
+            det.update({batch_image_name[k].decode('UTF-8'): v for k, v in enumerate(dets)})
+            img_size.update({batch_image_name[k].decode('UTF-8'): v for k, v in enumerate(batch_image_size)})
+            img_anno.update({batch_image_name[k].decode('UTF-8'): v for k, v in enumerate(batch_labels)})
+
+        print('eval times:', eval_times)
+        print('batch size: ', config.batch_size)
+
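+        # rescale detections from the network input size back to each original image size and write results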
+        netw, neth = config.input_shape
+        reorg_dets = voc_wrapper.reorg_detection(det, netw, neth, img_size)
+        voc_wrapper.gen_results(reorg_dets, result_path, img_size, config.nms_thresh)
+
+        # compute mAP
+        ground_truth = parse_gt_from_anno(img_anno, classes)
+
+        ret_list = parse_rets(ret_files_set)
+        iou_thr = 0.5
+        evaluate = calc_recall_precision_ap(ground_truth, ret_list, iou_thr)
+
+        aps_str = ''
+        aps = 0
+        plt.clf()  # start a fresh figure so PR curves from previous checkpoints do not accumulate
+        for cls in evaluate:
+            per_line, = plt.plot(evaluate[cls]['recall'], evaluate[cls]['precision'], 'b-')
+            per_line.set_label('%s:AP=%.3f' % (cls, evaluate[cls]['ap']))
+            aps_str += '_%s_AP_%.3f' % (cls, evaluate[cls]['ap'])
+            aps = evaluate[cls]['ap']
+            plt.plot([i / 1000.0 for i in range(1, 1001)], [i / 1000.0 for i in range(1, 1001)], 'y--')
+            plt.axis([0, 1.2, 0, 1.2])
+            plt.xlabel('recall')
+            plt.ylabel('precision')
+            plt.grid()
+
+            plt.legend()
+            plt.title('PR')
+        # save the PR plot whenever this checkpoint ties or beats the best AP so far
+        if aps >= max_aps:
+            max_aps = aps
+            ap_save_path = os.path.join(result_path, result_path.replace('/', '_') + aps_str + '.png')
+            print('Saving {}'.format(ap_save_path))
+            plt.savefig(ap_save_path)
 
     print('=============yolov3 evaluating finished==================')
 
+
 if __name__ == "__main__":
     run_eval()
diff --git a/research/cv/FaceDetection/scripts/run_eval.sh b/research/cv/FaceDetection/scripts/run_eval.sh
index 456e4e5f60bd0e393674fb64137785ec5fd2ce10..3a22480caf0bac8c8d03ff1b8b6be78d990a6973 100644
--- a/research/cv/FaceDetection/scripts/run_eval.sh
+++ b/research/cv/FaceDetection/scripts/run_eval.sh
@@ -47,9 +47,9 @@ MINDRECORD_FILE=$(get_real_path $2)
 USE_DEVICE_ID=$3
 PRETRAINED_BACKBONE=$(get_real_path $4)
 
-if [ ! -f $PRETRAINED_BACKBONE ]
+if [ ! -f "$PRETRAINED_BACKBONE" ] && [ ! -d "$PRETRAINED_BACKBONE" ]
     then
-    echo "error: PRETRAINED_PATH=$PRETRAINED_BACKBONE is not a file"
+    echo "error: PRETRAINED_PATH=$PRETRAINED_BACKBONE is not a file neither a dir."
 exit 1
 fi