diff --git a/official/cv/ssd/README.md b/official/cv/ssd/README.md
index f53367b9ecf31024dd74652d9516a3cdd0eb8744..d7f37059dc8f44b8d7cee30b04507720eb6cbfea 100644
--- a/official/cv/ssd/README.md
+++ b/official/cv/ssd/README.md
@@ -291,11 +291,14 @@ Then you can run everything just like on ascend.
     "save_best_ckpt": True                           # Save best checkpoint when run_eval is True
     "eval_start_epoch": 40                           # Evaluation start epoch when run_eval is True
     "eval_interval": 1                               # valuation interval when run_eval is True
+    "data_path": "your_path/data"                    # your_path represents absolute path
+    "output_path": "your_path/data/train"            # your_path represents absolute path
+    "load_path": "your_path/data/checkpoint"         # your_path represents absolute path
 
     "class_num": 81                                  # Dataset class number
-    "img_shape": [300, 300]                        # Image height and width used as input to the model
+    "img_shape": [300, 300]                          # Image height and width used as input to the model
     "mindrecord_dir": "/data/MindRecord_COCO"        # MindRecord path
-    "coco_root": "/data/coco2017"                    # COCO2017 dataset path
+    "coco_root": "your_path/cocodataset"             # COCO2017 dataset path
     "voc_root": "/data/voc_dataset"                  # VOC original dataset path
     "voc_json": "annotations/voc_instances_val.json" # is the path of json file with coco format for evaluation
     "image_dir": ""                                  # Other dataset image path, if coco or voc used, it will be useless
@@ -483,6 +486,12 @@ mAP: 0.2244936111705981
   bash scripts/run_eval_onnx.sh <DATA_DIR> <COCO_SUBDIR> <ONNX_MODEL_PATH> [<INSTANCES_SET>] [<DEVICE_TARGET>] [<CONFIG_PATH>]
   ```
 
+  This script requires three parameters (see the example call below):
+
+  - `DATA_DIR`: path of the data generated by MindSpore evaluation.
+  - `COCO_SUBDIR`: COCO2017 dataset path.
+  - `ONNX_MODEL_PATH`: path of the ONNX model.
+
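+  For example, with placeholder paths (MindRecord data under `/data/MindRecord_COCO`, COCO2017 under `/data/coco2017`, and an exported model `./ssd.onnx`), the call could look like:
+
+  ```shell
+  bash scripts/run_eval_onnx.sh /data/MindRecord_COCO /data/coco2017 ./ssd.onnx
+  ```
+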
   Results will be saved in eval.log and have the following form:
 
   ```log
@@ -503,16 +512,16 @@ mAP: 0.2244936111705981
 
 ## Inference Process
 
-### [Export MindIR](#contents)
+### [Export Model](#contents)
 
-Export MindIR on local
+Export the model locally
 
 ```shell
 python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT] --config_path [CONFIG_PATH]
 ```
 
 The ckpt_file parameter is required,
-`FILE_FORMAT` should be in ["AIR", "MINDIR"]
+`FILE_FORMAT` should be in ["AIR", "MINDIR", "ONNX"]
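+
+For example, to export an ONNX model for the ONNX evaluation described above (the checkpoint path and file name below are placeholders), you could run:
+
+```shell
+python export.py --checkpoint_file_path ./ssd-500_458.ckpt --file_name ssd --file_format ONNX --config_path config/ssd_vgg16_config_gpu.yaml
+```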
 
 Export on ModelArts (If you want to run in modelarts, please check the official documentation of [modelarts](https://support.huaweicloud.com/modelarts/), and you can start as follows)
 
diff --git a/official/cv/ssd/README_CN.md b/official/cv/ssd/README_CN.md
index 696d6bcaec569c48c178dd983064af3049ff018f..0474fdc79c82e34327c19f52da01ecd110eafc64 100644
--- a/official/cv/ssd/README_CN.md
+++ b/official/cv/ssd/README_CN.md
@@ -17,6 +17,7 @@
     - [Evaluation Process](#评估过程)
         - [Evaluation on Ascend](#ascend处理器环境评估)
         - [Evaluation on GPU](#gpu处理器环境评估)
+        - [Evaluation on ONNX](#evaluation-on-onnx)
     - [Inference Process](#推理过程)
         - [Export MindIR](#导出mindir)
         - [Inference on Ascend 310](#在ascend310执行推理)
@@ -230,13 +231,16 @@ bash run_eval_gpu.sh [DATASET] [CHECKPOINT_PATH] [DEVICE_ID] [CONFIG_PATH]
     "batch_size": 32                           # 输入张量的批次大小
     "pre_trained": None                        # 预训练检查点文件路径
     "pre_trained_epoch_size": 0                # 预训练轮次大小
-    "save_checkpoint_epochs": 10               # 两个检查点之间的轮次间隔。默认情况下,每10个轮次都会保存检查点。
+    "save_checkpoint_epochs": 10               # 两个检查点之间的轮次间隔。默认情况下,每10个轮次都会保存检查点
     "loss_scale": 1024                         # 损失放大
+    "data_path": "your_path/data"              # your_path是你自己的路径,一定要是绝对路径
+    "output_path": "your_path/data/train"      # your_path是你自己的路径,一定要是绝对路径
+    "load_path": "your_path/data/checkpoint"   # your_path是你自己的路径,一定要是绝对路径
 
     "class_num": 81                            # 数据集类数
-    "img_shape": [300, 300]                  # 作为模型输入的图像高和宽
+    "img_shape": [300, 300]                    # 作为模型输入的图像高和宽
     "mindrecord_dir": "/data/MindRecord_COCO"  # MindRecord路径
-    "coco_root": "/data/coco2017"              # COCO2017数据集路径
+    "coco_root": "your_path/cocodataset"       # COCO2017数据集路径
     "voc_root": ""                             # VOC原始数据集路径
     "image_dir": ""                            # 其他数据集图片路径,如果使用coco或voc,此参数无效。
     "anno_path": ""                            # 其他数据集标注路径,如果使用coco或voc,此参数无效。
@@ -390,18 +394,52 @@ Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.686
 mAP: 0.2244936111705981
 ```
 
+### Evaluation on ONNX
+
+```shell script
+bash run_eval_onnx.sh <DATA_PATH> <COCO_ROOT> <ONNX_MODEL_PATH> [<INSTANCES_SET>] [<DEVICE_TARGET>] [<CONFIG_PATH>]
+```
+
+This script requires three parameters (see the example call below):
+
+- `DATA_PATH`: path of the data generated by MindSpore evaluation.
+- `COCO_ROOT`: COCO2017 dataset path.
+- `ONNX_MODEL_PATH`: path of the ONNX model.
+
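+For example, with placeholder paths (MindRecord data under `/data/MindRecord_COCO`, COCO2017 under `/data/coco2017`, and an exported model `./ssd.onnx`), the command might be:
+
+```shell
+bash run_eval_onnx.sh /data/MindRecord_COCO /data/coco2017 ./ssd.onnx
+```
+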
+Inference results are saved in the sample path, and you can find results similar to the following in eval.log.
+
+```text
+Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.239
+Average Precision  (AP) @[ IoU=0.50      | area=   all | maxDets=100 ] = 0.398
+Average Precision  (AP) @[ IoU=0.75      | area=   all | maxDets=100 ] = 0.242
+Average Precision  (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.035
+Average Precision  (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.198
+Average Precision  (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.436
+Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=  1 ] = 0.251
+Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets= 10 ] = 0.388
+Average Recall     (AR) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.423
+Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.117
+Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.435
+Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.688
+mAP: 0.23850595066045968
+```
+
 ## Inference Process
 
-### [Export MindIR](#contents)
+### [Export Model](#contents)
 
-Export MindIR locally
+Export the model locally
 
 ```shell
 python export.py --checkpoint_file_path [CKPT_PATH] --file_name [FILE_NAME] --file_format [FILE_FORMAT] --config_path [CONFIG_PATH]
 ```
 
-The ckpt_file parameter is required,
-`FILE_FORMAT` must be chosen from ["AIR", "MINDIR"].
+This script requires four parameters (see the example call below):
+
+- `CKPT_PATH`: absolute path of the checkpoint file.
+- `FILE_NAME`: name of the output model file.
+- `FILE_FORMAT`: must be one of ["AIR", "MINDIR", "ONNX"].
+- `CONFIG_PATH`: parameter configuration file.
 
 Export MindIR on ModelArts
 
diff --git a/official/cv/ssd/eval_onnx.py b/official/cv/ssd/eval_onnx.py
index 4ab00f3d71405d7e9a18d56b34f9a8f40bc92c21..a736ab41ca6117d8159ab2b26690561d14a2dc2c 100644
--- a/official/cv/ssd/eval_onnx.py
+++ b/official/cv/ssd/eval_onnx.py
@@ -24,7 +24,7 @@ from src.eval_utils import COCOMetrics
 from src.model_utils.config import config
 
 
-def create_session(checkpoint_path, target_device):
+def create_session(onnx_path, target_device):
     """Create onnxruntime session"""
     if target_device == 'GPU':
         providers = ['CUDAExecutionProvider']
@@ -35,19 +35,19 @@ def create_session(checkpoint_path, target_device):
             f'Unsupported target device {target_device}, '
             f'Expected one of: "CPU", "GPU"'
         )
-    session = ort.InferenceSession(checkpoint_path, providers=providers)
+    session = ort.InferenceSession(onnx_path, providers=providers)
     input_name = session.get_inputs()[0].name
     return session, input_name
 
 
-def ssd_eval(dataset_path, ckpt_path, anno_json):
+def ssd_eval(dataset_path, onnx_path, anno_json):
     """SSD evaluation."""
     # Silence false positive
     # pylint: disable=unexpected-keyword-arg
     ds = create_ssd_dataset(dataset_path, batch_size=config.batch_size,
                             is_training=False, use_multiprocessing=False)
 
-    session, input_name = create_session(ckpt_path, config.device_target)
+    session, input_name = create_session(onnx_path, config.device_target)
     total = ds.get_dataset_size() * config.batch_size
     print("\n========================================\n")
     print("total images num: ", total)
diff --git a/official/cv/ssd/requirements.txt b/official/cv/ssd/requirements.txt
index 37482455857ea89188387c1b5b453668dcb3f98d..bf56813c4dcda4693dc68e54959373b6e28ee7aa 100644
--- a/official/cv/ssd/requirements.txt
+++ b/official/cv/ssd/requirements.txt
@@ -1,4 +1,5 @@
 pycocotools >= 2.0.1
+Cython
 opencv-python
 xml-python
 Pillow
diff --git a/official/cv/ssd/scripts/run_eval_onnx.sh b/official/cv/ssd/scripts/run_eval_onnx.sh
index 4fca9487fa39ad46b9a3c8030005187bc22775a2..d2a468293f8281593b30436d32e3ef32ad7cb3c0 100644
--- a/official/cv/ssd/scripts/run_eval_onnx.sh
+++ b/official/cv/ssd/scripts/run_eval_onnx.sh
@@ -36,7 +36,12 @@ COCO_ROOT=$2
 ONNX_MODEL_PATH=$3
 INSTANCES_SET=${4:-'annotations/instances_{}.json'}
 DEVICE_TARGET=${5:-"GPU"}
-CONFIG_PATH=${6:-"config/ssd300_config_gpu.yaml"}
+CONFIG_PATH=${6:-"config/ssd_vgg16_config_gpu.yaml"}
+
+echo "ONNX_MODEL_PATH: $ONNX_MODEL_PATH"
+echo "INSTANCES_SET: $INSTANCES_SET"
+echo "DEVICE_TARGET: $DEVICE_TARGET"
+echo "CONFIG_PATH: $CONFIG_PATH"
 
 python eval_onnx.py \
     --dataset coco \