diff --git a/research/cv/Pix2Pix/README.md b/research/cv/Pix2Pix/README.md
index c2ac43c10458e4835763ddee127e93a317947605..1f07e4768baf5f0ee35da3fe3a162c2f6a5893f1 100644
--- a/research/cv/Pix2Pix/README.md
+++ b/research/cv/Pix2Pix/README.md
@@ -68,6 +68,18 @@ Dataset_2 used: [maps](http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/maps.
 
 **Note:** We provide data/download_Pix2Pix_dataset.sh to download the datasets.
 
+Download the facades dataset:
+
+```bash
+bash data/download_Pix2Pix_dataset.sh facades
+```
+
+Download the maps dataset:
+
+```bash
+bash data/download_Pix2Pix_dataset.sh maps
+```
+
 # [Environment Requirements](#contents)
 
 - Hardware（Ascend）
@@ -101,6 +113,7 @@ The entire code structure is as following:
   └─run_train.sh                       # launch gpu/ascend training (1 pcs)
   └─run_distribute_train_gpu.sh        # launch gpu training (8 pcs)
   └─run_eval_gpu.sh                    # launch gpu eval
+  └─run_infer_onnx.sh                  # launch onnx infer
 ├─ imgs
   └─Pix2Pix-examples.jpg               # Pix2Pix Imgs
 ├─ src
@@ -123,8 +136,11 @@ The entire code structure is as following:
     ├─ local_adapter.py                # Get local ID
     ├─ moxing_adapter.py               # Parameter processing
 ├─ eval.py                             # evaluate Pix2Pix Model
+├─ infer_onnx.py                       # Pix2Pix onnx inference
 ├─ train.py                            # train script
-└─ export.py                           # export mindir script
+├─ requirements.txt                    # requirements file
+├─ export_onnx.py                      # export onnx script
+└─ export.py                           # export mindir and air script
 ```
 
 ## [Script Parameters](#contents)
@@ -153,11 +169,14 @@ Major parameters in train.py and config.py as follows:
 "dataset_size": 400                         # for Facade_dataset,the number is 400; for Maps_dataset,the number is 1096.
 "train_data_dir": None                      # the file path of input data during training.
 "val_data_dir": None                        # the file path of input data during validating.
+"onnx_infer_data_dir": ./data/facades/val/  # the file path of input data during onnx infer.
 "train_fakeimg_dir": ./results/fake_img/    # during training, the file path of stored fake img.
 "loss_show_dir": ./results/loss_show        # during training, the file path of stored loss img.
 "ckpt_dir": ./results/ckpt                  # during training, the file path of stored CKPT.
 "ckpt": None                                # during validating, the file path of the CKPT used.
+"onnx_path": None                           # during onnx infer, the file path of the ONNX used.
 "predict_dir": ./results/predict/           # during validating, the file path of Generated images.
+"onnx_infer_dir": ./results/onnx_infer/     # during onnx infer, the file path of Generated images.
 ```
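+
+For example, the new ONNX options can be overridden on the command line (a hypothetical run; paths are placeholders):
+
+```bash
+python infer_onnx.py --onnx_infer_data_dir ./data/facades/val/ --onnx_path ./pix2pix.onnx --onnx_infer_dir ./results/onnx_infer/
+```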
 
 ## [Training](#contents)
@@ -171,7 +190,7 @@ python train.py --device_target [Ascend] --device_id [0]
 - running distributed training on Ascend with fixed parameters
 
 ```bash
-bash run_distribute_train_ascend.sh [RANK_TABLE_FILE] [DATASET_PATH]
+bash scripts/run_distribute_train_ascend.sh [RANK_TABLE_FILE] [DATASET_PATH]
 ```
 
 - running on GPU with fixed parameters
@@ -185,7 +204,7 @@ bash scripts/run_train.sh [DEVICE_TARGET] [DEVICE_ID]
 - running distributed training on GPU with fixed parameters
 
 ```bash
-bash run_distribute_train_gpu.sh [DATASET_PATH] [DATASET_NAME]
+bash scripts/run_distribute_train_gpu.sh [DATASET_PATH] [DATASET_NAME]
 ```
 
 ## [Evaluation](#contents)
@@ -211,11 +230,25 @@ bash scripts/run_eval_gpu.sh [DATASET_PATH] [DATASET_NAME] [VAL_DATA_PATH] [CKPT
 ## [310 infer](#contents)
 
 ```bash
-bash run_infer_310.sh [The path of the MINDIR for 310 infer] [The path of the dataset for 310 infer] y Ascend 0
+bash scripts/run_infer_310.sh [The path of the MINDIR for 310 infer] [The path of the dataset for 310 infer] y Ascend 0
 ```
 
 **Note:** Before running 310 inference, export the MINDIR/AIR model with export.py, for example:
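+
+```bash
+python export.py --ckpt [The path of the CKPT for exporting] --train_data_dir [The path of the training dataset]
+```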
 
+## [ONNX export](#contents)
+
+```bash
+python export_onnx.py --ckpt [/path/pix2pix.ckpt] --device_target [GPU] --device_id [0]
+```
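+
+The exported model is saved as pix2pix.onnx in the current directory (the file_name option in default_config.yaml).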
+
+## [ONNX infer](#contents)
+
+```bash
+python infer_onnx.py --device_target [GPU] --device_id [0] --onnx_infer_data_dir [/path/data] --onnx_path [/path/pix2pix.onnx]
+OR
+bash scripts/run_infer_onnx.sh [DEVICE_TARGET] [DEVICE_ID] [ONNX_INFER_DATA_DIR] [ONNX_PATH]
+```
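+
+By default, the generated images are saved to ./results/onnx_infer/ (the onnx_infer_dir option). When launched through scripts/run_infer_onnx.sh, the log is written to infer_onnx.log.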
+
 # [Model Description](#contents)
 
 ## [Performance](#contents)
diff --git a/research/cv/Pix2Pix/default_config.yaml b/research/cv/Pix2Pix/default_config.yaml
index 7fbf42f7a60562d2950399836d40060a6c653811..d00326ad389650c5869deaf81f3351f4d47e82bd 100644
--- a/research/cv/Pix2Pix/default_config.yaml
+++ b/research/cv/Pix2Pix/default_config.yaml
@@ -61,6 +61,12 @@ predict_dir: "results/predict/"
 # export options
 image_size: 256
 file_format: "MINDIR"
+
+# onnx infer options
+onnx_infer_data_dir: "data/facades/val/"
+onnx_infer_dir: "results/onnx_infer/"
+onnx_path: "pix2pix.onnx"
+
 file_name: "pix2pix"
 
 ---
@@ -113,3 +119,6 @@ ckpt_dir: "file path of stored checkpoint file in training"
 ckpt: "file path of checking point file used in validation"
 predict_dir: "file path of generated image in validation"
 image_size: "export image size default=256"
+onnx_path: "file path of the ONNX model used in inference"
+onnx_infer_dir: "file path of generated images in ONNX inference"
+onnx_infer_data_dir: "file path of ONNX inference input data"
diff --git a/research/cv/Pix2Pix/export_onnx.py b/research/cv/Pix2Pix/export_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd1cba03046f7c536d1990a578b232f3706ee9db
--- /dev/null
+++ b/research/cv/Pix2Pix/export_onnx.py
@@ -0,0 +1,58 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""
+    export checkpoint file into onnx models
+"""
+import numpy as np
+from mindspore import Tensor, context
+from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
+from src.models.pix2pix import Pix2Pix, get_generator, get_discriminator
+from src.utils.config import config
+from src.utils.moxing_adapter import moxing_wrapper
+
+
+@moxing_wrapper()
+def export_pix2pix():
+    """Build the Pix2Pix network, load the checkpoint and export it to ONNX."""
+    context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target,
+                        device_id=config.device_id)
+    netG = get_generator()
+    netD = get_discriminator()
+    pix2pix = Pix2Pix(generator=netG, discriminator=netD)
+    # Keep the network in training mode for export: Pix2Pix uses dropout
+    # as its source of randomness at test time.
+    pix2pix.set_train()
+    param_G = load_checkpoint(config.ckpt)
+    load_param_into_net(pix2pix, param_G)
+    # The exported graph takes an (input, target) image pair as NCHW float32 tensors.
+    input_shp = [1, 3, config.image_size, config.image_size]
+    input_array = Tensor(np.random.uniform(-1.0, 1.0, size=input_shp).astype(np.float32))
+    target_array = Tensor(np.random.uniform(-1.0, 1.0, size=input_shp).astype(np.float32))
+    export(pix2pix, input_array, target_array, file_name=config.file_name, file_format="ONNX")
+
+
+if __name__ == '__main__':
+    export_pix2pix()
diff --git a/research/cv/Pix2Pix/infer_onnx.py b/research/cv/Pix2Pix/infer_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..4cea651dc337498ed0f78ccc2cd19446766a30b8
--- /dev/null
+++ b/research/cv/Pix2Pix/infer_onnx.py
@@ -0,0 +1,75 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===========================================================================
+
+"""
+    Evaluate Pix2Pix Model.
+"""
+
+import os
+import onnxruntime
+import numpy as np
+import mindspore as ms
+from mindspore import Tensor
+from src.dataset.pix2pix_dataset import pix2pixDataset_val, create_val_dataset
+from src.utils.tools import save_image
+from src.utils.config import config
+from src.utils.moxing_adapter import moxing_wrapper
+from src.utils.device_adapter import get_device_id
+
+@moxing_wrapper()
+def pix2pix_infer():
+    ms.set_context(mode=ms.GRAPH_MODE, device_target=config.device_target, device_id=get_device_id())
+    # Preprocess the data for evaluating
+    dataset_val = pix2pixDataset_val(root_dir=config.onnx_infer_data_dir)
+    ds_val = create_val_dataset(dataset_val)
+    print("ds:", ds_val.get_dataset_size())
+    print("ds:", ds_val.get_col_names())
+    print("ds.shape:", ds_val.output_shapes())
+    if config.device_target == 'GPU':
+        providers = ['CUDAExecutionProvider']
+    elif config.device_target == 'CPU':
+        providers = ['CPUExecutionProvider']
+    else:
+        raise ValueError(
+            f'Unsupported device target {config.device_target}; '
+            f'expected one of: "CPU", "GPU".'
+        )
+    onnx_session = onnxruntime.InferenceSession(config.onnx_path, providers=providers)
+    if not os.path.isdir(config.onnx_infer_dir):
+        os.makedirs(config.onnx_infer_dir)
+    # A single pass over the validation set is enough for inference.
+    data_loader_val = ds_val.create_dict_iterator(output_numpy=True, num_epochs=1)
+    print("=======Starting inference=======")
+    input_names = get_input_name(onnx_session)
+    for i, data in enumerate(data_loader_val):
+        input_image = np.array(data["input_images"])
+        # The exported graph declares an (input, target) pair, but only the input
+        # image is needed to generate the fake image, so a zero placeholder is fed
+        # for the target when the graph still expects a second input.
+        feeds = {input_names[0]: input_image}
+        if len(input_names) > 1:
+            feeds[input_names[1]] = np.zeros_like(input_image)
+        fake_image = Tensor(onnx_session.run(None, feeds)[0])
+        save_image(fake_image, config.onnx_infer_dir + str(i + 1))
+        print("=======image", i + 1, "saved successfully=======")
+
+def get_input_name(onnx_session):
+    """
+    input_name = onnx_session.get_inputs()[0].name
+    :param onnx_session:
+    :return:
+    """
+    input_name = []
+    for node in onnx_session.get_inputs():
+        input_name.append(node.name)
+    return input_name
+
+
+if __name__ == '__main__':
+    pix2pix_infer()
diff --git a/research/cv/Pix2Pix/requirements.txt b/research/cv/Pix2Pix/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8256e1f9330b2097c0521e8a4a4366584efc21c5
--- /dev/null
+++ b/research/cv/Pix2Pix/requirements.txt
@@ -0,0 +1,3 @@
+mindspore
+numpy
+onnxruntime-gpu==1.11.1
\ No newline at end of file
diff --git a/research/cv/Pix2Pix/scripts/run_infer_onnx.sh b/research/cv/Pix2Pix/scripts/run_infer_onnx.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2bdee641d5f0617f9d85a946454e007b14e07104
--- /dev/null
+++ b/research/cv/Pix2Pix/scripts/run_infer_onnx.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ $# != 4 ]
+then
+    echo "Usage: bash run_infer_onnx.sh [DEVICE_TARGET] [DEVICE_ID] [ONNX_INFER_DATA_DIR] [ONNX_PATH]"
+    exit 1
+fi
+
+get_real_path(){
+  if [ "${1:0:1}" == "/" ]; then
+    echo "$1"
+  else
+    echo "$(realpath -m $PWD/$3)"
+  fi
+}
+
+PATH1=$(get_real_path "$3")
+ONNX_PATH=$(get_real_path "$4")
+if [ ! -d "$PATH1" ]
+then
+    echo "error: ONNX_INFER_DATA_DIR=$PATH1 is not a directory"
+    exit 1
+fi
+
+python infer_onnx.py --device_target "$1" --device_id "$2" --onnx_infer_data_dir "$PATH1" --onnx_path "$ONNX_PATH" > infer_onnx.log 2>&1 &