diff --git a/research/cv/simple_baselines/README.md b/research/cv/simple_baselines/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2a9cd38bf1895079cc9e251b11c928bf2a0e2038
--- /dev/null
+++ b/research/cv/simple_baselines/README.md
@@ -0,0 +1,370 @@
+# Contents
+
+<!-- TOC -->
+
+[鏌ョ湅涓枃](./README_CN.md)
+
+- [Simple Baselines Description](#simple_baselines-description)
+- [Model Architecture](#model-architecture)
+- [Dataset](#dataset)
+- [Features](#features)
+    - [Mixed Precision](#mixed-precision)
+- [Environment Requirements](#environment-requirements)
+- [Quick Start](#quick-start)
+- [Script Description](#script-description)
+    - [Script and Sample Code](#script-and-sample-code)
+    - [Script Parameters](#script-parameters)
+    - [Training Process](#training-process)
+        - [Usage](#usage1)
+        - [Result](#result1)
+    - [Evaluation Process](#evaluation-process)
+        - [Usage](#usage2)
+        - [Result](#result2)
+    - [Inference Process](#inference-process)
+        - [Model Export](#model-export)
+        - [Infer on Ascend310](#infer-ascend310)
+        - [Result](#result)
+- [Model Description](#model-description)
+    - [Performance](#performance)
+- [Description of Random State](#description-of-random-state)
+- [ModelZoo Homepage](#ModelZoo-homepage)
+
+<!-- /TOC -->
+
+# Simple Baselines Description
+
+## Overview
+
+Simple Baselines was proposed by Bin Xiao, Haiping Wu, and Yichen Wei from Microsoft Research Asia. The authors observe
+that the popular human pose estimation and tracking methods of the time are overly complicated: although the existing
+models look quite different in structure, their performance is actually close. The authors propose a simple and
+effective baseline that adds a few deconvolutional layers on top of a ResNet backbone, which is arguably the simplest
+way to estimate heatmaps from deep, low-resolution feature maps, thereby helping to stimulate and evaluate new ideas in
+the field.
+
+For more details, refer to the [paper](https://arxiv.org/pdf/1804.06208.pdf).
+The MindSpore implementation is based on the [original PyTorch version](https://github.com/microsoft/human-pose-estimation.pytorch) released by Microsoft Research Asia.
+
+## Paper
+
+[Paper](https://arxiv.org/pdf/1804.06208.pdf): Bin Xiao, Haiping Wu, Yichen Wei. "Simple Baselines for Human Pose Estimation and Tracking"
+
+# Model Architecture
+
+The overall network architecture of Simple Baselines is described in the [paper](https://arxiv.org/pdf/1804.06208.pdf).
+
+# Dataset
+
+Dataset used: [COCO2017](https://cocodataset.org/#download)
+
+- Dataset size:
+    - Train: 19.56GB, 57k images, 149813 person instances
+    - Test: 825MB, 5k images, 6352 person instances
+- Data format: JPG
+    - Note: Data is processed in src/dataset.py
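+
+The default paths in src/config.py assume a directory layout like the one below; all of these paths can be changed in src/config.py:
+
+```text
+/home/dataset/coco/                                   # config.DATASET.ROOT
+ ├── train2017/                                       # config.DATASET.TRAIN_SET
+ ├── val2017/                                         # config.DATASET.TEST_SET
+ └── annotations/
+     ├── person_keypoints_train2017.json              # config.DATASET.TRAIN_JSON
+     ├── person_keypoints_val2017.json                # config.DATASET.TEST_JSON
+     └── COCO_val2017_detections_AP_H_56_person.json  # config.TEST.COCO_BBOX_FILE
+```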
+
+# Features
+
+## Mixed Precision
+
+The [mixed precision](https://www.mindspore.cn/tutorials/experts/en/master/others/mixed_precision.html) training
+method accelerates the deep learning neural network training process by using both the single-precision and half-precision
+data types, while maintaining the network accuracy achieved by single-precision training. Mixed precision
+training can accelerate the computation, reduce memory usage, and enable a larger model or batch size to be trained
+on specific hardware. For FP16 operators, if the input data type is FP32, the MindSpore backend will automatically handle
+it with reduced precision. Users can check the reduced-precision operators by enabling the INFO log and searching for 'reduce precision'.
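+
+In this repository, mixed precision is enabled when train.py builds the `Model`. A minimal sketch of that wiring (`amp_level="O2"` casts the network to FP16 while keeping batch normalization in FP32):
+
+```python
+# Sketch of the mixed-precision setup used by train.py.
+from mindspore import Model
+from mindspore.nn.optim import Adam
+from src.config import config
+from src.pose_resnet import GetPoseResNet
+from src.network_with_loss import JointsMSELoss, PoseResNetWithLoss
+
+net = GetPoseResNet(config)
+loss = JointsMSELoss(config.LOSS.USE_TARGET_WEIGHT)
+net_with_loss = PoseResNetWithLoss(net, loss)
+opt = Adam(net_with_loss.trainable_params(), learning_rate=config.TRAIN.LR)
+model = Model(net_with_loss, loss_fn=None, optimizer=opt, amp_level="O2")
+```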
+
+# Environment Requirements
+
+- Hardware (Ascend/GPU)
+    - Prepare hardware environment with Ascend or GPU.
+- Framework
+    - [MindSpore](https://www.mindspore.cn/install/en)
+- For more information about MindSpore, please check the resources below:
+    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/zh-CN/master/index.html)
+    - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/master/index.html)
+
+# Quick Start
+
+After installing MindSpore through the official website, you can follow the steps below for training and evaluation.
+
+- Dataset preparation
+
+Simple Baselines uses the COCO2017 dataset for training and evaluation. Download the dataset from the [official website](https://cocodataset.org/).
+
+- Running on Ascend
+
+```shell
+# Distributed training
+bash scripts/run_distribute_train.sh RANK_TABLE
+
+# Standalone training
+bash scripts/run_standalone_train.sh DEVICE_ID
+
+# Evaluation
+bash scripts/run_eval.sh
+```
+
+- Running on GPU
+
+```shell
+# Distributed training
+bash scripts/run_distribute_train_gpu.sh DEVICE_NUM
+
+# Standalone training
+bash scripts/run_standalone_train_gpu.sh DEVICE_ID
+
+# Evaluation
+bash scripts/run_eval_gpu.sh DEVICE_ID
+```
+
+# Script Description
+
+## Script and Sample Code
+
+```text
+.
+└──simple_baselines
+  ├── README.md
+  ├── scripts
+    ├── run_distribute_train.sh            # train on Ascend
+    ├── run_distribute_train_gpu.sh        # train on GPU
+    ├── run_eval.sh                        # eval on Ascend
+    ├── run_eval_gpu.sh                    # eval on GPU
+    ├── run_standalone_train.sh            # train on Ascend
+    ├── run_standalone_train_gpu.sh        # train on GPU
+    └── run_infer_310.sh                   # inference on Ascend 310
+  ├── src
+    ├── utils
+        ├── coco.py                        # COCO dataset evaluation results
+        ├── nms.py                         # nms
+        └── transforms.py                  # Image processing conversion
+    ├── config.py                          # Parameter configuration
+    ├── dataset.py                         # Data preprocessing
+    ├── network_with_loss.py               # Loss function
+    ├── pose_resnet.py                     # Backbone network
+    └── predict.py                         # Heatmap key point prediction
+  ├── export.py                            # Export mindir/air model
+  ├── postprocess.py                       # 310 inference postprocess
+  ├── preprocess.py                        # 310 inference preprocess
+  ├── eval.py                              # Evaluate network
+  └── train.py                             # Train network
+```
+
+## Script Parameters
+
+Before training, configure the parameters and paths in src/config.py.
+
+- Model parameters:
+
+```text
+config.MODEL.INIT_WEIGHTS = True                                 # Initialize model weights
+config.MODEL.PRETRAINED = 'resnet50.ckpt'                        # Pre-trained model
+config.MODEL.NUM_JOINTS = 17                                     # Number of key points
+config.MODEL.IMAGE_SIZE = [192, 256]                             # Image size
+```
+
+- Network parameters:
+
+```text
+config.NETWORK.NUM_LAYERS = 50                                   # Resnet backbone layers
+config.NETWORK.DECONV_WITH_BIAS = False                          # Network deconvolution bias
+config.NETWORK.NUM_DECONV_LAYERS = 3                             # Number of network deconvolution layers
+config.NETWORK.NUM_DECONV_FILTERS = [256, 256, 256]              # Deconvolution layer filter size
+config.NETWORK.NUM_DECONV_KERNELS = [4, 4, 4]                    # Deconvolution layer kernel size
+config.NETWORK.FINAL_CONV_KERNEL = 1                             # Final convolutional layer kernel size
+config.NETWORK.HEATMAP_SIZE = [48, 64]
+```
+
+- Training parameters:
+
+```text
+config.TRAIN.SHUFFLE = True
+config.TRAIN.BATCH_SIZE = 64
+config.TRAIN.BEGIN_EPOCH = 0
+config.TRAIN.END_EPOCH = 140
+config.TRAIN.LR = 0.001
+config.TRAIN.LR_FACTOR = 0.1                 # learning rate reduction factor
+config.TRAIN.LR_STEP = [90, 120]
+config.TRAIN.NUM_PARALLEL_WORKERS = 8
+config.TRAIN.SAVE_CKPT = True
+config.TRAIN.CKPT_PATH = "./model"           # directory with the pretrained resnet50 and for saving checkpoints
+config.TRAIN.SAVE_CKPT_EPOCH = 3
+config.TRAIN.KEEP_CKPT_MAX = 10
+```
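+
+The step-decay schedule built from these values mirrors `get_lr()` in train.py: the learning rate starts at `TRAIN.LR` and is multiplied by `TRAIN.LR_FACTOR` at each epoch listed in `TRAIN.LR_STEP`:
+
+```python
+# Step-decay learning rate, as computed by get_lr() in train.py.
+import numpy as np
+
+def get_lr(begin_epoch, total_epochs, steps_per_epoch,
+           lr_init=0.001, factor=0.1, epoch_number_to_drop=(90, 120)):
+    lr_each_step = []
+    total_steps = steps_per_epoch * total_epochs
+    step_number_to_drop = [steps_per_epoch * x for x in epoch_number_to_drop]
+    for i in range(int(total_steps)):
+        if i in step_number_to_drop:
+            lr_init = lr_init * factor  # drop at epochs 90 and 120
+        lr_each_step.append(lr_init)
+    current_step = steps_per_epoch * begin_epoch
+    return np.array(lr_each_step, dtype=np.float32)[current_step:]
+```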
+
+- Evaluation parameters:
+
+```text
+config.TEST.BATCH_SIZE = 32
+config.TEST.FLIP_TEST = True
+config.TEST.USE_GT_BBOX = False
+```
+
+- nms parameters:
+
+```text
+config.TEST.OKS_THRE = 0.9                                       # OKS threshold
+config.TEST.IN_VIS_THRE = 0.2                                    # Keypoint visibility threshold
+config.TEST.BBOX_THRE = 1.0                                      # Candidate box threshold
+config.TEST.IMAGE_THRE = 0.0                                     # Image threshold
+config.TEST.NMS_THRE = 1.0                                       # nms threshold
+```
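+
+All of these parameters live in a single EasyDict named `config` in src/config.py, so they can also be overridden programmatically before training or evaluation; the values below are examples, not defaults:
+
+```python
+# Example of overriding configuration values at runtime.
+from src.config import config
+
+config.DATASET.ROOT = '/home/dataset/coco/'  # COCO2017 root directory
+config.TRAIN.BATCH_SIZE = 32                 # e.g. to fit a smaller device
+print(config.TRAIN.LR_STEP)                  # [90, 120]
+```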
+
+## Training Process
+
+### Usage
+
+- Ascend
+
+```shell
+# Distributed training 8p
+bash scripts/run_distribute_train.sh RANK_TABLE
+
+# Standalone training
+bash scripts/run_standalone_train.sh DEVICE_ID
+
+# Evaluation
+bash scripts/run_eval.sh
+```
+
+- GPU
+
+```shell
+# Distributed training
+bash scripts/run_distribute_train_gpu.sh DEVICE_NUM
+
+# Standalone training
+bash scripts/run_standalone_train_gpu.sh DEVICE_ID
+
+# Evaluation
+bash scripts/run_eval_gpu.sh DEVICE_ID
+```
+
+### Result
+
+- Use COCO2017 dataset to train simple_baselines
+
+```text
+# Standalone training results (1P)
+epoch:1 step:2340, loss is 0.0008106
+epoch:2 step:2340, loss is 0.0006160
+epoch:3 step:2340, loss is 0.0006480
+epoch:4 step:2340, loss is 0.0005620
+epoch:5 step:2340, loss is 0.0005207
+...
+epoch:138 step:2340, loss is 0.0003183
+epoch:139 step:2340, loss is 0.0002866
+epoch:140 step:2340, loss is 0.0003393
+```
+
+## Evaluation Process
+
+### Usage
+
+The checkpoint to evaluate is selected via "config.TEST.MODEL_FILE" in src/config.py.
+val2017 from the COCO2017 dataset folder is used to evaluate simple_baselines.
+
+- Ascend
+
+```shell
+# Evaluation
+bash scripts/run_eval.sh
+```
+
+- GPU
+
+```shell
+# Evaluation
+bash scripts/run_eval_gpu.sh DEVICE_ID
+```
+
+### Result
+
+Results will be saved in keypoints_results.pkl:
+
+```text
+AP: 0.704
+```
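+
+Evaluation also writes keypoints_results.json, which can be re-scored with pycocotools directly. A minimal sketch of what src/utils/coco.py does internally (the paths are examples):
+
+```python
+# Re-score a detections file with the COCO keypoint metric.
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+ann_path = '/home/dataset/coco/annotations/person_keypoints_val2017.json'
+coco = COCO(ann_path)
+coco_dt = coco.loadRes('keypoints_results.json')
+coco_eval = COCOeval(coco, coco_dt, 'keypoints')
+coco_eval.evaluate()
+coco_eval.accumulate()
+coco_eval.summarize()
+print('AP:', coco_eval.stats[0])  # stats[0] is AP@[.50:.95]
+```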
+
+## Inference Process
+
+### Model Export
+
+- Export in local
+
+```shell
+python export.py
+```
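+
+A sketch of what the export step does (see export.py for the actual arguments; the dummy input assumes NCHW layout with `IMAGE_SIZE = [192, 256]`, i.e. width 192 and height 256):
+
+```python
+# Build the network, load the checkpoint from config, and export it.
+import numpy as np
+from mindspore import Tensor, export
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.common import dtype as mstype
+from src.config import config
+from src.pose_resnet import GetPoseResNet
+
+net = GetPoseResNet(config)
+load_param_into_net(net, load_checkpoint(config.TEST.MODEL_FILE))
+inputs = Tensor(np.zeros([1, 3, 256, 192]), mstype.float32)  # NCHW dummy input
+export(net, inputs, file_name='simple_pose', file_format='MINDIR')
+```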
+
+- Export on ModelArts (if you want to run the export on ModelArts, please check the [ModelArts official documentation](https://support.huaweicloud.com/modelarts/)):
+
+```text
+# (1) Upload the code folder to an S3 bucket.
+# (2) Click "create training task" on the website UI interface.
+# (3) Set the code directory to "/{path}/simple_pose" on the website UI interface.
+# (4) Set the startup file to "/{path}/simple_pose/export.py" on the website UI interface.
+# (5) Set the parameters in /{path}/simple_pose/default_config.yaml:
+#     1. Set "enable_modelarts: True"
+#     2. Set "TEST.MODEL_FILE: ./{path}/*.ckpt" ('TEST.MODEL_FILE' is the path of the weight file to be exported, relative to `export.py`; the weight file must be included in the code directory.)
+#     3. Set "EXPORT.FILE_NAME: simple_pose"
+#     4. Set "EXPORT.FILE_FORMAT: MINDIR"
+# (6) Check the "data storage location" on the website UI interface and set the "Dataset path" (this step is not actually used, but necessary).
+# (7) Set the "Output file path" and "Job log path" to your path on the website UI interface.
+# (8) Under the item "resource pool selection", select the specification of a single card.
+# (9) Create your job.
+# You will see simple_pose.mindir under {Output file path}.
+```
+
+`FILE_FORMAT` should be in ["AIR", "MINDIR"]
+
+### Infer on Ascend310
+
+Before performing inference, the MINDIR file must be exported by the export.py script. We only provide an example of inference using a MINDIR model.
+When the network processes the dataset, if the last batch is not full, it will not be padded automatically, so it is better to set batch_size to 1.
+
+```shell
+# Ascend310 inference
+bash run_infer_310.sh [MINDIR_PATH] [NEED_PREPROCESS] [DEVICE_ID]
+```
+
+- `NEED_PREPROCESS` indicates whether the dataset needs to be preprocessed into binary format; the value is "y" or "n".
+- `DEVICE_ID` is optional; the default value is 0.
+
+### Result
+
+The inference results are saved in the current path in `acc.log` file.
+
+```text
+AP: 0.7139169694686592
+```
+
+# Model Description
+
+## Performance
+
+| Parameters          | Ascend 910                  | GPU 1p | GPU 8p |
+| ------------------- | --------------------------- | ------------ | ------------ |
+| Model               | simple_baselines            | simple_baselines | simple_baselines |
+| Environment         | Ascend 910; CPU 2.60GHz, 192 cores; RAM 755GB  | Ubuntu 18.04.6, 1x RTX3090, CPU 2.90GHz, 64 cores, RAM 252GB; MindSpore 1.5.0 | Ubuntu 18.04.6, 8x RTX3090, CPU 2.90GHz, 64 cores, RAM 252GB; MindSpore 1.5.0 |
+| Upload date (Y-M-D) | 2021-03-29                  | 2021-12-29 | 2021-12-29 |
+| MindSpore Version   | 1.1.0                       | 1.5.0 | 1.5.0 |
+| Dataset             | COCO2017                    | COCO2017 | COCO2017 |
+| Training params     | epoch=140, batch_size=64    | epoch=140, batch_size=64 | epoch=140, batch_size=64 |
+| Optimizer           | Adam                        | Adam | Adam |
+| Loss function       | Mean Squared Error          | Mean Squared Error | Mean Squared Error |
+| Output              | heatmap                     | heatmap | heatmap |
+| Final Loss          |                             | 0.27 | 0.27 |
+| Training speed      | 1pc: 251.4 ms/step          | 184 ms/step | 285 ms/step |
+| Total training time |                             | 17h | 3.5h |
+| Accuracy            | AP: 0.704                   | AP: 0.7143 | AP: 0.7143 |
+
+# Description of Random State
+
+The random seed is set inside the "create_dataset" function in dataset.py.
+Initial network weights are set in model.py.
+
+# ModelZoo Homepage
+
+Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/research/cv/simple_baselines/README_CN.md b/research/cv/simple_baselines/README_CN.md
index 65a6b34babb4c0cc31dcf8e177a58db9f0466dc8..c57f927a054eca709e23d62d1f984aac5b0853fd 100644
--- a/research/cv/simple_baselines/README_CN.md
+++ b/research/cv/simple_baselines/README_CN.md
@@ -2,6 +2,8 @@
 
 <!-- TOC -->
 
+[View English](./README.md)
+
 - [simple_baselines描述](#simple_baselines描述)
 - [模型架构](#模型架构)
 - [数据集](#数据集)
@@ -17,7 +19,6 @@
         - [onnx推理](#onnx推理)
 - [模型描述](#模型描述)
     - [性能](#性能)
-        - [评估性能](#评估性能)
 - [随机情况说明](#随机情况说明)
 - [ModelZoo主页](#ModelZoo主页)
 
@@ -59,7 +60,7 @@ simple_baselines的总体网络架构如下：
 
 # 鐜瑕佹眰
 
-- 纭欢(Ascend)
+- 纭欢锛圓scend/GPU锛�
     - 鍑嗗Ascend澶勭悊鍣ㄦ惌寤虹‖浠剁幆澧冦€�
 - 妗嗘灦
     - [MindSpore](https://www.mindspore.cn/install/en)
@@ -81,7 +82,7 @@ simple_baselines的总体网络架构如下：
 
 - Ascend处理器环境运行
 
-```text
+```shell
 # 鍒嗗竷寮忚缁�
 鐢ㄦ硶锛歜ash run_distribute_train.sh RANK_TABLE
 
@@ -92,12 +93,25 @@ simple_baselines的总体网络架构如下：
 用法：bash run_eval.sh
 ```
 
+- GPU处理器环境运行
+
+```shell
+# 鍒嗗竷寮忚缁�
+鐢ㄦ硶锛歜ash scripts/run_distribute_train_gpu.sh DEVICE_NUM
+
+# 鍗曟満璁粌
+鐢ㄦ硶锛歜ash scripts/run_standalone_train_gpu.sh DEVICE_ID
+
+# 杩愯璇勪及绀轰緥
+鐢ㄦ硶锛歜ash scripts/run_eval_gpu.sh DEVICE_ID
+```
+
 # 脚本说明
 
 ## 脚本及样例代码
 
-```shell
-
+```text
+.
 └──simple_baselines
   ├── README.md
   ├── scripts
@@ -108,13 +122,13 @@ simple_baselines的总体网络架构如下：
   ├── src
     ├── utils
         ├── coco.py                        # COCO数据集评估结果
-        ├── inference.py                   # 热图关键点预测
         ├── nms.py                         # nms
         ├── transforms.py                  # 图像处理转换
     ├── config.py                          # 参数配置
     ├── dataset.py                         # 数据预处理
     ├── network_with_loss.py               # 损失函数定义
-    └── pose_resnet.py                     # 主干网络定义
+    ├── pose_resnet.py                     # 主干网络定义
+    └── predict.py                         # 热图关键点预测
   ├── eval.py                              # 评估网络
   ├── eval_onnx.py                         # onnx推理
   └── train.py                             # 训练网络
@@ -126,7 +140,7 @@ simple_baselines的总体网络架构如下：
 
 - 配置模型相关参数：
 
-```python
+```text
 config.MODEL.INIT_WEIGHTS = True                                 # 初始化模型权重
 config.MODEL.PRETRAINED = 'resnet50.ckpt'                        # 预训练模型
 config.MODEL.NUM_JOINTS = 17                                     # 关键点数量
@@ -135,7 +149,7 @@ config.MODEL.IMAGE_SIZE = [192, 256]                             # 图像大小
 
 - 配置网络相关参数：
 
-```python
+```text
 config.NETWORK.NUM_LAYERS = 50                                   # resnet主干网络层数
 config.NETWORK.DECONV_WITH_BIAS = False                          # 网络反卷积偏差
 config.NETWORK.NUM_DECONV_LAYERS = 3                             # 网络反卷积层数
@@ -147,7 +161,7 @@ config.NETWORK.HEATMAP_SIZE = [48, 64]                           # 热图尺寸
 
 - 閰嶇疆璁粌鐩稿叧鍙傛暟锛�
 
-```python
+```text
 config.TRAIN.SHUFFLE = True                                      # 训练数据随机排序
 config.TRAIN.BATCH_SIZE = 64                                     # 训练批次大小
 config.TRAIN.BEGIN_EPOCH = 0                                     # 开始训练轮次
@@ -162,7 +176,7 @@ config.TRAIN.LR_FACTOR = 0.1                                     # 学习率降低因子
 
 - 配置验证相关参数：
 
-```python
+```text
 config.TEST.BATCH_SIZE = 32                                      # 验证批次大小
 config.TEST.FLIP_TEST = True                                     # 翻转验证
 config.TEST.USE_GT_BBOX = False                                  # 使用标注框
@@ -170,7 +184,7 @@ config.TEST.USE_GT_BBOX = False                                  # 使用标注框
 
 - 配置nms相关参数：
 
-```python
+```text
 config.TEST.OKS_THRE = 0.9                                       # OKS阈值
 config.TEST.IN_VIS_THRE = 0.2                                    # 可视化阈值
 config.TEST.BBOX_THRE = 1.0                                      # 候选框阈值
@@ -182,9 +196,9 @@ config.TEST.NMS_THRE = 1.0                                       # nms阈值
 
 ### 用法
 
-#### Ascend处理器环境运行
+- Ascend处理器环境运行
 
-```text
+```shell
 # 鍒嗗竷寮忚缁�
 鐢ㄦ硶锛歜ash run_distribute_train.sh RANK_TABLE
 
@@ -195,6 +209,19 @@ config.TEST.NMS_THRE = 1.0                                       # nms阈值
 用法：bash run_eval.sh
 ```
 
+- GPU处理器环境运行
+
+```shell
+# 鍒嗗竷寮忚缁�
+bash scripts/run_distribute_train_gpu.sh DEVICE_NUM
+
+# 鍗曟満璁粌
+bash scripts/run_standalone_train_gpu.sh DEVICE_ID
+
+# 杩愯璇勪及绀轰緥
+bash scripts/run_eval_gpu.sh DEVICE_ID
+```
+
 ### 结果
 
 - 使用COCO2017数据集训练simple_baselines
@@ -216,21 +243,27 @@ epoch:140 step:2340, loss is 0.0003393
 
 ### 用法
 
-#### Ascend处理器环境运行
-
 鍙€氳繃鏀瑰彉config.py鏂囦欢涓殑"config.TEST.MODEL_FILE"鏂囦欢杩涜鐩稿簲妯″瀷鎺ㄧ悊銆�
 
-```bash
+- Ascend处理器环境运行
+
+```shell
 # 评估
 bash eval.sh
 ```
 
+- GPU处理器环境运行
+
+```shell
+# 评估
+bash scripts/run_eval_gpu.sh DEVICE_ID
+```
+
 ### 结果
 
 使用COCO2017数据集文件夹中val2017进行评估simple_baselines,如下所示：
 
 ```text
-coco eval results saved to /cache/train_output/multi_train_poseresnet_v5_2-140_2340/keypoints_results.pkl
 AP: 0.704
 ```
 
@@ -270,7 +303,7 @@ AP:0.72296
 
 ## 推理过程
 
-### [导出mindir]
+### 导出mindir
 
 - 鏈湴瀵煎嚭
 
@@ -280,7 +313,7 @@ python export.py
 
 - 在ModelArts上导出（如果想在modelarts中运行，请查看【modelarts】官方文档（https://support.huaweicloud.com/modelarts/），如下启动即可：
 
-```python
+```text
 # (1) Upload the code folder to S3 bucket.
 # (2) Click to "create training task" on the website UI interface.
 # (3) Set the code directory to "/{path}/simple_pose" on the website UI interface.
@@ -317,7 +350,7 @@ bash run_infer_310.sh [MINDIR_PATH] [NEED_PREPROCESS] [DEVICE_ID]
 
 推理结果保存在当前路径中，您可以在 acc.log 文件中找到这样的结果。
 
-```bash
+```text
 AP: 0.7139169694686592
 ```
 
@@ -325,24 +358,21 @@ AP: 0.7139169694686592
 
 ## 性能
 
-### 评估性能
-
-#### COCO2017上性能参数
-
-| Parameters          | Ascend 910                   |
-| ------------------- | --------------------------- |
-| 模型版本       | simple_baselines               |
-| 资源            | Ascend 910；CPU：2.60GHz，192核；内存：755G                  |
-| 上传日期       | 2021-03-29 |
-| MindSpore版本   | 1.1.0                       |
-| 数据集             | COCO2017                    |
-| 训练参数 | epoch=140, batch_size=64   |
-| 优化器           | Adam                        |
-| 损失函数       | Mean Squared Error          |
-| 输出             | heatmap                     |
-| 输出             | heatmap                     |
-| 速度               | 1pc: 251.4 ms/step        |
-| 训练性能   | AP: 0.704          |
+| Parameters      | Ascend 910                  | GPU 1p           | GPU 8p |
+| --------------- | --------------------------- | ---------------- | ------------ |
+| 模型版本        | simple_baselines            | simple_baselines | simple_baselines |
+| 资源            | Ascend 910；CPU：2.60GHz，192核；内存：755G | Ubuntu 18.04.6, 1x RTX3090, CPU 2.90GHz, 64 cores, RAM 252GB; MindSpore 1.5.0 | Ubuntu 18.04.6, 8x RTX3090, CPU 2.90GHz, 64 cores, RAM 252GB; MindSpore 1.5.0 |
+| 上传日期        | 2021-03-29                  | 2021-12-29       | 2021-12-29 |
+| MindSpore版本   | 1.1.0                       | 1.5.0            | 1.5.0 |
+| 数据集          | COCO2017                    | COCO2017         | COCO2017 |
+| 训练参数        | epoch=140, batch_size=64    | epoch=140, batch_size=64 | epoch=140, batch_size=64 |
+| 优化器          | Adam                        | Adam             | Adam |
+| 损失函数        | Mean Squared Error          | Mean Squared Error | Mean Squared Error |
+| 输出            | heatmap                     | heatmap          | heatmap |
+| 最终损失        |                             | 0.27             | 0.27 |
+| 速度            | 1pc: 251.4 ms/step          | 184 ms/step      | 285 ms/step |
+| 训练总时间      |                             | 17h              | 3.5h |
+| 精确度          | AP: 0.704                   | AP: 0.7143       | AP: 0.7143 |
 
 # 随机情况说明
 
@@ -350,4 +380,4 @@ dataset.py中设置了“create_dataset”函数内的种子，同时在model.py
 
 # ModelZoo主页
 
- 请浏览官网[主页](https://gitee.com/mindspore/models)。
+请浏览官网[主页](https://gitee.com/mindspore/models)。
diff --git a/research/cv/simple_baselines/eval.py b/research/cv/simple_baselines/eval.py
index dd26c8d2b436b2bbc991c0fc33dd629ccefc9a0f..f5696d7ab6f69b8646fa56952b0c3240926ea403 100644
--- a/research/cv/simple_baselines/eval.py
+++ b/research/cv/simple_baselines/eval.py
@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-This file evaluates the model used.
-'''
+""" This file evaluates the model """
 from __future__ import division
 
 import argparse
@@ -32,25 +30,28 @@ from src.dataset import flip_pairs
 from src.dataset import keypoint_dataset
 from src.utils.coco import evaluate
 from src.utils.transforms import flip_back
-from src.utils.inference import get_final_preds
+from src.predict import get_final_preds
 
 if config.MODELARTS.IS_MODEL_ARTS:
     import moxing as mox
 
 set_seed(config.GENERAL.EVAL_SEED)
-device_id = int(os.getenv('DEVICE_ID'))
+
 
 def parse_args():
+    """command line arguments parsing"""
     parser = argparse.ArgumentParser(description='Evaluate')
     parser.add_argument('--data_url', required=False, default=None, help='Location of data.')
     parser.add_argument('--train_url', required=False, default=None, help='Location of evaluate outputs.')
+    parser.add_argument("--device_target", type=str, choices=["Ascend", "GPU", "CPU"], default="Ascend",
+                        help="device target")
+    parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
     args = parser.parse_args()
     return args
 
+
 def validate(cfg, val_dataset, model, output_dir, ann_path):
-    '''
-    validate
-    '''
+    """evaluate model"""
     model.set_train(False)
     num_samples = val_dataset.get_dataset_size() * cfg.TEST.BATCH_SIZE
     all_preds = np.zeros((num_samples, cfg.MODEL.NUM_JOINTS, 3),
@@ -98,22 +99,21 @@ def validate(cfg, val_dataset, model, output_dir, ann_path):
 
 
 def main():
-    context.set_context(mode=context.GRAPH_MODE,
-                        device_target="Ascend", save_graphs=False, device_id=device_id)
-
+    """main"""
     args = parse_args()
+    context.set_context(mode=context.GRAPH_MODE,
+                        device_target=args.device_target, save_graphs=False, device_id=args.device_id)
 
     if config.MODELARTS.IS_MODEL_ARTS:
         mox.file.copy_parallel(src_url=args.data_url, dst_url=config.MODELARTS.CACHE_INPUT)
 
     model = GetPoseResNet(config)
 
-    ckpt_name = ''
     if config.MODELARTS.IS_MODEL_ARTS:
         ckpt_name = config.MODELARTS.CACHE_INPUT
+        ckpt_name = os.path.join(ckpt_name, config.TEST.MODEL_FILE)
     else:
-        ckpt_name = config.DATASET.ROOT
-    ckpt_name = ckpt_name + config.TEST.MODEL_FILE
+        ckpt_name = config.TEST.MODEL_FILE
     print('loading model ckpt from {}'.format(ckpt_name))
     load_param_into_net(model, load_checkpoint(ckpt_name))
 
@@ -126,20 +126,19 @@ def main():
     ckpt_name = ckpt_name.split('/')
     ckpt_name = ckpt_name[len(ckpt_name) - 1]
     ckpt_name = ckpt_name.split('.')[0]
-    output_dir = ''
-    ann_path = ''
     if config.MODELARTS.IS_MODEL_ARTS:
         output_dir = config.MODELARTS.CACHE_OUTPUT
         ann_path = config.MODELARTS.CACHE_INPUT
     else:
         output_dir = config.TEST.OUTPUT_DIR
         ann_path = config.DATASET.ROOT
-    output_dir = output_dir + ckpt_name
-    ann_path = ann_path + config.DATASET.TEST_JSON
+    output_dir = os.path.join(output_dir, ckpt_name)
+    ann_path = os.path.join(ann_path, config.DATASET.TEST_JSON)
     validate(config, valid_dataset, model, output_dir, ann_path)
 
     if config.MODELARTS.IS_MODEL_ARTS:
         mox.file.copy_parallel(src_url=config.MODELARTS.CACHE_OUTPUT, dst_url=args.train_url)
 
+
 if __name__ == '__main__':
     main()
diff --git a/research/cv/simple_baselines/export.py b/research/cv/simple_baselines/export.py
index 53ffef59459e02f0280b8c9c56ca742f72be34ab..b7871c24a518a7d1fd9e3ec861fd2ce45a178e20 100644
--- a/research/cv/simple_baselines/export.py
+++ b/research/cv/simple_baselines/export.py
@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-"""
-export simple_baseline to mindir or air
-"""
+""" Export simple_baseline to mindir or air """
 import argparse
 import numpy as np
 from mindspore import context, Tensor, export
diff --git a/research/cv/simple_baselines/postprocess.py b/research/cv/simple_baselines/postprocess.py
index d2e10786ffccc366000143ed596d6d55af21c122..f9cb3ef7d52035cf7b9713e1382604edbea5aba7 100644
--- a/research/cv/simple_baselines/postprocess.py
+++ b/research/cv/simple_baselines/postprocess.py
@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-"""
-postprocess.
-"""
+""" postprocess script """
 import os
 import numpy as np
 
@@ -24,8 +22,9 @@ from src.predict import get_final_preds
 from src.dataset import flip_pairs
 from src.config import config
 
+
 def get_acc():
-    '''calculate accuracy'''
+    """ calculate accuracy """
     ckpt_file = config.TEST.MODEL_FILE
     output_dir = ckpt_file.split('.')[0]
     if config.enable_modelarts:
@@ -86,5 +85,6 @@ def get_acc():
         cfg, all_preds[:idx], output_dir, all_boxes[:idx], image_id, None)
     print("AP:", perf_indicator)
 
+
 if __name__ == '__main__':
     get_acc()
diff --git a/research/cv/simple_baselines/preprocess.py b/research/cv/simple_baselines/preprocess.py
index dab4bfd643dc2417d585c9fcd32bf648a8bdde82..3b734506d66169bee0602acc6572a9ae0c3eb60c 100644
--- a/research/cv/simple_baselines/preprocess.py
+++ b/research/cv/simple_baselines/preprocess.py
@@ -12,17 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-"""
-preprocess.
-"""
+""" preprocess script """
 import os
 import numpy as np
 
 from src.dataset import keypoint_dataset
 from src.config import config
 
+
 def get_bin():
-    ''' get bin files'''
+    """ get bin files"""
     valid_dataset, _ = keypoint_dataset(
         config,
         bbox_file=config.TEST.COCO_BBOX_FILE,
@@ -62,5 +61,6 @@ def get_bin():
         np.save(os.path.join(id_path, file_name), item['id'])
     print("=" * 20, "export bin files finished", "=" * 20)
 
+
 if __name__ == '__main__':
     get_bin()
diff --git a/research/cv/simple_baselines/scripts/run_distribute_train_gpu.sh b/research/cv/simple_baselines/scripts/run_distribute_train_gpu.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2fc25938f8a197fbf0bc86cd877e10abcb6b007b
--- /dev/null
+++ b/research/cv/simple_baselines/scripts/run_distribute_train_gpu.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# -ne 1 ]; then
+    echo "Please run the script as: "
+    echo "bash scripts/run_distribute_train_gpu.sh [RANK_SIZE]"
+    echo "For example: bash scripts/run_distribute_train_gpu.sh 8"
+    echo "It is better to use the absolute path."
+    echo "========================================================================"
+    exit 1
+fi
+
+export RANK_SIZE=$1
+
+rm -rf ./train_parallel
+mkdir ./train_parallel
+cp ./*.py ./train_parallel
+cp -r ./src ./train_parallel
+cd ./train_parallel || exit
+
+echo "start training on GPU $RANK_SIZE devices"
+env > env.log
+
+mpirun -n $RANK_SIZE --output-filename log_output --merge-stderr-to-stdout \
+python train.py \
+    --device_target="GPU" \
+    --is_model_arts=False \
+    --run_distribute=True > train.log 2>&1 &
+cd ..
diff --git a/research/cv/simple_baselines/scripts/run_eval_gpu.sh b/research/cv/simple_baselines/scripts/run_eval_gpu.sh
new file mode 100644
index 0000000000000000000000000000000000000000..410c9649d0ee8a3e5cb1710d3cc4cc7f0d49e9ca
--- /dev/null
+++ b/research/cv/simple_baselines/scripts/run_eval_gpu.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# -ne 1 ]; then
+    echo "Please run the script as: "
+    echo "bash scripts/run_eval_gpu.sh [DEVICE_ID]"
+    echo "For example: bash scripts/run_eval_gpu.sh 0"
+    echo "It is better to use the absolute path."
+    echo "========================================================================"
+    exit 1
+fi
+
+export DEVICE_NUM=1
+export DEVICE_ID=$1
+
+rm -rf ./eval
+mkdir ./eval
+cp ./*.py ./eval
+cp -r ./src ./eval
+cd ./eval || exit
+
+echo "start evaluation on GPU device $DEVICE_ID"
+env > env.log
+
+python eval.py --device_target="GPU" --device_id=$DEVICE_ID > eval.log 2>&1 &
diff --git a/research/cv/simple_baselines/scripts/run_standalone_train_gpu.sh b/research/cv/simple_baselines/scripts/run_standalone_train_gpu.sh
new file mode 100644
index 0000000000000000000000000000000000000000..799313ad7abbbfbea8c911cf420f2517b0cba336
--- /dev/null
+++ b/research/cv/simple_baselines/scripts/run_standalone_train_gpu.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# -ne 1 ]; then
+    echo "Please run the script as: "
+    echo "bash scripts/run_standalone_train_gpu.sh [DEVICE_ID]"
+    echo "For example: bash scripts/run_standalone_train_gpu.sh 0"
+    echo "It is better to use the absolute path."
+    echo "========================================================================"
+    exit 1
+fi
+
+export RANK_SIZE=1
+export DEVICE_ID=$1
+
+rm -rf train$1
+mkdir ./train$1
+cp ./*.py ./train$1
+cp -r ./src ./train$1
+cd ./train$1 || exit
+
+echo "start training on GPU device $DEVICE_ID"
+env > env.log
+
+python train.py \
+    --device_target="GPU" \
+    --device_id=$DEVICE_ID \
+    --is_model_arts=False \
+    --run_distribute=False > train.log 2>&1 &
+
+cd ..
diff --git a/research/cv/simple_baselines/src/config.py b/research/cv/simple_baselines/src/config.py
index 578b40c04fd0d2757cdc489ceb9ffa73fa1f7cef..1b28618c505cd947a10d9a463f5f483423701148 100644
--- a/research/cv/simple_baselines/src/config.py
+++ b/research/cv/simple_baselines/src/config.py
@@ -12,14 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-config
-'''
+"""Config parameters for simple baselines."""
 from easydict import EasyDict as edict
 
 config = edict()
 
-#general
+# general
 config.GENERAL = edict()
 config.GENERAL.VERSION = 'commit'
 config.GENERAL.TRAIN_SEED = 1
@@ -27,7 +25,7 @@ config.GENERAL.EVAL_SEED = 1
 config.GENERAL.DATASET_SEED = 1
 config.GENERAL.RUN_DISTRIBUTE = True
 
-#model arts
+# model arts
 config.MODELARTS = edict()
 config.MODELARTS.IS_MODEL_ARTS = False
 config.MODELARTS.CACHE_INPUT = '/cache/data_tzh/'
@@ -50,7 +48,6 @@ config.NETWORK.NUM_DECONV_FILTERS = [256, 256, 256]
 config.NETWORK.NUM_DECONV_KERNELS = [4, 4, 4]
 config.NETWORK.FINAL_CONV_KERNEL = 1
 config.NETWORK.REVERSE = True
-
 config.NETWORK.TARGET_TYPE = 'gaussian'
 config.NETWORK.HEATMAP_SIZE = [48, 64]
 config.NETWORK.SIGMA = 2
@@ -66,7 +63,7 @@ config.DATASET.ROOT = '/home/dataset/coco/'
 config.DATASET.TRAIN_SET = 'train2017'
 config.DATASET.TRAIN_JSON = 'annotations/person_keypoints_train2017.json'
 config.DATASET.TEST_SET = 'val2017'
-config.DATASET.TEST_JSON = 'annotations/COCO_val2017_detections_AP_H_56_person.json'
+config.DATASET.TEST_JSON = 'annotations/person_keypoints_val2017.json'
 
 # training data augmentation
 config.DATASET.FLIP = True
@@ -76,7 +73,7 @@ config.DATASET.ROT_FACTOR = 40
 # train
 config.TRAIN = edict()
 config.TRAIN.SHUFFLE = True
-config.TRAIN.BATCH_SIZE = 64
+config.TRAIN.BATCH_SIZE = 64  # 32 in paper
 config.TRAIN.BEGIN_EPOCH = 0
 config.TRAIN.END_EPOCH = 140
 config.TRAIN.LR = 0.001
@@ -84,7 +81,9 @@ config.TRAIN.LR_FACTOR = 0.1
 config.TRAIN.LR_STEP = [90, 120]
 config.TRAIN.NUM_PARALLEL_WORKERS = 8
 config.TRAIN.SAVE_CKPT = True
-config.TRAIN.CKPT_PATH = "/home/dataset/coco/"
+config.TRAIN.CKPT_PATH = "/home/model/"
+config.TRAIN.SAVE_CKPT_EPOCH = 3
+config.TRAIN.KEEP_CKPT_MAX = 10
 
 # valid
 config.TEST = edict()
@@ -93,10 +92,10 @@ config.TEST.FLIP_TEST = True
 config.TEST.POST_PROCESS = True
 config.TEST.SHIFT_HEATMAP = True
 config.TEST.USE_GT_BBOX = False
-config.TEST.NUM_PARALLEL_WORKERS = 2
-config.TEST.MODEL_FILE = '/home/dataset/coco/multi_train_poseresnet_commit_0-140_292.ckpt'
+config.TEST.NUM_PARALLEL_WORKERS = 8
+config.TEST.MODEL_FILE = '/home/model/multi_train_poseresnet_commit_5-140_292.ckpt'
 config.TEST.COCO_BBOX_FILE = '/home/dataset/coco/annotations/COCO_val2017_detections_AP_H_56_person.json'
-config.TEST.OUTPUT_DIR = 'results/'
+config.TEST.OUTPUT_DIR = '/home/results/'
 
 # nms
 config.TEST.OKS_THRE = 0.9
@@ -105,7 +104,7 @@ config.TEST.BBOX_THRE = 1.0
 config.TEST.IMAGE_THRE = 0.0
 config.TEST.NMS_THRE = 1.0
 
-#310 infer-related
+# 310 infer-related
 config.INFER = edict()
 config.INFER.PRE_RESULT_PATH = './preprocess_Result'
 config.INFER.POST_RESULT_PATH = './result_Files'
diff --git a/research/cv/simple_baselines/src/dataset.py b/research/cv/simple_baselines/src/dataset.py
index af06907cb88e89bc378c0c32b8e76e56fff3d352..026add34b0e72e093f0547a9650379bec2a7cef4 100644
--- a/research/cv/simple_baselines/src/dataset.py
+++ b/research/cv/simple_baselines/src/dataset.py
@@ -12,16 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-dataset processing
-'''
+""" dataset processing """
+
 from __future__ import division
 
 import json
 import os
 from copy import deepcopy
 import random
-
+import multiprocessing as mp
 import numpy as np
 import cv2
 
@@ -29,14 +28,15 @@ import mindspore.dataset as ds
 import mindspore.dataset.vision as C
 from src.utils.transforms import fliplr_joints, get_affine_transform, affine_transform
 
-ds.config.set_seed(1) # Set Random Seed
+ds.config.set_seed(1)  # Set Random Seed
 flip_pairs = [[1, 2], [3, 4], [5, 6], [7, 8],
               [9, 10], [11, 12], [13, 14], [15, 16]]
 
+
 class KeypointDatasetGenerator:
-    '''
-    About the specific operations of coco2017 data set processing
-    '''
+    """
+    About the specific operations of coco2017 dataset processing
+    """
     def __init__(self, cfg, is_train=False):
         self.image_thre = cfg.TEST.IMAGE_THRE
         self.image_size = np.array(cfg.MODEL.IMAGE_SIZE, dtype=np.int32)
@@ -56,9 +56,7 @@ class KeypointDatasetGenerator:
         self.num_joints = 17
 
     def load_gt_dataset(self, image_path, ann_file):
-        '''
-        load_gt_dataset
-        '''
+        """ load_gt_dataset """
         self.db = []
 
         with open(ann_file, "rb") as f:
@@ -134,11 +132,8 @@ class KeypointDatasetGenerator:
                 })
 
     def load_detect_dataset(self, image_path, ann_file, bbox_file):
-        '''
-        load_detect_dataset
-        '''
+        """ load detection dataset """
         self.db = []
-        all_boxes = None
         with open(bbox_file, 'r') as f:
             all_boxes = json.load(f)
 
@@ -245,9 +240,7 @@ class KeypointDatasetGenerator:
         return image, target, target_weight, s, c, score, db_rec['id']
 
     def generate_heatmap(self, joints, joints_vis):
-        '''
-        generate_heatmap
-        '''
+        """ generate heatmap"""
         target_weight = np.ones((self.num_joints, 1), dtype=np.float32)
         target_weight[:, 0] = joints_vis[:, 0]
 
@@ -300,6 +293,7 @@ class KeypointDatasetGenerator:
     def __len__(self):
         return len(self.db)
 
+
 def keypoint_dataset(config,
                      ann_file=None,
                      image_path=None,
@@ -307,7 +301,7 @@ def keypoint_dataset(config,
                      rank=0,
                      group_size=1,
                      train_mode=True,
-                     num_parallel_workers=8,
+                     num_parallel_workers=mp.cpu_count(),
                      transform=None,
                      shuffle=None):
     """
@@ -315,9 +309,8 @@ def keypoint_dataset(config,
 
     Args:
         rank (int): The shard ID within num_shards (default=None).
-        group_size (int): Number of shards that the dataset should be divided
-            into (default=None).
-         mode (str): "train" or others. Default: " train".
+        group_size (int): Number of shards that the dataset should be divided into (default=None).
+        train_mode (bool): Whether the dataset is used for training. Default: True.
         num_parallel_workers (int): Number of workers to read the data. Default: None.
     """
     # config
diff --git a/research/cv/simple_baselines/src/network_with_loss.py b/research/cv/simple_baselines/src/network_with_loss.py
index d671e68ea30f67251d062ba3795200a7aba6c27b..336d97834546d828f65f6eaf06449dfa9cb7183a 100644
--- a/research/cv/simple_baselines/src/network_with_loss.py
+++ b/research/cv/simple_baselines/src/network_with_loss.py
@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-network_with_loss
-'''
+""" network_with_loss """
 from __future__ import division
 
 import mindspore.nn as nn
@@ -23,10 +21,9 @@ from mindspore.ops import functional as F
 from mindspore.nn.loss.loss import LossBase
 from mindspore.common import dtype as mstype
 
+
 class JointsMSELoss(LossBase):
-    '''
-    JointsMSELoss
-    '''
+    """JointsMSELoss"""
     def __init__(self, use_target_weight):
         super(JointsMSELoss, self).__init__()
         self.criterion = nn.MSELoss(reduction='mean')
@@ -37,9 +34,7 @@ class JointsMSELoss(LossBase):
         self.mul = P.Mul()
 
     def construct(self, output, target, target_weight):
-        '''
-        construct
-        '''
+        """ construct """
         total_shape = self.shape(output)
         batch_size = total_shape[0]
         num_joints = total_shape[1]
@@ -64,6 +59,7 @@ class JointsMSELoss(LossBase):
 
         return loss / num_joints
 
+
 class PoseResNetWithLoss(nn.Cell):
     """
     Pack the model network and loss function together to calculate the loss value.
diff --git a/research/cv/simple_baselines/src/pose_resnet.py b/research/cv/simple_baselines/src/pose_resnet.py
index 47b8f6f2535c2dd24b0831ddacae365d9b01b3e1..69c473644608f48f348e70725c0979ebefc508c5 100644
--- a/research/cv/simple_baselines/src/pose_resnet.py
+++ b/research/cv/simple_baselines/src/pose_resnet.py
@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-simple_baselines network
-'''
+""" simple_baselines network """
 from __future__ import division
 import os
 import mindspore.nn as nn
@@ -24,10 +22,9 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net
 
 BN_MOMENTUM = 0.1
 
+
 class MPReverse(nn.Cell):
-    '''
-    MPReverse
-    '''
+    """MPReverse"""
     def __init__(self, kernel_size=1, stride=1, pad_mode="valid"):
         super(MPReverse, self).__init__()
         self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, pad_mode=pad_mode)
@@ -39,10 +36,9 @@ class MPReverse(nn.Cell):
         x = self.reverse(x)
         return x
 
+
 class Bottleneck(nn.Cell):
-    '''
-    model part of network
-    '''
+    """model part of network"""
     expansion = 4
 
     def __init__(self, inplanes, planes, stride=1, downsample=None):
@@ -59,9 +55,7 @@ class Bottleneck(nn.Cell):
         self.stride = stride
 
     def construct(self, x):
-        '''
-        construct
-        '''
+        """construct"""
         residual = x
 
         out = self.conv1(x)
@@ -85,10 +79,7 @@ class Bottleneck(nn.Cell):
 
 
 class PoseResNet(nn.Cell):
-    '''
-    PoseResNet
-    '''
-
+    """pose-resnet"""
     def __init__(self, block, layers, cfg):
         self.inplanes = 64
         self.deconv_with_bias = cfg.NETWORK.DECONV_WITH_BIAS
@@ -122,9 +113,7 @@ class PoseResNet(nn.Cell):
         )
 
     def _make_layer(self, block, planes, blocks, stride=1):
-        '''
-        _make_layer
-        '''
+        """make layer"""
         downsample = None
         if stride != 1 or self.inplanes != planes * block.expansion:
             downsample = nn.SequentialCell([nn.Conv2d(self.inplanes, planes * block.expansion,
@@ -134,16 +123,13 @@ class PoseResNet(nn.Cell):
         layers = []
         layers.append(block(self.inplanes, planes, stride, downsample))
         self.inplanes = planes * block.expansion
-        for i in range(1, blocks):
+        for _ in range(1, blocks):
             layers.append(block(self.inplanes, planes))
-            print(i)
 
         return nn.SequentialCell(layers)
 
     def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
-        '''
-        _make_deconv_layer
-        '''
+        """make deconvolutional layer"""
         assert num_layers == len(num_filters), \
             'ERROR: num_deconv_layers is different len(num_deconv_filters)'
         assert num_layers == len(num_kernels), \
@@ -171,9 +157,6 @@ class PoseResNet(nn.Cell):
         return nn.SequentialCell(layers)
 
     def construct(self, x):
-        '''
-        construct
-        '''
         x = self.conv1(x)
         x = self.bn1(x)
         x = self.relu(x)
@@ -186,6 +169,7 @@ class PoseResNet(nn.Cell):
 
         x = self.deconv_layers(x)
         x = self.final_layer(x)
+
         return x
 
     def init_weights(self, pretrained=''):
@@ -205,18 +189,16 @@ resnet_spec = {50: (Bottleneck, [3, 4, 6, 3]),
 
 
 def GetPoseResNet(cfg):
-    '''
-    GetPoseResNet
-    '''
+    """get pose-resnet"""
     num_layers = cfg.NETWORK.NUM_LAYERS
     block_class, layers = resnet_spec[num_layers]
     network = PoseResNet(block_class, layers, cfg)
 
     if cfg.MODEL.IS_TRAINED and cfg.MODEL.INIT_WEIGHTS:
-        pretrained = ''
         if cfg.MODELARTS.IS_MODEL_ARTS:
-            pretrained = cfg.MODELARTS.CACHE_INPUT + cfg.MODEL.PRETRAINED
+            pretrained = os.path.join(cfg.MODELARTS.CACHE_INPUT, cfg.MODEL.PRETRAINED)
         else:
-            pretrained = cfg.TRAIN.CKPT_PATH + cfg.MODEL.PRETRAINED
+            pretrained = os.path.join(cfg.TRAIN.CKPT_PATH, cfg.MODEL.PRETRAINED)
         network.init_weights(pretrained)
+
     return network
diff --git a/research/cv/simple_baselines/src/predict.py b/research/cv/simple_baselines/src/predict.py
index dc4f2be6935c72fcc597a30a18e98a84991f7583..4c96e33062f21c2492f24b5ae1bbc86a8efdeaa4 100644
--- a/research/cv/simple_baselines/src/predict.py
+++ b/research/cv/simple_baselines/src/predict.py
@@ -12,21 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-prediction picture
-'''
+""" prediction picture """
 import math
 import numpy as np
 
 from src.utils.transforms import transform_preds
 
+
 def get_max_preds(batch_heatmaps):
-    '''
+    """
     get predictions from score maps
     heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
-    '''
-    assert isinstance(batch_heatmaps, np.ndarray), \
-        'batch_heatmaps should be numpy.ndarray'
+    """
+    assert isinstance(batch_heatmaps, np.ndarray), 'batch_heatmaps should be numpy.ndarray'
     assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
 
     batch_size = batch_heatmaps.shape[0]
@@ -50,10 +48,11 @@ def get_max_preds(batch_heatmaps):
     preds *= pred_mask
     return preds, maxvals
 
+
 def get_final_preds(config, batch_heatmaps, center, scale):
-    '''
+    """
     get final predictions from score maps
-    '''
+    """
     coords, maxvals = get_max_preds(batch_heatmaps)
     heatmap_height = batch_heatmaps.shape[2]
     heatmap_width = batch_heatmaps.shape[3]
diff --git a/research/cv/simple_baselines/src/utils/coco.py b/research/cv/simple_baselines/src/utils/coco.py
index 8587e6209a119c605100af423a317386013b2a74..470dedfcf1d79d17e2ae7c9977e0f29a6fe51a74 100644
--- a/research/cv/simple_baselines/src/utils/coco.py
+++ b/research/cv/simple_baselines/src/utils/coco.py
@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-coco
-'''
+"""coco"""
 from __future__ import division
 
 import json
@@ -33,10 +31,8 @@ except ImportError:
 
 from src.utils.nms import oks_nms
 
+
 def _write_coco_keypoint_results(img_kpts, num_joints, res_file):
-    '''
-    _write_coco_keypoint_results
-    '''
     results = []
 
     for img, items in img_kpts.items():
@@ -62,9 +58,6 @@ def _write_coco_keypoint_results(img_kpts, num_joints, res_file):
 
 
 def _do_python_keypoint_eval(res_file, res_folder, ann_path):
-    '''
-    _do_python_keypoint_eval
-    '''
     coco = COCO(ann_path)
     coco_dt = coco.loadRes(res_file)
     coco_eval = COCOeval(coco, coco_dt, 'keypoints')
@@ -87,10 +80,8 @@ def _do_python_keypoint_eval(res_file, res_folder, ann_path):
 
     return info_str
 
+
 def evaluate(cfg, preds, output_dir, all_boxes, img_id, ann_path):
-    '''
-    evaluate
-    '''
     if not os.path.exists(output_dir):
         os.makedirs(output_dir)
     res_file = os.path.join(output_dir, 'keypoints_results.json')
diff --git a/research/cv/simple_baselines/src/utils/nms.py b/research/cv/simple_baselines/src/utils/nms.py
index b9af36428508b70997e212ef6ab0b4bf50300b2e..77483c79a0ad70d90dc6cf61caadc3cc09ace497 100644
--- a/research/cv/simple_baselines/src/utils/nms.py
+++ b/research/cv/simple_baselines/src/utils/nms.py
@@ -13,16 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-nms operation
-'''
+""" nms operation """
 from __future__ import division
 import numpy as np
 
+
 def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
-    '''
-    oks_iou
-    '''
     if not isinstance(sigmas, np.ndarray):
         sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72,
                            .62, .62, 1.07, 1.07, .87, .87, .89, .89]) / 10.0
@@ -44,6 +40,7 @@ def oks_iou(g, d, a_g, a_d, sigmas=None, in_vis_thre=None):
         ious[n_d] = np.sum(np.exp(-e)) / e.shape[0] if e.shape[0] != 0 else 0.0
     return ious
 
+
 def oks_nms(kpts_db, thresh, sigmas=None, in_vis_thre=None):
     """
     greedily select boxes with high confidence and overlap with current maximum <= thresh
diff --git a/research/cv/simple_baselines/src/utils/transforms.py b/research/cv/simple_baselines/src/utils/transforms.py
index 00c8d41dd9ffe400988eb9949eedf75764738879..6b853baeac579732f8dc3f7b296ea9a04225deec 100644
--- a/research/cv/simple_baselines/src/utils/transforms.py
+++ b/research/cv/simple_baselines/src/utils/transforms.py
@@ -13,13 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-transforms
-'''
+""" transforms """
 from __future__ import division
 import numpy as np
 import cv2
 
+
 def flip_back(output_flipped, matched_parts):
     '''
 output_flipped: numpy.ndarray(batch_size, num_joints, height, width)
@@ -55,9 +54,7 @@ def fliplr_joints(joints, joints_vis, width, matched_parts):
 
 
 def transform_preds(coords, center, scale, output_size):
-    '''
-    transform_preds
-    '''
+    """transform_preds"""
     target_coords = np.zeros(coords.shape)
     trans = get_affine_transform(center, scale, 0, output_size, inv=1)
     for p in range(coords.shape[0]):
@@ -65,15 +62,8 @@ def transform_preds(coords, center, scale, output_size):
     return target_coords
 
 
-def get_affine_transform(center,
-                         scale,
-                         rot,
-                         output_size,
-                         shift=np.array([0, 0], dtype=np.float32),
-                         inv=0):
-    '''
-    get_affine_transform
-    '''
+def get_affine_transform(center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0):
+    """get_affine_transform"""
     if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
         print(scale)
         scale = np.array([scale, scale])
diff --git a/research/cv/simple_baselines/train.py b/research/cv/simple_baselines/train.py
index 79a5bf0ec177bb33982fca440f78e2bfdbbf8434..fe0532df42139f2fa1bab057ebe877f30d991c97 100644
--- a/research/cv/simple_baselines/train.py
+++ b/research/cv/simple_baselines/train.py
@@ -1,4 +1,4 @@
-# Copyright 2021 Huawei Technologies Co., Ltd
+# Copyright 2021-2022 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,18 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-'''
-train
-'''
+"""Train simple baselines."""
 from __future__ import division
 
 import os
 import ast
 import argparse
 import numpy as np
+
 from mindspore import context, Tensor
 from mindspore.context import ParallelMode
-from mindspore.communication.management import init
+from mindspore.communication.management import init, get_rank, get_group_size
 from mindspore.train import Model
 from mindspore.train.callback import TimeMonitor, LossMonitor, ModelCheckpoint, CheckpointConfig
 from mindspore.nn.optim import Adam
@@ -38,16 +37,10 @@ if config.MODELARTS.IS_MODEL_ARTS:
     import moxing as mox
 
 set_seed(config.GENERAL.TRAIN_SEED)
-def get_lr(begin_epoch,
-           total_epochs,
-           steps_per_epoch,
-           lr_init=0.1,
-           factor=0.1,
-           epoch_number_to_drop=(90, 120)
-           ):
-    '''
-    get_lr
-    '''
+
+
+def get_lr(begin_epoch, total_epochs, steps_per_epoch, lr_init=0.1, factor=0.1,
+           epoch_number_to_drop=(90, 120)):
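+    """Step-decay schedule: lr_init is multiplied by factor at each epoch in epoch_number_to_drop."""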
     lr_each_step = []
     total_steps = steps_per_epoch * total_epochs
     step_number_to_drop = [steps_per_epoch * x for x in epoch_number_to_drop]
@@ -60,11 +53,10 @@ def get_lr(begin_epoch,
     learning_rate = lr_each_step[current_step:]
     return learning_rate
 
+
 def parse_args():
-    '''
-    args
-    '''
-    parser = argparse.ArgumentParser(description="Simplebaseline training")
+    """command line arguments parsing"""
+    parser = argparse.ArgumentParser(description="Simple Baselines training")
     parser.add_argument('--data_url', required=False, default=None, help='Location of data.')
     parser.add_argument('--train_url', required=False, default=None, help='Location of training outputs.')
     parser.add_argument('--device_id', required=False, default=None, type=int, help='Location of training outputs.')
@@ -75,30 +67,38 @@ def parse_args():
     args = parser.parse_args()
     return args
 
+
 def main():
     print("loading parse...")
     args = parse_args()
-    device_id = args.device_id
     device_target = args.device_target
     config.GENERAL.RUN_DISTRIBUTE = args.run_distribute
     config.MODELARTS.IS_MODEL_ARTS = args.is_model_arts
-    if config.GENERAL.RUN_DISTRIBUTE or config.MODELARTS.IS_MODEL_ARTS:
-        device_id = int(os.getenv('DEVICE_ID'))
-    context.set_context(mode=context.GRAPH_MODE,
-                        device_target=device_target,
-                        save_graphs=False,
-                        device_id=device_id)
-
-    if config.GENERAL.RUN_DISTRIBUTE:
-        init()
-        rank = int(os.getenv('DEVICE_ID'))
-        device_num = int(os.getenv('RANK_SIZE'))
-        context.set_auto_parallel_context(device_num=device_num,
-                                          parallel_mode=ParallelMode.DATA_PARALLEL,
-                                          gradients_mean=True)
-    else:
+
+    context.set_context(mode=context.GRAPH_MODE, device_target=device_target, save_graphs=False)
+    if device_target == "Ascend":
         rank = 0
         device_num = 1
+        context.set_context(device_id=rank)
+        if config.GENERAL.RUN_DISTRIBUTE:
+            init()
+            rank = int(os.getenv('DEVICE_ID'))
+            device_num = int(os.getenv('RANK_SIZE'))
+            context.set_auto_parallel_context(device_num=device_num,
+                                              parallel_mode=ParallelMode.DATA_PARALLEL,
+                                              gradients_mean=True)
+    elif device_target == "GPU":
+        rank = int(os.getenv('DEVICE_ID', "0"))
+        device_num = int(os.getenv('RANK_SIZE', "1"))
+        if device_num > 1:
+            init()
+            rank = get_rank()
+            device_num = get_group_size()
+            context.set_auto_parallel_context(device_num=device_num,
+                                              parallel_mode=ParallelMode.DATA_PARALLEL,
+                                              gradients_mean=True)
+    else:
+        raise ValueError("Unsupported device, only GPU or Ascend is supported.")
 
     if config.MODELARTS.IS_MODEL_ARTS:
         mox.file.copy_parallel(src_url=args.data_url, dst_url=config.MODELARTS.CACHE_INPUT)
@@ -106,9 +106,7 @@ def main():
     dataset, _ = keypoint_dataset(config,
                                   rank=rank,
                                   group_size=device_num,
-                                  train_mode=True,
-                                  num_parallel_workers=config.TRAIN.NUM_PARALLEL_WORKERS,
-                                  )
+                                  train_mode=True)
     net = GetPoseResNet(config)
     loss = JointsMSELoss(config.LOSS.USE_TARGET_WEIGHT)
     net_with_loss = PoseResNetWithLoss(net, loss)
@@ -123,24 +121,25 @@ def main():
     time_cb = TimeMonitor(data_size=dataset_size)
     loss_cb = LossMonitor()
     cb = [time_cb, loss_cb]
+
     if config.TRAIN.SAVE_CKPT:
-        config_ck = CheckpointConfig(save_checkpoint_steps=dataset_size, keep_checkpoint_max=20)
-        prefix = ''
+        config_ck = CheckpointConfig(save_checkpoint_steps=dataset_size*config.TRAIN.SAVE_CKPT_EPOCH,
+                                     keep_checkpoint_max=config.TRAIN.KEEP_CKPT_MAX)
         if config.GENERAL.RUN_DISTRIBUTE:
-            prefix = 'multi_' + 'train_poseresnet_' + config.GENERAL.VERSION + '_' + os.getenv('DEVICE_ID')
+            prefix = 'multi_' + 'train_poseresnet_' + config.GENERAL.VERSION + '_' + str(rank)
         else:
             prefix = 'single_' + 'train_poseresnet_' + config.GENERAL.VERSION
 
-        directory = ''
         if config.MODELARTS.IS_MODEL_ARTS:
-            directory = config.MODELARTS.CACHE_OUTPUT + 'device_'+ os.getenv('DEVICE_ID')
+            directory = os.path.join(config.MODELARTS.CACHE_OUTPUT, 'device_' + str(rank))
         elif config.GENERAL.RUN_DISTRIBUTE:
-            directory = config.TRAIN.CKPT_PATH + 'device_'+ os.getenv('DEVICE_ID')
+            directory = os.path.join(config.TRAIN.CKPT_PATH, 'device_' + str(rank))
         else:
-            directory = config.TRAIN.CKPT_PATH + 'device'
+            directory = os.path.join(config.TRAIN.CKPT_PATH, 'device')
 
         ckpoint_cb = ModelCheckpoint(prefix=prefix, directory=directory, config=config_ck)
         cb.append(ckpoint_cb)
+
     model = Model(net_with_loss, loss_fn=None, optimizer=opt, amp_level="O2")
     epoch_size = config.TRAIN.END_EPOCH - config.TRAIN.BEGIN_EPOCH
     print("************ Start training now ************")
@@ -150,5 +149,6 @@ def main():
     if config.MODELARTS.IS_MODEL_ARTS:
         mox.file.copy_parallel(src_url=config.MODELARTS.CACHE_OUTPUT, dst_url=args.train_url)
 
+
 if __name__ == '__main__':
     main()