diff --git a/research/cv/ArtTrack/README_CN.md b/research/cv/ArtTrack/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..176aa2f1fd2cd4885edbb0fe345bf0691065c48f
--- /dev/null
+++ b/research/cv/ArtTrack/README_CN.md
@@ -0,0 +1,371 @@
+# Contents
+
+- [Contents](#contents)
+- [ArtTrack Description](#arttrack-description)
+- [Model Architecture](#model-architecture)
+- [Dataset](#dataset)
+- [Environment Requirements](#environment-requirements)
+- [Quick Start](#quick-start)
+    - [MPII Dataset](#mpii-dataset)
+        - [Single GPU](#single-gpu)
+        - [Multiple GPUs](#multiple-gpus)
+        - [Inference and Evaluation](#inference-and-evaluation)
+- [Script Description](#script-description)
+    - [Scripts and Sample Code](#scripts-and-sample-code)
+    - [Script Parameters](#script-parameters)
+    - [Training Process](#training-process)
+        - [MPII Dataset](#mpii-dataset-1)
+            - [Environment](#environment)
+            - [Training](#training)
+                - [Single GPU](#single-gpu-1)
+                - [Multiple GPUs](#multiple-gpus-1)
+            - [Inference and Evaluation](#inference-and-evaluation-1)
+    - [Evaluation Process](#evaluation-process)
+        - [MPII Dataset](#mpii-dataset-2)
+            - [Evaluation](#evaluation)
+            - [Results](#results)
+- [Model Description](#model-description)
+    - [Performance](#performance)
+        - [Evaluation Performance](#evaluation-performance)
+            - [ArtTrack on MPII](#arttrack-on-mpii)
+- [ModelZoo Homepage](#modelzoo-homepage)
+
+# ArtTrack Description
+
+ArtTrack is a convolutional-network-based multi-person tracking model proposed in 2017. Real-world scenes are highly complex and typically involve fast motion, large variations in appearance and clothing, and occlusion between people. To exploit the available image information, ArtTrack uses a ResNet to associate body joints with specific persons end to end, and merges these associations into a single framework that links joints (persons) across the temporal dimension. To make video inference efficient, a fast inference method based on local combinatorial optimization is used, building a sparse model that keeps the number of connections between variables to a minimum.
+
+[Paper](https://arxiv.org/abs/1612.01465): Insafutdinov E, Andriluka M, Pishchulin L, et al. ArtTrack: Articulated
+multi-person tracking in the wild[C]//Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition.
+2017: 6457-6465.
+
+# Model Architecture
+
+The overall network architecture of ArtTrack is described in the paper:
+[Link](https://arxiv.org/abs/1612.01465)
+
+# Dataset
+
+Dataset used:
+
+- [MPII](http://human-pose.mpi-inf.mpg.de/)
+    The MPII Human Pose dataset is a benchmark for evaluating articulated human pose estimation. It contains about 25,000 images including over 40,000 people with annotated body joints. The images were collected according to an established taxonomy of everyday human activities. Overall, the dataset covers 410 human activities, and each image is provided with an activity label.
+
+The extracted MPII dataset is organized as follows:
+
+```text
+mpii
+├── images
+│  ├── 099890470.jpg
+│  ├── 099894296.jpg
+│  ├── 099914957.jpg
+│  ├── 099920730.jpg
+│  ├── 099927653.jpg
+│  └── ....
+└── mpii_human_pose_v1_u12_1.mat
+```
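+
+The annotation index `mpii_human_pose_v1_u12_1.mat` follows the official MPII format (a top-level `RELEASE` struct with an `annolist` array); this repository converts it to JSON with `src/tool/preprocess/mat2json.py`. Below is a minimal sketch for inspecting the file with SciPy; the field access assumes the official layout and is not the repository's code:
+
+```python
+# Peek at the MPII annotation file; assumes the official RELEASE/annolist layout.
+from scipy.io import loadmat
+
+mat = loadmat("mpii/mpii_human_pose_v1_u12_1.mat", struct_as_record=False, squeeze_me=True)
+release = mat["RELEASE"]
+print(len(release.annolist))           # number of annotated images
+print(release.annolist[0].image.name)  # file name of the first image
+```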
+
+# Environment Requirements
+
+- Hardware (GPU)
+    - Train the model on GPU.
+- Framework
+    - [MindSpore](https://www.mindspore.cn/install/en)
+- For more information, please check the resources below:
+    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/zh-CN/master/index.html)
+    - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/master/index.html)
+
+# Quick Start
+
+After installing MindSpore via the official website, you can start training and evaluation as follows:
+
+## MPII Dataset
+
+### Single GPU
+
+```bash
+# Usage: bash scripts/run_train_single_gpu.sh TARGET CONFIG_PATH [OPTION] ...
+bash scripts/run_train_single_gpu.sh mpii_single config/mpii_train_single_gpu.yaml
+```
+
+### Multiple GPUs
+
+```bash
+# Usage: bash scripts/run_train_multiple_gpu.sh TARGET CONFIG_PATH CUDA_VISIBLE_DEVICES DEVICE_NUM [OPTION] ...
+bash scripts/run_train_multiple_gpu.sh mpii_single config/mpii_train_multiple_gpu.yaml "0,1,2,3,4,5,6,7" 8
+```
+
+### Inference and Evaluation
+
+```bash
+# inference and evaluation
+# Usage: bash scripts/eval.sh TARGET CKPT_PATH OUTPUT_PATH
+# replace the ckpt file with your own as needed
+bash scripts/eval.sh mpii_single ckpt/rank_0/arttrack-1_356.ckpt out/prediction.mat
+
+# inference only
+python eval.py mpii_single --config config/mpii_eval.yaml --option "load_ckpt=ckpt/rank_0/arttrack-1_356.ckpt" --output "out/prediction.mat"
+
+# evaluation only
+python eval.py mpii_single --config config/mpii_eval.yaml --accuracy --prediction "out/prediction.mat"
+```
+
+# Script Description
+
+## Scripts and Sample Code
+
+```text
+ArtTrack
+├── config
+│  ├── coco_pairwise.yaml                   # pairwise config for the COCO dataset
+│  ├── coco_eval.yaml                       # inference config for the COCO dataset
+│  ├── coco_train_multiple_gpu.yaml         # multi-GPU training config for the COCO dataset
+│  ├── coco_train_single_gpu.yaml           # single-GPU training config for the COCO dataset
+│  ├── mpii_eval.yaml                       # inference config for the MPII dataset
+│  ├── mpii_train_multiple_gpu.yaml         # multi-GPU training config for the MPII dataset
+│  ├── mpii_train_single_gpu.yaml           # single-GPU training config for the MPII dataset
+│  └── tf2ms.json                           # mapping table from TF parameters to MindSpore parameters
+├── environment.yaml                        # conda environment
+├── log.yaml                                # logging config
+├── patch
+│  ├── 0001-fix-lib.patch                   # patch for the TF code
+│  ├── 0002-pybind11.patch                  # patch for the TF code
+│  └── 0003-split-dataset.patch             # patch for the TF code
+├── preprocess.py                           # preprocessing script
+├── README_CN.md                            # model description
+├── requirements.txt                        # dependency list
+├── scripts
+│  ├── download.sh                          # download the pretrained model and datasets
+│  ├── eval.sh                              # inference and evaluation
+│  ├── prepare.sh                           # environment preparation
+│  ├── run_train_multiple_gpu.sh            # multi-GPU training
+│  └── run_train_single_gpu.sh              # single-GPU training
+├── src
+│  ├── __init__.py
+│  ├── tool
+│  │  ├── __init__.py
+│  │  ├── preprocess
+│  │  │  ├── __init__.py
+│  │  │  ├── crop.py                        # MPII image scaling
+│  │  │  ├── mat2json.py                    # convert the mat index to json
+│  │  │  ├── pairwise_stats.py              # generate pairwise statistics
+│  │  │  ├── parts.json
+│  │  │  ├── preprocess_single.py           # MPII preprocessing
+│  │  │  ├── split.py                       # dataset split
+│  │  │  ├── tf2ms.py                       # convert a TF ckpt to a MindSpore checkpoint
+│  │  │  └── utils.py                       # preprocessing utilities
+│  │  ├── eval
+│  │  │  ├── __init__.py
+│  │  │  ├── coco.py                        # COCO evaluation
+│  │  │  ├── pck.py                         # MPII evaluation
+│  │  │  └── multiple.py                    # COCO inference
+│  │  └── decorator.py
+│  ├── config.py                            # configuration handling
+│  ├── dataset
+│  │  ├── __init__.py
+│  │  ├── coco.py                           # COCO dataset
+│  │  ├── mpii.py                           # MPII dataset
+│  │  ├── pose.py                           # dataset loading
+│  │  └── util.py                           # dataset utilities
+│  ├── log.py                               # logging setup
+│  ├── model
+│  │  ├── losses.py                         # custom losses
+│  │  ├── pose.py                           # model
+│  │  ├── predict.py                        # inference
+│  │  └── resnet
+│  │     ├── __init__.py
+│  │     ├── resnet.py                      # ResNet
+│  │     └── util.py                        # ResNet utilities
+│  ├── multiperson
+│  │  ├── detections.py                     # detection
+│  │  ├── predict.py                        # inference
+│  │  └── visualize.py                      # visualization
+│  └── args_util.py                         # command-line utilities
+├── eval.py                                 # inference and evaluation
+└── train.py                                # training script
+```
+
+## Script Parameters
+
+```yaml
+# dataset
+dataset:
+    path: out/train_index_dataset.json
+    type: mpii_raw
+    parallel: 1
+    # needs about 13 GB of GPU memory
+    batch_size: 16
+    mirror: true
+    padding: yes
+    shuffle: yes
+
+# mindspore context
+context:
+    # GRAPH
+    # mode: 0
+
+    # PYNATIVE
+    mode: 1
+    device_target: GPU
+
+# mindspore parallel context
+# if this field is present, data parallelism is used
+parallel_context:
+    parallel_mode: data_parallel
+
+# number of training epochs
+epoch: 25
+# whether this is a training run
+train: yes
+
+# number of keypoints
+num_joints: 14
+
+# keypoint indices in the dataset, corresponding to all_joints_names
+all_joints: [ [ 0, 5 ], [ 1, 4 ], [ 2, 3 ], [ 6, 11 ], [ 7, 10 ], [ 8, 9 ], [ 12 ], [ 13 ] ]
+# keypoint names
+all_joints_names: [ 'ankle', 'knee', 'hip', 'wrist', 'elbow', 'shoulder', 'chin', 'forehead' ]
+
+# evaluation threshold
+pck_threshold: 2
+
+# heatmap threshold used during dataset preprocessing
+pos_dist_thresh: 17
+# global scale factor
+global_scale: 0.8452830189
+
+# whether to use location refinement
+location_refinement: true
+# whether location refinement uses the huber loss
+locref_huber_loss: true
+# huber loss weight
+locref_loss_weight: 0.05
+locref_stdev: 7.2801
+
+# whether to use intermediate supervision
+intermediate_supervision: no
+# layer index within the 3rd block used for intermediate supervision
+intermediate_supervision_layer: 12
+# input channels of the intermediate supervision head
+intermediate_supervision_input: 1024
+
+# limit on the input image size
+max_input_size: 600
+
+# learning rate schedule
+multi_step:
+    - [ 0.05,0.2,0.02,0.01 ]
+    - [ 500,2700,4600,6500 ]
+```
+
+For more configuration details, see the configuration files in the `config` directory.
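+
+The `multi_step` field pairs a list of learning rates with a list of step boundaries. A minimal sketch of how such a piecewise-constant schedule could be expanded into a per-step learning-rate list; the exact switching semantics are an assumption, and the repository's training code may differ:
+
+```python
+# Hypothetical expansion of multi_step into one learning rate per training step.
+def expand_multi_step(multi_step, total_steps):
+    lrs, boundaries = multi_step
+    schedule, idx = [], 0
+    for step in range(total_steps):
+        # move to the next learning rate once the current boundary is reached
+        if idx < len(lrs) - 1 and step >= boundaries[idx]:
+            idx += 1
+        schedule.append(lrs[idx])
+    return schedule
+
+# Example with the MPII single-GPU values above (25 epochs x 356 steps per epoch).
+lr_per_step = expand_multi_step([[0.05, 0.2, 0.02, 0.01], [500, 2700, 4600, 6500]], 25 * 356)
+```
+
+Such a per-step list can be passed as the `learning_rate` argument of a MindSpore optimizer, which accepts a dynamic learning-rate sequence.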
+
+## Training Process
+
+### MPII Dataset
+
+#### Environment
+
+```bash
+# conda environment
+conda create -n art-track python=3.7.5
+conda activate art-track
+pip install -r requirements.txt
+
+# download the ResNet101 pretrained model
+bash scripts/download.sh pretrained_resnet101
+
+# install dependencies and convert the pretrained model
+bash scripts/prepare.sh env
+
+# download the MPII dataset
+bash scripts/download.sh dataset_mpii
+
+# preprocess the MPII dataset
+bash scripts/prepare.sh mpii
+```
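+
+`bash scripts/prepare.sh env` converts the TensorFlow ResNet101 checkpoint into a MindSpore checkpoint using the name mapping in `config/tf2ms.json`; the actual logic lives in `src/tool/preprocess/tf2ms.py`. The following is a hypothetical sketch of such a conversion; the input file path and the 4-D weight transpose are assumptions, not the repository's exact code:
+
+```python
+# Hypothetical TF-to-MindSpore checkpoint conversion using the tf2ms.json name map.
+import json
+
+import numpy as np
+import tensorflow as tf
+import mindspore as ms
+
+
+def convert(tf_ckpt, mapping_file, out_ckpt):
+    reader = tf.train.load_checkpoint(tf_ckpt)
+    with open(mapping_file) as f:
+        name_map = json.load(f)  # TF parameter name -> MindSpore parameter name
+    params = []
+    for tf_name, ms_name in name_map.items():
+        value = reader.get_tensor(tf_name)
+        # TF stores conv kernels as HWIO; MindSpore expects OIHW (simplification:
+        # transposed convolutions may need a different permutation).
+        if value.ndim == 4:
+            value = np.transpose(value, (3, 2, 0, 1))
+        params.append({"name": ms_name, "data": ms.Tensor(value)})
+    ms.save_checkpoint(params, out_ckpt)
+
+
+convert("resnet_v1_101.ckpt", "config/tf2ms.json", "out/pretrained_resnet101.ckpt")
+```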
+
+#### Training
+
+##### Single GPU
+
+```bash
+# Usage: bash scripts/run_train_single_gpu.sh TARGET CONFIG_PATH [OPTION] ...
+bash scripts/run_train_single_gpu.sh mpii_single config/mpii_train_single_gpu.yaml
+```
+
+##### Multiple GPUs
+
+```bash
+# Usage: bash scripts/run_train_multiple_gpu.sh TARGET CONFIG_PATH CUDA_VISIBLE_DEVICES DEVICE_NUM [OPTION] ...
+bash scripts/run_train_multiple_gpu.sh mpii_single config/mpii_train_multiple_gpu.yaml "0,1,2,3,4,5,6,7" 8
+```
+
+#### Inference and Evaluation
+
+```bash
+# inference and evaluation
+# Usage: bash scripts/eval.sh TARGET CKPT_PATH OUTPUT_PATH
+# replace the ckpt file with your own as needed
+bash scripts/eval.sh mpii_single ckpt/rank_0/arttrack-1_356.ckpt out/prediction.mat
+
+# inference
+python eval.py mpii_single --config config/mpii_eval.yaml --option "load_ckpt=ckpt/rank_0/arttrack-1_356.ckpt" --output "out/prediction.mat"
+
+# evaluation
+python eval.py mpii_single --config config/mpii_eval.yaml --accuracy --prediction "out/prediction.mat"
+```
+
+## Evaluation Process
+
+### MPII Dataset
+
+#### Evaluation
+
+```bash
+# inference and evaluation
+# Usage: bash scripts/eval.sh TARGET CKPT_PATH OUTPUT_PATH
+# replace the ckpt file with your own as needed
+bash scripts/eval.sh mpii_single ckpt/rank_0/arttrack-1_356.ckpt out/prediction.mat
+
+# inference only
+python eval.py mpii_single --config config/mpii_eval.yaml --option "load_ckpt=ckpt/rank_0/arttrack-1_356.ckpt" --output "out/prediction.mat"
+
+# evaluation only
+python eval.py mpii_single --config config/mpii_eval.yaml --accuracy --prediction "out/prediction.mat"
+```
+
+#### Results
+
+| ankle | knee | hip  | wrist | elbow | shoulder | chin | forehead | total |
+| ----- | ---- | ---- | ----- | ----- | -------- | ---- | -------- | ----- |
+| 66.0  | 70.9 | 74.7 | 65.5  | 72.4  | 83.4     | 87.0 | 84.2     | 74.1  |
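+
+These values are per-joint PCK accuracies (percentage of correct keypoints) reported by the evaluation step. A minimal sketch of the metric, assuming a prediction counts as correct when its distance to the ground-truth joint is within a threshold; the repository's `src/tool/eval/pck.py` may normalize distances differently:
+
+```python
+# Hypothetical PCK computation over N samples and J joints.
+import numpy as np
+
+
+def pck(pred, gt, visible, threshold):
+    """pred, gt: (N, J, 2) arrays of (x, y); visible: (N, J) boolean mask."""
+    dist = np.linalg.norm(pred - gt, axis=-1)  # (N, J) distances
+    correct = (dist <= threshold) & visible
+    # percentage of correct keypoints per joint, averaged only over annotated joints
+    return 100.0 * correct.sum(axis=0) / np.maximum(visible.sum(axis=0), 1)
+```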
+
+# Model Description
+
+## Performance
+
+### Evaluation Performance
+
+#### ArtTrack on MPII
+
+| Parameters                 | GPU V100                                                                     |
+| -------------------------- | ---------------------------------------------------------------------------- |
+| Model Version              | ArtTrack                                                                     |
+| Resource                   | Tesla GPU V100                                                               |
+| Uploaded Date              | 2022-01-11                                                                   |
+| MindSpore Version          | 1.5.0                                                                        |
+| Dataset                    | MPII                                                                         |
+| Training Parameters        | epoch=25, steps per epoch=356, batch_size=16                                 |
+| Optimizer                  | SGD                                                                          |
+| Loss Function              | SigmoidCrossEntropyWithLogits                                                |
+| Outputs                    | keypoint coordinates                                                         |
+| Loss                       | 0.016214851                                                                  |
+| Speed                      | 1292.854 ms/step (8 GPUs)                                                    |
+| Total Time                 | 3.2 hours                                                                    |
+| Checkpoint for Fine-tuning | 496M (.ckpt file)                                                            |
+| Scripts                    | [Link](https://gitee.com/mindspore/models/tree/master/research/cv/ArtTrack)  |
+
+# ModelZoo Homepage
+
+Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/research/cv/ArtTrack/config/coco_eval.yaml b/research/cv/ArtTrack/config/coco_eval.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e53c52d35efa755d68ce242de18b8b07f90c28c9
--- /dev/null
+++ b/research/cv/ArtTrack/config/coco_eval.yaml
@@ -0,0 +1,45 @@
+dataset:
+  path: ./coco
+  type: coco
+  parallel: 1
+  phase: val2014
+  ann: person_keypoints  # 'image_info' or 'person_keypoints'
+load_ckpt: out/tf2ms.ckpt
+coco_only_images_with_people: true
+
+context:
+  device_target: GPU
+  mode: 1
+
+all_joints: [ [ 0 ], [ 2, 1 ], [ 4, 3 ], [ 6, 5 ], [ 8, 7 ], [ 10, 9 ], [ 12, 11 ], [ 14, 13 ], [ 16, 15 ] ]
+all_joints_names: [ "nose", 'eye', 'ear', 'shoulder', 'elbow', 'hand', 'hip', 'knee', 'foot' ]
+num_joints: 17
+
+global_scale: 1.0
+pos_dist_thresh: 17
+
+location_refinement: true
+locref_huber_loss: true
+locref_loss_weight: 0.05
+locref_stdev: 7.2801
+
+pairwise_predict: true
+pairwise_huber_loss: true
+pairwise_loss_weight: 0.05
+pairwise_stats_fn: out/pairwise_stats.mat
+pairwise_model_dir: out/pairwise
+
+intermediate_supervision: true
+intermediate_supervision_layer: 12
+
+max_input_size: 650
+multi_step:
+  - [ 0.005,0.02,0.002,0.0005,0.0002,0.00005 ]
+  - [ 10000,450000,750000,1050000,1550000,1800000 ]
+display_iters: 20
+save_iters: 200000
+
+nms_radius: 5.0
+det_min_score: 0.2
+use_gt_segm: yes
+gt_segm_output: out/predictions_with_segm.json
diff --git a/research/cv/ArtTrack/config/coco_pairwise.yaml b/research/cv/ArtTrack/config/coco_pairwise.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9d31899c999629c20af945f7a6dce464edc3b6e2
--- /dev/null
+++ b/research/cv/ArtTrack/config/coco_pairwise.yaml
@@ -0,0 +1,30 @@
+dataset:
+  path: ./coco
+  type: coco
+  parallel: 1
+  phase: train2014
+  ann: person_keypoints  # 'image_info' or 'person_keypoints'
+
+coco_only_images_with_people: true
+
+all_joints: [ [ 0 ], [ 2, 1 ], [ 4, 3 ], [ 6, 5 ], [ 8, 7 ], [ 10, 9 ], [ 12, 11 ], [ 14, 13 ], [ 16, 15 ] ]
+all_joints_names: [ "nose", 'eye', 'ear', 'shoulder', 'elbow', 'hand', 'hip', 'knee', 'foot' ]
+num_joints: 17
+global_scale: 1.0
+pos_dist_thresh: 17
+
+
+location_refinement: true
+locref_huber_loss: true
+locref_loss_weight: 0.05
+locref_stdev: 7.2801
+
+pairwise_predict: no
+pairwise_huber_loss: true
+pairwise_loss_weight: 0.05
+pairwise_stats_fn: out/pairwise_stats.mat
+
+intermediate_supervision: true
+intermediate_supervision_layer: 12
+
+max_input_size: 650
diff --git a/research/cv/ArtTrack/config/coco_train_multiple_gpu.yaml b/research/cv/ArtTrack/config/coco_train_multiple_gpu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fb7b1ba00b2db8e2bf2404b2c15771049a5ac075
--- /dev/null
+++ b/research/cv/ArtTrack/config/coco_train_multiple_gpu.yaml
@@ -0,0 +1,50 @@
+dataset:
+  path: ./coco
+  type: coco
+  parallel: 4
+  # need about 24G GPU memory
+  batch_size: 16
+  phase: train2014
+  ann: person_keypoints  # 'image_info' or 'person_keypoints'
+  padding: yes
+  mirror: yes
+load_ckpt: out/pretrained_resnet101.ckpt
+epoch: 25
+train: yes
+
+context:
+  # GRAPH
+  # mode: 0
+
+  # PYNATIVE
+  mode: 1
+  device_target: GPU
+
+parallel_context:
+  parallel_mode: data_parallel
+
+coco_only_images_with_people: true
+all_joints: [ [ 0 ], [ 2, 1 ], [ 4, 3 ], [ 6, 5 ], [ 8, 7 ], [ 10, 9 ], [ 12, 11 ], [ 14, 13 ], [ 16, 15 ] ]
+all_joints_names: [ "nose", 'eye', 'ear', 'shoulder', 'elbow', 'hand', 'hip', 'knee', 'foot' ]
+num_joints: 17
+global_scale: 1.0
+pos_dist_thresh: 17
+
+location_refinement: true
+locref_huber_loss: true
+locref_loss_weight: 0.05
+locref_stdev: 7.2801
+
+pairwise_predict: yes
+pairwise_huber_loss: true
+pairwise_loss_weight: 0.05
+pairwise_stats_fn: out/pairwise_stats.mat
+
+intermediate_supervision: true
+intermediate_supervision_layer: 12
+intermediate_supervision_input: 1024
+
+max_input_size: 650
+multi_step:
+  - [ 0.05,0.2,0.02,0.005,0.002,0.0005 ]
+  - [ 32,1400,2500,3200,5000, 6000 ]
\ No newline at end of file
diff --git a/research/cv/ArtTrack/config/coco_train_single_gpu.yaml b/research/cv/ArtTrack/config/coco_train_single_gpu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c6b7c28240fad85d83975fe53e39d12cd2bf08c
--- /dev/null
+++ b/research/cv/ArtTrack/config/coco_train_single_gpu.yaml
@@ -0,0 +1,48 @@
+dataset:
+  path: ./coco
+  type: coco
+  parallel: 4
+  # need about 24G GPU memory
+  batch_size: 16
+  phase: train2014
+  ann: person_keypoints  # 'image_info' or 'person_keypoints'
+  padding: yes
+  mirror: yes
+load_ckpt: out/pretrained_resnet101.ckpt
+epoch: 25
+train: yes
+
+context:
+  # GRAPH
+  # mode: 0
+
+  # PYNATIVE
+  mode: 1
+  device_target: GPU
+coco_only_images_with_people: true
+all_joints: [ [ 0 ], [ 2, 1 ], [ 4, 3 ], [ 6, 5 ], [ 8, 7 ], [ 10, 9 ], [ 12, 11 ], [ 14, 13 ], [ 16, 15 ] ]
+all_joints_names: [ "nose", 'eye', 'ear', 'shoulder', 'elbow', 'hand', 'hip', 'knee', 'foot' ]
+num_joints: 17
+global_scale: 1.0
+pos_dist_thresh: 17
+#scale_jitter_lo: 0.85
+scale_jitter_up: 1.15
+
+location_refinement: true
+locref_huber_loss: true
+locref_loss_weight: 0.05
+locref_stdev: 7.2801
+
+pairwise_predict: yes
+pairwise_huber_loss: true
+pairwise_loss_weight: 0.05
+pairwise_stats_fn: out/pairwise_stats.mat
+
+intermediate_supervision: true
+intermediate_supervision_layer: 12
+intermediate_supervision_input: 1024
+
+max_input_size: 650
+multi_step:
+  - [ 0.05,0.2,0.02,0.005,0.002,0.0005 ]
+  - [ 500,22500,37500,52500,77500,90000 ]
\ No newline at end of file
diff --git a/research/cv/ArtTrack/config/mpii_eval.yaml b/research/cv/ArtTrack/config/mpii_eval.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16117f7ccdfe40d67c859a8c4ccde7ee86a0e14b
--- /dev/null
+++ b/research/cv/ArtTrack/config/mpii_eval.yaml
@@ -0,0 +1,32 @@
+dataset:
+  path: out/test_index_dataset.json
+  type: mpii_raw
+  parallel: 1
+  batch_size: 1
+  mirror: no
+  padding: no
+  shuffle: no
+train: no
+output: out/prediction.mat
+pck_threshold: 0.5
+
+num_joints: 14
+all_joints: [ [ 0, 5 ], [ 1, 4 ], [ 2, 3 ], [ 6, 11 ], [ 7, 10 ], [ 8, 9 ], [ 12 ], [ 13 ] ]
+all_joints_names: [ 'ankle', 'knee', 'hip', 'wrist', 'elbow', 'shoulder', 'chin', 'forehead' ]
+
+pos_dist_thresh: 17
+global_scale: 0.8452830189
+
+location_refinement: true
+locref_huber_loss: true
+locref_loss_weight: 0.05
+locref_stdev: 7.2801
+
+intermediate_supervision: no
+intermediate_supervision_layer: 12
+
+max_input_size: 600
+multi_step:
+  - [ 0.005,0.02,0.002,0.001 ]
+  - [ 10000,430000,730000,1030000 ]
+
diff --git a/research/cv/ArtTrack/config/mpii_train_multiple_gpu.yaml b/research/cv/ArtTrack/config/mpii_train_multiple_gpu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..05298eced807a82ec08a437cb4b7bce8c4eeb910
--- /dev/null
+++ b/research/cv/ArtTrack/config/mpii_train_multiple_gpu.yaml
@@ -0,0 +1,48 @@
+dataset:
+  path: out/train_index_dataset.json
+  type: mpii_raw
+  parallel: 1
+  # need about 13G GPU memory
+  batch_size: 16
+  mirror: true
+  padding: yes
+  shuffle: yes
+load_ckpt: out/pretrained_resnet101.ckpt
+context:
+  # GRAPH
+  # mode: 0
+
+  # PYNATIVE
+  mode: 1
+  device_target: GPU
+
+parallel_context:
+  parallel_mode: data_parallel
+
+epoch: 25
+train: yes
+
+num_joints: 14
+all_joints: [ [ 0, 5 ], [ 1, 4 ], [ 2, 3 ], [ 6, 11 ], [ 7, 10 ], [ 8, 9 ], [ 12 ], [ 13 ] ]
+all_joints_names: [ 'ankle', 'knee', 'hip', 'wrist', 'elbow', 'shoulder', 'chin', 'forehead' ]
+
+pck_threshold: 2
+
+
+pos_dist_thresh: 17
+global_scale: 0.8452830189
+
+location_refinement: true
+locref_huber_loss: true
+locref_loss_weight: 0.05
+locref_stdev: 7.2801
+
+intermediate_supervision: no
+intermediate_supervision_layer: 12
+intermediate_supervision_input: 1024
+
+max_input_size: 600
+multi_step:
+  - [ 0.05,0.2,0.02,0.01 ]
+  - [ 500,2700,4600,6500 ]
+
diff --git a/research/cv/ArtTrack/config/mpii_train_single_gpu.yaml b/research/cv/ArtTrack/config/mpii_train_single_gpu.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6536b3a35a476472b6ef4b1497c4f4d50c87b819
--- /dev/null
+++ b/research/cv/ArtTrack/config/mpii_train_single_gpu.yaml
@@ -0,0 +1,46 @@
+dataset:
+  path: out/train_index_dataset.json
+  type: mpii_raw
+  parallel: 1
+  # need about 13G GPU memory
+  batch_size: 16
+  mirror: true
+  padding: yes
+  shuffle: yes
+load_ckpt: out/pretrained_resnet101.ckpt
+context:
+  # GRAPH
+  # mode: 0
+
+  # PYNATIVE
+  mode: 1
+  device_target: GPU
+
+
+epoch: 25
+train: yes
+
+num_joints: 14
+all_joints: [ [ 0, 5 ], [ 1, 4 ], [ 2, 3 ], [ 6, 11 ], [ 7, 10 ], [ 8, 9 ], [ 12 ], [ 13 ] ]
+all_joints_names: [ 'ankle', 'knee', 'hip', 'wrist', 'elbow', 'shoulder', 'chin', 'forehead' ]
+
+pck_threshold: 2
+
+
+pos_dist_thresh: 17
+global_scale: 0.8452830189
+
+location_refinement: true
+locref_huber_loss: true
+locref_loss_weight: 0.05
+locref_stdev: 7.2801
+
+intermediate_supervision: no
+intermediate_supervision_layer: 12
+intermediate_supervision_input: 1024
+
+max_input_size: 600
+multi_step:
+  - [ 0.05,0.2,0.02,0.01 ]
+  - [ 500,21500,36500,51500 ]
+
diff --git a/research/cv/ArtTrack/config/tf2ms.json b/research/cv/ArtTrack/config/tf2ms.json
new file mode 100644
index 0000000000000000000000000000000000000000..cecf7ecbee026d97235486e2c5829905b181a125
--- /dev/null
+++ b/research/cv/ArtTrack/config/tf2ms.json
@@ -0,0 +1,530 @@
+{
+  "pose/intermediate_supervision/block4/biases": "net.part_pred_interm.conv2d_transpose.bias",
+  "pose/intermediate_supervision/block4/weights": "net.part_pred_interm.conv2d_transpose.weight",
+  "pose/locref_pred/block4/biases": "net.locref.conv2d_transpose.bias",
+  "pose/locref_pred/block4/weights": "net.locref.conv2d_transpose.weight",
+  "pose/pairwise_pred/block4/biases": "net.pairwise_pred.conv2d_transpose.bias",
+  "pose/pairwise_pred/block4/weights": "net.pairwise_pred.conv2d_transpose.weight",
+  "pose/part_pred/block4/biases": "net.part_pred.conv2d_transpose.bias",
+  "pose/part_pred/block4/weights": "net.part_pred.conv2d_transpose.weight",
+  "resnet_v1_101/conv1/BatchNorm/gamma": "net.resnet101.conv2d_same.conv2d.1.batchnorm.gamma",
+  "resnet_v1_101/conv1/BatchNorm/beta": "net.resnet101.conv2d_same.conv2d.1.batchnorm.beta",
+  "resnet_v1_101/conv1/BatchNorm/moving_mean": "net.resnet101.conv2d_same.conv2d.1.batchnorm.moving_mean",
+  "resnet_v1_101/conv1/BatchNorm/moving_variance": "net.resnet101.conv2d_same.conv2d.1.batchnorm.moving_variance",
+  "resnet_v1_101/conv1/weights": "net.resnet101.conv2d_same.conv2d.1.conv.weight",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d1.conv.weight",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d3.conv.weight",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/shortcut/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d_shortcut.batchnorm.beta",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d_shortcut.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d_shortcut.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d_shortcut.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_1/bottleneck_v1/shortcut/weights": "net.resnet101.layer.blocks_cell.0.units_cells.0.conv2d_shortcut.conv.weight",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d1.conv.weight",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_2/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.0.units_cells.1.conv2d3.conv.weight",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d1.conv.weight",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d2.conv2d.1.batchnorm.beta",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d2.conv2d.1.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d2.conv2d.1.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d2.conv2d.1.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d2.conv2d.1.conv.weight",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block1/unit_3/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.0.units_cells.2.conv2d3.conv.weight",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d1.conv.weight",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d3.conv.weight",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/shortcut/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d_shortcut.batchnorm.beta",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d_shortcut.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d_shortcut.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d_shortcut.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_1/bottleneck_v1/shortcut/weights": "net.resnet101.layer.blocks_cell.1.units_cells.0.conv2d_shortcut.conv.weight",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d1.conv.weight",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_2/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.1.units_cells.1.conv2d3.conv.weight",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d1.conv.weight",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_3/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.1.units_cells.2.conv2d3.conv.weight",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d1.conv.weight",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d2.conv2d.1.batchnorm.beta",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d2.conv2d.1.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d2.conv2d.1.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d2.conv2d.1.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d2.conv2d.1.conv.weight",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block2/unit_4/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.1.units_cells.3.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/shortcut/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d_shortcut.batchnorm.beta",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d_shortcut.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d_shortcut.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d_shortcut.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_1/bottleneck_v1/shortcut/weights": "net.resnet101.layer.blocks_cell.2.units_cells.0.conv2d_shortcut.conv.weight",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_2/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.1.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_3/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.2.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_4/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.3.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_5/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.4.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_6/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.5.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_7/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.6.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_8/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.7.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_9/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.8.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_10/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.9.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_11/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.10.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_12/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.11.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_13/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.12.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_14/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.13.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_15/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.14.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_16/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.15.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_17/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.16.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_18/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.17.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_19/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.18.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_20/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.19.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_21/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.20.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_22/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.21.conv2d3.conv.weight",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d1.conv.weight",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block3/unit_23/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.2.units_cells.22.conv2d3.conv.weight",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d1.conv.weight",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d3.conv.weight",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d_shortcut.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d_shortcut.batchnorm.beta",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d_shortcut.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/shortcut/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d_shortcut.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_1/bottleneck_v1/shortcut/weights": "net.resnet101.layer.blocks_cell.3.units_cells.0.conv2d_shortcut.conv.weight",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d1.conv.weight",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_2/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.3.units_cells.1.conv2d3.conv.weight",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv1/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d1.batchnorm.beta",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv1/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d1.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d1.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv1/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d1.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv1/weights": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d1.conv.weight",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv2/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d2.conv2d.batchnorm.beta",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv2/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d2.conv2d.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d2.conv2d.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv2/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d2.conv2d.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv2/weights": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d2.conv2d.conv.weight",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv3/BatchNorm/beta": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d3.batchnorm.beta",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv3/BatchNorm/gamma": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d3.batchnorm.gamma",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_mean": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d3.batchnorm.moving_mean",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d3.batchnorm.moving_variance",
+  "resnet_v1_101/block4/unit_3/bottleneck_v1/conv3/weights": "net.resnet101.layer.blocks_cell.3.units_cells.2.conv2d3.conv.weight"
+}
diff --git a/research/cv/ArtTrack/eval.py b/research/cv/ArtTrack/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..f01457818bf81bd1262afba3255d6d0bf53a9a6f
--- /dev/null
+++ b/research/cv/ArtTrack/eval.py
@@ -0,0 +1,173 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import os
+import sys
+
+import matplotlib.pyplot as plt
+import mindspore as ms
+from mindspore import context as ctx
+import numpy as np
+from scipy import io as sio
+
+from src.args_util import command, create_arg_parser, TARGET_COCO_MULTI, TARGET_MPII_SINGLE
+from src.dataset.mpii import MPII
+from src.dataset.pose import Batch
+from src.log import log
+from src.model.pose import PoseNet, PoseNetTest
+from src.model.predict import argmax_pose_predict, extract_cnn_output
+from src.multiperson.visualize import show_heatmaps
+from src.tool.decorator import process_cfg
+from src.tool.eval.pck import enclosing_rect, print_results, rect_size
+
+
+@command
+def test(parser, args, cfg):
+    if args.target == TARGET_MPII_SINGLE:
+        if args.accuracy:
+            eval_mpii(cfg, args.prediction or args.output)
+            return
+        predict_mpii(cfg, args.visual, args.cache, args.output)
+    elif args.target == TARGET_COCO_MULTI:
+        if args.accuracy:
+            from src.tool.eval.coco import eval_coco
+            eval_coco(cfg, args.prediction)
+            return
+        from src.tool.eval.multiple import test as multiple_test
+        multiple_test(cfg, args.cache, args.visual, args.dev,
+                      args.score_maps_cached, args.graph, args.output, args.range_num, args.range_index)
+    else:
+        parser.print_help()
+
+
+@process_cfg
+def predict_mpii(cfg=None, visual=False, cache=False, output=None):
+    """
+    entry for predicting single mpii
+    Args:
+        cfg: config
+        visual: if True, visualize prediction
+        cache: if True, cache score map
+        output: path to output
+    """
+    cfg.train = False
+    ctx.set_context(**cfg.context)
+    out_dir = cfg.scoremap_dir
+    if cache:
+        if not os.path.exists(out_dir):
+            os.makedirs(out_dir)
+    dataset = MPII(cfg)
+    dataset.set_mirror(False)
+
+    net = PoseNet(cfg=cfg)
+    test_net = PoseNetTest(net, cfg)
+
+    if hasattr(cfg, 'load_ckpt') and os.path.exists(cfg.load_ckpt):
+        ms.load_checkpoint(cfg.load_ckpt, net=test_net)
+
+    num_images = len(dataset)
+    predictions = np.zeros((num_images,), dtype=object)  # np.object alias was removed in NumPy 1.24
+
+    for i in range(num_images):
+        log.info('processing image %s/%s', i, num_images - 1)
+        batch = dataset.get_item(i)
+        o = test_net(
+            ms.Tensor(np.expand_dims(batch[Batch.inputs], axis=0),
+                      dtype=ms.dtype.float32),
+        )
+        out = o[0].transpose([0, 2, 3, 1]).asnumpy()
+        locref = o[1].transpose([0, 2, 3, 1]).asnumpy() if o[1] is not None else None
+        pairwise_pred = o[2].transpose([0, 2, 3, 1]).asnumpy() if o[2] is not None else None
+        scmap, locref, _ = extract_cnn_output(out, locref, pairwise_pred,
+                                              cfg)
+        pose = argmax_pose_predict(scmap, locref, cfg.stride)
+
+        pose_refscale = np.copy(pose)
+        pose_refscale[:, 0:2] /= cfg.global_scale
+        predictions[i] = pose_refscale
+
+        if visual:
+            img = np.transpose(np.squeeze(batch[Batch.inputs]).astype('uint8'), [1, 2, 0])
+            show_heatmaps(cfg, img, scmap, pose)
+            plt.waitforbuttonpress(timeout=1)
+            plt.close()
+
+        if cache:
+            base = os.path.basename(batch[Batch.data_item].im_path).decode()
+            raw_name = os.path.splitext(base)[0]
+            out_fn = os.path.join(out_dir, raw_name + '.mat')
+            sio.savemat(out_fn, mdict={'scoremaps': scmap.astype('float32')})
+
+            out_fn = os.path.join(out_dir, raw_name + '_locreg' + '.mat')
+            if cfg.location_refinement:
+                sio.savemat(out_fn, mdict={'locreg_pred': locref.astype('float32')})
+
+    sio.savemat(output or cfg.output, mdict={'joints': predictions})
+
+
+@process_cfg
+def eval_mpii(cfg=None, prediction=None):
+    """
+    eval mpii entry
+    """
+    dataset = MPII(cfg)
+    filename = prediction or cfg.output
+    pred = sio.loadmat(filename)
+
+    joints = pred['joints']
+    pck_ratio_thresh = cfg.pck_threshold
+
+    num_joints = cfg.num_joints
+    num_images = joints.shape[1]
+
+    pred_joints = np.zeros((num_images, num_joints, 2))
+    gt_joints = np.zeros((num_images, num_joints, 2))
+    pck_thresh = np.zeros((num_images, 1))
+    gt_present_joints = np.zeros((num_images, num_joints))
+
+    for k in range(num_images):
+        pred = joints[0, k]
+        gt = dataset.data[k].joints[0]
+        if gt.shape[0] == 0:
+            continue
+        gt_joint_ids = gt[:, 0].astype('int32')
+        rect = enclosing_rect(gt[:, 1:3])
+        pck_thresh[k] = pck_ratio_thresh * np.amax(rect_size(rect))
+
+        gt_present_joints[k, gt_joint_ids] = 1
+        gt_joints[k, gt_joint_ids, :] = gt[:, 1:3]
+        pred_joints[k, :, :] = pred[:, 0:2]
+
+    dists = np.sqrt(np.sum((pred_joints - gt_joints) ** 2, axis=2))
+    correct = dists <= pck_thresh
+
+    num_all = np.sum(gt_present_joints, axis=0)
+
+    num_correct = np.zeros((num_joints,))
+    for j_id in range(num_joints):
+        num_correct[j_id] = np.sum(correct[gt_present_joints[:, j_id] == 1, j_id], axis=0)
+
+    pck = num_correct / num_all * 100.0
+
+    print_results(pck, cfg)
+
+
+def main():
+    parser = create_arg_parser()['eval']
+    args = parser.parse_args(sys.argv[1:])
+    test(parser, args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/research/cv/ArtTrack/log.yaml b/research/cv/ArtTrack/log.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2d9f1e9ea4354c054fb1148dc39913d9469652f
--- /dev/null
+++ b/research/cv/ArtTrack/log.yaml
@@ -0,0 +1,40 @@
+version: 1
+disable_existing_loggers: False
+
+formatters:
+  simple:
+    format: "%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno)d - %(threadName)s - %(message)s"
+handlers:
+  console:
+    class: logging.StreamHandler
+    level: DEBUG
+    formatter: simple
+    stream: ext://sys.stdout
+
+  debug_file_handler:
+    class: logging.handlers.RotatingFileHandler
+    level: DEBUG
+    formatter: simple
+    filename: ./log/debug.log
+    maxBytes: 10485760 # 10MB
+    backupCount: 20
+    encoding: utf8
+    delay: yes
+
+  error_file_handler:
+    class: logging.handlers.RotatingFileHandler
+    level: ERROR
+    formatter: simple
+    filename: ./log/error.log
+    maxBytes: 10485760 # 10MB
+    backupCount: 20
+    encoding: utf8
+    delay: yes
+loggers:
+  ArtTrack:
+    level: INFO
+    handlers: [ console ]
+    propagate: False
+root:
+  level: INFO
+  handlers: [ console ]
diff --git a/research/cv/ArtTrack/patch/0001-fix-lib.patch b/research/cv/ArtTrack/patch/0001-fix-lib.patch
new file mode 100644
index 0000000000000000000000000000000000000000..fe713cea0dd8ee4b6afb9016f709d5c8fc5ac69c
--- /dev/null
+++ b/research/cv/ArtTrack/patch/0001-fix-lib.patch
@@ -0,0 +1,59 @@
+From d4cef68e789363498e4035b7ca47e64f3ebef6b6 Mon Sep 17 00:00:00 2001
+From: yiguangzheng <78542727@qq.com>
+Date: Fri, 5 Nov 2021 12:55:06 +0800
+Subject: [PATCH 1/2] fix lib
+
+---
+ .../andres/graph/multicut-lifted/greedy-additive.hxx       | 7 +++++++
+ lib/multicut_cython/solve_nl_lmp.hxx                       | 3 +++
+ lib/nms_cython/include/nms_scoremap.hxx                    | 2 ++
+ 3 files changed, 12 insertions(+)
+
+diff --git a/lib/multicut_cython/include/andres/graph/multicut-lifted/greedy-additive.hxx b/lib/multicut_cython/include/andres/graph/multicut-lifted/greedy-additive.hxx
+index 03df92b..3efbda1 100755
+--- a/lib/multicut_cython/include/andres/graph/multicut-lifted/greedy-additive.hxx
++++ b/lib/multicut_cython/include/andres/graph/multicut-lifted/greedy-additive.hxx
+@@ -8,6 +8,13 @@
+ #include <algorithm>
+ #include <map>
+ #include <queue>
++/*
++GCC 11 started to explicitly require including <limits>, <memory>, <utility>, <thread> 
++according to https://www.gnu.org/software/gcc/gcc-11/porting_to.html#header-dep-changes
++
++https://stackoverflow.com/questions/4798936/numeric-limits-was-not-declared-in-this-scope-no-matching-function-for-call-t
++*/
++#include <limits>
+ 
+ #include "andres/partition.hxx"
+ 
+diff --git a/lib/multicut_cython/solve_nl_lmp.hxx b/lib/multicut_cython/solve_nl_lmp.hxx
+index 4113d89..2b18aaa 100644
+--- a/lib/multicut_cython/solve_nl_lmp.hxx
++++ b/lib/multicut_cython/solve_nl_lmp.hxx
+@@ -9,6 +9,9 @@
+ 
+ #include <iostream>
+ 
++#include <array>
++
++
+ using namespace std;
+ 
+ template <typename T> 
+diff --git a/lib/nms_cython/include/nms_scoremap.hxx b/lib/nms_cython/include/nms_scoremap.hxx
+index f0bdb67..1128728 100755
+--- a/lib/nms_cython/include/nms_scoremap.hxx
++++ b/lib/nms_cython/include/nms_scoremap.hxx
+@@ -8,6 +8,8 @@
+ 
+ #include <iostream>
+ 
++#include <array>
++
+ // namespace bp = boost::python;
+ 
+ using namespace std;
+-- 
+2.32.0.windows.1
+
diff --git a/research/cv/ArtTrack/patch/0002-pybind11.patch b/research/cv/ArtTrack/patch/0002-pybind11.patch
new file mode 100644
index 0000000000000000000000000000000000000000..77c08875757f650738959e1fc34fc2757e7fbd91
--- /dev/null
+++ b/research/cv/ArtTrack/patch/0002-pybind11.patch
@@ -0,0 +1,165 @@
+From dd3e9d7203a49b25c3acc199c314794dd998f48b Mon Sep 17 00:00:00 2001
+From: yiguangzheng <78542727@qq.com>
+Date: Sat, 6 Nov 2021 14:37:26 +0800
+Subject: [PATCH 2/2] pybind11
+
+---
+ lib/multicut_cython/multicut.cxx      | 45 +++++++++++++++++++++++++++
+ lib/multicut_cython/setup_pybind11.py | 13 ++++++++
+ lib/nms_cython/nms_grid.cxx           | 38 ++++++++++++++++++++++
+ lib/nms_cython/setup_pybind11.py      | 22 +++++++++++++
+ 4 files changed, 118 insertions(+)
+ create mode 100644 lib/multicut_cython/multicut.cxx
+ create mode 100644 lib/multicut_cython/setup_pybind11.py
+ create mode 100644 lib/nms_cython/nms_grid.cxx
+ create mode 100644 lib/nms_cython/setup_pybind11.py
+
+diff --git a/lib/multicut_cython/multicut.cxx b/lib/multicut_cython/multicut.cxx
+new file mode 100644
+index 0000000..03d52af
+--- /dev/null
++++ b/lib/multicut_cython/multicut.cxx
+@@ -0,0 +1,45 @@
++#include <pybind11/pybind11.h>
++#include <pybind11/numpy.h>
++#include <iostream>
++#include "solve_nl_lmp.hxx"
++
++namespace py = pybind11;
++
++py::array_t<uint64_t> solve_nl_lmp(py::array_t<double>& unary_array, 
++                    py::array_t<uint16_t>& pwidx_array,
++                    py::array_t<double>& pw_array,
++                    bool is_sparse_graph, 
++                    bool solver_type, 
++                    bool do_suppression, 
++                    bool logit_in_solver
++                    ) {
++    py::buffer_info unary_array_buf = unary_array.request();
++    py::buffer_info pwidx_array_buf = pwidx_array.request();
++    py::buffer_info pw_array_buf = pw_array.request();
++
++    if (unary_array_buf.ndim != 2 || pwidx_array_buf.ndim != 2 || pw_array_buf.ndim != 2)
++    {
++        throw std::runtime_error("numpy.ndarray dims must be 2!");
++    }
++     //申请空间
++    auto result = py::array_t<uint64_t>(unary_array_buf.shape[0] * 2);
++    result.resize(py::array::ShapeContainer({unary_array_buf.shape[0], 2}));
++    py::buffer_info result_buf = result.request();
++
++    // get the raw data pointers of the numpy.ndarray buffers
++    double* unary_array_ptr = static_cast<double*>(unary_array_buf.ptr);
++    uint16_t* pwidx_array_ptr = static_cast<uint16_t*>(pwidx_array_buf.ptr);
++    double* pw_array_ptr = static_cast<double*>(pw_array_buf.ptr);
++    uint64_t* result_ptr = static_cast<uint64_t*>(result_buf.ptr);
++
++    solve_nl_lmp_cpp(unary_array_ptr, unary_array_buf.shape[0], unary_array_buf.shape[1],
++                   pwidx_array_ptr, pwidx_array_buf.shape[0], pwidx_array_buf.shape[1],
++                   pw_array_ptr, pw_array_buf.shape[0], pw_array_buf.shape[1],
++                   is_sparse_graph, solver_type, do_suppression, logit_in_solver,
++                   result_ptr);
++
++    return result;
++}
++PYBIND11_MODULE(multicut, m) {
++    m.def("solve_nl_lmp", &solve_nl_lmp);
++}
+\ No newline at end of file
+diff --git a/lib/multicut_cython/setup_pybind11.py b/lib/multicut_cython/setup_pybind11.py
+new file mode 100644
+index 0000000..31bb337
+--- /dev/null
++++ b/lib/multicut_cython/setup_pybind11.py
+@@ -0,0 +1,13 @@
++from setuptools import setup, Extension
++import pybind11
++functions_module = Extension(
++    name='multicut',
++    sources=['multicut.cxx','src/nl-lmp.cxx'],
++    language="c++",
++    include_dirs=[pybind11.get_include(),'.', 'include', 'src'],
++    extra_compile_args=['-std=c++11','-O3','-fPIC', '-DHAVE_CPP11_INITIALIZER_LISTS'],
++    extra_link_args=['-std=c++11', '-L./']
++)
++
++
++setup(name = 'multicut', ext_modules=[functions_module])
+\ No newline at end of file
+diff --git a/lib/nms_cython/nms_grid.cxx b/lib/nms_cython/nms_grid.cxx
+new file mode 100644
+index 0000000..563cebd
+--- /dev/null
++++ b/lib/nms_cython/nms_grid.cxx
+@@ -0,0 +1,38 @@
++#include <pybind11/pybind11.h>
++#include <pybind11/numpy.h>
++#include <pybind11/stl.h>
++#include <iostream>
++#include "nms_scoremap.hxx"
++
++namespace py = pybind11;
++py::list nms_grid(py::array_t<float> &scoremap,
++                  py::array_t<uint8_t> &grid,
++                  double prob_thresh)
++{
++    py::buffer_info scoremap_buf = scoremap.request();
++    py::buffer_info grid_buf = grid.request();
++
++    if (scoremap_buf.ndim != 2 || grid_buf.ndim != 2)
++    {
++        throw std::runtime_error("numpy.ndarray dims must be 2!");
++    }
++    int W = scoremap_buf.shape[1];
++    int H = scoremap_buf.shape[0];
++
++    int grid_W = grid_buf.shape[1];
++    int grid_H = grid_buf.shape[0];
++
++    auto scoremap_ptr = static_cast<float *>(scoremap_buf.ptr);
++    auto grid_ptr = static_cast<uint8_t *>(grid_buf.ptr);
++
++    auto v = nms_grid_cpp(scoremap_ptr, H, W,
++                          grid_ptr, grid_H, grid_W,
++                          prob_thresh);
++    py::list result = py::cast(v);
++
++    return result;
++}
++PYBIND11_MODULE(nms_grid, m)
++{
++    m.def("nms_grid", &nms_grid);
++}
+\ No newline at end of file
+diff --git a/lib/nms_cython/setup_pybind11.py b/lib/nms_cython/setup_pybind11.py
+new file mode 100644
+index 0000000..9dd7bcb
+--- /dev/null
++++ b/lib/nms_cython/setup_pybind11.py
+@@ -0,0 +1,22 @@
++from setuptools import setup, Extension
++import pybind11
++functions_module = Extension(
++    name='nms_grid',
++    sources=['nms_grid.cxx'],
++    language="c++",
++    include_dirs=[pybind11.get_include(),'.', 'include'],
++    extra_compile_args=['-DILOUSESTL','-DIL_STD','-std=c++11','-O3','-fPIC', '-DHAVE_CPP11_INITIALIZER_LISTS'],
++    extra_link_args=['-std=c++11', '-L./']
++)
++
++# extensions = [
++#   Extension(
++#     'nms_grid', ['nms_grid.pyx'],
++#     language="c++",
++#     include_dirs=[np.get_include(), '.','include'],
++#     extra_compile_args=['-DILOUSESTL','-DIL_STD','-std=c++11','-O3'],
++#     extra_link_args=['-std=c++11']
++#   )
++# ]
++
++setup(name='nms_grid', ext_modules=[functions_module])
+\ No newline at end of file
+-- 
+2.32.0.windows.1
+
diff --git a/research/cv/ArtTrack/patch/0003-split-dataset.patch b/research/cv/ArtTrack/patch/0003-split-dataset.patch
new file mode 100644
index 0000000000000000000000000000000000000000..062f13fed2236cf44f75ea046a2b41f87f5790d4
--- /dev/null
+++ b/research/cv/ArtTrack/patch/0003-split-dataset.patch
@@ -0,0 +1,1177 @@
+From d72df7b178b87ccc4bf8b0726993cf5a2757050f Mon Sep 17 00:00:00 2001
+From: yiguangzheng <78542727@qq.com>
+Date: Mon, 29 Nov 2021 10:48:55 +0800
+Subject: [PATCH] split dataset
+
+---
+ matlab/mpii/split_dataset.m |  35 +++++++++++++++++++++++++++++++++++
+ matlab/mpii/test_index.mat  | Bin 0 -> 11578 bytes
+ matlab/mpii/train_index.mat | Bin 0 -> 45585 bytes
+ 3 files changed, 35 insertions(+)
+ create mode 100644 matlab/mpii/split_dataset.m
+ create mode 100644 matlab/mpii/test_index.mat
+ create mode 100644 matlab/mpii/train_index.mat
+
+diff --git a/matlab/mpii/split_dataset.m b/matlab/mpii/split_dataset.m
+new file mode 100644
+index 0000000..ed0d8ce
+--- /dev/null
++++ b/matlab/mpii/split_dataset.m
+@@ -0,0 +1,35 @@
++function split_dataset(dataset_path, test_ratio, train_dataset, test_dataset, save_train_index, save_test_index)
++    if (nargin <= 2)
++        train_dataset = '';
++        test_dataset = '';
++        save_train_index = '';
++        save_test_index = '';
++    end
++    load(dataset_path, 'dataset');
++    origin_dataset = dataset;
++    rp = randperm(length(dataset));
++    test_amount = floor(length(dataset) * test_ratio);
++    train_amount = length(dataset) - test_amount;
++    if (isempty(train_dataset))
++        train_index = rp(1:train_amount);
++    else
++        load(train_dataset,'train_index');
++    end
++    if (isempty(test_dataset))
++        test_index = rp((train_amount+1):length(dataset));
++    else
++        load(test_dataset,'test_index');
++    end
++    train_dataset = dataset(train_index);
++    test_dataset = dataset(test_index);
++    dataset = train_dataset;
++    save([dataset_path,'-train'], 'dataset');
++    dataset = test_dataset;
++    save([dataset_path,'-test'],'dataset')
++    if(~isempty(save_train_index))
++        save(save_train_index,'train_index')
++    end
++    if(~isempty(save_test_index))
++        save(save_test_index,'test_index')
++    end
++end
+\ No newline at end of file
+diff --git a/matlab/mpii/test_index.mat b/matlab/mpii/test_index.mat
+new file mode 100644
+index 0000000000000000000000000000000000000000..8d3c5d5096ef12b0270fb3965360251b8ad0c9ea
+GIT binary patch
+literal 11578
+zcma)BGhi+ZfU9lWwr$(yt8E*vdbMraw%vZUZQH#6hTAVwrVPoHL|#};PFRG9oq>r+
+zURaIZ+{)IBj!4PY$kp7z*^Y-uNmNr>frE{XNYvTP$koi0$ibe6NZ!GoNWsCKh=rSo
+znTdyum4}Iqh=qxTndtusp#KA~v^?T}ypawF$TOQR#X}7T;<Hs4*$jpX3ZzjinK*(@
+znIkwqe_TZjOe~eiG=suQqbZB62wR>tlYf(rHIvQi#kOg&R6cWCZqb~*Dx=T;^gGw3
+z)+OgN`}5~h7z7AN@IU?EQ@sA)uV04CUyHqf>EUm{+%MJ^yQQOKHRaOt#IJY$Z~jjk
+z<3v#rphGoY;ailEtfR{}|Ap%(*xq6*j4)N{{%!jw>}D>x5b?RNo2YlFZ^JqIS9fP1
+zI^9xCUR$}(!HnjLFS}3syUn8zUi(>!^Y1hMdj!efU0Q$KjM;jX(f*8$dKzqeQ_p>^
+zU^nbBvHWq1@@=rvsgkaV<CLZZra_(g5#tud+(l{Y3xw7HVFFmYD}v#R6bCZQJ=;0)
+zpf>ci4bX`CBl{Ec<;!kRAq<kbk7_yqQx%e7M;EjScQLzeuE}%zm^VY0))iV8t}wWW
+zql2-{jMB-#!AzrI<fGWDd|6R!B=kA(aFC1>kC19g5p0N(L?i$Fa#Gz#)i4&RH%HpE
+zMH(1pxzVJv9eZH}SA|d5iM(a57t>c#;>4E!f{{6FBjaIc9^JY8X)6QDGfVK-?(@Wp
+zp}M4TWTo45LgW3y#~kuX^3Cad6uj;RPdVSlMYZV|*%S-6fKAg0@20O7&o~u~$%%3k
+z%}8ZTeZq(|ToivtRF+F<TAF~yrz9q+UrZ)pQByZW0nKnG!Iq&hc!O9}b>T44@NDJ|
+zkDkE7146ctpGsPOTv$!NpejjXGgo*EV`udx<8NDu;bPq)U|fP0(MPf4)U9nXn_D&r
+zwrgP?^RDV$HX8qW1m5@F@NDc#mdEXoGq6UF)<JdX@<pH;vp90J$q>7*VbIy#YKQms
+z!XR1R0as_({K#l?lVCx@y=uNp31SgO42T_#Pa)Plmvvna`zEW~?TfcjC$26=es|e-
+z%u6RzlE9L5p;eZkU-4dP!l~Cx%*9vXUKe0}7w3uSCgKSt5armj%|OH2qy93Ysh;Y*
+z+zB8)sDxgV4U8*r`v?85Uy59R*&=csTs=_`bAYe(Z%JrQwM@JWGI8&9JORYLXvL}J
+z(vmn)WAY6@H}p-?8GL#$H3{QQ_K(#TCW#+&*tBKV-dSiK(AOaKX~v%SZ^gljv3))W
+zv1X-M3intP<OJtZ<ub-uWo(x}PWgiG$V1k*alBs2vR7MtFb_Gm4vAGe)b4^`^_9d}
+zc(*FjKmP`+R97lr0@mZY*f@_}hAALV*qi+I4o2nbQbODn3jAj2_r9pU%7D|Yr8Qic
+zc6XJqpqK=74VISZzzM-D-qyXc=;SlDeIsYcVP#LKHwz6d-(dgPJbeGSyfZ&LEE<E-
+z+I=?-dYLVI!kZBp10ee`E*bk{>N)5W?^xa|nW9b8gFgK{OnHX<285-aQ?}3Yj%x&w
+zFTXz#3kD=(0gipa@Zf%TIU3WU6e!S5WgJQcBXsCAbf6+*G6Z2Ejp6<J{g32nIG@0m
+zQ9<t$!*%-r+D#YC6do`A5}KE??7D}H4|(l?5n39~O@`wWh$JuX_aLAS4qV4v&mu=M
+z3%90ZM<jKyUuwTm7it%$DMw-hKddjTmR6TQdtf67<HRenSJW$FtmcxL^|Fo4bjiSU
+z4%O3x4K$EPpLMv+;Dv>|w~c2l+B+Ayt#-mUp$6tM_E)=|>QNi;%lNFM(pmG@j`VF)
+zgg6_lO(-B?I%t%h7DPv&rb+R@>c&8$$uG`gp9;`QhToD({b0POEQK7)V*3yh<$($y
+zZwT?!_P#OOuv<?42IMN6<SP*?U59m}2T_TfutAD&(hE*HH(zi&nqz6skRI5VnDBYE
+zRfaI6=Ga=Vr+xZnYj0zG?a|+E{UK-Gx;s&Ono~Ir=<K?0P=3U7IzGea`sB=BTU`)R
+zq``55$?xox(wx1Wh-|njEF~(GUB*X~Fg1kRcb1-B0aYg2JN=6)4w*928i+I<Ao)DE
+z9BVH5e_6BUY`}u4sxTNMxk9aF8dZk*(>1}_kuSjb7f<##RpT<$I-8o#{L6%b1_Z4D
+z&Ma6{@zPO)9iwyW-Q1-T4)f=<#QB+GV;(9fClG|A?=_xXwW<&^bqEHW<X%K!Ct&tU
+z@P(4PH^uiXuSFxRt?~!2NQZQ6ex*H`2aWB!Z1KvRV^u9X9_a_G=cWbCZIkKSwS?$<
+z@<ZJRYo^GOo#WiX=$RkBIjKLi&SZP0jg*vlhDDxmY5NWzAMlhtyJBy^z5UUjX}-%6
+zqyfR6LvPq!qfJC0FzRaG^c~g3WFBUODpWZ->(4T<(iqmXs>!iqshnzVYY&k}S-yJW
+zPKzk5Th~rIL@REj3;NiJvyX{_+8K8A5r$|MK+g#w_PX<|pc6e>Uv@_rFA*GMeH&+%
+z`cQ9h3;IFsrzuDGVa~Ot2&x+!CYcTNsts6QfUWa)smDHl5sFB5^1kse`hDwXvb-6l
+zG*`?mk@B!zT7tdmQ1B>oCi4-CWC928<hH*|lg%p+iaiZFh&U&8|J-2%k{MN~sOTr)
+zlf956x>UwkW~z23iY*m6WzT$Pg|$=q2;X(P+n!)oNe{(Z<z4fQQTb3ZaK38Nb#WaU
+zQ~6-7LTf5L&wF>~<6kW<M-db{m-s18gYVn{^qYmP((=EV46C!Z1WT-%0(C2+-qW5j
+znWB0LB+l6}X%m~>a#v{?n|D+}R13s(1MXIFBC%k<k*3Oq7pZP|%aeh&@{&`@JW+eH
+z{eYdUwaHF|(X<WAWe!MWzsmb!iY|gl!Oi;av$%ep_XQBr<07}eg`|1#Z)c(Lz0!ye
+zwM?tDGf>I<kw!A1e>pkl4*u}q-0_juzeC2yzhM(gF6CcNtgL>mEoiaME~*OldHKG0
+zbS@K!3T&%u+rcu}aB0p5`XoUMSzg4XGdfrCq@CE(JbD<MSF<z-tCDoNB-FES+aT4f
+z$5mkcs!TLg)1Q<%vIZD!hG7^1l%`>3675Mpgo)(#?y=#eoZK+O?wcQDF;R0i9&nA!
+zo5T88hKm&@oNxos#ED0#QUr;92K`93u}f2XQ%DPR?2`V})h!1r{mgX?t0lEndKca>
+zf6IrUWlL=4>^IjUKL1%em|DL-c}J>ylFP~B4cjECy{>>zysP`OY+QkE25&!vHS2MG
+zOI(0cWD0rP41?845f?H$D3s}ER|z7t>^H}1TL)pTfBKhnOei7b{$=PU>bU4i@jiI(
+zz*%L!>W%Fcbxd>u^loB{=Rx>DYD;f5BlnVAmz*&&Py0OGSu?udq%0HY8FFdvsThWs
+zuf#JdTY(^maKDiKX{acLdJGJ?%gpKJXUTtp^?H2eGgez%_9&^@(4_*X*iLcf>EpOr
+z>mMP!D8jQet-E$wxHH#l)XTY`iJ*8+*-s9uSmI<lVIWR<vh)hiATV|&!$RI)dus$-
+zhv8=QzOv$vG3vzeiw)3y(s0Wp4O7!@uPE9uElHmhFZv7#IEUd@I10Y2PQz0)h6}C;
+zLy~h9w3lK_T6Pfj5<fgW;zc!O>y5M3Asdn%HCa;iX3-1RaJd?>bmMTVXiuAC<p9w}
+zf@ZZR57(IE&0Iqo4c;mdKW_@Z+OqJ^YJC&NG>~Rp83s=L^6zDY0Yg=No1u0YqgxzQ
+z@tFnHJ-7wtJd%$V=Hi^41>As*;%Lw!P~_uCxe?ZL8hpA4E5x&#<KLZ{%ZxN1l(|{2
+zu{WI6$lg>#H7_te)bU!a^4yI0yZ(uC0Fd4vkoV1;v86U(_g+kUi~15xSw>C%axVKi
+z$>8rZl6JxD-!iJMH&~z^Pbz#I4X8Y11eKU_`y+FI(X`~AW(0`Y(JPPQh(l13O-6M=
+zS1exT5kqPL>)%oX%XebqspVFN3O<@!K+iCNaqs%O6iRDvSXv0f>yg{Zr9#vz5N-c%
+zqxV8tXJ6np{B><z<4#g6gW(_i{CF)$@x0{NqZi{ZH>>6zp~GS`zjErc5TVx;eYzT<
+zlZ6#nb`T10lvs)4=dVL@@!8es8iB2C)i&<tB3niKOu<Uy=09atJ~P)cUOh4#{EDiy
+z`y!#C;s^i~Ut!!ffBS9eOUvxg!PCM;T~p4-RqwXL{R4vygFBE%JcPoZRN6Q(6e8}~
+zi{8OOTl2)?DoeJ(sj1r~E$CopV|k+`%wBcgzVyYP@va^oO5HkdtX;!9^v|yT?lVSi
+z{D!>x-}sJLn+O*(Qd-q*Sh7VoRWw~GLcOS-@TWo&$1C2M+aHt2-a?~F16FX)U7y}#
+zVK3-?>v?mccHy~H$FqyqAx4?Xtk$`g!jpyAm8Z3?$=-<^oI~;)lEz^9{ZhC2l@~Ox
+z2tW3+wy?pg`@Xo^aV_-EMr6rrc-a;=?A_LGOE%Y-#gXQ+Ii|3xQc6p}+8*VnHOs>2
+z{l%&qZhL`*Mr<|2$-mK8R}9rW<FrPh!aCGa0(so(&LVS1vv7Zn^9}{{TEVrPo`0-!
+zetklTT+5G=k>;tKp(MDAAN}zw75aZGCs{Wqf8j|pa;>Re?MJ;T$W5qM;78|Aa6Dck
+z*fN80bFXhGf!WWK>p6W0v2`8uJV!8Q2*y7G$BE+0uWP6|Lq8E+A#*g&)3huvx(iz3
+zOiAqBkK1cO^U1-yp9>yf-wEp(Ne3jP7n>J!c8QEMVI`m(4Er~?=KkOqa`S)8)m2`|
+z1{`FS`))>`y%G44Qn^QDdMx3!{6gUd=iKOGR(PP`h{(I7W3MrvxAnq|&DbXKBs{mj
+z7hCuEbdz%O=5sH6F^sP$s@m3jjq}>HX~M-eW;f|7dN{mpjXibnhL+S0Mhb2KSO^a>
+zXwq=;chsw=Arb;I{L9^H<SuUDVc0}wvpvg;1|dayl_HDVL{<y6Gc51sA=GXjR|F=V
+zch*bemiaSfZj@TtYp$>mdVvIBYiKJ%R>{juIi;rYaUXO+7D}%hLB3gt<{U-dv-@fe
+zGw3-3GWL;)@^{T~)uo^$>>SdiI572=WU6tNG?EzEm~B>(vO)prukoA1SS3)8cmc#!
+zy?bOI3Tl7G9$F<cGyNUwKolbR9_vzkO}y8bYwf-EImXp$N{3fnspyyFFF{y)lMzcj
+z7r^-EeM4RmC>6UJ6eCd1=(T7^V*PXdIzf-RwkM~RA&;l6oU><&5-$*l4UGHqF_&S#
+z!w*okr)yEEocAf&)s6-3Mg!}^a1FMbTaCQFnSKN(i3JoagB+@UF_VNk`jRj=gO+Kq
+z*Xa30byC~xrR&tj4>L3W1#cEWh79(L&GM{a{G!jMkD_-A*TAyAityL?uSQIg*udLB
+z9}pF`TqliMYqyG?_0VPD`or-Y#<2I4X~;x27RD$KT<DpUG)v&&-)0Bdj`@@S05`E6
+z28QYpooy8muEj(a@xSibz?npU(B#PCw+r`Jed;|K7_Ln^yu$(JUiZVv#~MdVPApnY
+zAWW}H=;_Qk`+Lk<0#^8qSdKaWARgIYbbr(}wzUw#T^-HYlIkiX$5NbHomiEzbz4o-
+zVjdx~#TjkzF-T$jVz^+9Z@LkM6wYJAy4tX97n^lJ9iZL%&Sakfy{E|ZI>kz+`xYA-
+zo=1D(&G4ChyQqb|umq|aD230ia(+eGtm-vlY7)49Cv^yOlR?pAC;Nk4QVZr;^gB$=
+zgTK!llN;<hUXr|nLL{$=-wy%LVjp}aUl;s^F?_*&sBDzPin(cW&swt13)LB)Sagu<
+zu7qdH`+JH09rTP9u)p6bPaH?7Jm}gRR$9RtIMz9I_R7cP-rPft<KTC)7V4f6b%@Dx
+zlc{5(B-Q6sBM;h}mLi{(@1qJmxRInPid+Z*gFgHXD%>{nxEYZ83w`V}%N;j-DfUfn
+zqJrH5dM7pP|K)U}m=KhV`c?n6m>ZDTcarG%T?*cTw8e<s5i;Mv7}TYI*Qp!>ryc*%
+z+{Mh&T2ZnV*%RSQaEIZKN|=QUsyuEe?Jmu=J|$$k0jZn?u9<VphW5RF@Vapwc4c;4
+zr`yu4IkHOJNBWBa(?q?ZdbK^95)G@a>JkUetu7dLyBEMDfhQT64br#~DVkyDT>p(2
+zCMxGTEydVc&Uf2Lz$|(+`zEnCaR44Fj~y^C;Ea}G%q~$&vSWP#NROLr^cR29DqGpY
+zG@m4p2wR4$d+%m|${GEwF41nPe}Fo(T(qTDwKK<KQ^Sl|Vr1N1Zs1l{WR8emwvcCG
+zQT0S)k9oFv1-;i90C%Imqe-6hxAp<j&Yo_?)J@&T8OAZf^<WlT?iuJEY)ku7Kbsbt
+zPzS1a@ewVIh>sw}q==8HYRvrap~2Wr+NDL<t)`lmcO6VTLr?t+b*rXu<|^qWF5|!W
+zJ!jODz)Mcjl8p{J*5Kg>@(W{DjKH#GyC&%x1^R;BMf_TffSM6H^-oC-=Y6~v`X#3A
+zkqhgaMaTIeirP<I?W^r;Z&3`WW0qsi@HFpavpC;E?Mc%!zjYmvA<U4aIiUSha@rIW
+zbohCN$IJ}T{DG%kubQm?byhO=q=1WAl-X-qilSoz9z2y<A6u7NF{dSmtMr3U&Hnmz
+zbhtQ*01ihH?)exJd<2+;eUbt0J%K5jjo*Z>`xN0*;5n+EtIuk04x=W)!v^|-+KV}V
+ziaIaaa1j?UI_EM3lcuZ!0yBGDFHARE(Nc9lx5UesAsSW>1X-k__0IkN4f$a`_?^go
+z&^J*k$G7muneqNLEG;l~SE0(3WFKSk!^r{wpG{{~TqYIXHA$NVR^@{)nR==%+bVLl
+zMUG<%v{LjG^;4{yzp8|>n?J=497c`@cT_ahP99+;gRibsn*HhsaMVllVc)TkO7IzY
+zbF^>G#~DugWIlTh*(p-zYpYRQk}=k-5%Vqy$;#X1>VFO&-+j+!1tYH1i859*js`EM
+ziS_XBPVTVTQF)cq-li+>GFFK0ps;ikGYZj1r3Zb|eTNjyk^ltLU7}^(V#8RW<GT@`
+zhWUXK000~MJ^G4Z^P>ThS^ac|g^}!u4}MpqLPgbbs#WR>fw^1jym9oG7I|42#nzi@
+zM$2!V{{5JlodGYjEh=ir*AWbKLsDm=nV9fUk4`A?0I5%6#(j<*q{aM@b}!CH4o@dz
+zK8YgttiCzKvFyQc__R>Sjxlvt;OL_?eB=^pmB<kq8NojY(Q#$h)G=P(z(S$A&`7#d
+z9H_5gB`2TI48L?9^zIoi<b=o_N`<t)!k@&?8%0$~Y7zA<#gZdd>nqNMPWk3I!sv$d
+zyEk2RqYd`V#nrm+!jyQvn|(zkn6?-I8Yhf$VN*cmH2BUy<|dii0a6p<n;JW~_l{?e
+zkY4);m02VX06fa59VNO|y*0KaJt^EDZx_B5hb=_+TJwOjzbOXmzArv&M;h8-#cg8i
+ztcSx`G2GQLwc+q}9FTZDlXT5lLh9_OPsyLrp2yTBis}XWt?MNE$W0GppXbF<Oq16i
+z#^qW<v<CbH2o4&aYJmC>eY~p_Z`Y+kU=pRcjxFeI`kL)Q|I3d4=yBu=u$sG?&sWh!
+zAG>rvfv)V%NPAZMTN6Sg0Jy9ep@s9s{(d|d8s+Xy3(5<PUQ(R!7S4R7dxZpE7bND?
+zf<eJ6*-By>(YxT7`uruZYV=o8unOYTChk4zwy|=;;51x~WzB;TjA612+~~`Z7=jvz
+zp57jhcxYbEqldo+@!KefL@Oq{!l=PuTzxUGNy<aBTdZk&70%NQiQlngB*B{mzbPjS
+zt7<IoXg=#&`x+(^5HWgYkV1lalr`M*U06NqsVTpsd9)LjoG=v)Q`KQoP}+3wj-V(7
+zovw`u^F8HtY5#<YD6Na~M>H0D%5DE9^YN4}IEl8S@TgutBT(;RTt*i-j0G_-npvJ1
+zND5KxtXyDZxy6uo6qwC6<<$!r%^s>**`9b03UWXySVflw{yljU%kV->Ic@rsd0%cA
+zj>`OgR$CUZ;us=b%Qh(ttWM+lM5{r<@<A}RIUj-I_ZAh|gX+8m9mB@7FjxvW7^Ym6
+zQqRsn(ZCpJO@BaprCJRft!U3hrNceo^YOAGJwq{&l^h3)SS{>S5?`#3b*Cw%6Z~b!
+zC9Hw}n%6*KbjwuPG{t4r8PX2bgI6q((+K%xa2!|#3rYdqyvkM?3$<;k2&QDtL#w6j
+zI3QVkO_XfQc02Sc%j{GkLS^$76iNF&?=9(~Aq@LsJ@uBw`Xp{FZJ!fVx<FtPZi{Ms
+z9%Jd9VX)lV@20$hzEYQqTWm}_`pqU;Z#h%^CS>`Q*<MLj$36>o9A8wgFLZWB#}-Uz
+z*A!S|MiCYZdGpwxI(WN*&-l#`y#UW;mFp4@3_Hm#nK_^jTUq#x-cIu$<(_>@_zCr+
+zi5?@9IGc-##I>ulMfm7|(>!X)8Bbo)ObU<?vT=ubu6DW-Fwppp6~ob4^3y!CZeKKK
+zPC6PpU!--vcoI;U_*{_vxP2r&S-<-z=KJV0iiBHdDOkwEQF2!0z);!Y02Dg#Oyyx@
+zkqT};a=laA)~_Kc1rr!?sxeM`tLPPVjuFs$F1n2ou@ix@&`kS?!f_ZBb(_E^=03q!
+zXi=Pi0&sTcMlzu(z+Sy?6oG7n?cb+w5bjRwbw`6jQXYptU@zmcom$rWtsFu8v5QeU
+zRk?NCvpNSE4s6v4&j^%PS2J14+z42k!w2ti$C~?qWiQ>?M*C573@!}SseRp;mpbNf
+znrbx(%)~R7pT^Es!SPJs3C8dPuOv%UzGkBuF4HAVH9W~ZY_MV_Qe=#8N=Hi{Ut+Qw
+zp~MZ6v~ZCqMk%dLr+gv2i0s0)VSf(a12^Wf7kLug)A%dRBx|M|7y0TfR9S?iy-r}I
+zTg(FngTWnw6N~*uy69=aHHz9y<z^)4A>Gk>fn76}(sHifm$IAbOs5}k(*c_fCr=#H
+zCpF;!Be?o4DrchItktn~@*>>ulLV{58{v4j4Adn}osuW^4n$T>mL?+%hSm*x{ZOOC
+zvOj)}mK&vhx#i-@B)gBL>e*8sUdK!L(TSsk*Nn?7P^7-<a|X@9kL(1nTH+xs1SYu&
+zS$w$no&9OTRL>~BSa!f_NZUerOXsuPON%Q&MU1KJ&I#mPcP^Lgb0o!DJ`iRVJ9VI=
+z%M87+#)urW@6_j-p5|OrYUUXT?jkQ{^G1dzjO?$6p6O%P9q}pP67%G>VpdeRjOj;&
+z+0A8~vT>HZs?hrNt(V$O16iiSjmM6HaG_Q}zgq7Fc;@w9zvaLB&X^On@4OtaaZOGA
+zmJf3bs0v46ZOk%8rBOc>rZnDKE9=cw9ojQ&N2QdZ!NhpBeT^J?n5vw13uj({vrlkD
+zJaBMToz_3a@eSNDA-n2r8DkkmN0hFI#k~u!g;O_EcD>~j%t0IjE;K3>_|ZkD$Iv^T
+zh@j!7`KbOF;n1&orf7@ZCj<M}YGU<dm*~zI=2}#eUYd?mp9;ES+j*ar^NDxBscx^9
+zISUp1Y8Fn$(S9aWdM~~EA|4jck$B__Sm_$?5{qM6vH9h+Dhac6Td+?8c*0J*oMl-m
+z=0>BNd>Liz3m?zL`m?^JC;}Nuxtls{<GxxQ{H>7Pm3B%yedUg;WewiVnET24s3RM#
+z$di*EEU~(;XN0DG>Qd}~GimtI=>r$G`qh3{@#&v3fMXtbpY6xY{z_-yf49~4?R|b=
+zFB#(Y$uPKaZ_3nYV{6T#ie-IM-=}=s9uV`--l?i^wKvF^-8w%BQ8;rZ>eU~%Kz(h@
+z*?u2}NNpZ&B&FYUWAxiF6Vut$`HR2WuPx#r{@~zlKI=Z#%Jn2Oc<b>hqK{)u^+6iV
+z`;f=f`i;j_W`v4|8KzUecODg9IPj=7<2;yup{>QDm9AyClr8U=IzblaIx|`Y%TlrM
+z0-{=9wPsja*3*pgkGHRlH(p(&Lz3KR)lr)OB)NNX#ryDiAX9u}gS?k-DZyK~$@8;C
+zt!btPO_lA8UmFk80&Bk1@8vKwucNW49(S(~z2vt||K0>K%5{O<OpA(r%rk|3<8H*S
+z!3f}Yn4V}Q$S_t3D~A$4)P>Y|WtF~Gp1o%6?rTiv==TsL<}6_+B_t&qxQZPM$ZZ(>
+z!L!w37Rh2F){;!|=cNh4?}_X$YuuQdMD3aQ#!b}ur##hK^Z#Mv>w_pC?Mo)N8<!M@
+zKWOI$7=f%A9+1N*U?e|Lnle)C39EBWLYVsoV;>`^I%*^CpB+p!OM@iZAuDdQLtY#s
+zh<`04CCLu(wd#=>IED&!;V^f6ZbCZej#U=!pR5n(m7v=EFl6uOUl2m@QbANS-(?($
+z`WAQf&8Z9DkFbW-!;om2es4rx-A*ARReE)HXb08xCan=bsydc?bU5xO+*I_EHu_#6
+zwq|#ux&M18GNE#0a5FT|GY$I@BLE@8KxbCad@lJBF_?KPGm5v$he_VcZ_+V0K~wm{
+zIU1rM(xsApUyS=ejy@IgYBp6}<s7M#9p-?wIG^un_g#QMaAr4po(uhO?*vT*Nz*9*
+zc&|k}g^>ye;L%mA<K5usendFrFw}o#Ybf#eJIx#Kpnr_v5+97oSN<+EPAp~rBYZ@P
+z2o&o!Y;7?5h(o>~cTxS7f9-yH0;To9V&n4%_m2Fw6O=&+<K}4?j6lt6#CM+d1v7{i
+z2Oe+pj~!3#yZA@zUFbXBo%)Z<u-p6F19Cxv`mU&;G(8*w7{ss!drlb}qnP}RP8>?u
+z&;S15zu*JVLMUN_K!4&zfsZB7l;cH_QdEJDQNTLH#lg^kk-@n2pI4q&c~&~B*E^@|
+zc{N&hCRbWCF15xsRt#pnI~sMyD%}Wc4{LruQy&3Rh3&^Fb9iDqxs%TkvO80)==ZIW
+z^k1P{*adPq-AL#@czzik2q0iaTF8e!U#Qqrmg4g23Zo7bbx9U?>QDKHpl%t5NT0<_
+z5Ia;I``&c~j)A(AppqS@EE;UCI&O0lfr|>c!UnHadyQ1tO)Ja?*z|ur^cHyT^tLye
+zdrRAD_1O~-DtZ;vv;tb+L#Pniuna00vjC?6+a_29Sjm>2kJXVwJ6#gnIU{X*XP;-X
+z!*d>$YN1k#OtFwj#r-Jl?c%(~)Pgo;9yGwaz?^S3-FMGBh+Wg|=fh=CQg!%99w}5e
+zK|dpV+VSe4x|#IRiVhWw&Z%vnc?>0MCZ?949d5&U)T)~A-?y2s8Vj2PVbOH1(ixdP
+zih+2^OvwS*XhYba3A$?$uN{#slpbUfRav5a{|Ad3mk^#d9hb0JHmZm+E{APVcOiwp
+zYI>g=YorO0n(X+02o|EMhyHVUF-pN==D8<~`FZS&<Luzd!;(yy$mjMrN<&*w{es;_
+z%jKW0w8PS_IN@+zyInbSw+={rV(rkX!_BC;`_cZR$?=<eXHG1k{cl%2m}>z@=rwHD
+zl*PDvz6lMLufbl`^vtvu2bcn^=DvGWpc{h?VltWF@n-l8j9sMY6yWGfye0y+ilP~8
+zJT9R_#2@CU1H&_mAMg}`?<QCG-f$>)m>q!zJ5!Q)vIZJTIe$Iv{k0=O;s_Qg`L)tY
+z{Jhp#=dM=p7)RvV%8re+g0b%_-(g(l!*#zEGzPbR*Wag{yO$>p%DvXu^DlQeG(!BM
+z!54mT^VL{z=||=WCH|-gMf{5RUd@A9_bMahe!6jnJ&jMeyY}67P%J~c9Y#Ejw}U5D
+zAxKpDPXmd#u$fUmFZx_~<MGwv6wIx0Yo|Xz{_aD>V{K-JSVDUb-Gb{As&p(T?izGe
+zMzr#Cpxx7upgVte`-XqGI!Ka-_6|`iDhB%VvKzcrRUQ(jPc=qVUgY&6k_Q*7Oz5nz
+zj9Yog^U-OO;LIFBV*F~-wPEP*hiG;Ocu%+XcvNPF65hW&4zv-`I`;qic|2>N(6Ms>
+zcN=hwF<V%!C13Iu7w<b4>ZtC)Tbc}w-d>f8p(7rRMM++3z%X9&HD+%JJQC=c%bq_d
+zvuCO|dP~*ZDwE!KoV`(H+qH&?Vc<Md*<~oJ++Dl4WhPBshH$SU8ZgZ~E6mQS!@pU$
+zH>#mTF(kzGeG=+AsGU%A%-(~+lDI667DMu7-8gW}4dtBc@;^^n1H)(X26g-Rsk--g
+zo|KI6YZr$ZaUd+Ly@y*8fT?qb?Gmt~E-}oCQ}tgLKu9#IJapcvPN8`Dh?~v}axNFQ
+zGS-+y2|x8!4Lx)h>7p*vSG`jqy)@=q?!4g5c##ICV*YGG=~I2Tl|jV0(q!KLN&dcO
+zrUaU{Muc8P@1_#tb^$-0O?j5#k%@luuiUo-DYY7$ZlZgC0&Xlb`YdK4s+r=I@wB)z
+zOVrAmC<FSGQ5zA&6<Ud4#&NBEM+T^jJu3eFlLldsU9Eh*N;HBo_Fem{UJ54<-YPrP
+zvbr6Ffl$`K8=VuyFIxZy!jTsI@0=UsC%b~mj-<h4HgT_E+fW;hbN}{{w@64Jr^LT2
+zS6GZxlG!3dDms>t?=qIJB{5lkGdLgcA0J}9cW!H`8J8^Wt_$1ziH$BWGV6F{QbV<R
+zIu9R{qL<ibGQn77(%sBYOMnh-u;79~e4xJu^gvc#ztYd4<k@^A!W;a&P^XG(b}?s1
+zlI;EEGk_Vsk-MLH3k76yi~g3Hf9tu^G+Z;{U_o%-Z(;p{?_$i;U^1_>OHF+Ga7A+(
+zG1SJ)cG6|do}lUw{tc^5+_Cy6)u!i+H(ytf@tL?4=?rnFsXJc`ISWLV`wS{^7wV$)
+z(Uwt8?r!qN?m++MJUw?3(!cza<hR(l6Hkwu-lg@AnKf98(nqtIJh>Qq(OVP=uc-$9
+zc4y&@2uHEM!UU2|E<bDe{e_snTdMV(XE_bth2+!l_c+^2;!@|!^#~h^3Vxygnd{+T
+zA)Ts~fde|%x`Q^@%<vC2j1=`cYmZN-{ZN=!)gbl!J42ke?4P*a&a2%kNf7w1hT+2Q
+z*+OUnjL>hBo2hk8)|#s_7^z1p+O&Wjy-e77y}{?G=EBW4@=QUdCRqinA|(CrX<XXj
+z#<-Cj{XE`PQsixOI$TdZC7kzvj=j&gqKFe>-pDs7P+6V@Zfkn#VFCsNiJT|;R_FZ)
+zG(e^bY^qrXjT1b}4D%`K-BYFo1}Glvbc01!^OFp@z;T+hfDP&{%QuJpnBt8`DVF&L
+zIX;SLrrw9o$ZL5>E2}v>S2=!7Kd#@3!|roqJyD49(Uu5p()_2rzp3(Sj;7>7)-me6
+zOK?)c-2cA#W_1QJ;+H+c!%(v-bavV4>I+FmnA5#@7NxzB+Yun6o>^3}x4}(yavEBO
+z4!?Ffct&9iL?~^iwhX@7283TsdlA3pMk_s~J|~+hjerW-<OV9pP(CLagmad71z62*
+zRCkk;s*AoV)^f<4-l@0q^f@+k-%%v`T%vI<%PV_`3O|jXlXyMXsHNht*GBrOE_$C7
+zn-Hxk<xAFp%#s2q(MtcN`zLBxzrMe;1=;-2kj|ZOus1@%_q!T@KiPNL8p<rUaQ<kd
+zpq<~Rp?l9dmnX!!#gQ}OYENp32TU!@SqO2f>AIvaW&DV0xKEQ=u*zvU`1W~Gx{b?H
+zT(elrF<O|Hn`@i6W7|XfVPCTX<(JS7*%_(2kP2>aa@xW(obNFYFy}lPKLyf+?y5SH
+zf0J11dZ4~#rcnh9Zeb%Bod$Chf=s+dmf@5=QYMiCTX4N^Hp@&~pNbqxZ=dEW=97K$
+zTtHkjTQi+VY~qL~ym;=tc5fE<ozaqE>_Ge`46k#Q*j7gFyvY}Cy^$Sp%DW1L>nTqr
+zXEpKSaAGvZ{7&hS<IwRDF`vk5j_K+06_t!-ZiJMKQw+(DgJrdpYCzF$7k#4iPKivx
+z{yj~++3%nBbHfKGdi_>YUyd(PTBHqt=R;yqr_f!Y6k~918s=Pn6nO2F3f{XrV-`LU
+zF%EP8`+UmOZpS=uiX;`v2J1z{TzAgm!31AKNCao?(X1n`Db9tQ%pK_><_@W=JNhPy
+zL`YZk#9?MHlgu9cwK3~abJjhnuhdrRqFf2nO#BedG25POxfGpzTAaqhRVgeJq`KxF
+zUl*-s7Hb-Jr)F$8x;lx74+T_F12|83N&7-ZlMy;etTn=7P|oQQ>Ey<qXNn3SRtS#P
+z>|vQstA5BJex}Er#7Ov!dkor@9oGQScBp8H{FYDHfRmL9q&a&qU^oj`v^Ih+Zs<u!
+zv<|cJdUYD4(ab!i{|FzBWod_mNnou60uAgFf3Hsq;~U6!Y;rSuBd)3x$@RfIvNK$F
+z1~>fc8^$%?LBxNi2#NW}1C~*&PqCX)Ou9_Rm)Q2W#Mb#!XIb95#x~iBNq*BiuH;Dd
+z-D7=|>;0be4ELI(9z%b~GHRwN_UKpjE24?lf66({8N_lCV7gO_s@nEe9uj<TU7z&~
+z_Lp7^(co6-LlFYg#V*3p%Mf|6g!zW$t5Qjmj%-3_!;$mx+!5*?7zDxcxc;4Vsbd57
+z!zV>mSDsqw+yBep*49aRJ@&#$%{d}*1mITCccCYaRiAdS>f+)#X6ck8(xd1MoiN&A
+zo*9F|+eEL--DR!}O6_BFmE#`{iu)<?i<qyYAKZnTv#_nkBlv@CM*eK_X02;6qjk}t
+zXKIk?Cni<!9sS}W7~CUbMMstp@09#4KEd8EcZhSwbRR1?PC56h{?>a9x3FAKuZc?7
+zU+{2WZnz}NgXtcdqPUP9*Y@vrMX|OknFEI#;5}3W-p+q$+2&SbjZhH6R&7HDCG5Xy
+zE(ru{+AG@gbjuD2`YVNjoL%2%7Wao8pKyUPnYVvEq&0f;SoA^D&2fZsqAI&Ao;)yx
+zLd2;X%MkyMQDq2MU!PWbc(<6RmQ#qjQe#f~x_!Pjh+ssY5<EBtxooiu?8w&N8tEl|
+z59;bPWguF2x|_x$`$?fSi+%-h%B<VJKSTIp6_x>Nh88cUe6@@f?WjGWJJzDf-^gW@
+z`&+@W$LWylyt1DxyJQGjchT695R;7Br}eM;McqgzQ=^Ki0nnwy7Q&b%Ibn=Pbm}=a
+z=ENX#go%L0lj2@<D9g5dr7eg>;fJvlR4IsfqOW#0y&21QILBQ3*tN+lb?=o9)>i&Q
+zxntIIh`3cJO<g&?ejiOWpfY0s%wa9%$W8YBz-PA3%hFSv3+*)`Xdk<R+^&{y8ji98
+z#eeHmu%va|Wcr2I5oh4>N>4Y)LtS*>MxC9!;Ks?kM-@$E^MdlbmPSJK%Kh+D`5BIS
+k+Js!N8Uvx=(%&C}N06u8SY0#(`>1`Mi(g<nE1JCj0XfhF8vp<R
+
+literal 0
+HcmV?d00001
+
+diff --git a/matlab/mpii/train_index.mat b/matlab/mpii/train_index.mat
+new file mode 100644
+index 0000000000000000000000000000000000000000..c932535930abcfdeec4f3c10be4fb1ea86fae06a
+GIT binary patch
+literal 45585
+zcma%?Ly#s4w4}@ILYHmZwr$(CZQHi%FLv3sZQC~I&cwX^%T=y(b0W@{DJ!TVBPc|`
+zO2<GTE2u(iW@%$eL!e+|=wfE?WXnaMAgnGa$HqcKAnasn=wfO@U~k7oAZu?&AZPDJ
+zz{E+w$iT(I%*DV$z{J4BNbtWD(EkWnQWoJqANdam$Rmd)4pIRNV(mvWDx#<XOr(s-
+z6ov|0G_ka(fCe~`l)~V}#*H~^IU|>)*fMMV%ItReiiL5}jKx-Mvm|p`F3bOS_qE%-
+z_jB9%mB*bP1PCbS>o1VR?<{7(@AGeO)9+;8?_B%u>fJBK8b6}+hw-?A$Jy@_!7t-|
+zFE6QY+?#MNzc(2jPASZ$6+C&X>nYI7x_PL)rgL+uE%NZ;chJav1H1=`M`DA@uJNUl
+zCI;eUtwo?IuDMc6GRyFzK>eSC)AWtrMi8*N>{V2`mNylDw<-0a(Dp5QY`&@h27GZM
+zH`A|FwG3*j?wan^p0)asf8o4~$anm!Gs&__5=fU{r;fuxM6aBx@s=fS!lwCOiu2<T
+z#_ZcJ;q7Cpg_FY?#A$u6^~N#ugWj+!%Z)+oREFE}d*&X_A?1au8Om#nsNhDH#c2ot
+zo@5X79#NkIj-s4v26Q=F*`~{*@4I0<wU6es!mRWyc@J0j*dLIRr8doHV4?)z9RGLE
+zmF@N_U-WmCV@A>THEY}|M3K3KS0|dwut?R?Vp$2j89&)7_52EdLmlmT!NWdm+QDo&
+zDKbd&lv;MpsnI6sIqf~&D7}rGIVrg?b3Uj!zLEN(8y{Qwj_#W*n^-f6qtlt)>&2L6
+z2b*sbdEmEjhW6Bpt*Z7-PjuY9YiXWBy+B#|6_%?%B(awcs4u;OnNcnWg4a5m$or^z
+zjNg(Tw08CJxpozHG}J6hZsE%c-?&36!nsyqOy&0=*$PaB&7F<JlTtCH_o<COmkRzT
+zHjX~FU2|^28t59UjE!y)T(k$tyEZrKo(%pLB&6S=U-0~Gr6Ubx6@gs@DUfyh?_9m8
+zU^yxoqYOzJvbQG)t)r^2vabS=o%fNDeg7P_o~jSqmMVKMl?CY^e^BG4Y5<;z?Gipr
+zEOQSXPipXL?sSh5&V*;M&4IiyV$<$g@TI43B+U~ej01#p_U#0+dP_EW1UKYJfTf-G
+zp6g|D3jP3u$=VXB5?LjMRrm&VllYem!nO7<LtOqxJ;^(p*7sBkKWRSa9-qh?OA*{|
+zc})Tu`?`^s7k^5;nzuOCR!3Oda!G6BRfOUu74bx55B<rRgu|{_DecR_GV1>8b9QGI
+zS<^M4>41UB8tu3Ig;v#F7C0k(rn^j#{<4PBz53DGmnzo-sJoCe`%P_|EW1o{B8g=9
+zsGpiG8+bRnim<huy}t`(4e&?gZrrbNMANDBq5z)lJ1KsK^}(M6VCMKa{ds02vU~6#
+zjyi(kP^T#!tsey5BD`U302=B?6vMUf<hZQg5W^hjv0uOV%5E+?m$~Dt<df=488IEy
+z0)|G#m&O)ohtQ@NoRnUN-yc!RPfGSCWD3*^cPBsA4H9QP5*iai7;wPNQuCQyH#{#d
+zN3%2kIH^Y>kHHh0%;-*$>^>ncrxp}KXd8$HvH39{G63pO+RgR2g&Bxo;d-UUOR19q
+z7JZ>e9Ro{fv8i$KVEx-jSXxekFH$CTR&z8W?u__~Dwc#lq`T^)`{|Vn1$S%ZlsB^(
+zZhHSkSkEFW$5&c%aO}Wds?0;4{C7%jLp{X!9`|PC$>uWg025K)rgS6wVp8|k&|UCO
+z{VMx%d%^MR28;z3Wv1LKqqBy*!yiPR%+`>P9`95?8rogt{_xVzV1d<mioCFJLMNDu
+z81rUU9~L#m770Oa?H}C9c%q+y4HLJsBp<VLoo&gY(k{XybirS&;#2VUH}9ES&dEju
+z)aCUbD?P-&t}24w%$-EU%#<_X?_pu$;OB(So^F)`&k}nqBwW2#*pRweP84NsqZ1iI
+z9hW_JUg$lt8DbNidhLT;^mR?9otD6_dMu!vyjexAo@m~{gnW=@=MallT1%~(=UonN
+ziWVNjte<M2G1_EpSe`b!v~AjmcRzZ58qdWRyp{%6m_GRC&93r;Fr1aHJrK&?kS;Zc
+z9;*bF`9MmIbdIt((Y`i1i0GpR*RKz&eF}aWec5iAhZ0re=hr6<vtp(9%qyQ2??<CZ
+z)-6R=*bui|)Ive@Rqhod947!>T1dt4rcDCX1*TKOr@|@Wd+ia~yHEfm1AFR8XZ#to
+zed0<8SHxqL<Gw1Y*EQ8G@Hnos|0>`BGr~2kz)a;(xgA#-2d8R=CI3bCOpk*7k$ur_
+z-*v@sTvrx-jA{e*8nJ;=K`}%o8^^nbUzk7ln@RFoEljy$Yk*KM$;523KjNQpK4C^{
+zAi>2<%lB)&pQ#vVz%(wgL({y(TAvDaM31C<KH`4&%Z#8UCB}o@als6CVq`?)5&ogW
+z<VeRN0AA+ro2w@0o%M3zjI*t}k3FcpHI6CsQa4b>t@t;ec~FBy_mHeDSLEyq-gVbt
+zF0~R?z;t|~?ZY;V*iG6{4w-vpVRQUCjPt}A=^sr(A1q^>ee-9U6QSAVuwbVq!vs4c
+z#v~@d4U!>9EOtD39MM1tD{bSQ#i_OX-=9Y3c+KM_Znhw5R>JG0j>H3P4wHcZLRfTn
+z7c@)FmljU}fe-OJca4OFz7G&n1#RWi=e?&-Q?mh$_r1HCA_~H~1DdN+55oiV37=o5
+zj6SK_>vC7jqUT?%0aMCBblO{xa;iSl@b6xy^oyO0g1H(+&u#D(IyJ{raF%e^IKaJp
+zQg<_gNsvw5^*R&7Dwr=Q)e~MTgH>pg?R#vA`?ECH(V1i41Ph6)Za^w)Va-r@^$(Vv
+z(*&_SMqbOPAT^nhrvY(Fx<dRI|3jWTg$b32>c-lY`?|5kV<JO6Y)Dn5k<FeT=qU!i
+z$SxuJvWtd@ZJQC&Gv~Yeh!<3G-ptJ|YQ<tCknSNEQ3I*Vuwd13COW4Suj#utoa{Xo
+zZY`VwQ|?N(J63<CQ^3*N00SUeVS3zOhr*)Vt%nwm_I;h+F;|4&rg}wMhI^sBQT@Zn
+ziRjg4V>`|`rDp0Uavvnb0^f)1DUT+YF@$vBC>6QX!drtw4xq?C{$?W0mB(Ha!F#PU
+zDC#uYmIpo?f@N8C=|9<{c|)sFRCESGspq9e+=f<Z$`c$)XTcTvDUp@Qxt3|Awb+FG
+zE20R?JE~zvZ>yhy!qz}-el%lZ0IR5#h86<KQaRVlnjciD3)uwm32d(q4(Yv<YwLmh
+z)sK9T2*!^}40f<oReLcr)pwQT`A$MGv>c$7<d?34e%mKm<KExNDvO8}Cyj-=Mtczt
+zYG~e?Z(t{uFH3vOL$*ApU%5i8Aja8P@!WLg>)h%o>sZYzWkr;4zgrrQfgfhZ+8tA8
+zOYiO{1;IfqW5XQ##TQn`5r_I%X3j-@m3uyOnQ{%Mwe|IoN$PJP=d`cO^kT6c=0oo&
+zymMUCakrfXzJW_8B20f|WQmUmQAp&B?OU_CFAdF(wGXU962JGiAQ68Zfz|3;|9L#_
+zd3q2#+~q@+JlmQnk8-24ycA+3QyMRkJ*pLBByB@!J@tG2G;y#!Kg;GEdjdG5y`XgE
+znv>vG=(~?Yy8*m|(l3t-?@p_Q#0Tb{$1%3QvVCNyZ$0)ej5EX7nD8KhW2WE{1~QID
+z4<TPAlR#%2IJY*;!jo>w#FfWBm6-l}!vrv<PiKH{WY=)(u=m7NZBNXg<91A)5+_mJ
+z3)%&iwN0Tk#?wFob7E+ocv&fr;CJB2`NEHz9M(VhA_IzY2z=H!^{)GX=)?ejZg|)H
+z76+R_R7mac<vegKLF&VvtJvd3Wb0`MPRyB*8sk_yI81>K{mb96ub{OYPk|Rn-E$wk
+z-zjIMH`uPK9rN2&PN3BhSu?F*(TMveLM{md7?b2fE;~WBIVck7qd^yU9e#-!Ykg{&
+zac?ZfkczxpP!Gn1*SxJ357}@Wxfk(8Rr8B6OPWU=7h?~m6JhcSCnFz`gUKQX28&xq
+z*;g~P=-@gR`zoGD3)7@F!nNLGrKUP*z!$4`$w6b^mGcU{TN-1SQ2sl_-X?Jk7lInn
+zm!4}%^^w|KqKV9WW3}D&)eYv6*t2oJtiIZ~^LD_+AXEY>>P0gi6Qe{+6p;oEa;T22
+zexkh9SE*Ga9Nw-q4=4dh#f=|~m~Pb<b$&@L>>pD*OS!X=L1$>;_r?fz*mz7fqt#O%
+z`=d>eiC9DJP9`TA^*kO$?j=T|32DBO+asBk7%CjHd?IZjcG5J^78U<-<i*3v(*jg2
+zISW0N8MbzI1GJ>q4*nEb>p1i6%DGSC^L%T&to5GjhCoz?2SK>eIJDP(CvAOfzNSzj
+z#?YOPgwv-OGSRo>g|tL`%-FCBy96f@jTD~ot^HrNOX7a9M-OC0Im=9js(K_J*uQKh
+zWy#tkrI@k8=j0WyaA<2j#=mYYb~Oy$6`VW#u(-X7jTG;Yri$+tA&OpnEc2d0P=~b5
+zTejIMKWeaYXBRK2chuEBvc~%JB*Na1<T%z@LRXTgm8Vg;3?YtgzKQt8UP<Zc%fVBT
+zq)`TN-nw`(&F3BF{)WuHNX#PNEznx6#BNPr66@qs4MWu$^q!_#(qi<wMzixeC{^u-
+z{-s5ePLMD1E42ff!;h8ES&I-!)x@a*yzmGX{^Hxa-TY(L9->%8NhTYUEFnm@9va9y
+zTY`Hrw4WC7g<9>9x-(01pAU4^%jbG(f2AFWVH#@gVw6Gc!?ib0{>_*PKE^FZ6j7Mt
+z0-e64)mNG5KGN^_PtLuEibeVNz$?TtnKQL&ZZMkHORmyfpBpr}az;V)WI2QR77gdP
+zR(h8k^K5Q#h<<>I40Ryd4aWCzWzhKwiHy#i+SAuc#)8yUWROWJ)4POPNDL8W=iOIc
+zSlLzPKx9o`oL>aef$5cfA?6Z(6YJ@q*-!risXza~9)f&>-LbS2zi%B1Rb%|B&roZ|
+zbq?i%*tz6V>rGivk%q6M_!z;Nqls+s8bP_Txtd;I?n&M`>dGl4V~<D^fij~!;55*G
+z4>OW;ru3~?-*RX-%V~jhRp#1Oki#foyVPiw7JO#P?G(L0<GK1M6OnN-V|g=kLqo?8
+zgM1ffO;(uf4lRsj3^66~3V5O@qw8jI<uev|?9JMODV+y0WkHz0C*CZ62}RGKH~lKj
+zwSN!yQ29$KNOr6Xv8-x`XWSmG{>g$Xy#t+FTvUa4%^mc45l8mp#GuhGU^uy3xQ`oG
+zWPVPXiOHD21MFt3@5nEr_W|?758FcKFSDb5CZrBszm@nL`zA`tU39xl`^3NQwM!Z*
+z=RGeAmag16wCf<Z{BCKw)g6NzCa?+?(3GjUK<Ftk9pgtUmiX5Hnb@3iu!M09Ch0gB
+z7UrUi#4p4cGy*4G@m`buP4rHJF7pZRj+Hl-tKvnuKtSfAtM*iWQ?+nXhU6ZB{S}Hk
+z6$Jk7J+J%Zxx_6d+1P0if3JIbIRQ?ZeWrizW{v}wKY=Mo4^pi_+xW+(M##6XHoXmb
+z9!Cijbc!T|B-b)xhMXs$7PV2KtM`iHQbKNvPrkQ!sTdz`9;3Dq{*n!q=uN*e&cM5v
+zoY4XuT$@b)V2#iRrkOOcG&ZpRL%q5=%8h%iNu!6#-o=*t2|%dQ8Slx4$X*`m^=Es0
+zb=>aBut%G9yuz`lbFI(QMtbDm33=@9!;bf_R6~`*v9~oIJKxpV!2C71rE{s{-oh28
+ztk)vhL=%bldcK3&oH`p%&JJVPbCsD#5k~kl5mrGLa$I>spepu8FTjdEDz(J?d!&DI
+z$6UR%5sBYyflHowr81}hZ3X2aCJcd5@5T}nDR|D-jNf(r&etK@Nydi48mlIz4YfDp
+z5tE(TMN(_Nx#-yH^8HSA{dt6H8t;cwz<oNVDfvzpaO?c!M)9tLE678qs8AUW*Ut}>
+zvXW0cqaya5gA^^6wxJU~{-(~Fnz08h%p9w`qwWYBRBlnAPbj14r6nH7v<Y!?@P~qN
+z)^qyFS<W*Iin<z#Ao@+|<nto?BHoJbnZXa-s~GHr%#5eGL+540d1k-Bbgv;tZQ5nY
+zR!Q0i>%l$f-+TNA^Y~)ib7I|c@ut0kepOrM=AZj9X<g9$(waODMTcZR?U$m8Fz)c7
+zMg%)zrmX%}qh6J!qw*MgrcSC@LTv_hP!)!^Ds6PC>No$`L72}}^ZVLQIYmVa*BPtc
+z&_nQO0q2EuYYr;6SIuLQwX3t5A;|(h31i;|yXgRS6-(CA6{9L~k*og7n$$SahfqOW
+z61|SJ=Unebf54vCjo*>036F!mPg~MZr!=?Jg>sHi9YMU14dmB<$x<%-DCh<s-jz*=
+z_pPMtsw03C_Vs4(|8aD-rz8QG=hats`oZ@=&%RWb=f*XLHakI_C7JrX$~<&?fKG{i
+z%l`g>N|Ol|<Mu<vw_!-0qpE<wSj)F&{kKSVpjSlZ{LAt!aYeI9rm@pk|5QGiy|ap;
+zDipBv>gpEWB-gO<PrhNkdFt9l0=Ffo(NG*`Q5P^hA^ujf?q1LqC)&232QOtv%Jo*j
+zYDSmrJKJZF_7|<(i@SGt3T_+-4`px>m<x^fXlKqBDMwsfrEalXme1j~ev9FWi5!Ip
+zHJX8@g@?IjAKxnf)+4a4a{?cDmOuu)&SpkTPDMpAOzp+xzqFuQH~A8&lJpdmVG|rM
+z?>hXdKK~s6p0PU!;y3PMEnEWg(*YVGB>Q}V(VEQ8$3qa{;IGn>W~*Z-S`)a7s$al!
+zUK7mEj15GtaCu2^mraJJ?I&sy4v%}()5@|^kMGq)wNf3;wcIP=zg*T?;d?tZZhImh
+zmos&a-MFV~81I)b63>e4X#f5xA?y+jWgst6LFRd>Kt#_=g{jTj>#y*wDUMYhkI*8+
+z=&3Eu=TRN+LyO6y)e1nw{(WV*PgxBp=)hf=se`{Mjt2^AmWv>Nm0etGIb`K=X!D9Z
+zO9eOM3J`q@T&i-w-yHL`R}#5Ji}Kw|vYVAuq!RDb5Pq=+U}kGbViB-mI0xa|@$9-)
+z2qgB_|6rci96Ev~KcIbR|J0nR7ch2Q_nnTT!&^<uT4-V?VokIOMI|>jzDSE_l`4`R
+zmpV)Pdiy$`dPHoc4mJ{wy6T&30W@W<()ky57GBbij00m>gJvtci#Q1U6mc*4P8M8c
+z8S{_<3|B&3ZoB|4&boc@3s@;=NjWpY;<MeocOlAGLmT0Owi{P5>vKH|N(ydrO5M#w
+z<!?e3biVG33hN4A{qhi|_+M^jLrH2#F!FS8&u=Mi%bL0HQt7X`GxG5MCvjO|L$!~E
+zuEqhP7wNFX77@Itzf2d6bSb%w_({tOZ~MI^yz{5cbq3!Wi*K^5>6oHX&sb#6?WJAg
+zH98Y|e|WGhBfH9s3CLhN2@QT!9>91sSX%2deZ))5FdF2{b{31iPyej0zanbu#1uIu
+zip9#97~e$4Oc84Ux`RDbO<|!O1j*_cZS~=h4u*1`uMDS?D6u)<b^2ocb|sIO0->KR
+zk*MhE9<wa6+Sr#y;Y(lLbO*NCqBsx6wpC!y_2KmzwqBrnc77ljqP|VTsvm&H^BQ5S
+zncY9=jhwVw(0nTX1n4*6{Ch%(DeQHwIpk)gPaxA1e0J=&MAusl$B_CEl!Zhq|BjwI
+z@y&#v@ww0>TOS;(Y)#0j!Yy(?C;D=l+4@3-o6Sx3MTwg9V4@%PD6Y2K9jT6_wmm2_
+z-@^JK?Fk5-7%;uQ%erj)(O!4lhm`<*wXL5Y2D+)tRDz_@pvjb2a=zuBe&+lBlLkW_
+zvMs;S#9wcLS<bXA8DNjfYH@M@GE&h#*g*I13tXKWccX5B9@SUBAO1`8J|&QyPW~sr
+zHl6<>#~uT$9SsXGY4?pWYsrPQEu7K+R)iGqS($mGg%nbMip*&smIhr&t)sad*gDr)
+zWLi({xy!8<b*aitnwCUwna_;BN(iEP9So(S*=go|Sb|o3%MqmFr0Xi8VTVcKGQW#h
+zvV&|pOVv5Hn)PZT))f%NRra^e#VT7EM)B;=7KoFpm$0*8u!g)6hKg}l!#C^;XkJCh
+z>{c0>@Q5eO5Q1ZTgm30gm$HtUb-n}U-xSXQmb4xbHC3)+K=}u|wC^zkbfYllqq}^>
+zv5;PL+|eYg85r-nBfVPw&`gWHjI<Sh$#=+4jdBDtB+ZX74X6RESyRNiSMvdicNa@U
+z=Nc;#qgv_Tv5aDrZxii?hjqoCnv1_K3Rm*4uXf2efSkb}j0BVX*HD2z6-L282N<o1
+zk)V^nsW)sp#e1?dzU%6<IHaKcD3s(ssKMj8Y381@nd`Zk$I{1W)C``hZ7AxB+VUSc
+z%#C{xvpMs+7V%TgUYift+nI|@JI-&q#@M&Zy24CJdkd8v9`}0-s~3M9#qKxMoHm;~
+zHjrF76iQ%lLj(H^6uJUPyA?O$XQP@!{Eb&0fDdFV&Le=Kq+Ukn{!R<~XsO|`E%(G~
+zXf><9^jpv3%5iLY5px!lsAJIhS4_`=9_xolX9bkCzhweaJSyAepZ_#t@u)8^ysfxU
+zJq@KFuBq<m39#+H><r2&;WGObZ>g(fYRTvsaNOibV&h<wuaC8BfhWlP@jvgh(n|KF
+zYsytS+=B9?(=usWUn$d>%LU5^xo`3w8~;MJYHVg#xADryhsl>&rFaFbV!eYe<GN2<
+zZ)`OwHl;7G=-9L5Pu*Pu^`x_<=9%rr)<)(Y{4S)L{RZFBd3Dho`4VK7<V;?H3QC<g
+zo;j3SSDadkiS}=tpdJfUuYTLw@IYyHX6+4=FJ~v|HxEwKC(NdaANYo3|4S_Se96iZ
+ztg$k+%~A}2@G-|zYMWP{#t;8ff?F`jvdOivrB#gm+!i8pF+Cur4ifaCBi%{`7FIr!
+zaU(`_9BxZruv<DEc&qxQC_S?lNj2uF>IMGPY+}AlBwe(pK*tn6+%?vo@{52mtsmvM
+z#}6}qQYEW(Lb}M0nSBT*$a>aO1>E0a?nqSe9drDZm<okJjNzt9gN2(>W-;QuQzd4`
+zX?+5{J+$fAVvX1L$1f`<(?jRE++2xS^PlFLP8a1Dz~~Pb#a7^iqTI0_8GCaL;3?p}
+zI&V$QinSpc5Qqj=vqL4yVmav3pP>*vfrrGjcB08F%Owyh3;ZrOuL_1dcpx8$_zE<4
+zdSo^Pe-#+gXev+?o?rbt&%4x&!iCsS`DsU~`3vNYh*6%kP}=W5cF|PFLbMg0#r5{u
+zjQ@<=BAxPQ9qtrKM@@^rj?0-$U7)a7Zg>VfPgsMlpnida$kxskZXe8r4RPH;#bxP7
+z?xHl^T^0Md`g?Emgfc{HJK0H9tkTs`fUsFg+lD`QbW^?!Po=Emr6982=R?0zbSap?
+zog-lznkP+76l*0bI4dNeaNpMY0ii&Ltnsj#|LC39JTiWiZk$u~8KrnCgUUH=khRzR
+zOXwfA;c*o$xzVUIyZY`jFg4%bH?3;Q;+oaDWa?=OqN%>YJ@bWpVeL0z6j&-h6+b#k
+zl9M2{O(@P^mNeu^l*yD|ly8YdarsIANW=v6JO4E2HEIBb6mHZNKIwa@h5MC2B+WJ5
+zunKSDUIjCU%fDW5@T1wa&h=B12{-AJd@eWzg**EPdqRGf#(m8*wu04%WNa&i^>B0F
+z;q}n&YEJczLbP#2?6aB8pjH-dMX*uoD_4KH{A3X19+#h$WpR0<p1o;F8u`vu23coh
+z--yqSG*V(Bz7=15{I&7PC00NEm;wZlI6Lht3^)dMx?<5L^Qt)Q<|lDIyww#S^f#1$
+zOs$0|!8CK<u)Ip$?T2w@t-jf_`B+h26;ySq={5GhTukc`2DZ`j9p;yIWJb;L&q7PX
+zX@ML0B3*rsi`ri_*(&}7pdgo%zrt$k-F$22&kQ8P<a5aut4{6-9SXe9vS$eI*qX+H
+z(kP_kIOFs+o=Cn$<F4y29A6~g>W8aK^fjNE>5SfO1vBA0cKZTDeA%=g*7wr@#d*%{
+z1uf46qyyAz8X6R8(ZCjQycAQ8Q^%O7zEtkYN(t+ZZZdi&h~0915Tg-(iUqFIlxikl
+zWj<+U%;zqyvc~e@PIpsy`kv(O7MoCqHdk^ZpR2OQ0F6q00gYU&z7B9X<z^#V9yFs~
+zrDWHL3y2(Z`$Sw}!EfoO<`9HD=Gx{Df1>n2;|gUgZz@SvV@;%kYc5%*9C%Azs7;-Z
+zc?^OKpwzNhM}C92N~(Iuu?npml7~pZ@rS%Av-J73O9wA?cmY}=lYh_^Q2ZzgI?bMB
+z;lUX|L+{vP)bxAt1;xNOb)p%934_pmhW6ov50rqSYJdrubg-B=n2w4gr7b<H?6>(w
+z0k*`2(m~|D34{nTQbe2W#CN{pex<yuLtQ}uUUS{)lxmQOz7l}K*|E62XM(qEdZF~}
+zC?K0xd<Up2hQ`W2$R2rK{kEZw?S8bvF*hv-jBbz(oZeTfGCf?a0Tpz;7|%gt;r5jK
+z6ZY(MEXVzMJ?gpvYN<uEDU1eTZ$mo~x)$BiJkhHGn4vxinFiyJQqWN~L4%?<RX0SN
+z=)i_O<(@ga1p}#G4b;-F6Pp`~O_UMs9QzPsg!w0!1i25<KIp-L<50aXH!uCvTcw`a
+zoFJ=F@wLBeuNvGj))6Ye<woundZals*$R~Sug_k;t?iZ#lTzm*INO&7tP{toe=B~x
+zJtZ%+;zCY5()?CXDoRs!%g|t`rI}==3U1g;ihlhp{kiLZ`x9J;5$b9o_P!SR3kd}D
+z`}=1FQWyzp5Xhj##$*C5m9hdVVnWP>f&c`jsEA+!5p46Zw)+xbeS5i~uKTRE+IDf#
+zanf8@+x@D#dUJ7c@tLQ8k5i&r<qy}B4im78ZevGgtib4VtrfjhaZYQWQNBqsUYR|<
+zo-lHWW^Q2?w<ls^7<Qar=yrffi(tiW1|A@7nURFQ004tY%I}8o#cxE78{TO|5RUJw
+z-V3ZMR|mXHufflwevWS4da*tIWU1}f4dVoC_P{R0HgAKu)GgAKM=75~fxK~l;g?<l
+zIBVP*s+eNT#Punb?@h0T=Fs11GKsuBZV^xn?_1kxZI>7RIN4N|3Cdtse<Xl>tRi&=
+z<t(LTQLSjDU?l5%{`;b70|E$$JleIRtLIZ_0r~f>iO)LWTK-g7rw&9>j>OCx4sb&G
+zttw!TcE~6su0iC1>J{%SFBR~wunI1h=_lWSR=yM-#&1i5=mQ$ZhAkwAY5OQ@Sye~c
+zQkh|J|6(cma>XE4Wy^o(4YOmZ7%L{L`dZp|KypK268w(B`y_6E0-*N5R&YWI_TrmH
+z48U(4HpO|V_&9nXKD1ox>xW<4CiFOm8wSxD&nmX34jUl$OOk}7mvpWp3DdP(Q-hgF
+zcVL5d*Gr3Zu83D%kyp8BG%^1SiJ^7ZQjG-v4Yx7HX{v9y;(rF7C-`AoGpHYbU<1s`
+zu=-bjsq$ocAbzxOCUIEmOaB$l18dp8&T~6=svu=Y_LCJApOHqV6fc>`l3<O$L=KgC
+zIyb=7tjne7-C_iG*W0Jdr2O3+nGo3e&XgANXL?m!rKnx%vf`q~IK=KVVC_CGsmQEm
+z$`1bQaA3Y7+Jlvy(aGtO$HX+35H~c;(!3Acf4Q67KrARSm3eAAg8e|wQ~J<9^Erwh
+zFNK_V2_z&fI^ulwJT}{;8iVuEWwM+e=w~h_+*CoY4)!{#v>mF;Dp%uqz;z_>HsC_#
+zo^>ZT7`@v+8Iaoby1SELdIW-;Pm+A-Gs9u;S=)}q9{4^mC&zO>X`oYgWxAPn8!z|D
+z9M{?&I!OM|Jcz!V28eC(;mFL)e1P+Jd<9Z(ae-B)^25+8Jh?5W{{*u~r|ePeDfTDC
+zy*Ht)?>(JQv6ymjsZ>d_U9?h=A65k7(q+ky*~j5I^M<UtZOa`&47n0#k!Hz0`;7eE
+z@G&(v^MTT2tBHVeRS9~)mpE~&*l>!P<tokl2M(>~DJuTHIc32D;lDaILtAG9(LpU(
+z$j7J5S$3T<qN+%JWx)ksb>k`gTexhgzodH$qxLm&CMpbYG6gY)wCpnC#<#nu_d`ow
+zAe)CgYGpNOT)H@#5?+|sy@>9|+#cT<HgdIPEJ%qLa&ujE`<d1w)}tB`x@2`wKq5Vk
+z0d%J7Z{dvARn-Mv%joRMOACQPxw*RQFC-j{$`B)K|FCsa`E&u3*SP2^(nrTq`H=Ce
+z-z!_vQ`PNTkG47yB2VX~c9SCYR~W?4we|-tus2u7L0r@nN$h~H`@Lsh5bLm>wm;B$
+zH&~RRY7iVcM~v-(FY%qMuk@s#<04mRFS%_c&fMdkQp>SRJgok!ONq1Yjq@4<RjXq%
+zzG#6sO|N#_wQW;d1eb4XVbLX<T>;jjA`yE58`pWV*!r2XqiMasWW~I?!n9*+9A`p#
+z(|XQ&^BjmCYAyA3AOe*4m423*5kL6F=6I8wyzWF^+2RXoDYy-8pYW0MiCG)U1Qc@Y
+zdhe2@UlPpqH$Zy`-5ao|HNsFy>^9PYCM_{OD?_E+N57M(k!%z(daaa+0=#Z!1K4v>
+zM@n(!hL~64hV)>d__SIpP0^T01ysKmzH-y@Vo|O`bNe)y^i`KrzpAxnyixgUMk1tO
+z=f=h8N*fklvJZh_?9|X#%R!#axrAc|YuG3Ly~{(@?PYJuF`5Qd!f9QuH`qU|4!AGd
+zS2osrpVzp~gH-SRL04{R#af28%qiJ`<L=At+tU_<JQ*jhKnSCK`_1}^yP?82<^WOA
+zP{`X1BWBP~Sl^Uf5Tn}01|`;R4W&C~J6K#1PmBZv4s9!#Z11*5FZQ13+p@E<yMJwb
+z^Llnj8ExEXM}JaaR()-1AL7(t%?sT*TpMfo?duNeukawTE8c)YPt`g52URQh3CU30
+z|J8`I;^w?aroTBkvIxoNQ4xeQBN~ErbPP9ELwx<P!@7llYiC{Fb@Y6QkN5<OoC;DY
+z!jAH;T5QnZ<4_~?0bAsZUpSpGQ<bgxL2Cw4KgC;Y;__u_3~#G<VB1eF&d75F3f;IV
+z8Z$1&yQDTmp#j5dazGpvW_;xzVJ>yA|Gv2SE9~m+u_^-FL2H-mo#en$Hh%exsjwTK
+zC$Z`c_ZQtB_0@e$>I#lx!JB4HTWxS-APMNCP)qWyHn-@v=%~Khx5d`-Bl0-#8MrI|
+zp1yEm+oEEW?)X$aeq8p5j(Vg#7^>dh^wwIZn;kg@%M^$=V>6?MqaRwy)U&r4u5yCO
+zub=D79Xgy=I#N31bZXUkRk`S=3kug0<%#c=!8rU~*Vg_j+okvA{7TUCG_oqF>ZzFc
+z@bYWCk_*O|WzWCk-@4uHCA`GXoDIH0j=Z>{P5~Cx70tfM0kUfbuNihe3wXkE&qQy~
+zCysg}zxSu->G(=;+-2IPD~#Jum5FpKN<#pnCb|s*_sIEkE=eG;vw{<hf5m-_S){v)
+zS+xiBx1funs_Y`+_k<C0tWAH0YZK-227I`XLT7)u9cMrG7@En+{RBWW;B;g3L#uN5
+z3zG?8t#6I}=6t5_*MKuHFkq&RW(cp6D}43uh2kwF>53Lc=9!3t8fcpPqFu75)>|zB
+zb%v_O<?T-I3PoIv2;Q6KO;&${0?4wNV^Tma?;P;h<C|dbFos$NJfgHEAhtPU;yhoO
+zgHO;3tG9`#$j^bF@>`=X7aG{m*x5BSWSTLT7MoFq8?w7#gW8e746yo(v@#&kwWXDH
+zQX>NwXA-`Atli0RQn#L8WWMqCW^k{+Y#mGw%3DFS-<igI_Xt<qJ#2|cQqU>LIn{jm
+z+ko#>raem5Z@LgL{|ffW3LV0!{MK=1Q6JS3=Co6vIa*z~nsn*r<X`4bIr?JO%3o~<
+z6u)d=y-Iv~Do1%;_qoj|>BZH%L2aSGa1ZK+TGg-?vQBqISWv+bna}~UQ!Y~FZuSn!
+z5=f;h(xa!3k{yRn-c<T?EY!HNfDi~VtX7t9IX=Q}OHKH96Zc7Q(8-1#4vKp9I>2A{
+zyS%1qFS*(N1^ts@gN|O(<RwZf-K`(HCzaNu#dS$E3#PrDT4&QJ+C;nG+mhWN*@=zy
+zElcL;`VAWVxjS6Zb$uczvYk5TGzV%5cn9xo?}e~|kVvmx-VMhwPB#HdzCE+=O%Le@
+z(ZAppkP%#!^c1nP1Vos7vr3q$F7}bq%640Ox>}ZxvRiNL8UOFLkWiz}KSE8vlQPL>
+z>RBp;8gp&WY#nCBf*+z2iOhT}s(SjUmG6>^Oj=7TupEW+g`1RS$9(iWOB}1Air(6%
+zrWdh_pKgZ`C(FOWHVsx3B8u+-^^EW)*S5G&SlHKuYammCdP@z1C`tKJE01X;Ks%cJ
+z<TA<sGza1OWZM=~l+n2ta2^Ct9NMWj1d}=lu1v}8lgS47blJ)57#@>v7f{EwGd~Ah
+zP@XumcFGLa>(z)1UB7{BNb?2VWv5+mZmXQy7nI*a@8=DiLg?5SM6U5^@ya}lyEFS*
+z*KBjOhGKJLBO5@YlsOnHO5XMvDd&#+#2h+UPWH0epC_R6TAl=68+AM|oME~FbDem_
+zzJXRi{+Y|0rB><l`lVS`bUFx~R%J8zF(O_3!H@u%B~pfkTok@ph12FeU7#4Ku_L@>
+z{I0eYL8EofZxXAo^vvcZ%vRi*-EKWfBR>L*DnAk(8*M`9k2p|o>19ChF5Abp^~bt4
+zEoQk6BbRMCqjX|D5cKFM=1(QXqxk%5SYy#gp)1_gn36rl$d`8C<jVh2_AM-mYvUT;
+z$#8{a(WqUm!JbPWq!F=x$%5<D`IWWYPi!&_3iS&lUT<MyQEnA>D#F$%DET$kj`oGz
+zR=f$v*rJQBB1Lzsd-Id+)hcxzez42&f|ghPNmU2^3VN&g++T*CHu)6FgJbO8z2MGJ
+zSAKGthF)gPhKs%RBkz!50;EU7*g?YB*)A~-SPRQc$f!^|n<ZXr`ZUEqyfyU~@~zjJ
+z|C%E-1J6M@3JPm4GuRk&N=nXW{9acIYhiHbqDwfn0E;WfGg)bSa@^BF==ujR6*#S7
+zL&O>yd(^Y&Nt#83Kd_NeL&~-N9%hmYZPW2rhP+)?)em|Kmf8DLX)^|{VqXN}(>a;d
+zZLF$obYSE6nCwJh)NxfMLN~p<pzqz_{s#^13<?TbZdr4fZwQ>d<{kFl-A^mV$X_jg
+zxSa?2-I@dRz;y{kmGsOVzEn|#K^ga$);<UAH?^x>prkcrL-g_S+K2l<{773<arwC~
+zl_}vf>YV0NsF{&RD}lho9qqE0PFTaqFx)wgAqT?ls3|`EHUC9Q5#q3sKgD6|Gz7B+
+zgJs8`w=Q9o{)yic@1~-&ie$J$P*>Tzd9(Z(Sw7@$J-IOQU@**a&WoqVrPI*&lq*4!
+zA)%qXcG8i?sUzI#7y7$zP=dA&D(j=kHazB0o|OMt%`lv}YrWB$W{cmKTmxhO>fkqU
+zwk`kD|4hTC8%{e3Oda)JU87^&ISP->CL9f|HWEH%&BV51@xXSNe}7^PJ5#<9Gz48S
+z^QDcI2Ca#<-dFheA<Wf>>W9{AQgHa}@_xcQ!}F)G=^C6}mTb=W=BS=ltQn^<^UDak
+zfGumeyt$uyK{@FSk2AL?rEhjSadSG-JU<+c@+qzN!b@dqnHZR#nE)$2lch!(gzYg(
+zM@g`CMlO|V2gUb-3yFSl<^;iR8qmAva@b4W6KhKn;|>P9I&i{XC(Kj&W%SzIP3L+F
+zrwXyO>NL}+-y8$+O*hMgTjzLDS;Npr<t^|-#U;UvdB;B}I;^3*<c7>jOl%CDA`-vh
+zocJNeAJCxSQ)ei+f;MP5Ye<a4z<oOVrfH2%oRt}=A<9?#M;gB(plp;a`K{QH(Z?&9
+z_!-H4Y{P<dxR>%vTL=ELk&(Jtzn9@tRyn#W*|W=n^)UA$e*!X2)w4O_`#r(J^6?=E
+zy`Vjz&`=$a_C{>4Q%>Y77wc?`_)Pw5{awB%O1Q3#+_f)-mRd;$tGe&Lyn)neZhEXz
+zLP&C~TZ2cYrOt}eQe0Mdg=PNlOk>t*+DAS6NH0x&V>373S*ktQ5lUusc2-$XPN*^x
+z#wFJU7E!mJ$d@7QdU{y@@2`B<-_NnX%N`Ys7KD}=z4|*u^Cvse3W?}aIqO<-T$V)`
+zBSWjy+|U-~Q0}VFG{5S16!3a>YPj^?qOK+@K&BIhp_gBY)sK6$O*Y5EkE|ixoQFKC
+z!!|jhv(dKu^@Dj_q&O&;;@HZ;r5-fyy7UL$rR#a&iH%97A&U{pH5}$FRse9yI2G_x
+zV;*)m=183NYXgXUQSsJnqebJg7t)p(fjy*@UdWWPA4ToHNg$OIoL@Xo-)ZIiT4D6O
+zO5Q0S^&ipIx^rNc8WVAHu@^eUifGX2YhtgmW^gYwq(qPGX0YXuJmqF?7D_?V_^R9s
+zZA`tttOM~!B+_B72y~d4qBJPS^kxogCk?A~X%JLc)qZ{UG~ie*PCpLdPF70Lj`=<?
+z0T3T&yvSq*St*ANr<t#;LCzznD$;2MnxgnDo82;;z30BTbmoSi{}j5u1&_^ZR}7SP
+zn=C9%c4{<~&$pkn%sN;Csa+Fz@)O-jsUM+Y7qKrnV6{7{$gF5_YCP}_V7w%xHJkib
+z@e@V*WWDV3fw$wjNN`CDOI%oeBe}infC&>X^HKq5`*=Sz3Oqgo<HXlO`L^D%l!o}p
+zy-Zy-eulQT-3YpUyOHi_yuqYZTvDDNg#VsNdfPD_HGaDxDtH9)7rHCNj`ScqWh-j*
+zt*HBR(h$$)2bN916f6SWbt82V;0b&PbD3=}ImIu@;gnC77QB&<me(5>^>en%^frml
+z)L(W{4PQ-*=|2hM29{TNLS*T|1f{J<u#(#{q)J$-5SZf8zK7Q`U;4sb8Vz9z{yNmV
+zhP(X7CvBo%82B*kN`2>ov+_$a&c)Z|UY$2vHfZqM_P{^`OIdx7w*&}GTV7+R<y>XW
+z6G2%$@XeTD6rL~zZ-JJ;3usG>c67wW33qQ=?5@w1?Ok4}k?K#|FSw93-I;=&J#a@S
+zU~L{N|Ala~Hdl{*3;YBJP;SQjS&?(_3AS1GmE*M^e0y!lW5JK(&bpj1>KBd7cHwVC
+z{0ZMV(+SN=)LZ0$oFgZ*8farT1Wjcr5u6(1Pf^!zni(NjDaJ#i5vo&ZEbzJi7v?s*
+zQIW$}TdorXG0(eNKj;YYHsKNYggLLqn3*OdfALZFijh5C-T{Qu<)FCd`%Q-deGkc8
+z>Kh00G7~VB@<wepnJ?Ib;@GA#{%6Q*S5jAerdO5Q#lIn%rIoj!A(jP(b>WkFpEG&p
+z7W>i`ZwtH5Z`ds}Tq=lESB~W-KdOBsckU;P7f{oNK?>oI^AYFITqZQe?F+})i2vRK
+zBf&>b_{NWtBjhujr@<I~R>hs^KGC`9u#7z2qotYAT=8XIGucMl%hxr&vpPfd&gl~B
+zG0M%}RcY+hQ)Zq_77=^$8?B+hs+w{T*$^NRB$$c&R7-)_T)$~fjW)Dfcw)tO+I9y@
+zbOxw-^5avb5a*}BsSH4}d-sp|j(#qU68ihWIMt2=^(Zt>wLF~`O5w4ObTxKXv}5-Q
+zPMpjh{Q`BwfSwWo_D3T2M8Htgqw+(3{uUKkKsFGX$LBThO?i|uGNQhf#o(g)%sSBk
+z@riRB-#d$?E~hi+6?`xPf0EqgbewHY9LYR=X2yGF;@)3I{``D_=1I5Uo5&|y`4B%m
+zZ`X0r9obWer@$w~kiuFuDqVIw{q(};(%Jry;simJdR|%h*2`?jj`@?lr#^n!*F27S
+zhIPY$<t+EDt~UJH>6~Wl7DBTBmq<R@n!sJ_0NnN5c$(9!+e-mDsjXg-{IR^Y)L_4U
+zuEIlxQsy@eSEe^%7tTCO$W)w_bS8gtCoFv*OK~!izI~%;`);vNW!z`cnpn@9kY<1`
+z72XP_Jhj{C9ec6)X-Pxg!^Anz-|025LF>`qHgC%3x3+e8M}kbID-T@h2h6Grj)*?d
+zHnPPEP9iBp;iHL7;mSI#l3mztbS?#V*$Oe#lkl>~P(WJy_@y>dwp7*Pgal!*zJ(KZ
+zaAiu}T>ZIOF{m6)8H_atI}ljHcN^v;mJXwb!K>2A7)}*=h`cMf<Rh%&%a2#_;LkO}
+z<`U%96JR$6w*5)W_&X|WczyOx>%!yOP`=Vp=_e2;EC9kYZVSBO99`5{>hG(DF7<6g
+zuVVWY``53rXTe9Woa#O4`Z=4Zcj1-Yxuuic;KLUsZSsac5v?NpZDoHC+5y&y>IpP?
+z#H#sEczX(rf!=Y~Ro<;fT{%phn;V)xG=7qd;b>vFhrhv1bT3i8$mCMZHVZX|655(s
+zqkxz-MAK{n*;Y57m=X8BoN2d#eYqk_x3AeE-{xgUijn9r@W}lN36vTgaa{#j8c`~}
+zlh^`jCUzdVM4IU6@?&$FLK8z5ngcgJh}n|9nP0K)b^$1YVLpv1B@P;)pWAx^QQ;K0
+zZnlizgirQhJjH+Sn!U-K@qen8!A2COpkfvNpctz6A>Y7X6jk*Pf|!kqpkI(zW4QY~
+zaryd^bw`;!;gL}{>ul5;q~!rmU_0Gx0SXzhE;|!U=3of*<WC7+$WeS%jlB*eo~YN_
+zZ9VDvt*Xmsc+*}oY*IZ+IPzZY<UucSz!A_}d@IE?6!v1q@pEITz?M_JIN@&~7uE|L
+zUg4Ht_S8iTtg-caUzne+D>ql6FAPk%Hs;zviw`{-g=&^(h?l+(=8gcWbv98A<9nx@
+zW%9`K``{>eu`Fa4MbUtnq(`t7oifN3y?eoQM9zQ35uJ@%RSY&H$4EXkC#B$^Z)M~b
+zoJE4nUaEUGDJC&{)wn?r&DFMVdjx4)(Q1qIk0tdMA1>$4RR!l^?Q-|Iki}*P+CWZU
+z(KooMDhC3V0vr>+Ev8w$+>Yb+R=HY7w_{B${VNw?3^Bmk+d!An%Tr51LHt3#fK4J{
+zOy5pChTY|OcDG<*CJx)KuKd$*8NCcE*(3dC9l?*yq=?7}N^oOa@y1yxZ`7%&j5u2h
+zWZ?MBiZ)XPEoWrtx#Ah|H<Rn8l~>Z(wOX(SKNpG2PvINaK|3wrQ;90bWD{-N@sOr(
+z2Gc0t&acZC0)xNLmR{{=VzvP09iJ|o?Kb`neV3#k>uaqIRgf;%tfP!~5vpac#D9Nk
+zS6Yi3*PPGuk(sY{z5GaWlk8Ui?r9GWvZ52<Z!BHIr~vz0JSZHqokM!sy|VV<of{Ww
+z+zTL<#>mWGt2OAGuB_RIwt%d7>opfm03P5W$urefX2bEF)AzKDCKIZ>dY47P4!N9w
+z4sp1blaX=zb_U#!l>Ox1Cz7B={d&e0=yLQr%U7|dD`~0FKkY1g<7SX&Cj9B|661(!
+z)wXN&{!&_0pJ4&k@b|)3Cg`>5D)~sL_BDa2oqIck5=*?JPMdTrd&fbjg4&t)7_Pnz
+zx-VSuxEE`8vh#3@jwG_;0HXsB&DyTH9nahCU$b2v)LY~5s&BRI+|>3KryG02sqeel
+z_=w`!hZ)KgmMa_#J(exE_9I+DU7x|`poe9>wGJZJ7ipom<w2Jge~xzE4qcKGMYjcr
+z{_*zPXX(4y!KXm`Ph3=E<8+GnrGBb|vcU{rTl@e#gtK=($PVDPq<+9kBYItX=rj#<
+zNGK>T#(0)|2Ue7>>$yFBi5|!JSAVAP#NWxSRo)jIh(ATAL!Qvxl|Q)r89CXz3z|lq
+z#!P3N3|L}Em*cN;d;)xwZt3&T6NK9boOm@2`U6|;5KRPTI4%UX?~Y#Fln;&j4CDc0
+z8@x&{6Bwa~HoswVfew}gJTX|d+uD~-DD2NI<?s5J=niG$0u|?H%LAHw9V4d?DEpU7
+z`?bQn>c}{HfgpN4jaO1mK)>88^mmax(G#2}m;=BE+!wG1Q5(1oZW9cHA5uxoaoTkM
+zu;Dp=CiP|@OAIxuDi^qvR{ksgo%VOrWv{-k@%@FRW~8L&q*Ierp@(vnUF?9v<B~w;
+z#>)rm0Y8U}CCKiO282~qNzfNFobTKrHkMY}uLMGgKjr@`O*8LR+N`h*`kD2qO9}oV
+zno>kuH2x}$WI!C_#nw0aDK^3Td(}JUGwYR3#PI>5vAnal1`w$p?3!ktt(R&P@7nIC
+z#*vh)uTuyG@b)C9Q8Uoq+pe-q*B*3KY+%P+MOj3tpB7LX2er%u2>$gkqBqPsOgj=)
+z;$ad<iXT_<fzgn8sQ&TNxR5#Aj^ZV965Z89xs{)8QcC;sJ@Db(P3b$l$h$n%y!8Yx
+z14F=9r1r15(7zS$wfB;qv45w9706q6$mfBZSl+V3JHnxZWQov*PlQ)~^lFiAt<ou*
+zc}lYpqplgke53Ow%Rnbv>?v+Vt`d!|y0L1jc5Nr;x^4c0MsF5xzYlzeNj`bt4TpOc
+zY5ENr%0!*{^D>#DYa?u6s?M1YimRarG7mhm7q`Mw6gxk2jf2X)$k%{N@PUN04kRug
+z+tt!z{uAA)K13ipPLA|y$rO4%B6`(1tyo!Zq^Cl$qrqBj+jI!w<bCrp;t4((TCSy1
+z0EC6j0sC3o@V80pAEBdkBXTtyv{B=|+<ajz$Pj&*WOd`zat&$*5G=_yHJ)vI5hcNz
+z*J(ynVE8|J&aDor(8Q;iXPMWck8~1ZE|z{OZ1pIFpJZ`6R8NhB<iKrNE9ma*wI%Jk
+z6kjsl#wcLju@a2Rj3_P}UD&KaL_~;syd!FCzbDQIyM(%a!WrBRK0+M}To<k<VEdB9
+z06^?iKYZ<H7(?d6ibL-j=(T{hFf*47Z@O!Ea}E))ssPd*`$5T!Xk`_u`}TTjGHp|b
+z=9h{X2EXn%w^ttc?N`<qL7lXdcid;#vUav^VmWL2{U+6G6+wyT9%`(ABF=EUN5-2P
+z9fF9`5SQBV5o2Ae#j-!AJKFQH9dYvk8mn}sJQCFu+ZDu2@AM!R1aMxeC`gqPBv?KZ
+z`mtwLok2>B@ws--Negoaf5!{;N#q{urGz?bqhvh>+MBzN|GV<{Q%lOJwD*V~@c4D8
+z`x9`%!%p%+5N8_M{>=m23Vwj`B|Y@V={Bv+y-)4T$misbRW~C3h|EE+v3K_Da$b!M
+z{dfA<{gW)mQuT(QmyrkibC;wY)#b;~_nfD68;BE3M}89*OHgEm-}U4imI69s(3RAl
+z!mr?n+@O`K@V1;2{<iS>N6TOt3?*1Y+m-4%S)lAk_LDZ7`o63g)YMyjQTm4Tmjy;5
+zJB^GfKbU|{9auTmT&8J}&y~)C#4gHj1w{S5+_}NI4)yAbSj@zX>+8N=gCK57)Jaq^
+z5Mno$M;%A@ndpzgUNa8@!T@fGLuxCq)U3yCLC*S&-fDjq@boL{m6({C9qR;x(7SwQ
+zxOzrAE?QcTaeTZy0|B%-lGU*Iy2$>&jn<Lh2UO7-yfQOk6TM}dBe2_F;Qt5Rm;XO>
+zFZ;ivcx5n1VUaK!ohwqRqJjt+aK#hWnT`oJ^=%8iORuUb>v(IO&)R+0<z{zX_v?-k
+zRdrq6&Ys(M<XPl9;vdaRl~swwpO~hGo&Qd&FL@ZFLU{syv^-eO-VG#SYVccqmS*U)
+zZIw3O8bBl2a>|M-FGGcBa7F0Y&!5~M+aA_9xDLplvq|TXt-G7kNC)OCs=ec+aVzmA
+z<%#<?-&2`t2}$JEyr?K@9Ou7oTaxnk7OL=<0gTuTIbXn7<!~V}l<PC*t~w&Lx6IAS
+z#p+H|q**3bAuw0TYt;zFarLfB*qPwl{{clny1#4JJFEct6?>faSJR4WM7^C^%g@ix
+zCy%YGvV-Z<vMu4d#6tr>bf{*X;E83M?G*94Vz>DW4r#exIlJ|=(pdPZG$?%3v=e$N
+zYZAWS+?QD&nFm5+KR{ECbGaKknj71DHx?WX@tb%0@!1n`QSOg?5PC$@4AGyg=Y%A1
+zn7J~$vwIC&2&z$z4Cf`Yh@;Yj%rMH3qSFo)1zzp0ovP@RO}GCQ80j|`gqt^K#Tnmr
+z%n9r+HfFyqdZk<nI&12XEeTy~rqfAQpRvSO+;fbrs?b!oiRwDJjsFGDlRjm4vWH^O
+z%B$%mA|5b|xuu$<E-!vncHU8>AJzIqaUyLhCt|9kPX*g(D*F6xj`~jaXTW>SpVqGu
+zl;+PZm-Qr}uYxv<M?0$fY|~cVaM#tqj{5&8g!GgO)uJ{&L%lZccgY%;Icd0{cUItg
+zO_!v+yjz*!nsOYi4{GR$VYFGLP~H^OfVjc;YmmXIbDz;41T=R!Ft1C4Az4<PX`K6p
+zN*w-z{J$D0WQX|^<#sdP)qosTaW7d*SDBxuMj<B<rq*>fzpEYS{JQCoI<7ZDflMdz
+z8wrPW9RrH85(5AQ?p!Hb+!wp7m>b{I#o_K`2=(i9r)%~wo;ZJSKXLrqB*}{A4^Zj4
+zFMT@*6QWleNYu@)ACaiqn)GAOks>upP!WS?SRL{!@I|y%pUySBY@z0E<BIN(>3`V2
+zv(L9ZWB)1{iQCQFR(P@S7wY%9Q1s!Pdt9jZY5D*%3|-E>!@JU&*To{HjVys5{*pdb
+z_GicNyuWoJ+t9{l{)d)K;V2r8Rh2ipP0ua_H_-6RJ}JVxo@=VQW2P2rFc(?9lpMAx
+z+)TbnA_}INlvI>}Qf>xskKeb?fd6QpgTv&X<HI#8yE1$vad2l#LSz4x%hl?sWW+!A
+zFUsE%pv75yzKv8L=ln`JCoysxY4eSqtaaA^V86wDmY=oVr3zWIDvQMH)_CjjN4iFL
+zUTzjJ3k;{L%$AwbCCnxD7XTOV&9!?V?m%tiT5>qKCHIxf=N{!s@X*zv@F9I1aL&?>
+zE3j<#z6F*TIYNAp)}<>R#th_F<nRR9K^ZtKe+k)NlmK1S^zo->eeT`s-p0CG7;yxm
+z5%6eVZ{u<P1XI}dkNdmso8}$054uXJdt!=mfY=W?XiE~u0A8S%MYvVZ0He#Eo5#AO
+zP_uDgYfE@mvQ8ce|5CQLx;($AF{n`lU$=eW9|tSjZgx*ccjq6*K;@G<^W1Ra+~&!l
+zC&rV#SZ$@=>iE~WR;Gyv5r4I6^6sH4d2?MbZWsPm;RMgPe0ff?V6u^E_c~S<La?X3
+zN6Q?5C;ayM0YPuq@q(f1^EQxE)ctv>L3XU}BuA1xfi*oiINgihM4my&^PaZn8V(h}
+zivD8)Tdol{0LBRN=|6d|!T-l0xbHN{5PXqIbwqQx<}AzI>8#q)E2;iHU08Ro9c?UE
+z4w7wByntnQPBKmRUw1I%-71o%i^~8thyL{~kG!q2iazza(<a4a$5`+moS`TT<OT(n
+zvPXZ;K8f5N|3kjTeMhyfI;L&GJryCjv%MpXS6wR+JsdD$IBuqXc*tEowxb2HEp)SC
+zd)pZnuDdar?Vil*D3}m<=^{!TMJDP<slIt|ORr#Z>@jnLf0}e=QzJ@&*L17On-jX!
+zmI4awL&xWWN#;aTH+N?_m3+rZrFnwa$o95yu*sOMI*&gM&!!Y;_T`h>=k%OU>C<0|
+zJ`mpnUqDOoZZ;@<FhEWeI2p1k?`&B<A{NfKU6my$e-p?RO@cS+KE%IRjP`RJsN`07
+zCQ&00+7E}<BwFo%SSPsORLJUvbART4mvgamob`b1uabf;eb!$&e3O|z+Ika=anCe!
+zDtA^t^51taVQtH|5RuZSmSs#tp51*W>m~6lL|6SSGE52BS;{{HVcp;270F$;pKCAh
+zDY;7wCDHi?AWzWok5DUFmRO}bt)Jdh=zG<%i@nQroByC$7sFHzD#vGud0%D*97AY@
+zz!%mP!2QG+TJNN4MnJ~<uBI6A8qjluyY;YL5P#&nubJ6vKy6BYQ!t!ANAjFkKp+JK
+z;m-m)n%PZblBv=x__bb*5*gT*(@44zyJ~^79y0xv#t2{L-6+@&*dLyi+Mb5UGR@Db
+zmxdm@GtLVTaAY##Q|p}ORRv@sr+lyWCz-P_6Scs1lHoeI|58qn>}DZ-PwDB%JH>Xs
+znRqL2f7>s92);4)K71t5N8H%67Jw?6UR)>b!cH}x4vun={7Wn8#YI_rdt^03;kmH&
+z_@yv%H?uN8zL-2(B@|3!P4dZ%cR*pZ4{=xjw@S<Fbzg1)hxU?5C^X(joRjx!`jT;2
+zTbJEsV|N@Yc#>)@L?B?{4W$Xpl$xh*Aibq01G-+LFJ}}3z5Nn!Tx8#nl2lM4GfP00
+za_`NmMUdty8}FTLPy?w4o;9xgl()a-J@H2AXNg?sCc^%LaS2`6!xsb5uuU!PUGbcx
+z=XKs9-A0lOB*@g{B`l}%0$KxHAiS9ML{{$mDdOWJP<^2L;Yk*(Cm#ASJc1+4`x2Uh
+zoFVp6OBsXgWWpU`c6NPUtUNb_aLXl&Y38=3vN(2=b`MENIp1T!9>mNoD1~op{fo35
+zJ2SNkwHIYh76H*6$%>cQ^Za)CKmx6f^ZpB*Zo3=Eb+0Ir_ToU-1@*F5<bq5Z`E?N?
+zZe~x280y=hACZ2~tw4};&alt5)ADD*mUaepu-;z%K-`vD@4DOWuR<r6<T5EVkl9^d
+z|09l2xT}5vEF9Tc{3zHNc|s-OExGQ-Xkxtfmh4&aW@UcDjF9Brck(KQVh8V<^FU5c
+z@FW^x)Jwwd*;RtdmCAROD{%9?)aV+=7ulyO_TmngK4cvi<OkPheUhrcu5N-ewXkJv
+zKSoiS59;s3m{ONOEmX_b(2{^oA&cD%(6qj_e`uI)*ybLA`GOdbPRxGWd!}(nYgX-t
+znq?Ga-Wbdr&F|dSx{>O8@xG2e=se=|mW;L#xhj(O>ME9y70`a1uUct8SOhaa#}5I`
+z>dkLwD@I70th#POtFhyw>oNALutz>VXwSWh_O#4STx#~I8<|I4=fMvp@AR+4vvoDb
+zJ8F@7Bjs>vd)}c631(2<7uxR%t|x9OUJ7u)cSSQ<BMtkRW65_=SE|n@7I!^G4A!(&
+zeqBa#@yq;VEQ{E6Ni9sjOm<NO2od^QlG<8I`y=s}NrW5D5oEvTd30h#LB%-7W>F%A
+zjQ^=|7+$l>;{3ce_IE9Noci1<w_m5ApGp5sd9S%?f*1CoLoH>71Bg9h4gC`W3~?Vc
+zP;3{yL5((vg)*EKHr)Svz*)N<{;>5-4IDazG?2dM8N=(dZ)h(?DDziYe5g^DUhG-L
+zbkEz48sr*jP4gDVg4!Xai(K#F5cp1S#55V)kUbQvu6-MBQ%6DL6HS1IiuHnRxM`Xa
+z1-+3`xDIt8^{=Qc*kI^S?kmQJw{-qqG2pzNh$#N4=uB8kkGo&9beVGkG~w4&wj_uC
+z#(pO7Ib%gDFc(@e)iYH@?`TFjwAe})zc2h+v{JZHu)6eGco+C}`Ap$HL$PKWp4Bl!
+zI}$XEFc)st?2rBy9d6H545r<SovC<An3Mm;@>O&SU`*B?E+s2QpN~678Jb?Df^++@
+zZ9Y%kiqIlvpZucMj~Zw(b<~*6{sG)==%V;p9xw0@8CTSej!W)UHWW_<{#*GLAdT!2
+zW{aN6cUmtM*IV2SVucfZFDKe`PzoWlWx3?%l>PASn1`;{v|*fbwUsawu*l0v>{oxp
+zmaz9b*HFT}mwK%#O$#{u#$5uLB5eqOtykj_NmRPK=K^9$&3ZB;dbu4C><opv4@Jn@
+zd65QKL}0LJB(GJ6&HvE6sY#^y1^cw>yeGa6@#^9cCE%Pb?7v*wIbQ?Ub<FT6diQ3F
+zXcut5Fn1&!kak^XokF-xN2DmlLoka<zQ_%F?(=IkcK`d1d5*Z~kWAH{7H)*?CY>kz
+z!*Os=2-b6sgy~I}kOJ5|{A`fa=m0mh7?YQ}@-c5idnA8rrpZZ(3CV2ZvC{4=PxGPV
+zPK^{4r4s`Uc3I(e$qsi;S9QsI%zg%|w@_^|oUO~$54Jz>i2Q2`Ulc_k4>`Rym-|$A
+z20Kl337~EGwevUBDBm9KsHi>f7xP}~X|5~e=#c8!Y*;nJa+6*LUxUoC;)`wu?HUB*
+zu4}+g^?%UqCwD|ICN^PO!P>3^)g8gD1tUl;>;+XP)Lrso6_;1q)0YC&n#|{X^J+d6
+z2D}d$^xliq%}5vS78}YP2tEU!Ma-niOZR(~h<DPP@&1NgEkD@C!?nbPS$Sd<c&HE%
+zzt;5_5~{e;a+UqDc{IK>FT!67Kc&tj?jnysZUSy|8k{pZ&q321-PnsYE}gi*QC#NF
+z#nf>xNtZZwC@Z<Y#C~G@$NR=%#%zsFsDt~~6fLPUN4E0zbIYtxC3eVj(SP;Oydk*$
+z&O;7fycNk8XW-{qR$IF{w_&DZ^#2z#Ji5Aiaob_ZY0aNqpBdrsgNkNve@>`$A$@Dk
+z#h!DqxkbB0Z=}-$qq-g-Vsd-O5yuF_u{=45<Gw5N%i_-coYT;@wg*{Uuf;t}x*#?V
+z5#;wJ%9tR2Tds{Tx&*9($z*~H4Vy8usxG6imK^e?m_ND3=lXrqaf$|8%LdCBlUH3<
+zzm|82cgb|IDOf1fqL|Nma)moo7n)|M16}bJo#TtF4V}})#;Q*Y%{>z>`MqC+8bt>p
+z=No>a%yJ=e)Z$W5M{_5nLBA|(1mg%6f~cjuEzc=V=Dbr04JQSE2&UCcF-%eo;#NS{
+zAokKc)IbcT!K=<2L*BHeg(QehE{6kMJ|$ssY6fIB#6c7p4B(Kp4;f2-P5VfLhG%!n
+zAur5K)kf-K(<R3&+bHt%=08#W`HM3*xlju+(+3xt)NW(<%3L;YLct1tZH}$zaw8>k
+zE5bv~@e#VmGTzf3Rs>m}BOkyHGw#&7A^GMtLTF}$s^6JP&LknIay=O!F<PpwBhEQj
+zH-3pv;V+hvAqvqM?>pqI=F5~#@Y`iekw(=B#5{mb)v5&|XXI8whf__w6z{pxmH)bI
+zRCR9857rMfRIf*-Lz2o20DTmS9ae+R6Lwt}O*FrYEkz&YZHm>%tkTt)-=m9~W`{2Z
+zyP)?2CmZaY3ABtC1Sz9Ol<@Wn6V3OwNkA4-!(c7zgQ|wKiet-+Xn(5xSCNMXLPzAB
+z5DzBZDlN<<V=&05a+17Sy#fhre%NqN&#wJQXJOXM2#i+J_<R*kD7{7erS+nyQoB5V
+zSsuQ5pYyq5uW)T{dkGOCaSh8~YFivFh8<IX3@?KAm&_^#wPlqa?JnrmC4UxNkWA@q
+z*YE53zF{9DkGr6FtYu;+PyZb*20;P0$bO&|MXoVkDHLUw3qYC+Agt@Vvf25Qy1%19
+zs6FWg+34nZZf*Ree0s-B$j>EzVK;ULx$BvgofW08NrM`HHGmbn^R|#jk~ik+Si3y@
+z@)?FLE%oLg7T^9!E2uV~wl7dEIO|=b1JK_1nTB0OR|OVJ4d;{M{@|MSQGp8dZG<;+
+z9zB61^ad~ma)zq0_m{92B6ZJ{@j!O76Z_m<P8p7Q1s~daGck#21R-IQb$jZ(=9RcN
+zfD7OWy!+;3ZOIm3Eh97^z;Hc8Jz!0V9nRlroWxk%^TY-OUQ#mYSCyT9vVmWJ!cfw9
+z42DrZA=H;h#o-E9M<cd}9KnC9-0tm^Z<8OvUx`m|_*-z#v5<O7e;sJZ>^5A0S>$5r
+zmef;prtNW8$gRebOs}Yq+t+BW0pzlSq8It&#0#8x<WZ^T$f>GfMJ4#h-G{wE&k|HL
+zXOj>H|4QI3ISL;RUnn@<{(Hwd%CF*ax%Qq9lsB<OzJ=h&Ijr*a>5C?^mxX^OZ;Bgw
+z*p4h3y<1-RDRp5@pDx^)RWz=v-{NeV;&D*R;pb~I_)(1Sq-PzUB>PcUD!c_*WpH^%
+zXj1MF^<?Ca2_xxo-Er6WDr%Dw-RRQ5m^uz+g6ffV3k40Q7OxJVv<ui<g%ylf1tn>#
+za}czrb6d$%c-$Dtj(S&_P5i4pV-lLmB>_v>QPs!V&fpyV&(J8xM%#nR^$_cZsYXOc
+zXFe$($tT-H#3kI$RzSfwEho@B4D*;`XI|^{+@(>nbXRnH$48(y`Z4%3x*v%}Gssfd
+zG5uL3oiER-&f!L8^0K|B%89M@)Hf8XxX5l0Gi*VgscwsIrF(l8sA6eDz`U%fUGWzD
+zdE2a-Z`jTn6K)=1Vm?$mkyTPLp^+^**R#CLT=!TbYCV<AQg04?$@m8MnMOb_C<v)S
+z1!u%sP>Zd;3c`L;%nsd1m9-OvQuIg5P@^BeExsBxg!i6wl985AN;#WrL;T90+H=aZ
+zf}p;p^FD)xpmK-xeBRoD%(wp_uwq!%ZvK7wEr&`f628%JBqZkqa3pV6h{8f=KQ>zM
+zQ3KL_024@Rz`x}~#II;asgA6^<|ELf=uwdWq+dl&y6WN!81Ky=-M`!VD|!N~n#+U(
+zN;>6n!%^&2RKF`$wyl;%Iu$JkOlP)(uEPFB--IFSUvy%@u^t~1<~$?3YxZJJfL(!p
+z%DTimkf3`=CDC)GG}TmB_#a|?&TaLJrfCf;3s%@+x^NE$)n~YyN@9Lw?b5bZe5Hld
+zA^h7DAM+>$v%!(jSbeM#D6XWntF~tbjJtgcw8txNBtse$!2_O{Ln7Zmy}(t(O3K&v
+z49&mC`$DE{oxmN!-qWz$zt?mhxw!dV>nbK9aM;uVm>Q#$yr`eTZ@16qS@ah5Q2fW-
+zE5T0<tJ9}@`@0UogZ^bDqv(sW<HQx|bJXp6hZINK=)_^G_4m9^)L6sD)Qj4!q_q(W
+z^2_WiiPc?2u-|%L7!UcNNm{^!At`K)+tl^Z+wqBY8}cr@GrH5+<Kt=K+RCod+2#N&
+z=4&qHO2pXwygtho?;Bx>t)=)k(O%e|(4$sBYmE1Gu*zIpaY#9~l<%Dkt9Ne?@{w-}
+zcezy>oZr)~kv+;;g_xYRO*jElLf(KiXMgKgmP*XE*wVrl)8p!jMq=+2<I9e@j=hAp
+zo-2ZVj!w;*`dtNk+)`;y{tD}70t#B!^b_q(UfeUk=|s%|@lqAMY9gXvbSd{Wo{)1y
+z|97>)vng?~mz;e-Ft=<N9oQXC9yMQfip6c@Ah9(9_O&}q&_6t<YH`MCEk{VRI<F#I
+z%24LH^q+hyDXMC}eHZNjXHG$i_N<!=oeJL8^i=juQmWL5KlOcY9F*SKJ-Y|nFqeqo
+z)Fy41?@j)8Mez?MODULQV4z>_x6V}lYduQ7(EKy)qnyF$C7=`wEBx}~Ot<%T<s@Z@
+zc7#U=H->NI4k^bl|JSe>_1bhrVigD)cSPL8hu)30p6Kb&L+B=nS)|Nbr*Owk!BmPn
+zh&;n=&Op0b+15L!hGp4YxxIdnXfUA8_q}*hyPR7qW246tEDV41hMI9K6t$P5<jly~
+z(Fsg!)WL*uY9e-_g02v#2P`J}Dqwx%NJcUFFnpS2Ld>eCCcp0bwWF4ihP+m2>g3^z
+zvL?|2^FR7|vd<*PwZ|a&q<NiBRA1K%U5@Ittvd;GGVszFb<O!lq67NBvoax3;%ft{
+z`$g6a<+zm9zR=U3q68L+gRv`tTk0ujToof7t((iiHB3yX(}`Yo&#9I@fzgb;ty>bs
+zK`W5YJ{;X+`&;^*+ym&T@icYYx1uM?Q2uA=ob)rZ7`Z_BN5LVyovP@37{+t<mOUeu
+zRyrCkW;c-*CH4s03>wP4)JfWEJ5cxwW^V5$^i;>jnpeGUYOdf@I#~@PRF_^a6yr>G
+zXcL6lKv=~19pVF?Zd?w+8rKA0$%=D`&aKuj<>N@lY(J3RD96J-Ngu}7Q-&99FdVAg
+zV;ad^0<LC)N*C%kpchnr&#WsMmJr*f>ECtlR;W~|!e+tx_K`(`Sb=vU=K{I1M*`*)
+z&hS1r-G%5ImuQ%E5{e}Cguw^CuKQQx4XucN3;Vh5c1;uVDD*m;6W&@*chL!{u3GL5
+z&lBnLm_0+pA0YjZy}4sAlAYd+USEH^dMb+I?5;cR8==MIK6TA3{G;?r$6QLE7hg71
+zTGWQ<k`_Ob0##P^Q#L<-Ryvv3quD~gt4t_1(=W7d<vgX<r-u|bOL8F!^nAf3iP-Ie
+z-G|VfhZskUh0@ZPu{OJEMj)6st8#EpD$7=;#ERjcny<y@sl8&j5=^;gno>8kc&}Il
+z=C@8(h-wx7Wgc66MrLe$Q#KjNul*VT;jI;HvHr)cG|W;Fy=T#HlRJ>tjTPLNp@}qy
+z#BNAcPmV7ZmDyhQz_OaSqunQTlLTF5RR)RahId?v1Lp9*6W+`}j>*&<P(Nw57fd23
+zbw9=tn(e^-=B3c*^nZ;UR9=;z7GmZh*I2i8=xKF`bv9<p4)*Mh`hrSH(Dw`On&o`p
+zCbOIMw64xWjLolkB|idZbiI(LwKn_2$|;RM1jew5R&d2*+f7`fU`C3hoLmY=7ggN=
+z0Z~-FMOW$`jNORi^ADAMqsUcBW4N%lWlDHiL9lzQSI=4{wDw-sK5eyi{E-}7`mzWb
+zdB9XtXPLSZ$BE0>v*~jp8rBo6(v&KBPq+n{mcC2ViI&3mFqzSLhCgH;HkfB<+9P<u
+z`J6Jc`9^en(^uJ7%~R2=_WnXI2o?Dc%(DH5L1Mlwe=oS3D~@E-Zwhwh<$+$*<>U7T
+zUS^;4{tt66aHFImJOTKi{2C~QeJ)reTWEQRsL4Nz+RHc;lEr28pEbWx6?rfPuB{UM
+zvfgOk@BXB=rt)O|UVaKHpp0P7)>Lx9`CeTG+|j*^I$OJ_ZHVACY_)qGa4A5=d|g@J
+zeK|d|{CoL9?jJ=T)me@F#M<`beoo|F$1CPrmKAWM5X7|OeyxNS&A~3W3?;0n8sXbn
+zHnwa|FxR=6RSj@;CsSkVouc=FrPeR2Oo`R_t<{aTW8goj@XA;0PoQ_&ujP*PAX*zU
+z8}sjpPgK5Xt+(GMkJJ~2<4j+8C<`QpJ3*{YbPLc+-BFkoXcC9I1`3`_{wK8g205p*
+z4;8&d(9#W{435*VI<&31+&zi_Ye?%>WCJN)GB5nda~hy7+$axX9uoGXh9<wS(v|ts
+z_Be*$uc>CCSS`>a?JxWX%c`;K+S)avVs9zGC2s1Lm$#}GxqlRX28N+Sl%JLB{fmt+
+zlHD1m;HS!AU7Fg<o_Ir9$M#AA{z=uRuE$AC*7BgaHbeS~cOEXPno2$5Orv*s{{Sr$
+zoabKW=?x;;($?E>spCW)FKZ6@M|MwqSI2AUG!KJ$Tr)K~zZS;&RxsD&N2lr^sb;Ae
+z-ir}X?-R-a`In?G?Wj7P;cHe`=wO{6`^p_}#3a8{To*P4hdW2Ymc};H&xM{Fw<~9M
+zjp>cCe{YE4wMc7xFQ88qZG0eTB+LSgu!nk)+Bpmp{IdC2K82>%EH3PiY*anel31&A
+z_BZjR!^Ou*!D6-~Snw{DZ2`qbH5~T*7~(+ufj;wJ4ZBc!>=J8<*XT}g6WJMsOy2J!
+zBoD?*QM;S4t-BJ*Vpk!&aXm7TH-T>^Wv6!H9|!#IRSkbS4s|xtud___(WK?Bm^nui
+z@ZY2<Ap-Lk3YmVP?|w%G={NS#%uR$%@rbnu8&JEgGUe|@PJ5?!LJ8cq(DZE=j#Q=}
+z7lPOAwM8=^%)DBP>uB|9-5Zf6#m+3(^3>BA8F3l6vh;rB5@>BGP`3jc)7-ECtdUH0
+z@_yk+<<dkaAz*B8eaL&SaJcqCYdx*h5l)V!E^60(0h(ork}6ayYv(IJG6otR_Z;rH
+z!aISf28xm)fhdDz-UMFs(@O-lkx@YJ9LjQZdC^41Wak=DN8wr=IJyNin6-#-K=x(R
+z%&HAcRf|T!lnn@Dg1cHn;a%X$tZ(W4%nP+SglBeE!$J-n{<!89e?d<Y0cGz?UCI3k
+zya+XpFjfm_8rLJKo#NQk^#Suvz8m;yd~o#&`cCvRk~Mg&ski;1rZ=AIxu-1z;QfQx
+z>l#1tRHYBMFRC7c+h<fnUS-+2!+qa&wn`#u2zMBw++b{2i@a+B7fxuHQe15OGj2A=
+zmHX-Q@j?PxzS4^T-v;j~DZxd$7-EXe8@`II%|D6yi9A4m-afl&0Mg!_&1$LKQb`g6
+z7!Po}Vs~xJszw$6)uO2X4D&ZNRQ;84pyOF#kK}Yj)XP@qc(GOe4l86)3}RbJ*a~|@
+zcxfF8IKaOJ&?;S?&9X;cj`OgtnnO|C^9g)pI5GTx00030{|wXzLlOwX0ALW1C5noI
+zEM+)AP*K?k3Wy@Ir^u8cTU+;bR##n@t!rI%?Ro8at!rI-=e4fAuB)!DW!D~8UElZo
+zh-W^v9zaJw6)-(jc{6LuOdBE>L>1N&lOwSQcAWQv>L%_O`<VDn?b-%D;xX+zLACO}
+z(xds>k!izLPbZCps2XrI7tRD2YF>znM;eOK8?P2Wb#jsO<GY)gfnoS_kum1k9E<XA
+zXsy3mI=vAS{-+icYmC=dHv*X1KRO4)T*^V5+n|%6PFWtsCp#0~X#wRd4h?3pW0vGV
+z;!f-a_!7iD+;F>({sZE&TaJ#$4;p&CHM!DORfLC8BYuj~bYBq{HpTc&mU)=t#_9HI
+zZMJzYA_$y|1`&2M-_@YPUx|^xp0<A7MjenZCiYd_Dt=IouGpGeft~?d-txLm0PTkV
+z%a|bUtjuc3Dd5)$*d8K0*FlV924Q|yKM2&eQ#j+RN63C4WhGMVuQZzGW7W6v=-3+1
+z5Hj4g5J^|x)7{rE$aPj9Lh^!_azJc)!?yCZ*|zxJI7EOY+>i~2O`)&Xg{VurZm_C3
+zm(UBlAttWth-8pBtqCcahWNp<k8ztZz_A+EZsbSi*IjS+v{)4T14offT1U<XbRB0R
+zy3lcsd^imSGenEMl`YiR0Lx{PmKN(+7FgJ_25WBK#y-^E1;WOvc<_wMSY~7=4sR#r
+z9<deDkEm*$Z@6E`W|PIg)`phuBdkxS=Rd6EwFx_?2#06|!j~94@q_Jt2_owT^I9Ao
+z;59<ht*A3Kb4#bdceuBeKBKNrds~XZXBdP@D{vmCC_140UiISQO4!gwo1YpDaAr|o
+z;Xw+OGu?9&wZb5AkB<DJ{gX5{_qE1nj^MWnFU96S;~+`q^&G6Q07wZb+K>5tmVMpo
+z@RaZ|JDNM1eAWAbd=$@#Wyyc_{DfJ==c9V*^D!+Iy|6+Aul}&@H)Lbwme!x#?x==6
+zkvNO_knLqIh8rs1ao#C*l|I*>tX-GU96+?XBG<EN{F|ZknLnmpktbSaa1Q4hX({);
+z@-0DMl{oo6JTAPk<wf*ARjKkW^JTe?iUl96YS+-A3%k077_?GX8tvn<vD2^z)u(f6
+zTzQ6VQ39c)?Q&jcQq?@pbOS1I{*rk$y+@ND-icx>8u`DrV0de}h5RE_pSr4SBs<XY
+zfRrO+6>n`^L--#pXdaPw6??<F6yvTHSMJk()-B^@b2AGki`~$$?EvgT_D5?MBPbNw
+zzt6lQ9jtrM{6#pf`z>oyr$X8;Zbt~L2FIMtdgBi6k-#&b-FZ;LWn3T)*JnxJ>4Msy
+z^WlO8$#IBt+?URQ-djo*agyy|ETQ}}2p2n=W{NL~K-zmPmu%ytjAo2)YUNM$y8!PZ
+z!?IslhnLZs$~sW_XM=~dBuyFYcNNt5rL(9*NjT+Td5JPhrLT63m7IKvY7Xt|q$|b6
+zYd!yP-1chSe}XZ|am*P-&t-UOMy;xIm_x2xOhT6S;Ebfz+)I%6UU<`4VQWNd>1&=*
+zxgn#e;F@z><Ue8uOPKdi`ZHiGzen|(7%LY8Ee0oaMnWPlZo^Xlr@qZRt{q+d#(E=f
+zL#?CJOTW~-hTpDSN?GAbYa8S=wFd(`DE#g(3_QL;^hrdk0Vfw!^*ODjLea~F)TzMH
+z>%KT!BA)VC^_B3cs-sSC%pWoN>HeppwUAl|+VYz7is&HaVi(i9x&uih@*wn*Vn!iW
+z3<hYed%(q#J9VFkTg(3q8;ia-?S&#OV3P<o!gW6QN;Rsz5~{4u2;Y_d%j-y+CChPr
+z!y4w}Lx)$6F?^IyNSzn;XH|2j6B^2R+CAQPI3evf9v6MVH@`~H-BdX^T_bqmnwo-E
+z3@1&2?u=NxcktsCTPkk&<bkQO582a7-IC0D2^QaIV+IW6sXh}?M?ys!Uog1UM%HY4
+zU*pBZiJaA>KbttE`xMnpu?XK;3Q$03r7YV;;__NT!6X1M^KvmZXBwuk`*nz{S<_-5
+zZ;)+@&u%!&1d}Bt<4Dg+1@4#r53w{-QP&6iHXfBC@vf@`<wfZ&8OvlF!u_#L^55mJ
+zt6@P*&3Bzk6*Z+Vh5f2{D8DuvNDFN*mvNlsAL`hi`?}MvxkQlZon?1+srwOvD#|Fg
+ziFb7$Z}?(Zso7@ursYJGL?|SvD2vKwB{q<7_(d&|R1XIPU^lZ_`;!9=Q1}Pqh=8%$
+z<pu(vtlx={)ByF}a#tF}EY=Q2yv}*S>h4}2PqjZb>?-=Pt4(CgIRhOO+aQQo0sj5|
+zsethbruKR33;k$Jc2QVZ31{c7a!*SeVOq#s#`z{XRy7hW3B1pf<F9I`%3=N|s8i;X
+zj`?6;qMM-RPf3h{^jUJ6U63D~-^6#*Ym66Yw}m8fP3s--zCgEei-L#a`j%i`8gPYp
+zz%j?HPK5p&Teqt@tIRnmBFs2Rvzm{Vf5LulUsZM{@uc_~=^XWL-O$3k*a@bFHWv#E
+zPYt~7s0EoFbE3(LslJ~Zw`3CyP1VgscHPWEWS~f-%B|I0@n^C_9ze+kzzgjVw9xP$
+zR}^e3HZy;>S&03L8|FRqbtO1UuOCwcmi!{!>O!U-$`5BZTMr?w%JW^Bfs&$i-4d0{
+z(xE*}<>$YZkHXH>t*)M2xeK4?zSCt4&PfjxXViU3Bn%H^N4p(yrh5i%LbBQSncGm#
+zw;d470^@DNv;`&4^o*ANLTT}BpO!ffjKJLO=u7+o%m*5<z4aSZfAO>P8bSqVQ}ZZL
+zdI;cE78Izqqvvzz^%p?NI(+b>dcT!`_ChAreVg}@dDHT%goQ>>1j&;0r8PUK;{cbl
+zL+A^VboZL3uP{EuS;?z{U0GGBTsp4Xl|4LBlYgmoQav^f*S!?5Rp-!ms}8mtSN=i#
+zO)#<WGIfqB0Q_HRZVN0xwTqE!U^da7?qAxM$E%pH4R1Pnm9~y=O<!7oS=%6$NulW)
+zG)#EMrqp*9u8U8!Eyh~lb3^2+tF}AJUKB$3f=SM=mDIzZi6)wVl175V_~&XG^XI7+
+z$}Z8cq9o>|X;5={#sWPJP+GXz|F<iGsdbGGtT#PV^%l@G=9DI}p%O&LzuEoC1nf}5
+zL-=0IOGQPDK(7hk3O;xJN|{vjXMr34QSo!yQ;8|{Bz2fQ1~G+ls^A&41OK-cB(VdR
+z%5KSQpbt&N@L_>8U58p!zXvrvt508A{4N1WKp_RZgXO1VYUFO-2*l^onhGv%5Y<jw
+z*C=kXQa=kH=avYDs49Fz!4Kk2_uTM`+)KVC1d=X+e_Eajpj6u(oT3Gk2X$P3R{rWv
+zGe*Q>1p(9*a<2EZcX8k}u!nL};_JRE^6B@L29>J(>(-CN9)P6kyMpK5f3-(yt~U9T
+zw^Y*=9r4GGjaI7ncB(eKqhV;~0t~o&Duc_OL7zjIpReQJGf%d1=tFIj3nP^T-_MY%
+zy2bLZ(c4M3<X!r4%?s~l76ng4Qq_pE^qi-LvVuQa&E$d1y;;esB@MCq=@d^A<fMfE
+zF0uh86y=$+E$hjJHIov(8E5>j;z`&X28uqGq^wVQz?KVAH3ZDN6-#9dH3$N0%u|CW
+zk$=&1$m!~xj;o+R)UA2lZf@)cyosWy(}Kex<ch3NZl|buo9rv^a8Nw2KBGxe+Nes+
+z_K&GB!-q2-$1fr-Bw3I@I(>D++2+_t5KuGF<&+i?(`*NjGW9EOM&5i>nEaH!!{GEc
+z(GJ%ZWaZS(7LSz_H2@%875ka6<}T@%;@!Fy(zoPx(j(X!$Gs{#W~#O{zQw&r`#>U6
+z9Houm^@W6~6Pe>-n^B`vZH>Ujtj5RuZ#t&gC(G6W`w_?0mG!xl_jT+B3aqvLey7(?
+zW%xLcY6aPN)JgWl@(uQE$uE|{^v{hu0qE*UvTLnuVM*j2ur=$m`e|E{ey-(D7(4!?
+zLC()%YU4eqYdK|XN9a1bo7c<#6?3#GqvT{~zWi0G+y#mplddc!Wr}D|AP=i1q3`JE
+zzNbJOb)He;nVK$3?+t>xCbn#25Yn+ECc<AUs2kbbM}J(M9T`<4Wi8^ptXb!?7Tmy}
+z?ffizi4uSw>C<v-S%=J<!4D`D@)$vt`f>XxM?VM;>}Jr=>xKVcW^{%tnlT1X2yxm<
+zYKzIgcuYllWdZA`G(&f)a3{QlHA^3`ZmM|-C~v$@FoZ_HtQo`7YDI5g0O+xtATk$*
+z1wpY_E59%^vyqt6<Z|FMrji;*bb1yA-tjy+K6z)ZIIElZt+Olo26(TyUP$1sFVW}y
+z9xfy2`I^a(2<9d(N2{zSXx%@8rkLk*9e_3>yP8`WMOp%%<HHL!(1hh3#b4+bt*se{
+zeJ1Ny%wqSZ{0)+=!3Bj;+A76c*9&)j=EhujxTnP>9I6tBTdkK-b0l=&Jjb)R2su1t
+zXx-bi6zNF~Hh~?RWb%gbcrx=#&P-JwR3`ccy};3%0~PId(>&LT#^A2QHSKD(wUMbg
+zE+z|ph_Liq?F4cQdbW|}LleQxC!xDTEzzjMn}!mtw}6^=g^l@nS))8qP$%kE;Bst^
+z`J2jsgH(12IFwiC)x}zB2j@O#2$+|-DEZ>@{QQ^1w^goOGFaF7Fzd(U`i_YWiFCMP
+zfS(FE+4gq{8Uk^$Ylln8_&J^lK7nYc`&QGW;+x17MY(Ci%CC$5LKfuLbZcC=oGDqT
+z*7ceb*nd;$wkz4yemwZBVY;+3^@YmG9VvgG@aGK={t@n>Z)Q5N{rV?){q8g!+ToF&
+zruE619o+oa;2(3P(tk8V>K}(+35kxem1y!bkTd&?wzU2xWK40Okpp+TnS_zb?XXwK
+zf{N4S?QC$zMesvoLVbciDqcpx0q1p_5In*0*sS6zy^Geyo`L)Y_lIGrV+VR8;al`v
+zerY#HQKw&=$t2Q9>l&7XPsBp_D*3bqGIuUGRry`%J92OOMcpA7m?G$&r$yjm*#n4A
+zwEVQCRUehrp%+3zlES;h!!i7st&nf>xiuNBt2>M}=fhhV0{yR*;*K+lgI%LIPu)BZ
+zwS`NAk>|Squ4pg%4!_g=kE^Yt$GIM{&2~)t#Cn+imN=_z74t9qPb{ZveYr(5v+P5e
+zg7z^ztN4c`l(<rNy)}j31VDfvXl|Bzn(ilmhYaI=1p~S_7QPq$nnP>Q0t=e|$r;Gm
+zouAvZGA2}*q5sBaR5=|(AY_G5wOy5teqJ%deXV3{yOJDX!MmO&e<wvPM^a~6Ym}>^
+z{a(6g9V3Wsb{aEprK16e=39=j?xS>7g|^zA-VpTwcV-MUe+N-%=g18#C2t3(ivQ5@
+zJ(x;5Q*9*ZK;IgN!U>vY*2vC1U1tcd5&*=Gq7{LS)DKQB^^^B{-4(<hdk?s=3MgTd
+zuSKqFH?l~!!JYiB2=;Swee-11QnWrb)6i@<5*&ob2~f@xnmq}F__nx|Q6|08oF$y$
+ziG*&l=(Huy+kAcvzoHfTl)o0X%+y3}$eRjlARQK63+(8;!p;g<1n$_v>`D50@)F|S
+zMBL8ARWX`6&bucOMrXfAIXnj{r!WTO+S(pddK7qZpE^<XXDt@}TZa<!4?o74h2C$T
+zYHE@1<^XduS=;c3;_|#nHM4XwYajMM{Yv5gh}SyDw^52WYa6^%>VvqJmMN_B#d*L?
+z(|-CQ^5kw*u_|2)-ob+he>2cS_CPQ9V)%3OdiqawA^TvXNO!fIn;cZr7B~*x;rZJB
+z7Bit{MgA%!+bQ!<nx5fP7*ozI-CW+gvU9E|U}u4~h!c=CY{+kmt;+pK*=zB^`}2Df
+zldLMK7;~I`F7udW6IGJst*Q40yvyC^;Qt8zQBMR5WdGtxil4)$h{Lt)B4;xayRd2~
+z;1vB-SKQf-9Fc4&%!)i0+$=<p8j|;mr>Jg-d9nfB=ft&D^Zk=?Io|rJ>4ovK*Y$I=
+z-k`2H_Dg1Jvg`Y`8Vfiy9WmKC$gfIk4E?E(ac?yBX%gnE;hp~Z%HyaVjH9px-4%vK
+zrIUSUqCbQ!!Xr7!Vvq-^{G4{#zK&y8R$%s1WbKahZ}Z`_+ra%r`%2cg-fu94pHqF{
+zDZFN{PWv48$Zj&t1MKo^T~)LoJx{hy*~lAIvPV!}ZKpmqO(@<8I{*%53^6L)Y`E1k
+z7|17MM6((;vr(!4f(Yg?3RgG3^9K(Hgl4*O;v~B%Lvm2})RV59Q8xm81E}UXm9n%;
+zb+zWL)X_=3DpPYPot<@%NS9g5@1m&5Uc-v;Z@IswZGfc2kF5Vy?g8oR)g9xDm_{cu
+zA(ZMir<dWUc2FyJIq!nysBMI67QSe(AH$DQ_A)rl*P3n$FUTir6TlOcvs_W@R@BCd
+zi<x2iJW#tsV#reL3oD=xxswo-x+r!W8p57R|7hspw--$5T$z}OYf5{<{(*X?d;&zJ
+zcgtb$nB$9Q4e*1q1F(VdqF^U)fhzzRKzUizL7JM+&b+Gjv9aODNk)C0q0@XLzP&Ms
+z|F_~t*mGi?`<L)!Z`7&)65{Maz4^~BiZrTSTz^FQvG%m`fLW5g*)+r|Q=tk6Mm9xM
+zVQk5u>}aUNIj`ba+atHDK0|sadb)HS+AS`jebc7KALZ}R^&n^a4lwonYQy06UBVyB
+zpXU7o_`n;$+TXD_eYORV7KYbH9yFF#Y=`g2Kj1LH*VJpgw`e0N25+#XH+rG%8Dd*^
+zME0laZ1frJ4o~j;)V0_OGjoz($@B<I*P#+?!%?gP9thys7rOSM@a^!BF#9=l0)bn&
+z4mZKtFF4@Z)2{Zu#g$u$o*RVy0tR=H>ln+LE-9Q+dEe7RuCV7-Jj=h!o7Msml1bZ`
+z|CGIC?v>xj*k76=Z8a`*dkaV+Zf;MRqr6r+p8zhDhJVGD*cJlIE59VY$W^Sh#yS_;
+zxezc1g5s4(H+5;MarC3v!J^M0oyY3DBi`@6N%Z4pquWf=dBf@5xD@WQv#NYy?)Bt*
+z;O(-X?E4b0ptW5W?LQ;`CSh_Xwx>jIxreQuG*s!6h}{y*^Or)a%1z(LCbFvxzW7sE
+zKJzs%J8fY0)yQaXhxDoUW$o#v`S=6mWoS?7lIT@nu;e6ZUTB4RNeGpGgq^N<V#%N%
+zDqC)wN)!r=;amRc`702;h%apei!T*bMJs)={Mn{Y)$bXD6=B5FMuozj3KR_TBv{Lo
+z9#4&LbctJ7ZeTDEQW~;Wt6iM~L5q|X(e1i^e|7tPbDCviU7!1w)>S$Qyvx11(UH5P
+zCfPnscDn0WR|{pcOD6A1C8L+D>CW*P8z>tz17IbM?+ON{H)h_&%^)`x&rW=>lqGhM
+z2ejvEcO~nbZ_-9Grbb5F8Vg2AM<fn|4>c)R#U@&|NPi!#w_Fb#M4azJR$y6*!tPdZ
+zw7Ipyr0pIPSQBs&g4LqhFgTt0h9V=6b~X~8JI<5FK>=Og3fn191oN|pW)BX!Yc5GP
+z!A2?j4U@~psbbwrg>!}Jg460=+63K8{0x%7H;$Ljdt+;$UFkXyfq@QV-XmkWBStRZ
+zh<tL>9!5TXt>m_$xoau*83$C#3Y<4CtPjBYaxPY{!yd`IKwu@c(jK^p=qNg&yhK_W
+zp_Cj-E+l@ky$YYp5%Bw4+p)wzs&f-XY@3QjG8WlqWo%{q6nv=MMH*aPr+nqttBsLP
+z{<YwEdX{r@jL??FMCZq7E8|!3TS7lKO$?|@H`CjwRh*q(5RlA&i(TdTy6q&LgV|PJ
+zTLv>c=d_l;rxm#G(Qk#n)o)bvmo|F$8t<`<#s7;k!WZf)crN`r$4gC-Ilb$hbclam
+z_Vc`h{BxzBs~*F?bgkvo<I_rj#tzieFiZJ&&dN-=X_$Op`FDkO_FC2V@?ziB5HV`%
+zyu@jbexH3(^%dcp;!OHD*QBa0$tdHf;3?4FjQuqdp(=PH^2zs#n#_ijQ_{Pv$`(RL
+zqg4VrX>X=bFzH1fYm88##}Qq~8rD`QC01$ykSaIh>y~4QhxEfOC;VI6Dqvqqdc$t@
+zSnN($75le3V@o~I-eoT*fd(Wtp*Q(X6TS~$ZZk35j%hYD3#051Zenwthe0<xehAPC
+zy|%Sj253I~M0_>mjo}TgC_cI#Ci;=UFepR!3X=G7vTYG@D(Ts-T^Ru+^NY9B;YLKm
+z?9@1Jb=xxcL;<f3fSlkQ$?9}w%SNN~ErCc4`37WyS;5TDVdbu<5+b(I{+3+LU0A#v
+zf2?t8;}`u0j7S1Zn^UpObr|uwEFqIuIt+z*#{v_(XDaOK8WTD1rMO45EwCA1fi=Qa
+zv}`pgwE%YoxS~~wtFxVhT%q3g-g6}z&+5-cic<Z^mmZ&ipj%h<sO@F?bzfWgV5l2~
+zOk1P-E6I|N1pnj@dkSE0OhL!K4uN>SWl=*nR7~AiDtBl?!$5zCr-imt3Ki|aGWYfR
+zuR{w=y2LKk)#!NF<tA>G1^QK?-uI<;LF1IBU&?MbZK*w|*L9FMJV}bzMrWrDRGLU+
+zTaS5U*YX?;w-J#G9nrE+yiT<$K$Y&abMT^^c(R>a1kA#%O52hz)=%fY4MUS(fi-e9
+zb7bA;#FM}l>__B&>GHg{jvuXWNQZ0MF!l6XkOgW+DATYW{n~Xg{~+&m=kv_x2yae|
+z$7x?dy#U{6IBHo~qRVNpj7a~=Oyd|~?@b43Lo}4M9jH&r@zR)#0}++a1b!Dh-{36t
+zXCvtsoSSs36L(qPc3g~)EBg*M8Eq(<Y&cx9or`78D%>2kGxGTpT9zA$$wk=p%m6K5
+zo5tXyY0`m0vF-wUW(b?N6ycI63vIQQYK4Oq$#WKG9FSy{D_Z&f9@_iNerIOwl-BK_
+zyD@Sx1ojasl-y<9$%NBI*cfv}4DbDDx8chCG2F1cn0LK*Ci^?aPu&&56T&^Q+1e*X
+ztGlNZ4R0Rq|DRD9Ev?>+SP1^uj8x2TT%+jCQj@P#w$s*<;UZ|@oy=mfXFW|sC=F<2
+zRPPv(9T0s(xZkz0^|$U}=wyJb?t|}U^$}0_25IXdhq4E>AfmrFTxn}g{T&XA@4|1f
+zlyY?vkDZ<QEO8jSKQ)iA+B^ig0ymU`4&_HPga7N=ClKO8wgc>WHP)_wr0Tpr<rz|r
+zW{d8-OpmfX=Tci2v7TJ%AsD+$E<zW1>cchCOZ+<IT7C$}kPZg}%u43u=stU!gl@T*
+zMFd`~+zQ>_Ebe~K-W8eP4<#0bKNMxDE$tZGa#W`3elT3*&HDrf1u(>Dq|={~DmQi_
+z71-5fvrO3Rxtd+^F(kMwQd};AyKx0uf#!%3xg=BXUWV(DJr-M29Kc_mvbvr;US3Ku
+z)&H0wD!G(=P`^VmMSM1AA@xncNzTuq!i*QbLGfQRyE-YVC;nDr(DE07W1ND@2NlFi
+zpc$N@Kx*ZQ&SAPykY3O{>@)Z!#+urVnlt*>-oJ8YK~^&yQd3K)VI3k@cFM}fo#$tm
+zo~A#A&&wR4XSHpNj1(?xkmZaf9>_1HUyJrde#c$N(n-EYEf6Z|u4eh|cXAQb^ad~G
+zDq$CWW65zqH}!4sYgC=|9PSg8miDzikZQ#i>i={923usxDqA6KAsUr1@CQyHYmB^-
+z|EzL(80WxBZ@`6Z%W{tAL?lUYt~4{XJpiJ2&?1KKfzup*@S9+-o}1bVIi_=)%HZRg
+zas_uofXrEyYoiaqJ)W)c60AM=D*q^SlW};RjX1u3EVxs?4!!_8#bV~(CTG?NG_5T=
+znO^j^HoxIU=QEQ-ag02#Dp>cuV?TFjORtP){oV7XZGRP-eYEZ0v~8gg)YqkTgfp?i
+z+_Q`W34YtOngqaSZS5?p%nDZ0{-jlvJOpXf3d&e0CHlC=T6R1!qyCN>MBc*MX?zq7
+zna)*=MJ_cTDOliCW)X$k`H!l))e__d^NaGd+zFORP11^Oq+ItWGe0>atU{Zh%WCnW
+zQ(0%!$0&n9=b>9W9(58NL%O%pMsmx_IDAchfVr7aPT%A>YSZ%ip`%(G-R%&UxGA?^
+z(^vVdHk{O=&-&%`$B`-3YlL>fYRz<HQl6*iYI~$4vR%y`&54SU{7CaemnwET6wf%*
+zqHbSVxh5<UCXK(@7?Pto?XUr)AL;nat%%z#8sxj+QNV30hAN@5F;lSZ@usfz5s7$T
+z=uHGlQWp;-_oj8S3LCG>Jk*BPEXcn?c%;XDS#*Pwp0bOiWpk1}&;Vg|q%Yf4=D>EL
+z4LDo*xV-!JhosSA8T=If8h43UR<xJ+F!U+Dhkd4WC2CxV+OZzqU(sr2f#;AuSN0Qq
+zHM;?8n})|01XfYcD`%w&4BeTL_>d4xH?HJRa8=R~p2}w$jw2pb51}3sVpB^hsr5J>
+zCvK2o9m<BkymhrD-dXC~=pN!l8USYO9$ITiYJl^UAAp}=Qu#`55OI+t4XV)#V;Op?
+znW|~wjK_2p-6lfpcC=JDz_h$@L|RO6C3Q!kbQ}%F?4yX(uJy!V@K^6=5vy=A76rsR
+zoZge_v*f42KO0iWzw$PPC3c7Db73cYQuVi%9YkLHXVpJxtI*4#wIvz3w#+?+bVL{H
+ztSY2CR^St_RmL0sfN=<Z`3%hgV|p1<<I&%CWke0BK6_m@pnGrOonRfj-(pAU(`*KO
+zEuyBS1c0&epP2s-009601i=SD5{Lo-P(_9chzNp+fXGm`ECpGDBBHWEK#+|xL~M_1
+zU0q%Cy4F?K-oL%Bu3gu<E3aKw*R{uW?Q7ZY{(BPCblGI=dOfm%k&x7T9dJ1qP1Aoa
+zST7HNMo`9<>fQ0IdS_=+jeH=RN?A>ff~oOD*S$I(GaY8hN9O#as!|WsR-_#mGFO_H
+zfyBtyn>GmtRc^&DmP1pg;hpvF$Uo@~*`EqFNgnYY5`I$<^N!U@xLewp<v;VTH(skN
+zN1<9Fs8!ZA8M7oGdDVyy?DdqbtOZSXQJYB%%p!1a$F$nhjn;UZ{;~XHczfqJsOvdh
+z$y<gA)>h4P?uE!`v$ySY^S07az#D)jMG=2XKCTR)h&Sw$EKdnD`ACKDO7NAUM+Qgv
+zZ6*FVXC`ta;w;Nqv8DBfJ>p(~>c#V@W6R3S2W%fDN+#R!yt1@*4kH>ZQe7$epL)Du
+zU&+D@kasu!NHZ!+RE7~BplxgY0bWFY&n~KbkXTx$S9z2xQo93lu~E*tnxzHxd2H?&
+z(q8!>s~H6q^pb5Y6BF}Wq{@|bH~1WPFiy@b_Vx*eCU)VW9kuZ*AxzskSAXsP_)GUQ
+z0H@^$7z;Saar<L+Ti7RbznboJ(BS*cY;V5VPH1h6vao^c{7bM0#pt#T>~-k>5=!q3
+zCI@r0>lu0#@lQ()Xsjw`_{1_2c-VVEcl3X<oKBCNubNs!G<bWY*vGnGk@s8wjt{F|
+zBEG<0+S#n&T3*|p3IB1f$moJj)V<FGAcNptc~i1w`Dm81hGy;ycZDf+Hd-upO3aA?
+zgE4&0__S9+d4O%`7y2B(RCeB)mA%O-C1!D}WeuJoj5W%!P-kO)-SVs*u|*NNbr1YA
+zg+a?Qy)-S)(Hdl&uI9Nw9;_N!7u}U6DA(h8ooo2R>m`^a^>y~ia&F!r_}83l_{R2M
+za`x0$v?LO(kq=#jxZcRJL!>LD_rYJxqw3pzPA?l!)yC=g!;o*{*cP>X?Ix*oyk8LI
+z1VkR4Hyd_4$|8L-UQA_oAAqP@f<ZC|U}#RPvj1ARLc58A%v&6v2BX_VEgLiYiJrPD
+z+eY?~*>e$>NSknERVnmA&FZYrz*?h11tN5^=G&0CT5PueRB94+r7lVyR=YQshq-2J
+zK?l&6;Hr8ebX?~?>v52U!SpQC_u*Wk=kP?Kx+=l@)!{^!IYwB+m^B&^iLAC)!k|>J
+zOtCHhkLK30$G+qB;kGsM-!LmOJ=JxREc!O-06CGmtqfi_plVe}nuI%gEtl&=My@_j
+z3FvHruI0>0T*ZJ1H!bR}f0{<LKV+RM^AjpbgYxb-9kJal9$YJhUW_bs%xQjQzEj`V
+z`U|)VG8lk?j|^R{sP-zDM^iQaA0rPs9~uVoZP0A-FyVW_CE<KwhWmNjciN*ayL^Nv
+zTE2nwT>UfjI}s#*xZ`c8vV37|X<)f;IC`IS8dVc{$!ITI$M^tQ2Hs@(6SRb#N{k^a
+zleJ_i5WAgJ_9SF)Lzh2Zu}u5IcD(Qy_h|@*6ZB+|_hKY~olyim7HA@_<6yx6Q*-wQ
+zT0VG~{ypzR=52gE7|Hs<8z@`b{swg^V&(BGhl|gmtl_beznpV2G1(Iv59YffGb>(b
+zIl_Kog?zJvsQelGrg|dbi27XjvaA85YqDjg-|!#MNzaC=E%3uA5k?9h)v&(sO>UQI
+zR_he{lVWaTcFCXaTaD=KFQpeM0l^b8K_F=BR(%7+_t=~l)K3(ex{!FMKrTNdKsU_5
+zCRo9$wX9=|GZhWuJCefA&csCiGR5Dy%K$?f&ZY-@wdTeIIP0Nru4pT2a}7{AKsymx
+zP(7Se0=MhBGj}(wfc7%p;M-GEy={5lX3onzYMopZ#GX;fg_OL@HDt#k%(e91*nITc
+z>^26}veC1E_gK~6QAq?~H+FxhRzl8a(J>(H*|z@Ji)dsdESv;BpE<=plhSP^W$eIh
+zM=EmZwTsCWWtqDFvZc+}M8vLC)o8$z3WvVbldAbXLrH$D{kCbg%92uA`@6u&^5A*r
+z7Wp;i<Bt3M>A?Lavi`aKg7%lJj?O)iC8$ZQkL%Ii<%knZIdBojgrSw3LG5qcqx#cu
+z$G56e8sxGc70M{50IlVZ!e2=ulzHAIjRT+ukjILiqZ#x*>d`okQqwf8{1l2EW$T(V
+z^z2iR?813SZHVWW@lhUgg4Fh)*@&b&W5jjvCaPIFv8|x|r~ESdOZF#E7Ge=T8|$w-
+zX~Cw>AuHfhET80avLj-n(cqt=JQGC-$|YV7IJDidJl*Kt-P$4eg}x6qg8W4HB^e+<
+zVRu`0^sFtTf&YZJ;?nig^!I>VP*2lDF-d>SzrW#z?I`dM(Fw;09maJsC~soPM)LDv
+z6Dp|g<z+|7M)J3UDeU=%3=<YdlCT;ahW1#p=8@zK$cO#G&TF4nG2Jwu2jfkb0IUBD
+z>_}o7zix2Q7Zv_yzu?d`6|yb0NZL?LW{0Yyz&0Dw>;IYyXO3caS3-SjO6JAL@K5xl
+z_uEt)?acemAG5y$97A=N@8|qgH!=SSBqsPvaJ^~|;Jo-7Br~=WaM5xJyj#FZj|VIg
+znV1iZ14RFHSc|JmuNTL?qgeA9IMaTbHTA~zon)m=X3t<wEO;-P*7RfYXV_-?Z?zu_
+z&9J{Imzb5<qs8~x`R$H6K|)F`veht0#Bf4J#Aq4>8D0LI1Bsqc4R&uNzLt`>IUu=U
+zb*UPE**nC%K|Yy0*pe-5aDQVRfi=pA8F0})!3WMwzEG51zPo;N{XWbI<O=<Gca!Bh
+zXI!Q`*TDd0!h{2KgUy$uy|kF~IP;jG$oM*~<^*##x1Ua>YR9=t4GPLN#g@X0P$*nN
+zKLe=l_&2H!UkHhkc*D%ryMSADXV4Qs4-@m#IbcEM)2>$$M&MP(Hu((8w3Z=$Z1x-n
+z$?%_?WIk(n4muT^T)U+_=2>389&}xj0-R4tfg$;~q4CBYCBMjq)ci%-OWxTqJPi-e
+zHkIbX>Mpvz&)<~Z2D4@DExKiQp<ice3#1*DvYkB=b6+u<zJzyNIHk&KSjHR-eIDCW
+zdY?1SI8L>uQK`CHau-}4(5tOoHnoOSgIX${Q`^S=$#Vf$VVRNYG+XTn>aR?k3?*(V
+zXP~Lnr;IYoKzC=W8Z|_i2!~`p!fB*EIYV(D{S$k*n3MIh{iD^lY-mm&5U+5B`BfCL
+zBy_pu5b{3brS(H_LgB5bsx>n@)NmqsNdxqL<EhLo@l-*tRTWsMw&&D8^e2ln<$Fus
+z;!{mS0{58jJhu%eiN<1G5{Ot(>g^tt{X8OXJEQwp;qffRJSc3hSY0Wz?95mSF*m+#
+zqnF8<qVf~q*D_nxk&H_iM}D*^U;oyI$nTO&md!7GsXd-aKpco4G1IGb>Ls9aocwyQ
+zXOVq@<vjUNfvmtMfH*~=53H(Kgam2YYKB|Cmtg8wKs0d!x|STIJ0Uj&S7cAh=<rdJ
+zdXR|03YF&hxF52|(pA}D3x(zbm9$?iZ%0S-Hc*xek7brHI$#S+CuJ7myG>J#Cp=z#
+zL2fq3L%X3yM{4DNyF~<h4DG=?vr4zfBCT-v=*&j`d(u$oiQsV*lDVBZF8Hmff6YE(
+zW2UckIkMh#i?LmjOn}3*%A0LMp_BAJ%nt3#_=CLML&5rULEL3#P3|z%`}70n58+|)
+zy6kmUF_prx7zPsN$NO5IR8>WnI<H1P*ikiEy5rJ6>{QS_>nw;{0MA<D2Kvt`dW2g@
+zL#)Q&G|FbfK47gjRB5Z3N1sTZCH}(y9>Hl61Eab|soS+RV*}?#^Ss>r?p*g%B9pKc
+zFt+(Njoz4_9Ai8oRuYC3tuC2rNa2P@irQ3xp~2abfk;bwOrEh>t_XuKOC*i&QcYPK
+zJD&jX^u-v2qtCLtWd`q(cZ}!;uC9krSVVvqI_29{kvx><w8azqM*a&Y&oNfP!kn_r
+z68hT)P{7TahN0q&93Q2}I3KskwK_M<xM00z?b7WC$ulUeP*yW)pR&SNO1@F$O!aEc
+zx&NZS@h(USh=Kx$H_^EeIp3AGc`L60-urI}GFn)O&z;NJ0$~KMGPI)ptex-3&X-r6
+z0k-5{@|>-|pWI}7g)bE8I{q#&z!L1chHrR@n!_R(X}>^SP=g>l4<g<-Jp=X61El)E
+zcf(U<(>W6Ih;%^^mUGRirLV{@2DgEot!B_m0wN2VW3iMQ3^5$K7`HoDV`3*p<Trvk
+zVdsnxz@Cyi;vMm=qV=5ngzlCHfp^NIhT5urv7~Z9VFLJFbZ^ii{}u5vF%@7G?lSoh
+zb`;7^W?ai(*fh+(15>EvHy@8|$syxzbN7}WbU}POi@QSA7C`(4>o#db@f`U4tOJ4X
+zOUzZ(4!a{sajQ%fK)|Z1E&Q<DP;IKZPs(7$>klZ=%o1R%d5(WZ<G-au{+i&1hFt#K
+zOk#GZYqkGK_6?`9INiDpR4JI8?6dN+1~E^CAGfqw4hsI01he1NBq{{7hYi;=Z)K&_
+z!y1^S-@?X}FEvu@w%B`pU-ACseO-LGrx!5H{ES@!$WdlAEEYVdAeR}6itTbrYwbk&
+zmIfwub0?_wQ7g~+!T7hNJ7Zk;e%}tf++c#ca_iv|<Iem<&fAi!*lDJ7+C#dNcv;uX
+zmV>}{V?lTXrm_JcdBSa{4He2{Mfv?}->F}vH5o5NpX0kkil!r7#iaB0?Y3t1gOV#{
+z<4Q~0KbIKztBKRmpJ`f@x%N(Nv4l_#v$&e{mfoysuDQHH)R|HedZet}J+LPwU#Fdd
+zN-EabbKBU(Zlg_L&Yn{Y>A8%U6FyaQyCz6!1(=LWCD&rFOZ_yR8D#iE5?g+6SP!15
+zNEO}25RyIa(Gf0?i5qOkwl2U)qqjpDSx*p0O*HJEL4v)OH=jJsGtx7b1l6uHf!aq_
+z?~$z6<zy%+KZqT6zqda9UAkDj5udL+q!|So-CWQXMVFYyQDGS#(ljZu@lxnTX00v|
+zqHw=*Jh$8gLy>EscaUk>5zAog1j6%T1KwA+n;mmq%Wbz5IyAaXCO5R!d&Ui_0T=g!
+zuF=ZjV9mR<(K)DbLCZjY*jSj$Ol=iRMQo>mGCvcJ1lMr;F~R)trG4qqf<?IVXi%`n
+z&a^)42zgGMN7C>xop?9-5$P_10(==ciP@jJ4|Nx+v~q}<V5A9JOrj%3d~mdCp+V_<
+zKuU-XXpkmMI%>MxddIP*%2P5v2yMzj@)dJXW9#>OE|v|o*cEZiPbR<VDsWTrEc3(s
+zDC}x>Hh!M5q~Q~4l{98t>ign;g`Jf?7+v8(%exau+4cN~`gF|g>@Sl#bYKN|5_x9u
+zMfJRb-=cq`M<M!09<dzwe+4sI{F&1#OEgjFypmlJEJ6X~D|-sp2#O26dBW%;(+&Ws
+z>vQfOmFF2O=9x-FJD`4_b7nP}2PG^PJ#Kwic2qQ&>XA$q{L6V+(;HY|$W7jbj_C9W
+zy{-?T+nU?_1yZMC8l<FZH=|$WHU3BVx~e6VYl0%j{)9#Gk+|Q0PZU-+mpC&n8gfd>
+ztX$R2mhtLu+Zwt51ZFg>6}o{T%v|IM6w^pH90!BqJ?#-DPJY|8nVE*ID5FZ&MThcp
+zbv!*y`i28ZKF;XxW2t)8TS6dTj9(;$!O#|w&#&2R+nuP_T@}4(yGIz!JXDIUQh{7-
+zWOFlZVE`+<(Cr9(A&f<lLMRKXaBbTluqA7mSIdUMD<oY?zaN%b0A|2XmcDKL7U*X1
+zRISuac6H~|&O3UIb|PX8<W$!)wave=Mum=7e@2b8|EKr4yh%>i|M>qa6SyYjpXn*h
+z{SPEXaQq|q)vyT+e>X`4wss0Wq<pnx<EKn*55PAZdkeV|Gl;Z>TAco?0;QET#`BsX
+zXx|(9pw^lA-q3Q^m#EY`g8q{Kg0q+4HjPiD(vwTyo9&vtQFCh|_ie{`_B(x7!?^lE
+ziE+&vy(_AZ5M-F;ohI)RL9uEwWmIe(B+tHF{HfN+zCjor98T&>uCg35ewMH7{76aj
+z?{HS+Z6Z#J+$NJCCx!DII_XUxoXbkxhZG9yG@}y;e*_`zTFP3U08|$=(UY|W)zqJh
+zpI|3*pJk%Vzt{fBIYs)unp3o{V``G+&I1mvJ69ENUy<9cJKuc78Z3EBdz~yU{Rwl@
+zc#1HymI}w{HUfSC&8d3ZOtj%5Gq~dwizB!5Gr4%tL-gU;IN{vl%}OD83i>SNNQa8C
+zGH|-}xFivMzzGVE)t%3~gx}+v#IlI3^fJ~SYc_gUM`hg@Ts9947-ramI$ZuEQIjy4
+zDhd5NI%2y+&sp0m(-E-wAZ`_fZ0>^RsJFQWai>WR<rpG!LPFaFJ)mbgcSaMg0|(De
+zEz%r{UTj8X{MPgr++IrS9M9a1{Q@239bBEJTvM5=hlVyFqnQW9X61o4SVI)IiO=ZH
+z(@oMncXc;SPblLfE6zc$Y2Pv$bDnDUS|0@OQBaD@ln>DT6n5hg(;AZ|I~R8%G*x|%
+ze5H9q#E?BkON+dxs(E9mk6Fdk+O*Yh&GU`8t$bu)iXD`wuHfL$HJ#!Cycfuxp0c18
+z7_i1$SJbt3SGSkCcXH3A_UomX-~7W32+wua??rrVXXd%G8}f!qVe(?l)+iqlNxo1n
+za!k#V*W5yl)Pndqhz)$Zum;ncX;Ev@ik9Py)u=AtJPJU$!#mUQJstpQEH0%S@Aznp
+zw(XGyO5WRkO%sqeVOz46wHgEel2>bZ8OJG^1+5ZW)JNRLk3huL+YS4HLnU8TeBXA9
+zI!d{#{JrTUY=YrR)30_*XIn)%=CX4iht7t0`ZX8%X_hza&9u07V9v$pbbzA!3umSi
+z$j#`uTtBn)GVm2(XmTa)Eoc=|SzV=Hr)uU;t2Vo@H}eg95UaTy!%?182&~O?-Az5C
+zBN<=mAF0V6OFW|b(axp54t*gGr0wWh(V3yVgW9DoO`&yP>##w*jfLM_T@a^tp%Mcd
+ze+!p8SE6PJ96c#WQf@<RWzYq+%DI&R>Fb(*1FO>X&V?<jBVgw-`2(LsU)HXy4eH)y
+zod&)D_t{Rgq^e~RhVE2oW&G#x2o0A}f#7K8>*spEb6ZNrVQ!N5f-<NqHNAKtEy4KC
+zJWlk!#HAl%&!PY6`eGQs(nUCsk0voYi!bwTs~M&Fif<^`p*mxD#a*8|kSFoW0xyCq
+zT5`%3WxrG{C@}IVvK5+t$eLDC%*&W5Yi!sA*(^J+c;+bJ5j(5$For&|JTkU&Ljcw^
+zg(@!|!ga_;BP)_C%QjXmE&=7v3c#GxvifK}2)be*w12@!$kgW3od2vJT?;`zrvfAe
+zM>F>F%>q4Y6}O=0h3^OF*rw5?I-{_?PJN5LpaGWas6mun>j0O#xX##p>Osv_9=!Cm
+z=&tT->Eu+m|8A8PTjF*#C+PhwJ7WdlUBqL-M}#b6eSOr+bPveod6xsX<=iTmmGsG0
+z=A5L^Amz;;Y<}|rT~U@tn^E%;Ke_(0=Q@nsSgL>NXDMF6_Hpq!eePxLS7X!)JakP@
+zZrT;fV_e}*rMZgN0fNc``_z!cx>xh8B-z;)9b`XEZR#|}YElW`u&mpeFb1=BVI-lx
+zR`wQlypa$Napuv?4T<hyv@wpx@|k(B5dieR1QW4OH4-H84sN@cHnT#V%js)qxf&X-
+zyTP6Q-Vo05hwze>)}Nd%!5Y%kh&-`a@`^f@@VDzIhDYj@SL3mYiL|G(&uK2Je~zBK
+zR=h|!OP#5a2b`EN?DvYDb!V}Uh#vi8YgF9TX4Kp=R@8g_c*3qGqUsbnKRz2VMsomt
+zoluk(<M7pEt8`?!_Hv;%jzyz;R*8F+cgmE3(~i!L-o!-znj(j_+*4AUk{;ACdwiUU
+zfFw9iqmO>kZAK8y;m8R5mb$6Teev1#d0lhCJY{`JfPSTWZOLE-5&KmduNtgLS16(*
+zi&w#J=j>7JG%oTCOmBxy${AH*v^UaDS-^>vobQ~0YM!=V`8R+;T|~PW8Lb_z`AmJ0
+z)#(HihpCaMWWh7;Nx203S*WhBbbJ(Vq}I}#s%Gcis9e``v-G~=Omivyuh7oGP{d#s
+z4%XoZq`$WMjO%jFWJ_XIxDQ1T{RI0WDovy@jBF4ij_G%p+X-b2ABY$1%Q~ia{nbKh
+znM_-SUJCpR7Y|g|zNxGtd}<p~eMtN&nwfe>eTo|`A5mEfnCQII-3a80-bF7MAo#_E
+zt#vwgQ`ZBVFK4VU!XHN*pQq36hwMeI#Td+i%zllJq=wLpyk*WC_0w3@ox5=pjctV%
+zb63xNsE8@iqG2C0*_J`L7Wb>nCe<iK0cv+wnYbEQX^#g1<+vK9eJp;zG>fr8HrVtz
+zQCkmlhgta~GvllH7FR2z7x6<Ct>-E0Ke0&qx9lq5BG!r`^k7TZh@*9zoQGjL&RqBl
+zO+IguT3q7SXR?2;9qn^OHwX2CgDso1)7!9i2J}HL)Y;n_FWp?C>?Zr-Ib$^&5V1}m
+zo)sQfvC{sDUxvBS@FQVg%SuTW?2R9hI8e5fow6<uEviQ%_oP>-yo4>RFXg|wrr|C)
+z*8<h#$<8|fyV?*RQN6G9rsQ2@CTw3|K*8bwMZXE5E_<FcKkzlapsGTP<>jU2YCH2p
+zdS(euc^cC{JT1Z_k1&s>5Fj<lhr%D|xuCdM(Wz-$tOeCSw0%+A@iy{xTRyr#{iy6Q
+z0`7XC|5Kmc+7;B9nq4+dZKT6HiF?k!BhV*5-gCB^%AarjC0|#xG&G%TgOAs4?z$S<
+zDc<FdAPtbwWwv@eZ&~?j^y=^#?WbHgE~H?$WE5m|-Btr3&jfpgE=U_}klEKbLl2~^
+zrw(iY<WPM_Gk%1j>i!7l!TK4OdS)aK<2<UL{Y&V<n(w^3G}CPjLX+VG@K$~xYqZf3
+z<|-EgsFgL|4$vLqQ0if2h%rX>40oUCZ`zhrD|WcgX!?9tK+C$<G9R$wEw$~N3oPB&
+zjQ^B3z~)LO5Sz;?y}$AL@~p`(^fB<?<5NvTYagbbwePb`E&Wov&3s+0)feh3ixc1(
+zvBS>NDpLm=Ib8QiqL=-Ion{2nhnMdy+g$v$MxD5$mT7--d@PdH4rnh*Y^+4$mo*LG
+z4rw;i)0HdHAMkTsS<rVmUD&f3@?hGuK{&>2Z~xL%!q$5)5Nk4p3@-6AeNQ*1G9xxF
+z(PBSS{hj(tDvy}VUMzjo7}lse@Z9#68qcZfAY`3HhWy<=82PTCFcxVzR(K8`ly&eA
+zGxkBAG};0;O~#5Q$x`sv3J__Pz`)(AnnRhLkqeb1vTCLm-pE;-z(SI;BUP(26b1dN
+z21K2_JtAZFp@#ad%gOIkQu-9xJ7%IDNq)_~9vc^l)LjVLYh$eg>+z<iWK88TCR)23
+zKN&wk`+Lh$-9`6XtSDk}O~REFl$C7I`m8TPPf$?l#Wrc^V|h-+-?3uQX82@exMilN
+zArEef_(sDzU6d@oE-E_V1F}-2)4AUhAGl_fUl5+f-wc=GATmEzN*gQK*DS%=MK^<6
+z(kjH>x~(8;*?9UK(Fn`^_$-4deUCWHb&$TDL+wVZ$0yUY9uOb90{=dQ&FcZy*nVZ_
+z3wu5JzWq^L^WUD~Ax&~Ka$tBZcca~#y9DxU_m^-!<u~8GOa}0O352W3GmaPJouOZ=
+z4C~}28pQGV5pf}N4Sl<5pCV%YQ6lVoofl1BF!t-XSaBV5hQckdN0u0a><6veN;;ZO
+zY0gSr_zMgUl*4BrKa-mPEZfn5Sl^p0)?E_4VlR#CNn{pjI?TGilpFF)ZdNs_;-+{3
+zLR9>b^^w|JzPgx8+8Oz;Jkrz4f532Ro`!7|AMskkMSLo+AM9y*a(Ou8o|l$cNPl1U
+z9!4gy3YuF|;$_HWhbbFIz6V>GHKJ{?ZD`SHNf%~F7?I625DZrnXI)O@E5p3>FNL?o
+z%A9_f$4E!lU@h5wEQiK=Qn^}3j4e^^2{mQ^!k*uHy6iPl75ca9e)yz_C%NkTB_Iq_
+zlau*solE^GqXshG3L*|J|3gX9WS3nD+i~N(<B2%FD|Q63x9VuchC~PSaEY&Se%?y@
+z?DkX8le9<iKcf9}H`Gwts;Hw97SCt<Y}-uS0neQvQY)+&l5<!P%6=FYZ~;vvk_~OQ
+zVsQRfv1X<QXwsahdJUEZA3O8nUv+=oGbuc?@Eralp)62=`3k(=Jk5T@pyx~t-g9i=
+zMKK%UHTBpQXyvy#N2#>D!9{E(zy2e`-o2o%B5_}{P?*H>fj@|T1jMpTPKC8yHLNGi
+zS;0f(PW4;-R-}XXZ+d#%gXHMlidTiDcypw!m7B`%d0j^A9#l#R|CfR6{3oN4T2v}%
+zMzRl>_r<J44Q6)BX#0C+MrR*)Q)NZb3UMFi3SoyQzjHGB4e%!BF6Db>KD3j6#q<KT
+zz;*_Fx`)xO=(=A57G$<KQ2${u6jbAt_Q|fV3mwdEz*mGbqNVcHoSA-xb0-$yUy>LA
+z`AHpgPr;p2Whait`kDj&a@E+ZLB<1xjn$`V7vccUElgYNOVOsx=l=hp$Hr!e4rTC)
+zCiw1FJS{kCK4pCym<d|ebBJ4RyPSvd^%@x%a1Nc|<+aspF;B?1Xa7*H*Zv;>0RR6D
+z)Cofq1j7K}9qfXQZNP3e#x`~XV}k*^fZb#F$rw9eW1KrcEwfV7)YQ~8uS`wN1I;Us
+zyzxpcjXX;{NxaXj)K@db?|Xj4W57uwTftM1pJ^_1Na)X9>`Z^w6Ip#GA%yoGYJR2o
+zNpeK5)xDDG%FcCX#nFa&Hdy*jMxgG9YG2P`rK9klq`zo7<ntUn=mdSAWIR8dsla^G
+z*leB>8^Ip*K!{BKe&lWh)LI<u2i6&HtE<6G#v0bIq+Z1kc`yL=PH)zhZg=jtw}TO!
+zS*)Gv?&fjq5pOlIJJra^>AC_`D>UKDWhlaX)kKXt^kaH**TL=|JLeQ@+m9l)*%#ox
+zFEpZl;vQpOZ2g>9Es{b2oH?QY>DrWI65|PNjaO_;r@xqP<V7yBmjvq@5}n1+$-xz7
+zSo3m+qJk;zBtL1Bo5n=7R0EvBe{NTys0C*EW8z}=-o{<2+l;GW3{L@xkZpi-@<Loc
+z{Ad3q#nEiE`yE1y{m}GGv7t<3GE<7m0%&?(pb3Dk*B>{p2;D8ap?;{_YhGc0(5(TB
+zF!ZVbEnRS$-`p_1Wr6OGY@S5ZJ*$4RhGZSi#K5+@oAp;afW|)ORnJ$nB~dwiGPj(B
+z0IgJG5t#xz@UH6DqInPlaCz!C!!_2ax}cM7BG6_^|J9%%dN)LPHySRO-mXO6v~Q}A
+z1LoRSrjvrcX0o7P$L%TayjXxtJWrkm3>DwzexExozuq>UGtKPvzO2kNovg!{`>JX2
+z-hf&*(;2lar!MU5h5c9Gi!oYXJ3qJ06+CXq&vUV=b*D-{F2P1qsm;cfvR>##&by8h
+z5ix|8!u_``$J=$D6LuPS7d4Tj$I8*a$m__0mRz1ec`80u8Pu&UWdtD2Q0Nog2JsTs
+zpwOmUrx(H|A{IktIr;<Z+;-=u42*0i>|Oki1R-luXGPJW@+p>7dI=Q(#9@d1=VfwI
+zLq<i@#2iq`d7OcB9G#A+=;XXew}p`x|JMI;P^>!|?~}zatD6r=R~uyVh^G=vMy!K>
+zQ%Vc(lypLP^w!Eg)3>~zM8CDqNRx6U4Kt)`l5eQD<10yTHK5kJArB1@PbomvJ+1qr
+zL2fSaTg`0QS{M$}7+w@+=FdQTB_>22kX!VA(P+BT>@O&36hIHjFV+5Kb%)Qh5Q%l-
+z++Z(H+4kD>(lSwZ!MM)H@f;}~$TO#(F_YdgfbCI2{?*Kh=q8(7;k5sgwNa9)+=5yX
+zcqm(d2=$P{zi3uEJ(bhgA5nmHH?&L^ykU=|qw0P+w2)#N77tQUUH|E~HeLua(HAu*
+zOn1P~GIA;OmYSl@1}uDQ#*79g>2}ovqAXiWxUJYncqi`B=aGJee-5L{S72JG_p^G!
+z!`#APRKL&tTLnWJCZAD$jW=;O%Cfuo^-d$%^fHpkyiq;fyicF+GKQNHno?=#XN@07
+zZ|E)EpmP_jNc<k})h!G<bPjVVw1bO)y>Hv!K3>)%;O5SA05F$*RA{gJtBiZy>+Cec
+z0mr+HEjbnSF3{eJq|rk0c{TZM%|*hyumE!qys7bh;857bzvNnmYLX6j)poCIyi6%p
+z5*=FaPQdT1A=SP@JA1$FLe6ElK^1M`qfl8~N;$mS`2cd4=Q3TUtwz`~@reIC1FlJB
+zfI4FK0lmyK1RhQMuBlpmMf9x5Zfx*R2R@BG_jH%s#4qmHre4xhSGT)uDfcXLZ+>wc
+zgsC@>?MoG9;#JO1qEaWnQ!Fk`tW)W|F9pEZ+Zf2aGOG%G8~uY(l{=tYqy1Ux=b7`O
+z<=KI`_yJR(ndn;T;-#lCys9<Uy=q2kY=9bhlehqQuWgX<oIO<~CrZ`1wwt866$;ve
+z(nU;K=tQQGxHDrlZ-vVOyazJNp42=s;?!<nCV{8i4I*)Iou`>~h?mL%!bRvq;G5)u
+z_MJFeTq3vwzz{SQ|0R5d|8{(;zojat81r|E7su9c7Qxm+DYCk<+v2vs)m)W58!%J1
+zhC8-?ykURt3ecyN7X`~;U~nk|-tnJkkqMGsMdc$#{EM^|);`0Jyu&aS?UcSHEw?n#
+zem5yayVwsiuo|Vh#nz%51>fAMY2H=$nfVoWM_Dk|jB?kz;kzN9iiNf3V|$$|3$9nk
+z`IE|(&atdEPqOEg2EzKjVv_?bf0QPg`r;|AwMp8pOWZ5nh&n3Q)Z~>t?L6nXpC4*_
+z43=bX63av<-KW$Sq-OI+;&)9`tL>b_ty8o|YtNGnl`qvvEe;kYeT?%5Cs{u1dgQ28
+zn<N|1XMx^o0d+P1N96OOC<9tuUb_}>M%+^MZ*W=}SoNB_%e4m0B-gtdy*^0HUQfDJ
+zbDMpM_H}u4vWp>O1kx{|J9U}PlZ3;$cPvw}yQ4#`NK*m#Df&$21l*T?KoN_7-+PHQ
+zBphRASTlUr%kwdv)r(3Ov@MPBFbiRCjFu3FG6%`UbQ_=2;?)M%8X5qZlq02G)+u2Z
+z15puV9FW4>_oA$Xe(wwN#}&J3lk5ehZT{<C6$2Z&mfK5PowFW12paMos(yl-i7QNO
+zi0=ywkn>?qq`g?)-D(a6PX>*Wjg_mCxY}ZDEcgNcK#g0~kIfM!YPTw{=Dn01!l5gE
+zZ5mf)YwE0e9}#I>Uw^eIuj(Wv8l1$acPFb3<*%*$|Ejh2!s4FXG$%ib#lR0+lEMeY
+z6Em+mUX)HzE)Lx)okSw#j4&H=CK7Y14q%5pe{p)%2CuS*7<&Zo&kdKA6*hONfB@18
+z?CIQ~Sfo3Z2kz9izILq&bQ3QZOw0I#IbQK|PjSaO-YVEP_GxNe`R$JBA#Lt`nWSP@
+z^ijqW;C91C@i8~G<tXeY%%+CkI7|OOHq9~tP}i}%aK7uVbXvNXPLGGJKg$%U7j7kJ
+z5mX&tk8MTFOe{va)FPl4vRj6W{nGh1q!Sl4ye{)PzA8I_3Un8s?~s@+EciNPDMvt9
+z7FJY7sx7WFmTE+kJx8x9Mx*v3#!`j?eO(2m6yVeL<47@C?|N6K$Rzaaw=D3ij2))p
+z$hul<a-if%-JO=NlRh0?0#IJ^+E|0O8EvMV61}<J<o=bL2D)=MMj9&b==zwCc@e>%
+zF)?U^b%6O0Y$U7|T2uY))lJlr!=Pup0Q@ZWrSDjFlgLZFg{%ThHg;P->Y2g!;-J8R
+z$|mS#-45hw#ro_f!N!i~72<}?$&YK^=spkGlFu6+vfes(mcz+6jhh%;$DitPu-m?K
+zf~AIbfKI)m?gr%j^f4qAxs{aLy}RSAUIMu&?=Kt|$fFFxn9z99h-ixQ2?d2&RW`Yu
+zNKD8^oO49c$Uh}R8iYTuI%E|vKTzHg9pl}0iVE$##kPYwUXbCbshOF&TSAU)YMTx3
+zVR`IdcV&yawN8!N%s@O6##sN9I%6^C>jG%WVGgL^vgDE6=-NoWpIcQmza`~=BT3fe
+z*+;m4vG0|I@!RcfY;rr)Lm^&aU-AFgw9|hh`LyCU-=60Eez5zMJ?MjUFeEiZgyZva
+zyYf7IkUtVBX+c(nGG-agJ+oC0v2T4*$8Hvl<7d9e+W}p`UlpSZ=bOO#eVQ`G>Be3?
+z9(!GFB92m=EVxrSrWtB%=-DO{NWX!WgnuG`R*sYv+W@%-s9|a+aEAW1erwP`*`53)
+z^Hd0dy<Bpd+|V@6w_RM49*1};OyVZhAY6TtzT$nB&1L7KyQ*)S##Tk7eTn6+S(;@e
+z7{5JzNwa|rryAUIi>C%>W=`gPP%PmCO8Q;LJ2r_F$jAQm%nZZ#P3Go_mb3Nk$`!P3
+zl9Gy#tyjXtNfkdx)(erVeX9U(gvO=~HBTTMX4-j?KCf&-<qXhl)UvjWszywph=!6-
+zvm|HjOHi*PU&M>kZt|t%+46Ad%LZBH-&ke-9{LAxRTHsfG=5J0n&1Ga!aXMM1KvyY
+z0o)ky{_s^oM1L&JsLK!=Lds5C6$BvY$SruuarIE~=W>S_ucCLuLOjs-Uy0g%9(5FQ
+zG|F}Bz*`7219I7Q!c4X%Yo)i=I!OP6blhnXe}|swnu1uOA!yFI-=ebPe<Ln%Zeily
+z2dZ1%r_Bfb-L>7krv+nt8a$EE51Qs1fzN{#;-{yU+gISr^FA?UN$2@KvCikxxGk{(
+zRhD;H7pJa85!_;8AlB`>7C1qzO3#a0s7031&@%TGo{&=|QThMn3^6U%6EJUUJ?;mj
+zwB<p9?D*D#v+`+2ou0gYTL&J-GiYtna)8rJL+@{+DUU|}pxlXORT3)BQ1#7?iOR|)
+zt&4p5+Ir1tKRvLn5}@b>erNoI&d8Gsp2b#|cbXO>qP2av6_vI8Z{W+F3kBmXO7<gS
+z7a<1;hGn{Fn8#V#prOesILdc6U^B}E=RseT@k)ns?h8o;irTi$C4z6o!-7cSD)Aoe
+zng#4^C~u`5ZpSt=ksE_kP-i4s!7dKGz^vUxUl>}iVc7mNcPQuaWC3Vf3cEkHwY;Uf
+z$GZgN(u}S;5_%DOL|ufhk3t|of{B0)GX;P8U9D9Rdtiq+V&qqej5^m3JZm6b1H+>w
+zhE#?8Q08gq66oGk4X4H|BpgQlX&O=+Si7))rbd7_BY(iQIhXSS<?RS{2}pj#a~r!m
+zHAi+^dQjx>csNO)jl<2H0DKj`SN>0QYaY;AUpvpbF(He-YCjg$AUEbu?#A<i{5`h)
+zToq+Q<5rNKQljW@*x_H9Pi)J!RRH;->7`cn=muc)E8Vcj)v?t6S;&RJi6(o!QDlbG
+zy8zxMu#`P1K7(J`O_BZ`m*bw%A7R=>Um}KUMp=u!bQ`YQk~*DtrXCWJl#L0&=$@jj
+z#`eU|*4H`rt;vS$979G^d9VFd#!PB%{DNUG`<(uzUTCv-N=lbzPUWow-6G6KpQYZB
+z<YiuMlT<%JUbTG}dc>cQ`K)}BZbt3-;23`z%0_(EI1|+$y^#kD|IFT=i)ak8=5|hC
+zhS*zbe^e~1f>ja2F9K<DJM~$|4)b38S=}q`t&+hSt*0qHwpQAeOirr!34ex!!;m>=
+z{96oHV*tb5!~xSV`$PJx5@W?|^BM9lTwMA{`#?@jaBZWBok|=`FLS&yj5PV`7RJ4n
+zqm^;=eAM_-1@9b&gJs$)AQy`%DokY`bv)@kZK{ocir79#zK_0}G=Wd}*7Nx}M$;%I
+zl5?r@JFp7lDx;_qlE$8W$q$N@(!Xgoh@4!Y)c5>Q!VW205(;_daY$Loim=JBhs@5)
+zhXnY5AiVp4=9Tk}{}a#_)}2CF=w*S-y9_arbS3`C2!<eAmxSw5=eyJhW7f4Ckc8y6
+z)`7W{j*n=cRJQ8og;czUe5O8_c!F65yk2pdCJ+#!musdFMP|9E23rfd1&h&Fu{H?l
+zK74T`HXP|l{7^Sw|JLzc{YRRe;SSyg42=DyFzMfy^&xMn^9^!>xtXUid}LT?3An(e
+z#SL%4^N^1<d3b0AF}lamueyZTU-zjUR5;Q$#cIn|rv>)QsqOr)Qa2k$Rc$J4XASDs
+z+szmTG88wlW=j@jW;nW|YTiWIxb%j)5*vbdg*j8GOdk_$NX=8+&zof!@{cM{GQ9pU
+z`<qNb!$jvI8Cpqo4CStB&F7FyT*8Um+T;QI|4_q*FxJzBO1!m8I#87fZ&S6=yxsI|
+z-6M%BLdd*cdMGT(B2l-<xEYM1XPpOHQnWRZkBh!;dBE=1tn6q4-oQ-`&ux-a*=5Uo
+z^B6^(>#023m#8n=cG{%rY~tm-(P~o5a!9HQUUv|sE#k_#t$M;4U_Ex9;tgs~PPYl=
+z8FcIouFtv07zyn0I&vO?(3T&G4~6%movj~c3bJvCjaFU^L2%-V!Z<Jktt+s)wiG`U
+zsX?Pcu{xu3HTF--2zgZ2a6<_L=nypDcO4HInX~fTn)%Lig){IwvfI=+q^;y;`3Q6Z
+zVLX`1UM=(2GKJd;p22f8TgwQnH}paz%79b;iu_U(bZl?gQYwusXi3Il-ovEDLOkpk
+z_Gm$-XN+O9EoMDsMfx_^mVmBPc+tzb{mN~4I5CM_TC}k!1B#b5A%Mj_(2IGk0D>W;
+zz0VMucB)7XS;RxA<4scF8vAisWv3u-QsQ?a-La=srQGamZxU9P<}%I0=1U|pcthuT
+z?E=XBk_Gf0K)mWx(qIIgt~8y?m`E=5^jcnPCc<wZr^r9k5$bjmudyF09_W(IpB6vO
+zy&1TJ{;+NdR@XSTPD5XsJ*`KgP*BEU!_8IB2iU(0Vg45HgX-V&uL4F|<a$c+G2)TN
+zclpopD;dCiiG42V2O&i=rh2$}TO_2?8PBxMrp$Kb2Sp)LuqbO{<NFLB?m<p8cbxH7
+zmw?FjR23YA3`71+9l#uc9FY{#<^FT7f$Rijn|>@<jPHp(gUZEUGq$V8S8OR<Ya@c@
+z_`>Z|0?e*WS@VEu@Gbqlc&Xxb<_+&?*2*4umLqx%pCjA~e6P8Y^hdlVG1oB~@?I4^
+ze1RdAo?)5H_2z-p7~0lap`)<%i;xTd19?&gtKlpgAgzdY6xd`lYEHDzffqKPjNC)@
+ziEy4zTIR;St6ox+l>CHbFdDT0CN45ndlK(<ex-b>UsMJ!)1hi&D;y3ADHdguMIZXc
+znEt9Ajh-qJh{m|TYz7Ht<UCg4o2kG@xh&4%A{JU6+Jhd@KC+$98j)NpQ4<bywRY@u
+z`<1H-ijgw$oJwOYn`5cOQMXwZ$0q?63g>`?)#D15cg}?|+RBYcSgl?P3m2Z^?!X-B
+zJll9A0F`g)KGcpVd+8u~-dYtoX-FgeYx^wPpuC6E0<gnYIJK$_)_NFB8Uwo=g!seu
+zfd0>lbs1cLG9T$a-qvEQFVctt8Be_&wHdU+b2@gBFHoOmOBmYnRmREb!2*TG13Iny
+uSo)l{v9^%aZk?1h?0%A}=Jz$esK%fYf`96pvcI=4tvgWx0Q?`{eymS91ucF6
+
+literal 0
+HcmV?d00001
+
+-- 
+2.32.0.windows.1
+
diff --git a/research/cv/ArtTrack/preprocess.py b/research/cv/ArtTrack/preprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4e4c8d3d4b42b9da358114cbd9ba8d341698026
--- /dev/null
+++ b/research/cv/ArtTrack/preprocess.py
@@ -0,0 +1,58 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import os
+import sys
+
+from src.args_util import command, create_arg_parser, TARGET_MAT2JSON, TARGET_MPII_SINGLE, TARGET_PAIRWISE, TARGET_TF2MS
+
+
+@command
+def preprocess(parser, args, cfg):
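+    """
+    Dispatch the preprocessing step selected by ``args.target``: crop MPII
+    single-person samples (optionally splitting them into train/eval sets),
+    convert a TensorFlow checkpoint to MindSpore, convert .mat index files
+    to JSON, or collect pairwise statistics.
+    """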
+    if args.target == TARGET_MPII_SINGLE:
+        a = (args.dataset_dir, args.dataset_name, args.save_dir, args.image_dir)
+        from src.tool.preprocess.preprocess_single import preprocess_single
+        preprocess_single(*a)
+
+        if args.split:
+            if args.save_dir is not None:
+                _dir = args.save_dir
+            elif args.dataset_dir is not None:
+                _dir = os.path.join(args.dataset_dir, 'cropped')
+            else:
+                _dir = './cropped'
+            from src.tool.preprocess.split import split
+            split(os.path.join(_dir, 'dataset.json'), _dir)
+    elif args.target == TARGET_TF2MS:
+        a = (args.checkpoint, args.output, args.map)
+        from src.tool.preprocess.tf2ms import tf2ms
+        tf2ms(*a)
+    elif args.target == TARGET_MAT2JSON:
+        from src.tool.preprocess.mat2json import mat2json
+        mat2json(args.index_mat, args.name, args.dataset_json, args.output_dir, args.index_offset, args.stdout)
+    elif args.target == TARGET_PAIRWISE:
+        from src.tool.preprocess.pairwise_stats import pairwise_stats
+        pairwise_stats(cfg)
+    else:
+        parser.print_help()
+
+
+def main():
+    parser = create_arg_parser()['preprocess']
+    args = parser.parse_args(sys.argv[1:])
+    preprocess(parser, args)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/research/cv/ArtTrack/requirements.txt b/research/cv/ArtTrack/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..41bb617e4598d820c2e241dd1e5244b996e9f07b
--- /dev/null
+++ b/research/cv/ArtTrack/requirements.txt
@@ -0,0 +1,14 @@
+numpy~=1.21.2
+matplotlib~=3.4.3
+pillow~=8.3.2
+scipy~=1.7.0
+imageio~=2.9.0
+pyyaml~=5.4.1
+easydict~=1.9
+pycocotools~=2.0.2
+pybind11~=2.7.0
+psutil~=5.8.0
+munkres~=1.1.4
+cython~=0.29.24
+tensorflow~=1.15
+mindspore-gpu
diff --git a/research/cv/ArtTrack/scripts/download.sh b/research/cv/ArtTrack/scripts/download.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7292d5af6f589ae88cd8bb6f85c48d562b10081d
--- /dev/null
+++ b/research/cv/ArtTrack/scripts/download.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+script_dir=$(cd "$(dirname "$0")" || exit;pwd)
+base_dir=$(cd "$script_dir"/.. || exit;pwd)
+if command -v curl > /dev/null; then
+    DOWNLOADER="curl -L -O"
+else
+    DOWNLOADER="wget"
+fi
+
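+# Download and unpack the MPII images and annotations into $base_dir/mpii, skipping files that already exist.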
+function download_dataset_mpii() {
+  mkdir -p "$base_dir"/mpii
+  cd "$base_dir"/mpii || exit
+  echo 'downloading mpii dataset'
+  if [  ! -f mpii_human_pose_v1_u12_1.mat ] && [ ! -f mpii_human_pose_v1_u12_2.zip  ]
+  then
+      $DOWNLOADER https://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/mpii_human_pose_v1_u12_2.zip
+  fi
+  if [  ! -d images ] && [ ! -f mpii_human_pose_v1.tar.gz  ]
+  then
+      $DOWNLOADER https://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/mpii_human_pose_v1.tar.gz
+  fi
+  echo 'extract dataset'
+  if [  ! -f mpii_human_pose_v1_u12_1.mat  ]
+  then
+      unzip mpii_human_pose_v1_u12_2.zip
+      ln -s mpii_human_pose_v1_u12_2/mpii_human_pose_v1_u12_1.mat mpii_human_pose_v1_u12_1.mat
+  fi
+  if [  ! -d images  ]
+  then
+      tar xf mpii_human_pose_v1.tar.gz
+  fi
+}
+
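+# Fetch the TensorFlow ResNet-101 checkpoint used as the pretrained backbone into $base_dir/out.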
+function download_pretrained_resnet101() {
+  mkdir -p "$base_dir"/out
+  cd "$base_dir"/out || exit
+  $DOWNLOADER http://download.tensorflow.org/models/resnet_v1_101_2016_08_28.tar.gz
+  tar xf resnet_v1_101_2016_08_28.tar.gz
+}
+
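+# Download and unpack the COCO 2014 train/val images and annotations into $base_dir/coco.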
+function download_dataset_coco() {
+  mkdir -p "$base_dir"/coco
+  cd "$base_dir"/coco || exit
+  COCO_SITE="http://images.cocodataset.org"
+  echo 'downloading coco dataset'
+  if [  ! -d images/train2014 ] && [ ! -f train2014.zip  ]
+  then
+      $DOWNLOADER "$COCO_SITE/zips/train2014.zip"
+  fi
+  if [  ! -d images/val2014 ] && [ ! -f val2014.zip  ]
+  then
+      $DOWNLOADER "$COCO_SITE/zips/val2014.zip"
+  fi
+  if [  ! -d annotations/annotations_trainval2014 ] && [ ! -f annotations_trainval2014.zip  ]
+  then
+      $DOWNLOADER "$COCO_SITE/annotations/annotations_trainval2014.zip"
+  fi
+
+  echo 'extract dataset'
+  if [  ! -d annotations  ]
+  then
+      unzip annotations_trainval2014.zip
+  fi
+  mkdir -p images
+  if [  ! -d images/train2014  ]
+  then
+      cd "$base_dir"/coco/images || exit
+      unzip ../train2014.zip
+  fi
+  if [  ! -d images/val2014  ]
+  then
+      cd "$base_dir"/coco/images || exit
+      unzip ../val2014.zip
+  fi
+
+}
+
+case $1 in
+dataset_mpii)
+  download_dataset_mpii
+  ;;
+dataset_coco)
+  download_dataset_coco
+  ;;
+pretrained_resnet101)
+  download_pretrained_resnet101
+  ;;
+*)
+  echo "Please run the script as: "
+  echo "bash scripts/download.sh TARGET."
+  echo "TARGET: dataset_mpii, dataset_coco, pretrained_resnet101"
+  echo "For example: bash scripts/download.sh dataset_mpii"
+esac
diff --git a/research/cv/ArtTrack/scripts/eval.sh b/research/cv/ArtTrack/scripts/eval.sh
new file mode 100644
index 0000000000000000000000000000000000000000..cf4e1a5db05f1d67ca3f95dde9d04926284bfde2
--- /dev/null
+++ b/research/cv/ArtTrack/scripts/eval.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+if [ $# -lt 3 ]
+then
+    echo "Please run the script as: "
+    echo "bash scripts/eval.sh TARGET CKPT_PATH PREDICTION_PATH"
+    echo "TARGET: mpii_single, coco_multi"
+    echo "For example: bash scripts/eval.sh mpii_single ./ckpt_0/arttrack.ckpt out/prediction.mat"
+exit 1
+fi
+python eval.py "$1" --config config/mpii_eval.yaml --option "load_ckpt=$2" --output "$3"
+python eval.py "$1" --config config/mpii_eval.yaml --accuracy  --prediction "$3"
\ No newline at end of file
diff --git a/research/cv/ArtTrack/scripts/prepare.sh b/research/cv/ArtTrack/scripts/prepare.sh
new file mode 100644
index 0000000000000000000000000000000000000000..871bcc842f7f3b76b36c8ed01f5f415fac292d0e
--- /dev/null
+++ b/research/cv/ArtTrack/scripts/prepare.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+script_dir=$(cd "$(dirname "$0")" || exit;pwd)
+base_dir=$(cd "$script_dir/.." || exit;pwd)
+
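+# Clone the reference TensorFlow implementation, apply the local patches, build the
+# pybind11 extensions, and convert the pretrained ResNet-101 checkpoint to MindSpore.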
+function prepare_env() {
+  mkdir -p "$base_dir"/out
+  cd "$base_dir"/out || exit
+  echo 'clone tensorflow version code'
+  if [ ! -d pose-tensorflow ]
+  then
+      git clone https://github.com/eldar/pose-tensorflow.git
+  fi
+  cd pose-tensorflow || exit
+  git am "$base_dir"/patch/*
+  cd lib/multicut_cython || exit
+  python setup_pybind11.py install
+  cd ../nms_cython || exit
+  python setup_pybind11.py install
+
+  cd "$base_dir" || exit
+  echo 'convert pretrained resnet101 checkpoint for mindspore'
+  python preprocess.py tf2ms --checkpoint out/resnet_v1_101.ckpt --map config/tf2ms.json --output out/pretrained_resnet101.ckpt
+}
+
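+# Crop the MPII single-person samples and generate the train/test index files under out/.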
+function prepare_mpii() {
+  cd "$base_dir" || exit
+  echo 'preprocess dataset'
+  python preprocess.py mpii_single --dataset-dir mpii --dataset-name mpii_human_pose_v1_u12_1
+  echo 'split dataset'
+  python preprocess.py mat2json --index-mat out/pose-tensorflow/matlab/mpii/test_index.mat --name test_index \
+  --dataset-json mpii/cropped/dataset.json \
+  --output-dir out
+
+  python preprocess.py mat2json --index-mat out/pose-tensorflow/matlab/mpii/train_index.mat --name train_index \
+  --dataset-json mpii/cropped/dataset.json \
+  --output-dir out
+
+  echo 'prepare mpii successfully.'
+}
+
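+# Generate or download the pairwise statistics required for COCO multi-person evaluation.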
+function prepare_coco() {
+  if [ ! -f out/pairwise_stats.mat ]
+  then
+      python preprocess.py pairwise --config config/coco_pairwise.yaml
+  fi
+
+  if [ ! -d out/pairwise ]
+  then
+      cd "$base_dir"/out || exit
+      $DOWNLOADER https://datasets.d2.mpi-inf.mpg.de/deepercut-models-tensorflow/pairwise_coco.tar.gz
+      tar xvzf pairwise_coco.tar.gz
+  fi
+
+  echo 'prepare coco successfully.'
+
+}
+
+
+case $1 in
+env)
+  prepare_env
+  ;;
+mpii)
+  prepare_mpii
+  ;;
+coco)
+  prepare_coco
+  ;;
+*)
+  echo "Please run the script as: "
+  echo "bash scripts/prepare.sh [TARGET]."
+  echo "TARGET: env, mpii, coco"
+  echo "For example: bash scripts/prepare.sh env"
+  ;;
+esac
\ No newline at end of file
diff --git a/research/cv/ArtTrack/scripts/run_train_multiple_gpu.sh b/research/cv/ArtTrack/scripts/run_train_multiple_gpu.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0b1c26936e6180a6429f841f9bd84e655857520a
--- /dev/null
+++ b/research/cv/ArtTrack/scripts/run_train_multiple_gpu.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+if [ $# -lt 4 ]
+then
+    echo "Please run the script as: "
+    echo "bash scripts/run_train_single_gpu.sh TARGET CONFIG_PATH CUDA_VISIBLE_DEVICES DEVICE_NUM [OPTION] ..."
+    echo "TARGET: mpii_single"
+    echo "For example: bash scripts/run_train_multiple_gpu.sh mpii_single config/mpii_train_multiple_gpu.yaml \"0,1,2,3,4,5,6,7\" 8 \"dataset.path=./out/train_index_dataset.json\""
+exit 1
+fi
+set -e
+index=0
+OPTIONS=''
+for arg in "$@"
+do
+    if [ $index -ge 4 ]
+    then
+        OPTIONS="$OPTIONS --option $arg"
+    fi
+    let index+=1
+done
+export CUDA_VISIBLE_DEVICES=$3
+echo "$CUDA_VISIBLE_DEVICES"
+mpirun -n "$4" python train.py "$1" --config "$2" $OPTIONS | tee "mpii_train_multiple_gpu-`(date +%Y-%m-%d_%H%M%S)`.log"
diff --git a/research/cv/ArtTrack/scripts/run_train_single_gpu.sh b/research/cv/ArtTrack/scripts/run_train_single_gpu.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a55d156323800c27a7a9373950e59e359511a08f
--- /dev/null
+++ b/research/cv/ArtTrack/scripts/run_train_single_gpu.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+if [ $# -lt 2 ]
+then
+    echo "Please run the script as: "
+    echo "bash scripts/run_train_single_gpu.sh TARGET CONFIG_PATH [OPTION] ..."
+    echo "TARGET: mpii_single"
+    echo "For example: bash scripts/run_train_single_gpu.sh mpii_single config/mpii_train_single_gpu.yaml \"dataset.path=./out/train_index_dataset.json\""
+exit 1
+fi
+set -e
+index=0
+OPTIONS=''
+for arg in "$@"
+do
+    if [ $index -ge 2 ]
+    then
+        OPTIONS="$OPTIONS --option $arg"
+    fi
+    let index+=1
+done
+python train.py "$1" --config "$2" $OPTIONS | tee "mpii_train_single_gpu-`(date +%Y-%m-%d_%H%M%S)`.log"
diff --git a/research/cv/ArtTrack/src/__init__.py b/research/cv/ArtTrack/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/ArtTrack/src/args_util.py b/research/cv/ArtTrack/src/args_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..cacf33e91320c76ccd5acc0fe497de607f84f02c
--- /dev/null
+++ b/research/cv/ArtTrack/src/args_util.py
@@ -0,0 +1,166 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import argparse
+import re
+from typing import List
+
+from src.config import load_config, merge_a_into_b
+from src.log import setup_log
+
+TARGET_MPII_SINGLE = 'mpii_single'
+TARGET_COCO_MULTI = 'coco_multi'
+TARGET_TF2MS = 'tf2ms'
+TARGET_MAT2JSON = 'mat2json'
+TARGET_PAIRWISE = 'pairwise'
+
+
+def to_number_or_str(value: str):
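+    """Convert a string to int or float when possible; otherwise return it unchanged (None stays None)."""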
+    if value is None:
+        return None
+    try:
+        if re.match(r"^([-+])?\d+$", value) is not None:
+            return int(value)
+
+        return float(value)
+    except ValueError:
+        return value
+
+
+def compose_option(option: List[str]):
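+    """
+    Turn 'a.b.c=value' strings into a nested dict, e.g.
+    ['context.device_target=GPU'] -> {'context': {'device_target': 'GPU'}}.
+    """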
+    result = dict()
+    for o in option:
+        kv = o.split('=', 1)
+        key = kv[0]
+        value = kv[1] if len(kv) == 2 else None
+        keys = key.split('.')
+        cursor = result
+        for k in keys[:-1]:
+            last_cursor = cursor
+            cursor = cursor.get(k, None)
+            if cursor is None:
+                cursor = dict()
+                last_cursor[k] = cursor
+        cursor[keys[-1]] = to_number_or_str(value)
+    return result
+
+
+def setup_config(args):
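+    """Load the YAML file given by --config (if any) and override it with the --option key=value pairs."""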
+    cfg = None
+    if args.config is not None:
+        cfg = load_config(args.config)
+    if args.option:
+        option = compose_option(args.option)
+        if cfg is not None:
+            merge_a_into_b(option, cfg)
+    return cfg
+
+
+def command(func):
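+    """Decorator: set up logging from --log and pass the merged config as a third argument to the command."""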
+    def _command(p, a):
+        if a.log:
+            setup_log(a.log)
+        return func(p, a, setup_config(a))
+
+    return _command
+
+
+def join_targets(targets):
+    return ', '.join(targets)
+
+
+def create_arg_parser():
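+    """Build the argument parsers for the preprocess, train and eval sub-commands and return them as a dict."""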
+    common_parser = argparse.ArgumentParser(add_help=False)
+    common_parser.add_argument('-c', '--config', type=str, nargs='?')
+    common_parser.add_argument('--log', default="log.yaml", type=str, nargs='?', help="log config file")
+    common_parser.add_argument('--option', type=str, nargs='+', help="extra option will override config file. "
+                                                                     "example: context.device_target=GPU")
+
+    parser = argparse.ArgumentParser(description='tool')
+    subparsers = parser.add_subparsers(metavar="COMMAND", dest='command')
+    # preprocess
+    parser_pre = subparsers.add_parser(
+        'preprocess', aliases=['pre'], help='preprocess', parents=[common_parser])
+    pre_targets = [TARGET_MPII_SINGLE, TARGET_TF2MS, TARGET_MAT2JSON, TARGET_PAIRWISE]
+    parser_pre.add_argument('target', metavar='TARGET',
+                            choices=pre_targets,
+                            help='option choices: %s' % join_targets(pre_targets), nargs='?')
+    # preprocess single
+    pre_group_single = parser_pre.add_argument_group(TARGET_MPII_SINGLE)
+    pre_group_single.add_argument('--dataset-dir', default='.', type=str, nargs='?')
+    pre_group_single.add_argument('--dataset-name', default='mpii_human_pose_v1_u12_1', type=str, nargs='?')
+    pre_group_single.add_argument('--save-dir', default=None, type=str, nargs='?')
+    pre_group_single.add_argument('--image-dir', default=None, type=str, nargs='?')
+    pre_group_single.add_argument('--split', default=False, action='store_true',
+                                  help="split dataset to train and eval.")
+    pre_group_single.add_argument('--eval-ratio', default=0.2, type=float, nargs='?')
+    # preprocess tf2ms
+    pre_group_tf2ms = parser_pre.add_argument_group(TARGET_TF2MS)
+    pre_group_tf2ms.add_argument('--checkpoint', type=str, nargs='?', help='path to tf parameter')
+    pre_group_tf2ms.add_argument('--output', default='out/tf2ms.ckpt', type=str, nargs='?')
+    pre_group_tf2ms.add_argument('--map', default='config/tf2ms.json', type=str, nargs='?')
+
+    pre_group_mat2json = parser_pre.add_argument_group(TARGET_MAT2JSON)
+    pre_group_mat2json.add_argument('--index-mat', type=str, nargs='?', help='mat format index file path')
+    pre_group_mat2json.add_argument('--name', type=str, nargs='?', help='field name in mat format index file. '
+                                                                        'this option is also used as a filename '
+                                                                        'for output')
+    pre_group_mat2json.add_argument('--dataset-json', type=str, nargs='?', help='json format dataset file path. '
+                                                                                'output dataset related to index, '
+                                                                                'if this option appears')
+    pre_group_mat2json.add_argument('--output-dir', type=str, nargs='?', help='all output will in this dir.')
+    pre_group_mat2json.add_argument('--index-offset', type=int, nargs='?', default=-1)
+    pre_group_mat2json.add_argument('--stdout', action='store_true', default=False)
+
+    # train
+    parser_train = subparsers.add_parser(
+        'train', help='train', parents=[common_parser])
+    train_targets = [TARGET_MPII_SINGLE, TARGET_COCO_MULTI]
+    parser_train.add_argument('target', metavar='TARGET', choices=train_targets,
+                              help='option choices: %s' % join_targets(train_targets), nargs='?')
+    # eval
+    parser_test = subparsers.add_parser(
+        'eval', help='eval', parents=[common_parser])
+    test_targets = [TARGET_MPII_SINGLE, TARGET_COCO_MULTI]
+    test_group_single = parser_test.add_argument_group(TARGET_MPII_SINGLE)
+    test_group_single.add_argument('target', metavar='TARGET', choices=test_targets,
+                                   help='option choices: %s' % join_targets(test_targets), nargs='?')
+    test_group_single.add_argument('--visual', default=False, action='store_true',
+                                   help='visualize result')
+    test_group_single.add_argument('--cache', default=False, action='store_true',
+                                   help='cache score map')
+    test_group_single.add_argument('--accuracy', default=False, action='store_true',
+                                   help='only calculate accuracy')
+    test_group_single.add_argument('--output', type=str, nargs='?', help="path to save prediction result")
+    test_group_single.add_argument('--prediction', type=str, nargs='?', help='prediction path for accuracy. '
+                                                                             'or use yaml config, '
+                                                                             'single:output multi:gt_segm_output')
+    test_group_multi = parser_test.add_argument_group(TARGET_COCO_MULTI)
+    test_group_multi.add_argument('--dev', default=False, action='store_true',
+                                  help='development mode')
+    test_group_multi.add_argument('--graph', default=False, action='store_true',
+                                  help='eval graph')
+    test_group_multi.add_argument('--score-maps-cached', default=False, action='store_true',
+                                  help='use cached score map in yaml config cached_scoremaps')
+    test_group_multi.add_argument('--range-num', type=int, nargs='?',
+                                  help='range number. split dataset to this number')
+    test_group_multi.add_argument('--range-index', type=int, nargs='?',
+                                  help='range index. start 0. only eval this range index')
+    parsers = {
+        'preprocess': parser_pre,
+        'train': parser_train,
+        'eval': parser_test
+    }
+    return parsers
diff --git a/research/cv/ArtTrack/src/config.py b/research/cv/ArtTrack/src/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..0843c31d88eaab1919ec32b51aa99f7db42275e4
--- /dev/null
+++ b/research/cv/ArtTrack/src/config.py
@@ -0,0 +1,100 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import yaml
+from easydict import EasyDict as edict
+
+
+def get_default_config():
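+    """Return the default configuration as an EasyDict; load_config() merges YAML files on top of it."""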
+    _cfg = edict()
+    _cfg.context = edict({"mode": 1, "device_target": "GPU"})
+    _cfg.parallel_context = None
+    _cfg.stride = 8.0
+    _cfg.weigh_only_present_joints = False
+    _cfg.mean_pixel = [123.68, 116.779, 103.939]
+    _cfg.global_scale = 1.0
+    _cfg.location_refinement = False
+    _cfg.locref_stdev = 7.2801
+    _cfg.locref_loss_weight = 1.0
+    _cfg.locref_huber_loss = True
+    _cfg.intermediate_supervision = False
+    _cfg.intermediate_supervision_layer = 12
+    _cfg.intermediate_supervision_input = 1024
+    _cfg.mirror = False
+    _cfg.crop = False
+    _cfg.crop_pad = 0
+    _cfg.scoremap_dir = "out/eval"
+    _cfg.dataset = edict({"path": "", "type": "", "parallel": 1, "batch_size": 1,
+                          "shuffle": False, "mirror": False, "padding": False})
+    _cfg.use_gt_segm = False
+    _cfg.sparse_graph = []
+    _cfg.pairwise_stats_collect = False
+    _cfg.pairwise_stats_fn = "pairwise_stats.mat"
+    _cfg.pairwise_predict = False
+    _cfg.pairwise_huber_loss = True
+    _cfg.pairwise_loss_weight = 1.0
+    _cfg.tensorflow_pairwise_order = True
+    return _cfg
+
+
+cfg = get_default_config()
+
+
+def merge_a_into_b(a, b):
+    """Merge config dictionary a into config dictionary b, clobbering the
+    options in b whenever they are also specified in a.
+    """
+    if not isinstance(a, edict) and not isinstance(a, dict):
+        return
+
+    for k, v in a.items():
+        # a must specify keys that are in b
+        # if k not in b:
+        #    raise KeyError('{} is not a valid config key'.format(k))
+
+        # recursively merge dicts
+        if isinstance(v, (edict, dict)):
+            try:
+                item = b.get(k, None)
+                if item is None:
+                    item = edict()
+                    b[k] = item
+                merge_a_into_b(a[k], b[k])
+            except Exception:
+                print('Error under config key: {}'.format(k))
+                raise
+        else:
+            b[k] = v
+
+
+def load_config(filename):
+    """Load a config from file filename and merge it into the default options.
+    """
+    with open(filename, 'r', encoding='utf-8') as f:
+        yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader))
+
+    merge_a_into_b(yaml_cfg, cfg)
+
+    return cfg
+
+
+def check_config(c=None):
+    """Return the given config, or fall back to the module-level default config."""
+    if c is None:
+        return cfg
+    return c
+
+
+if __name__ == "__main__":
+    print(load_config())
diff --git a/research/cv/ArtTrack/src/dataset/__init__.py b/research/cv/ArtTrack/src/dataset/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cc45ec5a33c9576f744df56bb3c5b73705aeac9
--- /dev/null
+++ b/research/cv/ArtTrack/src/dataset/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+from .util import DATASET_TYPE_MPII_MR, DATASET_TYPE_MPII_RAW, DATASET_TYPE_COCO
+from .mpii import MPII
+from .coco import MSCOCO
diff --git a/research/cv/ArtTrack/src/dataset/coco.py b/research/cv/ArtTrack/src/dataset/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..8aa462863b7cecaa23e6ddd8d1b51fe67596a85c
--- /dev/null
+++ b/research/cv/ArtTrack/src/dataset/coco.py
@@ -0,0 +1,180 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+import os
+
+import imageio as io
+import matplotlib.pyplot as plt
+import numpy as np
+from PIL import Image
+from pycocotools import mask as mask_utils
+from pycocotools.coco import COCO
+
+from src.dataset.pose import Batch, DataItem, PoseDataset
+
+
+def get_gt_visibilities(in_file, visibilities):
+    """
+    get ground truth visibilities
+    """
+    with open(in_file) as data_file:
+        data = json.load(data_file)
+
+    for person_id in range(len(data)):
+        keypoints = data[person_id]["keypoints"]
+        keypoints = [visibilities[person_id][i // 3] if i % 3 == 2 else int(keypoints[i]) for i in
+                     range(len(keypoints))]
+        data[person_id]["keypoints"] = keypoints
+
+    with open(in_file, 'w') as data_file:
+        json.dump(data, data_file)
+
+
+class MSCOCO(PoseDataset):
+
+    def load_dataset(self):
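+        """
+        Build the list of DataItem entries from the COCO annotations:
+        keypoints and visibilities per person, a merged RLE mask for
+        persons without keypoint annotations, and optionally the
+        ground-truth segmentation.
+        """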
+        dataset = self.cfg.dataset.path
+        dataset_phase = self.cfg.dataset.phase
+        dataset_ann = self.cfg.dataset.ann
+
+        # initialize COCO api
+        ann_file = '%s/annotations/%s_%s.json' % (dataset, dataset_ann, dataset_phase)
+        self.coco = COCO(ann_file)
+
+        img_ids = self.coco.getImgIds()
+
+        data = []
+
+        # loop through each image
+        for imgId in img_ids:
+            item = DataItem()
+
+            img = self.coco.loadImgs(imgId)[0]
+            item.im_path = "%s/images/%s/%s" % (dataset, dataset_phase, img["file_name"])
+            item.im_size = [3, img["height"], img["width"]]
+            item.coco_id = imgId
+            item.scale = self.get_scale()
+            if not self.is_valid_size(item.im_size, item.scale):
+                continue
+            ann_ids = self.coco.getAnnIds(imgIds=img['id'], iscrowd=False)
+            anns = self.coco.loadAnns(ann_ids)
+
+            all_person_key_points = []
+            masked_persons_rle = []
+            visible_persons_rle = []
+            all_visibilities = []
+
+            # Consider only images with people
+            has_people = len(anns)
+            if not has_people and self.cfg.coco_only_images_with_people:
+                continue
+
+            for ann in anns:  # loop through each person
+                person_key_points = []
+                visibilities = []
+                if ann["num_keypoints"] != 0:
+                    for i in range(self.cfg.num_joints):
+                        x_coord = ann["keypoints"][3 * i]
+                        y_coord = ann["keypoints"][3 * i + 1]
+                        visibility = ann["keypoints"][3 * i + 2]
+                        visibilities.append(visibility)
+                        if visibility != 0:  # i.e. if labeled
+                            person_key_points.append([i, x_coord, y_coord])
+                    all_person_key_points.append(np.array(person_key_points))
+                    visible_persons_rle.append(mask_utils.decode(self.coco.annToRLE(ann)))
+                    all_visibilities.append(visibilities)
+                if ann["num_keypoints"] == 0:
+                    masked_persons_rle.append(self.coco.annToRLE(ann))
+
+            item.joints = np.array(all_person_key_points)
+            item.im_neg_mask = mask_utils.merge(masked_persons_rle)
+            if self.cfg.use_gt_segm:
+                item.gt_segm = np.moveaxis(np.array(visible_persons_rle), 0, -1)
+                item.visibilities = all_visibilities
+            data.append(item)
+
+        # only the image_info annotation type carries no ground truth
+        self.has_gt = dataset_ann != "image_info"
+        return data
+
+    def compute_score_map_weights(self, scmap_shape, joint_id, data_item):
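+        """
+        Return per-joint weight masks that zero out score-map regions covered
+        by persons without keypoint annotations (the merged negative mask).
+        """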
+        size = scmap_shape[0:2]
+        scmask = np.ones(size)
+        m = mask_utils.decode(data_item.im_neg_mask)
+        if m.size:
+            img = Image.fromarray(m)
+            img = img.resize((size[1], size[0]))
+            scmask = 1.0 - np.array(img)
+        scmask = np.stack([scmask] * self.cfg.num_joints, axis=-1)
+        return scmask
+
+    def get_pose_segments(self):
+        return [[0, 1], [0, 2], [1, 3], [2, 4], [5, 7], [6, 8], [7, 9], [8, 10], [11, 13], [12, 14], [13, 15], [14, 16]]
+
+    def visualize_coco(self, coco_img_results, visibilities):
+        """
+        visualize coco
+        """
+        in_file = "tmp.json"
+        with open(in_file, 'w') as outfile:
+            json.dump(coco_img_results, outfile)
+        get_gt_visibilities(in_file, visibilities)
+
+        # initialize coco_pred api
+        coco_pred = self.coco.loadRes(in_file)
+        os.remove(in_file)
+
+        img_ids = [coco_img_results[0]["image_id"]]
+
+        for imgId in img_ids:
+            img = coco_pred.loadImgs(imgId)[0]
+            im_path = "%s/images/%s/%s" % (self.cfg.dataset.path, self.cfg.dataset.phase, img["file_name"])
+            I = io.imread(im_path)
+
+            fig = plt.figure()
+            a = fig.add_subplot(2, 2, 1)
+            plt.imshow(I)
+            a.set_title('Initial Image')
+
+            a = fig.add_subplot(2, 2, 2)
+            plt.imshow(I)
+            a.set_title('Predicted Keypoints')
+            ann_ids = coco_pred.getAnnIds(imgIds=img['id'])
+            anns = coco_pred.loadAnns(ann_ids)
+            coco_pred.showAnns(anns)
+
+            a = fig.add_subplot(2, 2, 3)
+            plt.imshow(I)
+            a.set_title('GT Keypoints')
+            ann_ids = self.coco.getAnnIds(imgIds=img['id'])
+            anns = self.coco.loadAnns(ann_ids)
+            self.coco.showAnns(anns)
+
+            plt.show()
+
+    def __getitem__(self, item):
+        batch = self.get_item(item)
+        res = (
+            batch.get(Batch.inputs, None),
+            batch.get(Batch.part_score_targets, None),
+            batch.get(Batch.part_score_weights, None),
+            batch.get(Batch.locref_targets, None),
+            batch.get(Batch.locref_mask, None),
+            batch.get(Batch.pairwise_targets, None),
+            batch.get(Batch.pairwise_mask, None),
+        )
+        if not self.cfg.train:
+            res = (batch[Batch.data_item].im_path,) + res
+        return res
diff --git a/research/cv/ArtTrack/src/dataset/mpii.py b/research/cv/ArtTrack/src/dataset/mpii.py
new file mode 100644
index 0000000000000000000000000000000000000000..59dd16e6d73195cb4a3d42e0eee92b7612e8c1cf
--- /dev/null
+++ b/research/cv/ArtTrack/src/dataset/mpii.py
@@ -0,0 +1,26 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+from src.dataset.pose import PoseDataset
+
+
+class MPII(PoseDataset):
+
+    def mirror_joint_coords(self, joints, image_width):
+        joints[:, 1] = image_width - joints[:, 1]
+        return joints
+
+    def get_pose_segments(self):
+        return [[0, 1], [1, 2], [3, 4], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11], [12, 13]]
diff --git a/research/cv/ArtTrack/src/dataset/pose.py b/research/cv/ArtTrack/src/dataset/pose.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5420d51bbb5c84111544ba0d98f2a00bb1e3651
--- /dev/null
+++ b/research/cv/ArtTrack/src/dataset/pose.py
@@ -0,0 +1,532 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+import os
+import random as rand
+from enum import Enum
+
+import numpy as np
+from numpy import array as arr, concatenate as cat
+import scipy.io as sio
+from imageio import imread
+from PIL import Image
+
+from src import dataset
+from src.log import log as logging
+
+
+class Batch(Enum):
+    inputs = 0
+    part_score_targets = 1
+    part_score_weights = 2
+    locref_targets = 3
+    locref_mask = 4
+    pairwise_targets = 5
+    pairwise_mask = 6
+    data_item = 7
+
+
+def mirror_joints_map(all_joints, num_joints):
+    """
+    mirror joints
+    Args:
+        all_joints: joints
+        num_joints: number of joints
+    """
+    res = np.arange(num_joints)
+    symmetric_joints = [p for p in all_joints if len(p) == 2]
+    for pair in symmetric_joints:
+        res[pair[0]] = pair[1]
+        res[pair[1]] = pair[0]
+    return res
+
+
+def extend_crop(crop, crop_pad, image_size):
+    """
+    extend crop
+    """
+    crop[0] = max(crop[0] - crop_pad, 0)
+    crop[1] = max(crop[1] - crop_pad, 0)
+    crop[2] = min(crop[2] + crop_pad, image_size[2] - 1)
+    crop[3] = min(crop[3] + crop_pad, image_size[1] - 1)
+    return crop
+
+
+def data_to_input(data):
+    """
+    transpose data to (C,H,W)
+    """
+    return np.transpose(data, [2, 0, 1]).astype(np.float32)
+
+
+def get_pairwise_index(j_id, j_id_end, num_joints):
+    """
+    get pairwise index
+    """
+    return (num_joints - 1) * j_id + j_id_end - int(j_id < j_id_end)
+
+
+def collect_pairwise_stats(joint_id, coords):
+    """
+    collect pairwise stats
+    """
+    pairwise_stats = {}
+    for person_id in range(len(coords)):
+        num_joints = len(joint_id[person_id])
+        for k_start in range(num_joints):
+            j_id_start = joint_id[person_id][k_start]
+            joint_pt = coords[person_id][k_start, :]
+            j_x_start = np.asscalar(joint_pt[0])
+            j_y_start = np.asscalar(joint_pt[1])
+            for k_end in range(num_joints):
+                if k_start != k_end:
+                    j_id_end = joint_id[person_id][k_end]
+                    joint_pt = coords[person_id][k_end, :]
+                    j_x_end = np.asscalar(joint_pt[0])
+                    j_y_end = np.asscalar(joint_pt[1])
+                    if (j_id_start, j_id_end) not in pairwise_stats:
+                        pairwise_stats[(j_id_start, j_id_end)] = []
+                    pairwise_stats[(j_id_start, j_id_end)].append([j_x_end - j_x_start, j_y_end - j_y_start])
+    return pairwise_stats
+
+
+def load_pairwise_stats(cfg):
+    """
+    load pairwise stats
+    """
+    mat_stats = sio.loadmat(cfg.pairwise_stats_fn)
+    pairwise_stats = {}
+    for _id in range(len(mat_stats['graph'])):
+        pair = tuple(mat_stats['graph'][_id])
+        pairwise_stats[pair] = {"mean": mat_stats['means'][_id], "std": mat_stats['std_devs'][_id]}
+    for pair in pairwise_stats:
+        pairwise_stats[pair]["mean"] *= cfg.global_scale
+        pairwise_stats[pair]["std"] *= cfg.global_scale
+    return pairwise_stats
+
+
+class DataItem:
+    pass
+
+
+# noinspection PyAttributeOutsideInit
+class PoseDataset:
+    """
+    basic dataset
+    """
+
+    def __init__(self, cfg):
+        self.cfg = cfg
+        if cfg.dataset.type in [dataset.DATASET_TYPE_MPII_RAW,
+                                dataset.DATASET_TYPE_COCO]:
+            self.data = self.load_dataset() if cfg.dataset else []
+        else:
+            self.data = []
+            return
+        self.num_images = len(self.data)
+        self.set_mirror(cfg.dataset.mirror)
+        self.set_pairwise_stats_collect(cfg.pairwise_stats_collect)
+        if self.cfg.pairwise_predict:
+            self.pairwise_stats = load_pairwise_stats(self.cfg)
+
+    def load_dataset(self):
+        """
+        load dataset
+        """
+        cfg = self.cfg
+        file_name = cfg.dataset.path
+        # Load dataset annotations (converted from the original Matlab .mat file to JSON)
+        with open(file_name, 'r') as f:
+            mlab = f.read()
+            mlab = json.loads(mlab)
+
+        data = []
+        has_gt = True
+
+        for i, sample in enumerate(mlab):
+
+            item = DataItem()
+            item.image_id = i
+            item.im_path = os.path.expanduser(sample['image'])
+            item.im_size = np.array(sample['size'], dtype=np.int32)
+            item.scale = self.get_scale()
+            if not self.is_valid_size(item.im_size, item.scale):
+                continue
+            if sample.get('joints', None) is not None:
+                joints = np.array(sample['joints'][0])
+                joint_id = joints[:, 0]
+                # make sure joint ids are 0-indexed
+                if joint_id.size != 0:
+                    assert (joint_id < cfg.num_joints).all()
+                joints[:, 0] = joint_id
+                item.joints = [joints]
+            else:
+                has_gt = False
+            if cfg.crop:
+                crop = sample[3][0] - 1
+                item.crop = extend_crop(crop, cfg.crop_pad, item.im_size)
+            data.append(item)
+
+        self.has_gt = has_gt
+        return data
+
+    def set_mirror(self, mirror):
+        """
+        setup mirror
+        """
+        self.mirror = mirror
+        if mirror:
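+            # indices >= num_images refer to the same images, but are flagged as mirrored copies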
+            image_indices = np.arange(self.num_images * 2)
+            self.mirrored = image_indices >= self.num_images
+            image_indices[self.mirrored] = image_indices[self.mirrored] - self.num_images
+            self.image_indices = image_indices
+            self.symmetric_joints = mirror_joints_map(self.cfg.all_joints, self.cfg.num_joints)
+        else:
+            # assert not self.cfg.mirror
+            self.image_indices = np.arange(self.num_images)
+            self.mirrored = [False] * self.num_images
+
+    def set_pairwise_stats_collect(self, pairwise_stats_collect):
+        """
+        setup pairwise stats collect
+        """
+        self.pairwise_stats_collect = pairwise_stats_collect
+        if self.pairwise_stats_collect:
+            assert self.get_scale() == 1.0
+
+    def mirror_joint_coords(self, joints, image_width):
+        """
+        mirror joint coords
+        horizontally flip the x-coordinate, keep y unchanged
+        """
+        # horizontally flip the x-coordinate, keep y unchanged
+        joints[:, 1] = image_width - joints[:, 1] - 1
+        return joints
+
+    def mirror_joints(self, joints, symmetric_joints, image_width):
+        """
+        mirror joints
+        """
+        # joint ids are 0 indexed
+        res = np.copy(joints)
+        res = self.mirror_joint_coords(res, image_width)
+        # swap the joint_id for a symmetric one
+        joint_id = joints[:, 0].astype(int)
+        res[:, 0] = symmetric_joints[joint_id]
+        return res
+
+    def num_training_samples(self):
+        """
+        the number of training samples
+        """
+        num = self.num_images
+        if self.mirror:
+            num *= 2
+        return num
+
+    def __len__(self):
+        """
+        the number of training samples
+        """
+        return self.num_training_samples()
+
+    def __getitem__(self, item):
+        """
+        get a sample
+        """
+        batch = self.get_item(item)
+        res = (
+            batch.get(Batch.inputs, None),
+            batch.get(Batch.part_score_targets, None),
+            batch.get(Batch.part_score_weights, None),
+            batch.get(Batch.locref_targets, None),
+            batch.get(Batch.locref_mask, None),
+        )
+        if not self.cfg.train:
+            res = (batch[Batch.data_item].im_path,) + res
+        return res
+
+    def get_idx_mirror(self, idx):
+        """
+        get real index and mirror
+        """
+        img_idx = self.image_indices[idx]
+        mirror = self.cfg.dataset.mirror and self.mirrored[idx]
+
+        return img_idx, mirror
+
+    def get_training_sample(self, img_idx):
+        """
+        get sample
+        """
+        return self.data[img_idx]
+
+    def get_scale(self):
+        """
+        get scale
+        """
+        cfg = self.cfg
+        scale = cfg.global_scale
+        if hasattr(cfg, 'scale_jitter_lo') and hasattr(cfg, 'scale_jitter_up'):
+            scale_jitter = rand.uniform(cfg.scale_jitter_lo, cfg.scale_jitter_up)
+            scale *= scale_jitter
+        return scale
+
+    def get_item(self, item):
+        """
+        get item
+        """
+        imidx, mirror = self.get_idx_mirror(item)
+        data_item = self.get_training_sample(imidx)
+        scale = data_item.scale
+
+        return self.make_sample(data_item, scale, mirror)
+
+    def is_valid_size(self, image_size, scale):
+        """
+        check image size
+        """
+        im_width = image_size[2]
+        im_height = image_size[1]
+
+        min_input_size = 100
+        if im_height < min_input_size or im_width < min_input_size:
+            return False
+
+        if hasattr(self.cfg, 'max_input_size'):
+            max_input_size = self.cfg.max_input_size
+            input_width = im_width * scale
+            input_height = im_height * scale
+            if input_height > max_input_size or input_width > max_input_size:
+                return False
+
+        return True
+
+    def make_sample(self, data_item, scale, mirror):
+        """
+        make sample
+        """
+        im_file = data_item.im_path
+        logging.debug('image %s', im_file)
+        logging.debug('mirror %r', mirror)
+        image = imread(im_file, pilmode='RGB')
+
+        if self.has_gt:
+            joints = np.copy(data_item.joints)
+
+        if self.cfg.crop:
+            crop = data_item.crop
+            image = image[crop[1]:crop[3] + 1, crop[0]:crop[2] + 1, :]
+            if self.has_gt:
+                joints[:, 1:3] -= crop[0:2].astype(joints.dtype)
+        image = Image.fromarray(image)
+        if self.cfg.dataset.padding:
+            new_image = Image.new("RGB", (self.cfg.max_input_size, self.cfg.max_input_size), (0, 0, 0))
+            new_image.paste(image)
+            image = new_image
+        img = np.array(image.resize(
+            (int(image.size[0] * scale), int(image.size[1] * scale))) if scale != 1 else image)
+        image = np.array(image)
+        scaled_img_size = arr(img.shape[0:2])
+
+        if mirror:
+            img = np.fliplr(img)
+
+        batch = {Batch.inputs: img}
+
+        if self.has_gt:
+            stride = self.cfg.stride
+
+            if mirror:
+                joints = [self.mirror_joints(person_joints, self.symmetric_joints, image.shape[1]) for person_joints in
+                          joints]
+
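+            # score map size at the network output stride, rounded up to an even number of cells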
+            sm_size = np.ceil(scaled_img_size / (stride * 2)).astype(int) * 2
+
+            scaled_joints = [person_joints[:, 1:3] * scale for person_joints in joints]
+
+            joint_id = [person_joints[:, 0].astype(int) for person_joints in joints]
+            batch = self.compute_targets_and_weights(joint_id, scaled_joints, data_item, sm_size, scale, batch)
+
+            if self.pairwise_stats_collect:
+                data_item.pairwise_stats = collect_pairwise_stats(joint_id, scaled_joints)
+
+        batch = {key: data_to_input(data) for (key, data) in batch.items()}
+
+        batch[Batch.data_item] = data_item
+
+        return batch
+
+    def set_locref(self, locref_map, locref_mask, locref_scale, i, j, j_id, dx, dy):
+        """
+        set location ref
+        """
+        locref_mask[j, i, j_id * 2 + 0] = 1
+        locref_mask[j, i, j_id * 2 + 1] = 1
+        locref_map[j, i, j_id * 2 + 0] = dx * locref_scale
+        locref_map[j, i, j_id * 2 + 1] = dy * locref_scale
+
+    def set_pairwise_map(self, pairwise_map, pairwise_mask, i, j, j_id, j_id_end, coords, pt_x, pt_y, person_id, k_end):
+        """
+        set pairwise map
+        """
+        num_joints = self.cfg.num_joints
+        joint_pt = coords[person_id][k_end, :]
+        j_x_end = np.asscalar(joint_pt[0])
+        j_y_end = np.asscalar(joint_pt[1])
+        pair_id = get_pairwise_index(j_id, j_id_end, num_joints)
+        stats = self.pairwise_stats[(j_id, j_id_end)]
+        dx = j_x_end - pt_x
+        dy = j_y_end - pt_y
+        pairwise_mask[j, i, pair_id * 2 + 0] = 1
+        pairwise_mask[j, i, pair_id * 2 + 1] = 1
+        pairwise_map[j, i, pair_id * 2 + 0] = (dx - stats["mean"][0]) / stats["std"][0]
+        pairwise_map[j, i, pair_id * 2 + 1] = (dy - stats["mean"][1]) / stats["std"][1]
+
+    def compute_targets_and_weights(self, joint_id, coords, data_item, size, scale, batch):
+        """
+        compute targets and weights
+        """
+        stride = self.cfg.stride
+        dist_thresh = self.cfg.pos_dist_thresh * scale
+        num_joints = self.cfg.num_joints
+        half_stride = stride / 2
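+        # targets are laid out as (h, w, c); data_to_input later transposes every map to (c, h, w)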
+        scmap = np.zeros(cat([size, arr([num_joints])]))
+
+        locref_shape = cat([size, arr([num_joints * 2])])
+        locref_mask = np.zeros(locref_shape)
+        locref_map = np.zeros(locref_shape)
+
+        pairwise_shape = cat([size, arr([num_joints * (num_joints - 1) * 2])])
+        pairwise_mask = np.zeros(pairwise_shape)
+        pairwise_map = np.zeros(pairwise_shape)
+
+        dist_thresh_sq = dist_thresh ** 2
+
+        width = size[1]
+        height = size[0]
+
+        self.loop_person(joint_id, coords, half_stride, stride, dist_thresh, width,
+                         height, dist_thresh_sq,
+                         locref_map,
+                         scmap,
+                         locref_mask, pairwise_map, pairwise_mask)
+        scmap_weights = self.compute_score_map_weights(scmap.shape, joint_id, data_item)
+
+        # Update batch
+        batch.update({
+            Batch.part_score_targets: scmap,
+            Batch.part_score_weights: scmap_weights
+        })
+        if self.cfg.location_refinement:
+            batch.update({
+                Batch.locref_targets: locref_map,
+                Batch.locref_mask: locref_mask
+            })
+        if self.cfg.pairwise_predict:
+            batch.update({
+                Batch.pairwise_targets: pairwise_map,
+                Batch.pairwise_mask: pairwise_mask
+            })
+
+        return batch
+
+    def loop_person(self, joint_id, coords, half_stride, stride, dist_thresh, width,
+                    height, dist_thresh_sq,
+                    locref_map,
+                    scmap,
+                    locref_mask, pairwise_map, pairwise_mask):
+        for person_id in range(len(coords)):
+            self.loop_joint(joint_id, person_id, coords, half_stride, stride, dist_thresh, width,
+                            height, dist_thresh_sq,
+                            locref_map,
+                            scmap,
+                            locref_mask, pairwise_map, pairwise_mask)
+
+    def loop_joint(self, joint_id, person_id, coords, half_stride, stride, dist_thresh, width,
+                   height, dist_thresh_sq,
+                   locref_map,
+                   scmap,
+                   locref_mask, pairwise_map, pairwise_mask):
+        for k, j_id in enumerate(joint_id[person_id]):
+            joint_pt = coords[person_id][k, :]
+            j_x = np.asscalar(joint_pt[0])
+            j_y = np.asscalar(joint_pt[1])
+
+            # don't loop over entire heatmap, but just relevant locations
+            j_x_sm = round((j_x - half_stride) / stride)
+            j_y_sm = round((j_y - half_stride) / stride)
+            min_x = round(max(j_x_sm - dist_thresh - 1, 0))
+            max_x = round(min(j_x_sm + dist_thresh + 1, width - 1))
+            min_y = round(max(j_y_sm - dist_thresh - 1, 0))
+            max_y = round(min(j_y_sm + dist_thresh + 1, height - 1))
+
+            self.loop_area(stride, half_stride, min_x, max_x, min_y, max_y, j_x, j_y, dist_thresh_sq, locref_map,
+                           j_id,
+                           scmap,
+                           locref_mask, joint_id, person_id, pairwise_map, pairwise_mask, coords, k)
+
+    def loop_area(self, stride, half_stride, min_x, max_x, min_y, max_y, j_x, j_y, dist_thresh_sq, locref_map, j_id,
+                  scmap,
+                  locref_mask, joint_id, person_id, pairwise_map, pairwise_mask, coords, k):
+        for j in range(min_y, max_y + 1):  # range(height):
+            pt_y = j * stride + half_stride
+            for i in range(min_x, max_x + 1):  # range(width):
+                # pt = arr([i*stride+half_stride, j*stride+half_stride])
+                # diff = joint_pt - pt
+                # The code above is too slow in python
+                pt_x = i * stride + half_stride
+                # print(la.norm(diff))
+
+                self.set_score_map(pt_x, pt_y, j_x, j_y, dist_thresh_sq, locref_map, i, j, j_id, scmap,
+                                   locref_mask, joint_id, person_id, pairwise_map, pairwise_mask, coords, k)
+
+    def set_score_map(self, pt_x, pt_y, j_x, j_y, dist_thresh_sq, locref_map, i, j, j_id, scmap, locref_mask,
+                      joint_id,
+                      person_id, pairwise_map, pairwise_mask, coords, k):
+
+        dx = j_x - pt_x
+        dy = j_y - pt_y
+        dist = dx ** 2 + dy ** 2
+
+        if dist <= dist_thresh_sq:
+            locref_scale = 1.0 / self.cfg.locref_stdev
+            current_normalized_dist = dist * locref_scale ** 2
+            prev_normalized_dist = locref_map[j, i, j_id * 2 + 0] ** 2 + locref_map[j, i, j_id * 2 + 1] ** 2
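+            # when positive windows of two joints of the same type overlap, keep the offsets of the closer joint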
+            update_scores = (scmap[j, i, j_id] == 0) or prev_normalized_dist > current_normalized_dist
+            if self.cfg.location_refinement and update_scores:
+                self.set_locref(locref_map, locref_mask, locref_scale, i, j, j_id, dx, dy)
+            if self.cfg.pairwise_predict and update_scores:
+                for k_end, j_id_end in enumerate(joint_id[person_id]):
+                    if k != k_end:
+                        self.set_pairwise_map(pairwise_map, pairwise_mask, i, j, j_id, j_id_end,
+                                              coords, pt_x, pt_y, person_id, k_end)
+            scmap[j, i, j_id] = 1
+
+    def compute_score_map_weights(self, scmap_shape, joint_id, data_item):
+        """
+        compute score map weights
+        """
+        cfg = self.cfg
+        if cfg.weigh_only_present_joints:
+            weights = np.zeros(scmap_shape)
+            for person_joint_id in joint_id:
+                for j_id in person_joint_id:
+                    weights[:, :, j_id] = 1.0
+        else:
+            weights = np.ones(scmap_shape)
+        return weights
diff --git a/research/cv/ArtTrack/src/dataset/util.py b/research/cv/ArtTrack/src/dataset/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..0724fab32eebce07e30738b7b7b2f18b254d1b67
--- /dev/null
+++ b/research/cv/ArtTrack/src/dataset/util.py
@@ -0,0 +1,79 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import psutil
+from mindspore import dataset as ds
+
+DATASET_TYPE_MPII_RAW = 'mpii_raw'
+DATASET_TYPE_MPII_MR = 'mpii_mr'
+DATASET_TYPE_COCO = 'coco'
+
+
+def _parallel(num):
+    """
+    get parallel
+    """
+    if num is None or num < 0:
+        return max(1, int(psutil.cpu_count() / 2))
+    return num
+
+
+def create_dataset(dataset_type, dataset, shuffle=False, batch_size=1, parallel=None, train=False, num_shards=None,
+                   rank_id=None):
+    """
+    create dataset
+    Args:
+        dataset_type: dataset type
+        dataset: path to dataset
+        shuffle: shuffle
+        batch_size: batch size
+        parallel: if None, cpu_count / 2
+        train: train mode
+        num_shards: shards for distributed training
+        rank_id: distributed id
+    """
+    ds.config.set_enable_shared_mem(False)
+    if dataset_type == DATASET_TYPE_MPII_RAW:
+        columns = [
+            "inputs",
+            "part_score_targets",
+            "part_score_weights",
+            "locref_targets",
+            "locref_mask",
+        ]
+        if not train:
+            columns = ['im_path'] + columns
+        dataset = ds.GeneratorDataset(source=dataset, column_names=columns, shuffle=shuffle,
+                                      num_parallel_workers=_parallel(parallel), num_shards=num_shards,
+                                      shard_id=rank_id)
+    elif dataset_type == DATASET_TYPE_COCO:
+        columns = [
+            "inputs",
+            "part_score_targets",
+            "part_score_weights",
+            "locref_targets",
+            "locref_mask",
+            "pairwise_targets",
+            "pairwise_mask",
+        ]
+        if not train:
+            columns = ['im_path'] + columns
+        dataset = ds.GeneratorDataset(source=dataset, column_names=columns, shuffle=shuffle,
+                                      num_parallel_workers=_parallel(parallel), num_shards=num_shards,
+                                      shard_id=rank_id)
+    if dataset is not None:
+        dataset = dataset.batch(batch_size)
+
+    return dataset
diff --git a/research/cv/ArtTrack/src/log.py b/research/cv/ArtTrack/src/log.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb7f5c1ebcdf8c8e5c0c741f03b0a3ccda718b56
--- /dev/null
+++ b/research/cv/ArtTrack/src/log.py
@@ -0,0 +1,30 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import logging.config
+import logging.handlers
+
+import yaml
+
+LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s"
+sh = logging.StreamHandler()
+logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, handlers=[sh])
+log = logging.getLogger('ArtTrack')
+
+
+def setup_log(file=None):
+    with open(file=file or 'log.yaml', mode='r', encoding="utf-8") as f:
+        logging_yaml = yaml.load(stream=f, Loader=yaml.FullLoader)
+        logging.config.dictConfig(config=logging_yaml)
diff --git a/research/cv/ArtTrack/src/model/losses.py b/research/cv/ArtTrack/src/model/losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..10df5c7e6009ca67751b71d2f5172e1bd1774375
--- /dev/null
+++ b/research/cv/ArtTrack/src/model/losses.py
@@ -0,0 +1,55 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import mindspore.numpy as np
+from mindspore import nn
+from mindspore.ops import functional as F
+from mindspore.ops.operations import Abs
+
+
+class HuberLossWithWeight(nn.LossBase):
+    """
+    huber loss
+    """
+
+    def __init__(self):
+        super(HuberLossWithWeight, self).__init__()
+        self.abs = Abs()
+
+    def construct(self, predictions, labels, weight=1.0, k=1.0):
+        diff = predictions - labels
+        abs_diff = self.abs(diff)
+        k = np.array(k)
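+        # quadratic for |diff| < k, linear beyond k (standard Huber), followed by the weighted reduction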
+        losses = np.where(abs_diff < k, 0.5 * np.square(diff), k * abs_diff - 0.5 * k ** 2)
+        return self.get_loss(losses, weight)
+
+
+class MSELossWithWeight(nn.LossBase):
+    """
+    mse loss
+    """
+
+    def construct(self, base, target, weight=1.0):
+        x = F.square(base - target)
+        return self.get_loss(x, weight)
+
+
+class WeightLoss(nn.LossBase):
+    """
+    weight loss
+    """
+
+    def construct(self, loss, weight=1.0):
+        return self.get_loss(loss, weight)
diff --git a/research/cv/ArtTrack/src/model/pose.py b/research/cv/ArtTrack/src/model/pose.py
new file mode 100644
index 0000000000000000000000000000000000000000..42c6c9af269cb4df61580f152e1e8f93f2ba5b43
--- /dev/null
+++ b/research/cv/ArtTrack/src/model/pose.py
@@ -0,0 +1,188 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import mindspore as ms
+import mindspore.numpy as np
+from mindspore import nn
+from mindspore.ops import stop_gradient
+from src.model.losses import HuberLossWithWeight, MSELossWithWeight, WeightLoss
+from src.model.resnet import util
+
+
+class PredictionLayer(nn.Cell):
+    """
+    prediction layer
+    """
+
+    def __init__(self, cfg, name, in_channels, out_channels):
+        """
+        Args:
+            cfg: net config
+            name: prediction name
+            in_channels: in channels
+            out_channels: out channels
+        """
+        super(PredictionLayer, self).__init__()
+        self.cfg = cfg
+        self.name = name
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.conv2d_transpose = nn.Conv2dTranspose(in_channels=self.in_channels,
+                                                   out_channels=self.out_channels, kernel_size=3, stride=2,
+                                                   bias_init='normal', has_bias=True)
+
+    def construct(self, x):
+        return self.conv2d_transpose(x)
+
+
+class PoseNet(nn.Cell):
+    """
+    pose net
+    """
+
+    def __init__(self, cfg):
+        """
+        Args:
+            cfg: net config
+        """
+        super(PoseNet, self).__init__()
+        self.cfg = cfg
+        self.resnet101 = util.resnet_101(3, output_stride=16, global_pool=False)
+        self.resnet101.set_train(False)
+        self.resnet101.set_grad(requires_grad=False)
+        self.mean = np.array(self.cfg.mean_pixel)
+        self.mean = self.mean.reshape([1, 3, 1, 1])
+
+        self.part_pred = PredictionLayer(self.cfg, 'part_pred', 2048, self.cfg.num_joints)
+        if cfg.location_refinement:
+            self.locref = PredictionLayer(cfg, 'locref_pred', 2048, cfg.num_joints * 2)
+        if cfg.pairwise_predict:
+            self.pairwise_pred = PredictionLayer(cfg, 'pairwise_pred', 2048, cfg.num_joints * (cfg.num_joints - 1) * 2)
+        if cfg.intermediate_supervision:
+            self.part_pred_interm = PredictionLayer(cfg, 'part_pred_interm', cfg.intermediate_supervision_input,
+                                                    self.cfg.num_joints)
+
+    def get_im_centered(self, inputs):
+        return inputs - self.mean
+
+    def construct(self, inputs):
+        im_centered = self.get_im_centered(inputs)
+        net, intermediate = self.resnet101(im_centered)
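+        # the ResNet-101 backbone is kept frozen: gradients are cut before the prediction heads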
+        net = stop_gradient(net)
+        intermediate = stop_gradient(intermediate)
+        return net, intermediate
+
+
+class PoseNetTest(nn.Cell):
+    """
+    pose net eval
+    """
+
+    def __init__(self, net, cfg):
+        """
+        Args:
+            net: pose net
+            cfg: net config
+        """
+        super(PoseNetTest, self).__init__()
+        self.net = net
+        self.cfg = cfg
+        self.location_refinement = cfg.location_refinement
+        self.pairwise_predict = cfg.pairwise_predict
+        self.sigmoid = nn.Sigmoid()
+
+    def construct(self, *inputs):
+        features, _ = self.net(inputs[0])
+        out = self.net.part_pred(features)
+        pairwise_pred = None
+        locref = None
+        if self.pairwise_predict:
+            pairwise_pred = self.net.pairwise_pred(features)
+        if self.location_refinement:
+            locref = self.net.locref(features)
+
+        return self.sigmoid(out), pairwise_pred, locref
+
+
+class PoseNetBaseLoss(nn.Cell):
+    """
+    pose net base loss
+    """
+
+    def __init__(self, net, cfg):
+        super(PoseNetBaseLoss, self).__init__()
+        self.net = net
+        self.cfg = cfg
+
+
+class PoseNetTotalLoss(PoseNetBaseLoss):
+    """
+    pose net total loss
+    """
+
+    def __init__(self, net, cfg):
+        """
+        Args:
+            net: pose net
+            cfg: net config
+        """
+        super(PoseNetTotalLoss, self).__init__(net, cfg)
+        self.part_score_weights = 1.0
+        self.sce = ms.ops.SigmoidCrossEntropyWithLogits()
+        self.weight_loss = WeightLoss()
+        self.pairwise_loss_func = HuberLossWithWeight() if self.cfg.pairwise_huber_loss else MSELossWithWeight()
+        self.locref_loss_func = HuberLossWithWeight() if self.cfg.locref_huber_loss else MSELossWithWeight()
+        self.location_refinement = cfg.location_refinement
+        self.pairwise_predict = cfg.pairwise_predict
+        self.intermediate_supervision = cfg.intermediate_supervision
+        self.locref_loss_weight = cfg.locref_loss_weight
+
+    def construct(self, inputs, part_score_targets, part_score_weights,
+                  locref_targets, locref_mask,
+                  pairwise_targets=None, pairwise_mask=None
+                  ):
+        """
+        Args:
+            inputs: input images
+            part_score_targets: part score targets
+            part_score_weights: part score weights
+            locref_targets: location reference targets
+            locref_mask: location reference mask
+            pairwise_targets: pairwise targets
+            pairwise_mask: pairwise mask
+        Return:
+            total loss
+        """
+        features, intermediate = self.net(inputs)
+        total_loss = self.sce(self.net.part_pred(features), part_score_targets)
+        total_loss = self.weight_loss(total_loss, part_score_weights)
+
+        if self.intermediate_supervision:
+            part_loss_interm = self.sce(self.net.part_pred_interm(intermediate), part_score_targets)
+            part_loss_interm = self.weight_loss(part_loss_interm, part_score_weights)
+            total_loss = total_loss + part_loss_interm
+
+        if self.location_refinement:
+            locref_pred = self.net.locref(features)
+            locref_loss = self.locref_loss_weight * self.locref_loss_func(locref_pred, locref_targets, locref_mask)
+            total_loss = total_loss + locref_loss
+
+        if self.pairwise_predict:
+            pairwise_pred = self.net.pairwise_pred(features)
+            pairwise_loss = self.cfg.pairwise_loss_weight * self.pairwise_loss_func(pairwise_pred, pairwise_targets,
+                                                                                    pairwise_mask)
+            total_loss = total_loss + pairwise_loss
+
+        return total_loss
diff --git a/research/cv/ArtTrack/src/model/predict.py b/research/cv/ArtTrack/src/model/predict.py
new file mode 100644
index 0000000000000000000000000000000000000000..97a30dc67cf795f255f522390f418ca59c62b7b3
--- /dev/null
+++ b/research/cv/ArtTrack/src/model/predict.py
@@ -0,0 +1,151 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+import scipy.ndimage
+
+
+def extract_cnn_output(scmap, locref_target=None, pairwise_pred=None, cfg=None, pairwise_stats=None):
+    """
+    extract cnn output
+
+    Args:
+        scmap: score map
+        locref_target: location reference target
+        pairwise_pred: pairwise prediction
+        cfg: net config
+        pairwise_stats: pairwise stats
+    Return:
+        (score map, location reference, pairwise difference)
+    """
+    scmap = np.squeeze(scmap)
+    locref = None
+    pairwise_diff = None
+    if cfg.location_refinement and locref_target is not None:
+        locref = np.squeeze(locref_target)
+        shape = locref.shape
+        locref = np.reshape(locref, (shape[0], shape[1], -1, 2))
+        locref *= cfg.locref_stdev
+    if cfg.pairwise_predict and pairwise_pred is not None:
+        pairwise_diff = np.squeeze(pairwise_pred)
+        shape = pairwise_diff.shape
+        pairwise_diff = np.reshape(pairwise_diff, (shape[0], shape[1], -1, 2))
+        num_joints = cfg.num_joints
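+        # undo the per-pair normalisation that was applied to the pairwise targets during training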
+        for pair in pairwise_stats:
+            pair_id = (num_joints - 1) * pair[0] + pair[1] - int(pair[0] < pair[1])
+            pairwise_diff[:, :, pair_id, 0] *= pairwise_stats[pair]["std"][0]
+            pairwise_diff[:, :, pair_id, 0] += pairwise_stats[pair]["mean"][0]
+            pairwise_diff[:, :, pair_id, 1] *= pairwise_stats[pair]["std"][1]
+            pairwise_diff[:, :, pair_id, 1] += pairwise_stats[pair]["mean"][1]
+    return scmap, locref, pairwise_diff
+
+
+def argmax_pose_predict(scmap, offmat, stride):
+    """Combine scoremat and offsets to the final pose."""
+    num_joints = scmap.shape[2]
+    pose = []
+    for joint_idx in range(num_joints):
+        maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
+                                  scmap[:, :, joint_idx].shape)
+        offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
+        pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride + offset)
+        pose.append(np.hstack((pos_f8[::-1],
+                               [scmap[maxloc][joint_idx]])))
+    return np.array(pose)
+
+
+def argmax_arrows_predict(scmap, offmat, pairwise_diff, stride):
+    """
+    argmax arrows prediction
+    """
+    num_joints = scmap.shape[2]
+    arrows = {}
+    for joint_idx in range(num_joints):
+        maxloc = np.unravel_index(np.argmax(scmap[:, :, joint_idx]),
+                                  scmap[:, :, joint_idx].shape)
+        offset = np.array(offmat[maxloc][joint_idx])[::-1] if offmat is not None else 0
+        pos_f8 = (np.array(maxloc).astype('float') * stride + 0.5 * stride +
+                  offset)[::-1]
+        for joint_idx_end in range(num_joints):
+            if joint_idx_end != joint_idx:
+                pair_id = (num_joints - 1) * joint_idx + joint_idx_end - int(joint_idx < joint_idx_end)
+                difference = np.array(pairwise_diff[maxloc][pair_id])[::-1] if pairwise_diff is not None else 0
+                pos_f8_end = (np.array(maxloc).astype('float') * stride + 0.5 * stride + difference)[::-1]
+                arrows[(joint_idx, joint_idx_end)] = (pos_f8, pos_f8_end)
+
+    return arrows
+
+
+def multi_dim_argmax(arr):
+    """ Getting argmax over the first 2 axes """
+    m, n, i, j = arr.shape
+    arr = np.reshape(arr, (m * n, i, j))
+    return np.unravel_index(np.argmax(arr, 0), (m, n))
+
+
+def interweave_matrices(x, y, z):
+    """Combine matrices by concatenating their cols: x.col(1),y.col(1),z.col(1) ... x.col(n),y.col(n),z.col(n) """
+    num_joints = x.shape[1]
+    id_x = (np.arange(0, num_joints, 0.5) + 1).astype('int')
+    id_y = (np.arange(0, num_joints, 0.5) + 0.5).astype('int')
+    id_z = (np.arange(0, num_joints, 0.5) + 0).astype('int')
+    x = np.insert(x, id_x, 0, axis=1)
+    y = np.insert(y, id_y, 0, axis=1)
+    z = np.insert(z, id_z, 0, axis=1)
+    return x + y + z
+
+
+def pose_predict_with_gt_segm(scmap, offmat, stride, gt_segm, coco_id):
+    """Generate all poses in an image using segmentations"""
+    if gt_segm.size == 0:
+        img_keypoints = []
+    else:
+        num_persons = gt_segm.shape[2]
+        num_joints = scmap.shape[2]
+        init_w = gt_segm.shape[1]
+        init_h = gt_segm.shape[0]
+
+        upsized_w = scmap.shape[1] * stride
+        upsized_h = scmap.shape[0] * stride
+
+        diff_w_l = int((upsized_w - init_w) // 2)
+        diff_w_r = int(upsized_w - init_w - diff_w_l)
+
+        diff_h_u = int((upsized_h - init_h) // 2)
+        diff_h_d = int(upsized_h - init_h - diff_h_u)
+
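+        # pad the GT segmentation so it matches the score map upsampled back to input resolution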
+        padded_gt_segm = np.pad(gt_segm, ((diff_h_u, diff_h_d), (diff_w_l, diff_w_r), (0, 0)), 'constant')
+        upsized_scmap = scipy.ndimage.zoom(scmap, (stride, stride, 1))
+
+        person_joint_scmap = padded_gt_segm[:, :, :, np.newaxis] * upsized_scmap[:, :, np.newaxis, :]
+        upsized_maxloc = multi_dim_argmax(person_joint_scmap)
+        maxloc_0 = (upsized_maxloc[0] // stride).astype(int)
+        maxloc_1 = (upsized_maxloc[1] // stride).astype(int)
+        indices = np.array([np.arange(num_joints)] * num_persons)
+        offset = np.moveaxis(offmat[(maxloc_0, maxloc_1, indices)][:, :, ::-1], -1, 0) if offmat is not None else 0
+        pos_f8 = (np.array((maxloc_0, maxloc_1)).astype('float') * stride + 0.5 * stride + offset)
+        v = scmap[(maxloc_0, maxloc_1, indices)]
+        img_keypoints = (interweave_matrices(pos_f8[1].astype('int'), pos_f8[0].astype('int'), v)).tolist()
+
+    coco_img_results = []
+    for person_keypoints in img_keypoints:
+        person_result = {}
+        person_result["image_id"] = coco_id
+        person_result["category_id"] = 1
+        person_result["keypoints"] = person_keypoints
+        person_result["score"] = 1
+        coco_img_results.append(person_result)
+
+    return coco_img_results
diff --git a/research/cv/ArtTrack/src/model/resnet/__init__.py b/research/cv/ArtTrack/src/model/resnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/ArtTrack/src/model/resnet/resnet.py b/research/cv/ArtTrack/src/model/resnet/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..94bb77f88e82e39c6605354ca6d95286f333065d
--- /dev/null
+++ b/research/cv/ArtTrack/src/model/resnet/resnet.py
@@ -0,0 +1,314 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+from mindspore import nn
+from mindspore import ops
+
+
+class Subsample(nn.Cell):
+    """Subsamples the input along the spatial dimensions.
+    Args:
+        factor: The subsampling factor.
+    Returns:
+        output: A `Tensor` of size [batch, channels, height_out, width_out] with the
+        input, either intact (if factor == 1) or subsampled (if factor > 1).
+    """
+
+    def __init__(self, factor):
+        super(Subsample, self).__init__()
+        self.factor = factor
+        self.max_pool2d = nn.MaxPool2d(1, self.factor)
+
+    def construct(self, inputs):
+        if self.factor == 1:
+            return inputs
+
+        return self.max_pool2d(inputs)
+
+
+class Conv2dSame(nn.Cell):
+    """
+    Strided 2-D convolution with 'SAME' padding.
+    When stride > 1, explicit zero-padding is applied, followed by conv2d with
+    'VALID' padding.
+    """
+
+    def __init__(self, in_channels, out_channels, kernel_size, stride, rate=1):
+        """
+        Args:
+            in_channels: in channels
+            out_channels: out channels
+            kernel_size: An int with the kernel_size of the filters.
+            stride: An integer, the output stride.
+            rate: An integer, rate for atrous convolution.
+        """
+        super(Conv2dSame, self).__init__()
+        self.in_channels = in_channels
+        self.num_outputs = out_channels
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.rate = rate
+
+        self.kernel_size_effective = self.kernel_size + (self.kernel_size - 1) * (self.rate - 1)
+        self.pad_total = self.kernel_size_effective - 1
+        self.pad_beg = self.pad_total // 2
+        self.pad_end = self.pad_total - self.pad_beg
+
+        if self.stride == 1:
+            self.conv2d = nn.Conv2dBnAct(in_channels, self.num_outputs, self.kernel_size,
+                                         stride=self.stride, dilation=self.rate, pad_mode='same', has_bn=True,
+                                         activation="relu")
+        else:
+            self.pad = nn.Pad(((0, 0), (0, 0), (self.pad_beg, self.pad_end), (self.pad_beg, self.pad_end)))
+            self.conv2d = nn.SequentialCell([self.pad, nn.Conv2dBnAct(in_channels, self.num_outputs, self.kernel_size,
+                                                                      stride=self.stride, dilation=self.rate,
+                                                                      pad_mode='valid', has_bn=True,
+                                                                      activation="relu")])
+
+    def construct(self, inputs):
+        """
+        Args:
+            inputs: A 4-D tensor of size [batch, channels, height, width].
+        Returns:
+            output: A 4-D tensor of size [batch, channels, height_out, width_out] with
+              the convolution output.
+        """
+        return self.conv2d(inputs)
+
+
+class Bottleneck(nn.Cell):
+    """
+    Bottleneck residual unit variant with BN after convolutions.
+    """
+    expansion = 4
+
+    def __init__(self,
+                 in_channel,
+                 out_channel,
+                 stride,
+                 rate=1):
+        """
+        Args:
+            in_channel: in channel.
+            out_channel: out channel.
+            stride: The ResNet unit's stride. Determines the amount of downsampling of
+              the unit's output compared to its input.
+            rate: An integer, rate for atrous convolution.
+        """
+        super(Bottleneck, self).__init__()
+        self.in_channel = in_channel
+        self.out_channel = out_channel
+        self.depth = out_channel // 4
+        self.stride = stride
+        self.rate = rate
+        if self.out_channel == self.in_channel:
+            self.conv2d_shortcut = Subsample(self.stride)
+        else:
+            self.conv2d_shortcut = nn.Conv2dBnAct(self.in_channel, self.out_channel, 1, stride=self.stride, has_bn=True)
+
+        self.relu = nn.ReLU()
+
+        self.conv2d1 = nn.Conv2dBnAct(self.in_channel, self.depth, 1, stride=1, activation="relu", has_bn=True)
+        self.conv2d2 = Conv2dSame(self.depth, self.depth, 3, self.stride, self.rate)
+        self.conv2d3 = nn.Conv2dBnAct(self.depth, self.out_channel, 1, stride=1, has_bn=True)
+
+    def construct(self, inputs):
+        """
+        Args:
+            inputs: A tensor of size [batch, channels, height, width].
+        Returns:
+            The ResNet unit's output.
+        """
+        shortcut = self.conv2d_shortcut(inputs)
+
+        residual = self.conv2d1(inputs)
+        residual = self.conv2d2(residual)
+        residual = self.conv2d3(residual)
+
+        output = self.relu(shortcut + residual)
+        return output
+
+
+class Block(nn.Cell):
+    """
+    ResNet block consisting of Bottleneck units.
+    """
+
+    def __init__(self, units, intermediate=None):
+        """
+        Args:
+            units: list of Bottleneck cells
+            intermediate: if not None, the output of the corresponding Bottleneck (1-indexed)
+              is also returned as an intermediate value
+        """
+        super(Block, self).__init__()
+        self.units = units
+        self.units_cells = nn.CellList(units)
+        self.intermediate = intermediate
+
+    def construct(self, feature, intermediate=None):
+        """
+        Args:
+            feature: feature
+            intermediate: intermediate
+
+        Return:
+            (feature, ) or (feature, intermediate)
+        """
+        out = feature
+        out_intermediate = intermediate
+        for i, cell in enumerate(self.units_cells, 1):
+            out = cell(out)
+            if self.intermediate is not None and self.intermediate == i:
+                out_intermediate = out
+        if out_intermediate is None:
+            r = (out,)
+        else:
+            r = (out, out_intermediate)
+        return r
+
+
+class Layer(nn.Cell):
+    """
+    ResNet layer consisting of Blocks.
+    """
+    current_stride = 1
+    rate = 1
+
+    def __init__(self, in_channel, blocks, output_stride=None, unit_class=Bottleneck, block_class=Block):
+        """
+        Args:
+            in_channel: in channel
+            blocks: blocks config. should be generated by _make_blocks
+            output_stride: If None, then the output will be computed at the nominal
+                network stride. If output_stride is not None, it specifies the requested
+                ratio of input to output spatial resolution.
+            unit_class: class of unit. options: Bottleneck
+            block_class: class of block. options: Block
+        """
+        super(Layer, self).__init__()
+        self.unit_class = unit_class
+        self.block_class = block_class
+        self.blocks = blocks
+        self.output_stride = output_stride
+        self.in_channel = in_channel
+        self.blocks_cell = []
+        self.last_out_channel = self.in_channel
+        self.intermediate_block = 3
+        self.intermediate_unit = 12
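+        # the output of block 3, unit 12 is exposed as the intermediate feature (used for intermediate supervision)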
+        for i, block in enumerate(blocks, 1):
+            units = []
+            for _, unit in enumerate(block, 1):
+                if self.output_stride is not None and self.current_stride > self.output_stride:
+                    raise ValueError('The target output_stride cannot be reached.')
+
+                if self.output_stride is not None and self.current_stride == self.output_stride:
+                    net = self.unit_class(rate=self.rate, in_channel=self.last_out_channel, **dict(unit, stride=1))
+                    self.last_out_channel = unit['out_channel']
+                    self.rate *= unit.get('stride', 1)
+
+                else:
+                    net = self.unit_class(rate=1, in_channel=self.last_out_channel, **unit)
+                    self.last_out_channel = unit['out_channel']
+                    self.current_stride *= unit.get('stride', 1)
+                units.append(net)
+            self.blocks_cell.append(
+                self.block_class(units, None if i != self.intermediate_block else self.intermediate_unit))
+        if self.output_stride is not None and self.current_stride != self.output_stride:
+            raise ValueError('The target output_stride cannot be reached.')
+        self.blocks_cell = nn.CellList(self.blocks_cell)
+        self.out_channel = self.last_out_channel
+
+    def construct(self, inputs):
+        """
+        Args:
+            inputs: feature
+        Return:
+            block result
+        """
+        out = (inputs, None)
+        for cell in self.blocks_cell:
+            out = cell(*out)
+        return out
+
+
+class ResNet(nn.Cell):
+    """
+    Resnet
+    """
+
+    def __init__(self,
+                 blocks,
+                 in_channels,
+                 num_classes=None,
+                 global_pool=True,
+                 output_stride=None,
+                 include_root_block=True,
+                 ):
+        """
+        Args:
+            blocks: blocks config. should be generated by _make_blocks
+            in_channels: in channels
+            num_classes: the number of classes
+            global_pool: If True, we perform global average pooling before computing the
+                logits. Set to True for image classification, False for dense prediction.
+            output_stride: If None, then the output will be computed at the nominal
+                network stride. If output_stride is not None, it specifies the requested
+                ratio of input to output spatial resolution.
+            include_root_block: If True, include the initial convolution followed by
+                max-pooling; if False, exclude it.
+        """
+        super(ResNet, self).__init__()
+        self.blocks = blocks
+        self.in_channels = in_channels
+        self.num_classes = num_classes
+        self.global_pool = global_pool
+        self.output_stride = output_stride
+        self.include_root_block = include_root_block
+
+        self.conv2d_same = Conv2dSame(self.in_channels, 64, 7, 2)
+        self.max_pool2d = nn.MaxPool2d(3, 2, pad_mode='same')
+
+        self.reduce_mean = ops.ReduceMean(True)
+
+        if self.include_root_block:
+            if self.output_stride is not None:
+                if self.output_stride % 4 != 0:
+                    raise ValueError('The output_stride needs to be a multiple of 4.')
+                self.output_stride /= 4
+            self.layer = Layer(64, self.blocks, self.output_stride)
+        else:
+            self.layer = Layer(self.in_channels, self.blocks, self.output_stride)
+        if self.num_classes is not None:
+            self.conv2d1 = nn.Conv2d(self.blocks[-1][-1]['out_channel'], self.num_classes, 1)
+
+    def construct(self, inputs):
+        """
+        Args:
+            inputs: inputs
+        Return:
+            (result, intermediate)
+        """
+        net = inputs
+        if self.include_root_block:
+            net = self.conv2d_same(net)
+            net = self.max_pool2d(net)
+        net, intermediate = self.layer(net)
+        if self.global_pool:
+            # Global average pooling.
+            net = self.reduce_mean(net, [2, 3])
+        if self.num_classes is not None:
+            net = self.conv2d1(net)
+        return net, intermediate
diff --git a/research/cv/ArtTrack/src/model/resnet/util.py b/research/cv/ArtTrack/src/model/resnet/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..120a601785a9a542626aab155afc1ab39c7b7a0c
--- /dev/null
+++ b/research/cv/ArtTrack/src/model/resnet/util.py
@@ -0,0 +1,41 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+from src.model.resnet import resnet
+
+
+def _make_blocks(out_channel, stride, num_block):
+    """
+    make blocks config
+    """
+    result = []
+    for ch, s, num in zip(out_channel, stride, num_block):
+        result.append([{
+            'out_channel': ch,
+            'stride': 1
+        }] * (num - 1) + [{
+            'out_channel': ch,
+            'stride': s
+        }])
+    return result
+
+
+def resnet_101(in_channels, **kwargs):
+    """
+    get resnet 101
+    """
+    blocks = _make_blocks([256, 512, 1024, 2048], [2, 2, 2, 1], [3, 4, 23, 3])
+    net = resnet.ResNet(blocks, in_channels, **kwargs)
+    return net
diff --git a/research/cv/ArtTrack/src/multiperson/detections.py b/research/cv/ArtTrack/src/multiperson/detections.py
new file mode 100644
index 0000000000000000000000000000000000000000..957fc0855f325abe71cfa89a612534855f6eac6f
--- /dev/null
+++ b/research/cv/ArtTrack/src/multiperson/detections.py
@@ -0,0 +1,94 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import math
+from collections import namedtuple
+
+import numpy as np
+from nms_grid import nms_grid
+
+Detections = namedtuple('Detections', ['coord', 'coord_grid', 'conf', 'pairwise'])
+
+
+def pos_from_grid_raw(cfg, gridpos):
+    """
+    position from grid raw
+    """
+    return gridpos * cfg.stride + 0.5 * cfg.stride
+
+
+def pos_from_gridpos_offset(cfg, gridpos, pred_offset):
+    """
+    position from grid position offset
+    """
+    return pos_from_grid_raw(cfg, gridpos) + pred_offset
+
+
+def make_nms_grid(nms_radius):
+    """
+    make nms grid
+    """
+    nms_radius = math.ceil(nms_radius)
+    dist_grid = np.zeros([2 * nms_radius + 1, 2 * nms_radius + 1], dtype=np.uint8)
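+    # Binary disc mask: mark every cell whose centre lies within nms_radius.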
+    for yidx in range(dist_grid.shape[0]):
+        for xidx in range(dist_grid.shape[1]):
+            if (yidx - nms_radius) ** 2 + (xidx - nms_radius) ** 2 <= nms_radius ** 2:
+                dist_grid[yidx][xidx] = 1
+    return dist_grid
+
+
+def extract_detections(cfg, scmap, locref, pairwise_diff):
+    """
+    extract detections
+    """
+    num_joints = cfg.num_joints
+    num_pairwise_relations = pairwise_diff.shape[2]
+
+    # get dist_grid
+    dist_grid = make_nms_grid(cfg.nms_radius)
+
+    unProb = [None] * num_joints
+    unPos = [None] * num_joints
+    unPos_grid = [None] * num_joints
+    pairwiseDiff = [None] * num_joints
+
+    # apply nms
+    for p_idx in range(num_joints):
+        # IMPORTANT, as C++ function expects row-major
+        prob_map = np.ascontiguousarray(scmap[:, :, p_idx])
+        # print(prob_map.flags) has to be C_CONTIGUOUS
+
+        dets = nms_grid(prob_map, dist_grid, cfg.det_min_score)
+
+        cur_prob = np.zeros([len(dets), 1], dtype=np.float64)
+        cur_pos = np.zeros([len(dets), 2], dtype=np.float64)
+        cur_pos_grid = np.zeros([len(dets), 2], dtype=np.float64)
+        cur_pairwise = np.zeros([len(dets), num_pairwise_relations, 2], dtype=np.float64)
+
+        for idx, didx in enumerate(dets):
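+            # dets holds flat indices into the row-major score map; recover the
+            # column (ix) and row (iy) of each detection.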
+            ix = didx % scmap.shape[1]
+            iy = didx // scmap.shape[1]
+
+            cur_prob[idx, 0] = scmap[iy, ix, p_idx]
+            cur_pos_grid[idx, :] = pos_from_grid_raw(cfg, np.array([ix, iy]))
+            cur_pos[idx, :] = cur_pos_grid[idx, :] + locref[iy, ix, p_idx, :]
+            cur_pairwise[idx, :, :] = pairwise_diff[iy, ix, :, :]
+
+        unProb[p_idx] = cur_prob
+        unPos[p_idx] = cur_pos
+        unPos_grid[p_idx] = cur_pos_grid
+        pairwiseDiff[p_idx] = cur_pairwise
+
+    return Detections(coord=unPos, coord_grid=unPos_grid, conf=unProb, pairwise=pairwiseDiff)
diff --git a/research/cv/ArtTrack/src/multiperson/predict.py b/research/cv/ArtTrack/src/multiperson/predict.py
new file mode 100644
index 0000000000000000000000000000000000000000..75e6593cf476312a20823b2fc2b9d5cbf406b026
--- /dev/null
+++ b/research/cv/ArtTrack/src/multiperson/predict.py
@@ -0,0 +1,394 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import math
+import os
+from collections import namedtuple
+
+import numpy as np
+import scipy.io as sio
+from multicut import solve_nl_lmp
+
+from src.dataset.pose import get_pairwise_index
+
+
+def logit_transform(p):
+    """
+    calculate logit
+    """
+    p = np.minimum(np.maximum(p, 1e-7), 1.0 - 1e-7)
+    return np.log((1 - p) / p)
+
+
+def eval_graph(sm, detections):
+    """
+    eval graph
+    """
+    # time_start = time.time()
+
+    unary_prob = detections.conf
+    coordinates = detections.coord
+    # pairwise_regr = detections.pairwise
+
+    cidx_list = range(0, sm.num_keypoints)
+
+    unary_counts = []
+
+    for _, cidx in enumerate(cidx_list):
+        unary_counts.append(unary_prob[cidx].shape[0])
+
+    num_unary = sum(unary_counts)
+    num_pairwise = 0
+
+    for idx1, cidx1 in enumerate(cidx_list):
+        for cidx2 in cidx_list[idx1:]:
+            if cidx1 == cidx2:
+                num_pairwise += unary_prob[cidx1].shape[0] * (unary_prob[cidx1].shape[0] - 1) // 2
+            else:
+                num_pairwise += unary_prob[cidx1].shape[0] * unary_prob[cidx2].shape[0]
+
+    pos_array = np.zeros([num_unary, 2], dtype=np.float64)
+    unary_array = np.zeros([num_unary, 1], dtype=np.float64)
+    pw_array = np.zeros([num_pairwise, 1], dtype=np.float64)
+    pwidx_array = np.zeros([num_pairwise, 2], dtype=np.uint16)
+
+    firstidx = 0
+    firstidx_list = []
+    for _, cidx in enumerate(cidx_list):
+        lastidx = firstidx + unary_prob[cidx].shape[0]
+        unary_array[firstidx:lastidx] = unary_prob[cidx]
+        pos_array[firstidx:lastidx] = coordinates[cidx]
+
+        firstidx_list.append(firstidx)
+        firstidx = lastidx
+
+    firstidx = 0
+    for idx1, cidx1 in enumerate(cidx_list):
+        for idx2 in range(idx1, len(cidx_list)):
+
+            if coordinates[cidx1].shape[0] > 0:
+                cidx2 = cidx_list[idx2]
+
+                if coordinates[cidx2].shape[0] > 0:
+
+                    if not sm.need_this_pairwise(cidx1, cidx2):
+                        continue
+
+                    cur_prob, ptidx = sm.eval(cidx1, cidx2, detections)
+                    lastidx = firstidx + cur_prob.shape[0]
+
+                    ptidx[:, 0] += firstidx_list[idx1]
+                    ptidx[:, 1] += firstidx_list[idx2]
+
+                    pw_array[firstidx:lastidx, 0] = cur_prob
+                    pwidx_array[firstidx:lastidx, :] = ptidx
+
+                    firstidx = lastidx
+
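+    # Options forwarded to the multicut solver; probabilities are mapped to logits
+    # here unless the solver is asked to do it internally.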
+    is_sparse_graph = True
+    solver_type = False
+    do_suppression = True
+    logit_in_solver = False
+
+    if unary_array.shape[0] > 0:
+
+        unary_array_solver = unary_array if logit_in_solver else logit_transform(unary_array)
+        pw_array_solver = pw_array if logit_in_solver else logit_transform(pw_array)
+
+        # time_start = time.time()
+
+        res = solve_nl_lmp(unary_array_solver, pwidx_array, pw_array_solver,
+                           is_sparse_graph, solver_type, do_suppression, logit_in_solver)
+
+        unLab = np.array(res, dtype=np.uint64)
+
+        firstidx = 0
+        for cidx in cidx_list:
+            lastidx = firstidx + unary_prob[cidx].shape[0]
+            unLab[firstidx:lastidx, 0] = cidx
+            firstidx = lastidx
+
+    else:
+        unLab = np.array([])
+
+    return unLab, pos_array, unary_array, pwidx_array, pw_array
+
+
+def get_person_conf_single(sm, unProb, pos_array, pwidx_array, pw_array):
+    """
+    get single person config
+    """
+    assert len(unProb) == sm.num_keypoints
+    assert pwidx_array.shape[0] == pw_array.shape[0]
+    assert pw_array.shape[1] == 1
+
+    det_type_idx = []
+
+    firstidx = 0
+    for pidx in range(len(unProb)):
+        lastidx = firstidx + unProb[pidx].shape[0]
+
+        curidx = np.array([False] * pos_array.shape[0])
+        curidx[firstidx:lastidx] = True
+
+        firstidx = lastidx
+        det_type_idx.append(curidx)
+
+    # num people == number of heads
+    head_idx = 13
+    num_people = unProb[head_idx].shape[0]
+
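+    # Kinematic tree rooted at the head tip (keypoint 13): each key lists the
+    # neighbouring keypoints explored next when growing a person from its head.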
+    connect_graph = dict()
+    connect_graph[13] = (12,)
+    connect_graph[12] = (8, 9, 2, 3)
+    connect_graph[8] = (7,)
+    connect_graph[7] = (6,)
+    connect_graph[9] = (10,)
+    connect_graph[10] = (11,)
+    connect_graph[2] = (1,)
+    connect_graph[1] = (0,)
+    connect_graph[3] = (4,)
+    connect_graph[4] = (5,)
+
+    person_conf = np.zeros([num_people, sm.num_keypoints, 2])
+    SearchNode = namedtuple('SearchNode', ['pidx', 'kidx', 'hidx'])
+
+    search_queue = []
+
+    for pidx, hidx in enumerate(np.flatnonzero(det_type_idx[head_idx])):
+        search_queue.append(SearchNode(pidx=pidx, kidx=head_idx, hidx=hidx))
+        person_conf[pidx, head_idx, :] = pos_array[hidx, :]
+
+    assert len(search_queue) == num_people
+
+    while search_queue:
+        node = search_queue.pop()
+
+        pidx = node.pidx
+        kidx = node.kidx
+        hidx = node.hidx
+        loop_graph(kidx, connect_graph, pwidx_array, hidx, det_type_idx, pw_array, person_conf, pos_array, pidx,
+                   search_queue, SearchNode)
+
+    return person_conf
+
+
+def loop_graph(kidx, connect_graph, pwidx_array, hidx, det_type_idx, pw_array, person_conf, pos_array, pidx,
+               search_queue, SearchNode):
+    # find the closest match for the current part
+    if kidx in connect_graph:
+
+        for kidx2 in connect_graph[kidx]:
+
+            # search all pairwise with compatible type
+            best_hidx, best_pw = search_pairwise_with_compatible_type(pwidx_array, hidx, det_type_idx, pw_array,
+                                                                      kidx2)
+
+            if best_pw > 0.5:
+                person_conf[pidx, kidx2, :] = pos_array[best_hidx, :]
+                search_queue.append(SearchNode(pidx=pidx, kidx=kidx2, hidx=best_hidx))
+
+
+def search_pairwise_with_compatible_type(pwidx_array, hidx, det_type_idx, pw_array, kidx2):
+    best_hidx = None
+    best_pw = 0.0
+    for idx in range(pwidx_array.shape[0]):
+        if hidx in (pwidx_array[idx, 0], pwidx_array[idx, 1]):
+            idx2 = np.flatnonzero(pwidx_array[idx, :] != hidx)[0]
+            other_hidx = pwidx_array[idx, idx2]
+
+            if det_type_idx[kidx2][other_hidx]:
+                if pw_array[idx] > best_pw:
+                    best_hidx = other_hidx
+                    best_pw = pw_array[idx]
+    return best_hidx, best_pw
+
+
+def get_person_conf_multicut(sm, unLab, unary_array, pos_array):
+    """
+    get multicut person config
+    """
+    if unLab.shape[0] > 0:
+        num_people = int(np.max(unLab[:, 1])) + 1
+    else:
+        num_people = 0
+
+    person_conf = np.zeros([num_people, sm.num_keypoints, 2])
+    sum_prob = np.zeros([num_people, sm.num_keypoints, 1])
+
+    # compute weighted average of keypoints of the same type
+    for didx in range(unLab.shape[0]):
+        kidx = unLab[didx, 0]
+        pidx = unLab[didx, 1]
+
+        person_conf[pidx, kidx, :] += pos_array[didx, :] * unary_array[didx]
+        sum_prob[pidx, kidx] += unary_array[didx]
+
+    print("num_people: ", num_people)
+
+    for pidx in range(num_people):
+        for kidx in range(sm.num_keypoints):
+            if sum_prob[pidx, kidx] > 0:
+                person_conf[pidx, kidx, :] /= sum_prob[pidx, kidx]
+
+    return person_conf
+
+
+def compute_angle(deltaX, deltaY):
+    """
+    compute angle
+    """
+    angle = np.arctan2(deltaY, deltaX)
+
+    assert (angle > math.pi).sum() == 0
+    assert (angle < -math.pi).sum() == 0
+
+    return angle
+
+
+def wrap_angle(a):
+    """
+    wrap angle
+    """
+    larger = a > math.pi
+    smaller = a < -math.pi
+    a[larger] = a[larger] - 2 * math.pi
+    a[smaller] = a[smaller] + 2 * math.pi
+
+    return a
+
+
+def compute_features(delta_real, delta_predicted):
+    """
+    compute features
+    """
+    a = compute_angle(delta_real[:, 0], delta_real[:, 1])
+    a_forward = compute_angle(delta_predicted[:, 0], delta_predicted[:, 1])
+    delta1 = delta_real - delta_predicted
+
+    dist = np.linalg.norm(delta1, axis=1, ord=2)
+    abs_a = np.abs(wrap_angle(a - a_forward))
+    return dist, abs_a
+
+
+class SpatialModel:
+
+    def __init__(self, cfg):
+        self.num_keypoints = cfg.num_joints
+        num_keypoints = cfg.num_joints
+        self.cfg = cfg
+
+        self.graph_dict = dict()
+
+        self.same_part_pw_coef = 0.2
+
+        self.X_min = [[None] * num_keypoints for idx in range(num_keypoints)]
+        self.X_max = [[None] * num_keypoints for idx in range(num_keypoints)]
+        self.w = [[None] * num_keypoints for idx in range(num_keypoints)]
+
+    def load(self):
+        for cidx1 in range(self.num_keypoints):
+            for cidx2 in range(cidx1 + 1, self.num_keypoints):
+                model_name = "{}/spatial_model_cidx_{}_{}.mat".format(self.cfg.pairwise_model_dir, cidx1 + 1, cidx2 + 1)
+                # print "loading:", model_name
+                if not os.path.isfile(model_name):
+                    continue
+
+                spatial_model = sio.loadmat(model_name)
+
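+                # loadmat wraps MATLAB structs in nested object arrays, hence the
+                # chained [0][0] indexing when pulling out the trained parameters.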
+                self.X_max[cidx1][cidx2] = spatial_model['spatial_model']['training_opts'][0][0][0]['X_max'][0][0]
+                self.X_min[cidx1][cidx2] = spatial_model['spatial_model']['training_opts'][0][0][0]['X_min'][0][0]
+                self.w[cidx1][cidx2] = spatial_model['spatial_model']['log_reg'][0][0][0]['w'][0][0][:]
+
+        if not self.cfg.tensorflow_pairwise_order:
+            tmpval = sio.loadmat(self.cfg.pairwise_stats_fn)
+            self.graph = tmpval['graph']
+            self.pairwise_means = tmpval['means']
+            self.pairwise_std_devs = tmpval['std_devs']
+
+            for gidx in range(self.graph.shape[0]):
+                cidx1 = self.graph[gidx, 0]
+                cidx2 = self.graph[gidx, 1]
+                self.graph_dict[(cidx1, cidx2)] = gidx
+
+    def get_fwd_bwd_index(self, cidx1, cidx2):
+        if self.cfg.tensorflow_pairwise_order:
+            fwd_idx = get_pairwise_index(cidx1, cidx2, self.cfg.num_joints)
+            bwd_idx = get_pairwise_index(cidx2, cidx1, self.cfg.num_joints)
+        else:
+            fwd_idx = self.graph_dict[(cidx1 + 1, cidx2 + 1)]
+            bwd_idx = self.graph_dict[(cidx2 + 1, cidx1 + 1)]
+        return fwd_idx, bwd_idx
+
+    def need_this_pairwise(self, cidx1, cidx2):
+        if cidx1 == cidx2:
+            return True
+        sparse_graph = self.cfg.sparse_graph
+        return not sparse_graph or [cidx1, cidx2] in sparse_graph
+
+    def eval(self, cidx1, cidx2, detections):
+        unPos = detections.coord
+
+        idx_type1 = np.array(range(unPos[cidx1].shape[0]))
+        idx_type2 = np.array(range(unPos[cidx2].shape[0]))
+
+        assert idx_type1.shape[0] > 0
+        assert idx_type2.shape[0] > 0
+
+        num_edges = len(idx_type1) * len(idx_type2)
+
+        tmpidx1, tmpidx2 = np.meshgrid(idx_type1, idx_type2)
+        ptidx = np.hstack((tmpidx1.T.reshape((num_edges, 1)), tmpidx2.T.reshape((num_edges, 1))))
+
+        if cidx1 != cidx2:
+            cur_prob = self.compute_different_part_pairwise(cidx1, cidx2, detections, ptidx, num_edges)
+        else:
+            cur_prob = None
+            ptidx = ptidx[ptidx[:, 0] < ptidx[:, 1]]
+
+            delta = unPos[cidx2][ptidx[:, 1], :] - unPos[cidx1][ptidx[:, 0], :]
+            dists = np.linalg.norm(delta, axis=1, ord=2)
+
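+            # Same-part affinity: logistic fall-off with the pixel distance between
+            # the two detections.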
+            cur_prob = 1. / (1 + np.exp(self.same_part_pw_coef * dists - 7.5))
+
+        return cur_prob, ptidx
+
+    def compute_different_part_pairwise(self, cidx1, cidx2, detections, ptidx, num_edges):
+        unPos = detections.coord
+        unPos_grid = detections.coord_grid
+        nextReg = detections.pairwise
+
+        fwd_idx, bwd_idx = self.get_fwd_bwd_index(cidx1, cidx2)
+
+        assert ptidx.shape[0] > 0
+
+        delta_real_forward = unPos[cidx2][ptidx[:, 1], :] - unPos_grid[cidx1][ptidx[:, 0], :]
+        delta_real_backward = unPos[cidx1][ptidx[:, 0], :] - unPos_grid[cidx2][ptidx[:, 1], :]
+
+        delta_forward = nextReg[cidx1][ptidx[:, 0], fwd_idx, :]
+        delta_backward = nextReg[cidx2][ptidx[:, 1], bwd_idx, :]
+
+        dist1, abs_a1 = compute_features(delta_real_forward, delta_forward)
+        dist2, abs_a2 = compute_features(delta_real_backward, delta_backward)
+
+        featAugm = np.hstack((dist1.reshape(num_edges, 1), abs_a1.reshape(num_edges, 1), dist2.reshape(num_edges, 1),
+                              abs_a2.reshape(num_edges, 1)))
+
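+        # Augment the features with their exponentials and a bias column, then
+        # min-max normalise using the statistics stored with the spatial model.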
+        featAugm = np.hstack((featAugm, np.exp(-featAugm), np.ones((num_edges, 1))))
+        temp = self.X_max[cidx1][cidx2] - self.X_min[cidx1][cidx2]
+        featAugm[:, :-1] = (featAugm[:, :-1] - self.X_min[cidx1][cidx2]) / temp
+        cur_prob = 1 / (1 + np.exp(-featAugm.dot(self.w[cidx1][cidx2])))
+
+        return cur_prob
diff --git a/research/cv/ArtTrack/src/multiperson/visualize.py b/research/cv/ArtTrack/src/multiperson/visualize.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd062a5cb5134132a238ab3efa24cbff3be95b59
--- /dev/null
+++ b/research/cv/ArtTrack/src/multiperson/visualize.py
@@ -0,0 +1,288 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import math
+
+import matplotlib.patches as mpatches
+import matplotlib.pyplot as plt
+import munkres
+import numpy as np
+import scipy.spatial
+from PIL import Image
+
+
+def _npcircle(image, cx, cy, radius, color, transparency=0.0):
+    """Draw a circle on an image using only numpy methods."""
+    radius = int(radius)
+    cx = int(cx)
+    cy = int(cy)
+    y, x = np.ogrid[-radius: radius, -radius: radius]
+    index = x ** 2 + y ** 2 <= radius ** 2
+    temp = image[cy - radius:cy + radius, cx - radius:cx + radius][index].astype('float32') * transparency + np.array(
+        color).astype('float32') * (1.0 - transparency)
+    image[cy - radius:cy + radius, cx - radius:cx + radius][index] = temp.astype('uint8')
+
+
+def check_point(cur_x, cur_y, minx, miny, maxx, maxy):
+    """
+    check whether the point is in the area
+    """
+    return minx < cur_x < maxx and miny < cur_y < maxy
+
+
+def visualize_joints(image, pose):
+    """
+    visualize joints
+    """
+    _marker_size = 8
+    minx = 2 * _marker_size
+    miny = 2 * _marker_size
+    maxx = image.shape[1] - 2 * _marker_size
+    maxy = image.shape[0] - 2 * _marker_size
+    num_joints = pose.shape[0]
+
+    visim = image.copy()
+    colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [0, 245, 255], [255, 131, 250], [255, 255, 0],
+              [255, 0, 0], [0, 255, 0], [0, 0, 255], [0, 245, 255], [255, 131, 250], [255, 255, 0],
+              [0, 0, 0], [255, 255, 255], [255, 0, 0], [0, 255, 0], [0, 0, 255]]
+    for p_idx in range(num_joints):
+        cur_x = pose[p_idx, 0]
+        cur_y = pose[p_idx, 1]
+        if check_point(cur_x, cur_y, minx, miny, maxx, maxy):
+            _npcircle(visim,
+                      cur_x, cur_y,
+                      _marker_size,
+                      colors[p_idx],
+                      0.0)
+    return visim
+
+
+def show_heatmaps(cfg, img, scmap, pose, cmap="jet"):
+    interp = "bilinear"
+    all_joints = cfg.all_joints
+    all_joints_names = cfg.all_joints_names
+    subplot_width = 3
+    subplot_height = math.ceil((len(all_joints) + 1) / subplot_width)
+    _, axarr = plt.subplots(subplot_height, subplot_width)
+    for pidx, part in enumerate(all_joints):
+        plot_j = (pidx + 1) // subplot_width
+        plot_i = (pidx + 1) % subplot_width
+        scmap_part = np.sum(scmap[:, :, part], axis=2)
+        scmap_part = Image.fromarray(scmap_part).resize((scmap_part.shape[1] * 8, scmap_part.shape[0] * 8),
+                                                        Image.BICUBIC)
+        scmap_part = np.array(scmap_part)
+        # scmap_part = imresize(scmap_part, 8.0, interp='bicubic')
+        scmap_part = np.lib.pad(scmap_part, ((4, 0), (4, 0)), 'minimum')
+        curr_plot = axarr[plot_j, plot_i]
+        curr_plot.set_title(all_joints_names[pidx])
+        curr_plot.axis('off')
+        curr_plot.imshow(img, interpolation=interp)
+        curr_plot.imshow(scmap_part, alpha=.5, cmap=cmap, interpolation=interp)
+
+    curr_plot = axarr[0, 0]
+    curr_plot.set_title('Pose')
+    curr_plot.axis('off')
+    curr_plot.imshow(visualize_joints(img, pose))
+
+    plt.show()
+
+
+def show_arrows(cfg, img, pose, arrows):
+    fig = plt.figure()
+    a = fig.add_subplot(2, 2, 1)
+    plt.imshow(img)
+    a.set_title('Initial Image')
+
+    b = fig.add_subplot(2, 2, 2)
+    plt.imshow(img)
+    b.set_title('Predicted Pairwise Differences')
+
+    color_opt = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
+    joint_pairs = [(6, 5), (6, 11), (6, 8), (6, 15), (6, 0)]
+    color_legends = []
+    for _id, joint_pair in enumerate(joint_pairs):
+        end_joint_side = ("r " if joint_pair[1] % 2 == 0 else "l ") if joint_pair[1] != 0 else ""
+        end_joint_name = end_joint_side + cfg.all_joints_names[int(math.ceil(joint_pair[1] / 2))]
+        start = arrows[joint_pair][0]
+        end = arrows[joint_pair][1]
+        b.arrow(start[0], start[1], end[0] - start[0], end[1] - start[1], head_width=3, head_length=6,
+                fc=color_opt[_id],
+                ec=color_opt[_id], label=end_joint_name)
+        color_legend = mpatches.Patch(color=color_opt[_id], label=end_joint_name)
+        color_legends.append(color_legend)
+
+    plt.legend(handles=color_legends, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
+    plt.show()
+
+
+def waitforbuttonpress():
+    plt.waitforbuttonpress(timeout=1)
+
+
+min_match_dist = 200
+marker_size = 5
+
+draw_conf_min_count = 3
+
+
+def get_ref_points(person_conf):
+    avg_conf = np.sum(person_conf, axis=1) / person_conf.shape[1]
+
+    # last points is tip of the head -> use it as reference
+    ref_points = person_conf[:, -1, :]
+
+    # use average of other points if head tip is missing
+    emptyidx = (np.sum(ref_points, axis=1) == 0)
+    ref_points[emptyidx, :] = avg_conf[emptyidx, :]
+
+    return ref_points
+
+
+class PersonDraw:
+    def __init__(self):
+        self.mk = munkres.Munkres()
+
+        self.prev_person_conf = np.zeros([0, 1])
+        self.prev_color_assignment = None
+
+        track_colors_str = ["#F5591E",
+                            "#3870FB",
+                            "#FE5DB0",
+                            "#B4A691",
+                            "#43053F",
+                            "#3475B1",
+                            "#642612",
+                            "#B3B43D",
+                            "#DD9BFE",
+                            "#28948D",
+                            "#E99D53",
+                            "#012B46",
+                            "#9D2DA3",
+                            "#04220A",
+                            "#62CB22",
+                            "#EE8F91",
+                            "#D71638",
+                            "#00613A",
+                            "#318918",
+                            "#B770FF",
+                            "#82C091",
+                            "#6C1333",
+                            "#973405",
+                            "#B19CB2",
+                            "#F6267B",
+                            "#284489",
+                            "#97BF17",
+                            "#3B899C",
+                            "#931813",
+                            "#FA76B6"]
+
+        self.track_colors = [(int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16)) for s in track_colors_str]
+
+    def draw(self, visim, dataset, person_conf):
+        minx = 2 * marker_size
+        miny = 2 * marker_size
+        maxx = visim.shape[1] - 2 * marker_size
+        maxy = visim.shape[0] - 2 * marker_size
+
+        num_people = person_conf.shape[0]
+        color_assignment = dict()
+
+        # MA: assign same color to matching body configurations
+        if self.prev_person_conf.shape[0] > 0 and person_conf.shape[0] > 0:
+            ref_points = get_ref_points(person_conf)
+            prev_ref_points = get_ref_points(self.prev_person_conf)
+
+            # MA: this munkres implementation assumes that num(rows) >= num(columns)
+            if person_conf.shape[0] <= self.prev_person_conf.shape[0]:
+                cost_matrix = scipy.spatial.distance.cdist(ref_points, prev_ref_points)
+            else:
+                cost_matrix = scipy.spatial.distance.cdist(prev_ref_points, ref_points)
+
+            assert cost_matrix.shape[0] <= cost_matrix.shape[1]
+            conf_assign = self.mk.compute(cost_matrix)
+
+            if person_conf.shape[0] > self.prev_person_conf.shape[0]:
+                conf_assign = [(idx2, idx1) for idx1, idx2 in conf_assign]
+                cost_matrix = cost_matrix.T
+
+            for pidx1, pidx2 in conf_assign:
+                if cost_matrix[pidx1][pidx2] < min_match_dist:
+                    color_assignment[pidx1] = self.prev_color_assignment[pidx2]
+
+        print("#tracked objects:", len(color_assignment))
+
+        free_coloridx = sorted(list(set(range(len(self.track_colors))).difference(set(color_assignment.values()))),
+                               reverse=True)
+
+        for pidx in range(num_people):
+            # color_idx = pidx % len(self.track_colors)
+            if pidx in color_assignment:
+                color_idx = color_assignment[pidx]
+            else:
+                if free_coloridx:
+                    color_idx = free_coloridx[-1]
+                    free_coloridx = free_coloridx[:-1]
+                else:
+                    color_idx = np.random.randint(len(self.track_colors))
+
+                color_assignment[pidx] = color_idx
+
+            assert color_idx < len(self.track_colors)
+            if np.sum(person_conf[pidx, :, 0] > 0) < draw_conf_min_count:
+                continue
+
+            for kidx1, kidx2 in dataset.get_pose_segments():
+                p1 = (int(math.floor(person_conf[pidx, kidx1, 0])), int(math.floor(person_conf[pidx, kidx1, 1])))
+                p2 = (int(math.floor(person_conf[pidx, kidx2, 0])), int(math.floor(person_conf[pidx, kidx2, 1])))
+
+                if check_point(p1[0], p1[1], minx, miny, maxx, maxy) and check_point(p2[0], p2[1], minx, miny, maxx,
+                                                                                     maxy):
+                    color = np.array(self.track_colors[color_idx][::-1], dtype=np.float64) / 255.0
+                    plt.plot([p1[0], p2[0]], [p1[1], p2[1]], marker='o', linestyle='solid', linewidth=2.0, color=color)
+
+        self.prev_person_conf = person_conf
+        self.prev_color_assignment = color_assignment
+
+
+keypoint_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 0, 255), (255, 255, 0),
+                   (255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 0, 255), (255, 255, 0),
+                   (255, 0, 0), (0, 255, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255)]
+
+
+def visualize_detections(cfg, img, detections):
+    vis_scale = 1.0
+    _marker_size = 4
+
+    minx = 2 * _marker_size
+    miny = 2 * _marker_size
+    maxx = img.shape[1] - 2 * _marker_size
+    maxy = img.shape[0] - 2 * _marker_size
+
+    unPos = detections.coord
+    joints_to_visualise = range(cfg.num_joints)
+    visim_dets = img.copy()
+    for pidx in joints_to_visualise:
+        for didx in range(unPos[pidx].shape[0]):
+            cur_x = unPos[pidx][didx, 0] * vis_scale
+            cur_y = unPos[pidx][didx, 1] * vis_scale
+
+            # / cfg.global_scale
+
+            if check_point(cur_x, cur_y, minx, miny, maxx, maxy):
+                _npcircle(visim_dets,
+                          cur_x, cur_y,
+                          _marker_size,
+                          keypoint_colors[pidx])
+    return visim_dets
diff --git a/research/cv/ArtTrack/src/tool/__init__.py b/research/cv/ArtTrack/src/tool/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/ArtTrack/src/tool/decorator.py b/research/cv/ArtTrack/src/tool/decorator.py
new file mode 100644
index 0000000000000000000000000000000000000000..415be68d35ff83b27f11745dd8bbef6ee5b816f2
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/decorator.py
@@ -0,0 +1,17 @@
+import pprint
+
+from src.config import check_config
+from src.log import log
+
+
+def process_cfg(func):
+    """
+    process config decorator
+    """
+
+    def wrapper(cfg, *args, **kwargs):
+        cfg = check_config(cfg)
+        log.info("config: %s", pprint.pformat(cfg))
+        return func(cfg, *args, **kwargs)
+
+    return wrapper
diff --git a/research/cv/ArtTrack/src/tool/eval/__init__.py b/research/cv/ArtTrack/src/tool/eval/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/ArtTrack/src/tool/eval/coco.py b/research/cv/ArtTrack/src/tool/eval/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b933f506b4b2d33b9deaaf26edc4eb05f037070
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/eval/coco.py
@@ -0,0 +1,85 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+from ..decorator import process_cfg
+
+
+def apply_threshold(in_file, threshold):
+    """
+    apply threshold
+    """
+    out_file = in_file[:-5] + '-' + str(threshold) + '.json'
+
+    with open(in_file) as data_file:
+        data = json.load(data_file)
+
+    for person_id in range(len(data)):
+        keypoints = data[person_id]["keypoints"]
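+        # COCO keypoints come as (x, y, score) triplets; binarise every third value
+        # (the score) against the threshold and cast x, y to int.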
+        keypoints = [int(keypoints[i] > threshold) if i % 3 == 2 else int(keypoints[i]) for i in range(len(keypoints))]
+        data[person_id]["keypoints"] = keypoints
+
+    with open(out_file, 'w') as outfile:
+        json.dump(data, outfile)
+
+    return out_file
+
+
+def eval_init(cfg, prediction=None):
+    """
+    init
+    """
+    dataset_path = cfg.dataset.path
+    dataset_phase = cfg.dataset.phase
+    dataset_ann = cfg.dataset.ann
+    threshold = 0
+
+    # initialize coco_gt api
+    ann_file = '%s/annotations/%s_%s.json' % (dataset_path, dataset_ann, dataset_phase)
+    coco_gt = COCO(ann_file)
+
+    # initialize coco_pred api
+    pred_file = apply_threshold(prediction or cfg.gt_segm_output, threshold)
+    coco_pred = coco_gt.loadRes(pred_file)
+
+    return coco_gt, coco_pred
+
+
+@process_cfg
+def eval_coco(cfg=None, prediction=None):
+    """
+    eval coco entry
+    """
+    coco_gt, coco_pred = eval_init(cfg, prediction)
+    eval_mscoco_with_segm(coco_gt, coco_pred)
+
+
+def eval_mscoco_with_segm(coco_gt, coco_pred):
+    """
+    eval mscoco
+
+    Args:
+        coco_gt: ground truth
+        coco_pred: prediction
+    """
+    # running evaluation
+    coco_eval = COCOeval(coco_gt, coco_pred, "keypoints")
+    coco_eval.evaluate()
+    coco_eval.accumulate()
+    coco_eval.summarize()
diff --git a/research/cv/ArtTrack/src/tool/eval/multiple.py b/research/cv/ArtTrack/src/tool/eval/multiple.py
new file mode 100644
index 0000000000000000000000000000000000000000..47e1fed285d30fa5642eb8744902bcd535bac360
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/eval/multiple.py
@@ -0,0 +1,191 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import os
+import json
+from json import encoder
+
+import matplotlib.pyplot as plt
+import mindspore as ms
+import numpy as np
+from scipy import io as sio
+
+from src.dataset.coco import MSCOCO
+from src.dataset.pose import Batch
+from src.log import log
+from src.model.pose import PoseNet, PoseNetTest
+from src.model.predict import argmax_arrows_predict, argmax_pose_predict, extract_cnn_output, pose_predict_with_gt_segm
+from src.multiperson import visualize
+from src.multiperson.detections import extract_detections
+from src.multiperson.predict import eval_graph, get_person_conf_multicut, SpatialModel
+from src.multiperson.visualize import PersonDraw
+from src.tool.decorator import process_cfg
+
+encoder.FLOAT_REPR = lambda o: format(o, '.2f')
+
+
+def test_one(cfg, test_net, sample, score_maps_cached, cache_score_maps, visual, dataset, sm, graph=False):
+    """
+    predict one sample
+    Args:
+        cfg: config
+        test_net: eval net
+        sample: sample
+        cache_score_maps: if True, cache score maps to scoremap_dir in cfg
+        visual: if True, visualize prediction
+        score_maps_cached: if True, load score from cache
+        graph: if True, calculate graph
+        dataset: dataset object
+        sm: spatial model
+    """
+    coco_results = []
+    draw_multi = PersonDraw()
+    cache_name = "{}.mat".format(sample[Batch.data_item].coco_id)
+    if not score_maps_cached:
+        outputs_np, pairwise_pred, locref = test_net(
+            ms.Tensor(np.expand_dims(sample[Batch.inputs], axis=0),
+                      dtype=ms.dtype.float32))
+        scmap, locref, pairwise_diff = extract_cnn_output(outputs_np.transpose([0, 2, 3, 1]).asnumpy(),
+                                                          locref.transpose([0, 2, 3, 1]).asnumpy(),
+                                                          pairwise_pred.transpose([0, 2, 3, 1]).asnumpy(),
+                                                          cfg, dataset.pairwise_stats)
+
+        if cache_score_maps:
+            out_fn = os.path.join(cfg.scoremap_dir, cache_name)
+            d = {'scoremaps': scmap.astype('float32'),
+                 'locreg_pred': locref.astype('float32'),
+                 'pairwise_diff': pairwise_diff.astype('float32')}
+            sio.savemat(out_fn, mdict=d)
+    else:
+        # cache_name = '1.mat'
+        full_fn = os.path.join(cfg.cached_scoremaps, cache_name)
+        mlab = sio.loadmat(full_fn)
+        scmap = mlab["scoremaps"]
+        locref = mlab["locreg_pred"]
+        pairwise_diff = mlab["pairwise_diff"]
+
+    person_conf_multi = None
+    if graph:
+        detections = extract_detections(cfg, scmap, locref, pairwise_diff)
+        unLab, pos_array, unary_array, _, _ = eval_graph(sm, detections)
+        person_conf_multi = get_person_conf_multicut(sm, unLab, unary_array, pos_array)
+
+    coco_img_results = None
+    if cfg.use_gt_segm:
+        coco_img_results = pose_predict_with_gt_segm(scmap, locref, cfg.stride, sample[Batch.data_item].gt_segm,
+                                                     sample[Batch.data_item].coco_id)
+        coco_results += coco_img_results
+
+    if visual:
+        img = np.transpose(np.squeeze(sample[Batch.inputs]).astype('uint8'), [1, 2, 0])
+        pose = argmax_pose_predict(scmap, locref, cfg.stride)
+        arrows = argmax_arrows_predict(scmap, locref, pairwise_diff, cfg.stride)
+        visualize.show_arrows(cfg, img, pose, arrows)
+        visualize.waitforbuttonpress()
+        # visualize.show_heatmaps(cfg, img, scmap, pose)
+
+        # visualize part detections after NMS
+        # visim_dets = visualize_detections(cfg, img, detections)
+        # plt.imshow(visim_dets)
+        # plt.show()
+        # visualize.waitforbuttonpress()
+
+        if person_conf_multi is not None and graph:
+            visim_multi = img.copy()
+            draw_multi.draw(visim_multi, dataset, person_conf_multi)
+            plt.imshow(visim_multi)
+            plt.show()
+
+        if coco_img_results is not None and coco_img_results:
+            dataset.visualize_coco(coco_img_results, sample[Batch.data_item].visibilities)
+        visualize.waitforbuttonpress()
+    return coco_results
+
+
+def test_list(cfg, test_net, idx_list, dataset, score_maps_cached, cache_score_maps, visual, sm, graph):
+    """
+    predict multiple samples
+    Args:
+        cfg: config
+        test_net: eval net
+        idx_list: sample indices
+        cache_score_maps: if True, cache score maps to scoremap_dir in cfg
+        visual: if True, visualize prediction
+        score_maps_cached: if True, load score from cache
+        graph: if True, calculate graph
+        dataset: dataset object
+        sm: spatial model
+    """
+    coco_results = []
+    count = 0
+    total = len(idx_list)
+    for k in idx_list:
+        count += 1
+        log.info('processing image id: %s %s/%s', k, count, total)
+
+        batch = dataset.get_item(k)
+        result = test_one(cfg, test_net, batch, score_maps_cached, cache_score_maps, visual, dataset, sm, graph)
+        if result is not None:
+            coco_results.extend(result)
+    return coco_results
+
+
+@process_cfg
+def test(cfg, cache_score_maps=False, visual=False, development=False, score_maps_cached=False, graph=False,
+         output=None, range_num=None, range_index=None):
+    """
+    entry for predicting multiple coco
+    Args:
+        cfg: config
+        cache_score_maps: if True, cache score maps to scoremap_dir in cfg
+        visual: if True, visualize prediction
+        development: development mode; only predict the first 10 samples
+        score_maps_cached: if True, load score from cache
+        graph: if True, calculate graph
+        output: path to output
+        range_num: split the eval dataset into multiple ranges; must be used together with range_index
+        range_index: only predict the specified range; indices start at 0
+    """
+    cfg.train = False
+    # noinspection PyUnresolvedReferences
+    ms.context.set_context(**cfg.context)
+    dataset = MSCOCO(cfg)
+
+    sm = SpatialModel(cfg)
+    sm.load()
+
+    net = PoseNet(cfg=cfg)
+    test_net = PoseNetTest(net, cfg)
+    if hasattr(cfg, 'load_ckpt') and os.path.exists(cfg.load_ckpt):
+        ms.load_checkpoint(cfg.load_ckpt, net=test_net)
+
+    if cache_score_maps:
+        out_dir = cfg.scoremap_dir
+        if not os.path.exists(out_dir):
+            os.makedirs(out_dir)
+
+    num_images = len(dataset) if not development else min(10, dataset.num_images)
+    coco_results = []
+    if range_num is None or range_num == 1 or range_index is None:
+        coco_results.extend(
+            test_list(cfg, test_net, range(num_images), dataset, score_maps_cached, cache_score_maps, visual,
+                      sm, graph))
+    else:
+        lists = np.array_split(range(num_images), range_num)
+        coco_results.extend(
+            test_list(cfg, test_net, lists[range_index], dataset, score_maps_cached, cache_score_maps, visual,
+                      sm, graph))
+    if cfg.use_gt_segm:
+        with open(output or cfg.gt_segm_output, 'w') as outfile:
+            json.dump(coco_results, outfile)
diff --git a/research/cv/ArtTrack/src/tool/eval/pck.py b/research/cv/ArtTrack/src/tool/eval/pck.py
new file mode 100644
index 0000000000000000000000000000000000000000..b196662bff1988bdac2f6cc6ee591891aab5ef5d
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/eval/pck.py
@@ -0,0 +1,51 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import numpy as np
+from numpy import array as arr
+
+
+def enclosing_rect(points):
+    """
+    enclose rectangle
+    """
+    xs = points[:, 0]
+    ys = points[:, 1]
+    return np.array([np.amin(xs), np.amin(ys), np.amax(xs), np.amax(ys)])
+
+
+def rect_size(rect):
+    """
+    get rectangle size
+    """
+    return np.array([rect[2] - rect[0], rect[3] - rect[1]])
+
+
+def print_results(pck, cfg):
+    """
+    print result
+    """
+    _str = ""
+    for heading in cfg.all_joints_names + ["total"]:
+        _str += " & " + heading
+    print(_str)
+
+    _str = ""
+    all_joint_ids = cfg.all_joints + [np.arange(cfg.num_joints)]
+    for j_ids in all_joint_ids:
+        j_ids_np = arr(j_ids)
+        pck_av = np.mean(pck[j_ids_np])
+        _str += " & {0:.1f}".format(pck_av)
+    print(_str)
diff --git a/research/cv/ArtTrack/src/tool/preprocess/__init__.py b/research/cv/ArtTrack/src/tool/preprocess/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/research/cv/ArtTrack/src/tool/preprocess/crop.py b/research/cv/ArtTrack/src/tool/preprocess/crop.py
new file mode 100644
index 0000000000000000000000000000000000000000..8eec6bb274b96b913c0f8be975d2f798a6ff1367
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/preprocess/crop.py
@@ -0,0 +1,336 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+import os
+from copy import deepcopy
+
+import numpy as np
+from easydict import EasyDict
+from imageio import imread, imsave
+from PIL import Image
+
+from src.log import log
+from src.tool.preprocess.utils import json_default, pad_zeros, util_set_scale
+
+
+def crop_data(option):
+    """
+    crop image
+
+    Args:
+        option: crop parameters
+
+    """
+    log.debug("crop_data()")
+
+    b_train = option["bTrain"]
+    ref_height = option["refHeight"]
+    delta_crop = option["deltaCrop"]
+    b_single = option["bSingle"]
+    b_crop_isolated = option["bCropIsolated"]
+    b_multi = option["bMulti"]
+    b_objpos_offset = option["bObjposOffset"]
+    save_dir = option["saveDir"]
+    dataset = option["dataset"]
+    # annolist = dataset.annolist
+    img_list = np.array(dataset['img_list'])
+
+    log.debug('bTrain: %s', b_train)
+    log.debug('refHeight: %s', ref_height)
+    log.debug('deltaCrop: %s', delta_crop)
+    log.debug('bSingle: %s', b_single)
+    log.debug('bCropIsolated: %s', b_crop_isolated)
+    log.debug('bMulti: %s', b_multi)
+    log.debug('bObjposOffset: %s', b_objpos_offset)
+
+    rect_ids = list()
+    if b_single:
+        mode = 'singlePerson'
+        for _, v in enumerate(dataset['img_list']):
+            single_person = v['single_person']
+            rect_ids.append(single_person)
+    else:
+        mode = 'multPerson'
+        for _, v in enumerate(dataset['img_list']):
+            single_person = v['single_person']
+            if not v['rect']:
+                rect_ids.append([])
+            else:
+                r_len = len(v['rect'])
+                rect_ids.append(np.setdiff1d(
+                    np.arange(0, r_len, dtype=np.uint8), single_person))
+    rect_ids = np.asarray(rect_ids, dtype=object)
+    annalists_full_name = os.path.join(
+        save_dir, "annolist-{}-h{}.json".format(mode, ref_height))
+    if os.path.exists(annalists_full_name):
+        with open(annalists_full_name, 'r') as f:
+            try:
+                img_list = json.load(f)
+                return img_list
+            except json.decoder.JSONDecodeError:
+                pass
+
+    img_ids1 = filter(lambda item: len(item[1]) != 0, enumerate(rect_ids))
+    img_ids1 = map(lambda item: item[0], img_ids1)
+    img_ids1 = [i for i in img_ids1]
+
+    img_ids2 = filter(lambda item: item[1]['img_train'] == b_train,
+                      enumerate(dataset['img_list']))
+    img_ids2 = map(lambda item: item[0], img_ids2)
+    img_ids2 = [i for i in img_ids2]
+
+    log.debug("imgidxs1 len: %s imgidxs2 len: %s", len(img_ids1), len(img_ids2))
+    imgidxs = np.intersect1d(img_ids1, img_ids2)
+    imgidxs_sub = imgidxs
+
+    img_list_subset = util_set_scale(img_list[imgidxs_sub], 200)
+    log.debug("img_list_subset shape: %s", img_list_subset.shape)
+
+    if b_train == 0:
+        assert False
+        result_list = func_crop_data_test()
+    else:
+        if b_multi == 1:
+            result_list = func_crop_data_test()
+        else:
+            result_list = func_crop_data_train(
+                option, img_list_subset, imgidxs_sub, rect_ids[imgidxs_sub])
+
+    with open(annalists_full_name, 'w') as f:
+        f.write(json.dumps(result_list, default=json_default))
+    return result_list
+
+
+def func_crop_data_test():
+    return []
+
+
+def get_points_all(points):
+    points_all = np.array([])
+    for f_p in points:
+        pp = [f_p['x'], f_p['y']]
+        points_all = np.r_[points_all, pp]
+    return points_all
+
+
+def get_rect_position(points):
+    points_all = get_points_all(points)
+    points_all = points_all.reshape((-1, 2))
+    log.debug("points_all: %s", points_all)
+    min_x = np.min(points_all[:, 0])
+    max_x = np.max(points_all[:, 0])
+    min_y = np.min(points_all[:, 1])
+    max_y = np.max(points_all[:, 1])
+    rp = EasyDict(x1=min_x, x2=max_x, y1=min_y, y2=max_y)
+    log.debug("RectPosition:%s", rp)
+    return rp
+
+
+def get_scale_and_delta(ref_height, rect_value, img, delta_crop):
+    if ref_height > 0:
+        sc = rect_value['scale'] * 200 / ref_height
+        img_sc = np.array(
+            Image.fromarray(img).resize((int(img.shape[1] / sc), int(img.shape[0] / sc)), Image.BICUBIC))
+        delta_x = delta_crop
+        delta_y = delta_crop
+    else:
+        sc = 1.0
+        img_sc = img
+        delta_x = np.round(delta_crop * rect_value['scale'])
+        delta_y = np.round(delta_crop * rect_value['scale'])
+    log.debug('sc: %s', sc)
+    log.debug('img_sc shape: %s', img_sc.shape)
+    log.debug('delta_x: %s', delta_x)
+    log.debug('delta_y: %s', delta_y)
+    return sc, img_sc, delta_x, delta_y
+
+
+def get_position(rp, sc):
+    pos = EasyDict(
+        x1=np.round(rp.x1 / sc),
+        x2=np.round(rp.x2 / sc),
+        y1=np.round(rp.y1 / sc),
+        y2=np.round(rp.y2 / sc),
+    )
+    log.debug("pos %s", pos)
+    return pos
+
+
+def get_position_new(pos, delta_x, delta_y, img_sc):
+    new = EasyDict(
+        x1=np.round(max(1, pos.x1 - delta_x)),
+        x2=np.round(min(img_sc.shape[1], pos.x2 + delta_x)),
+        y1=max(1, pos.y1 - delta_y),
+        y2=min(img_sc.shape[0], pos.y2 + delta_y),
+    )
+    log.debug("1st new %s", new)
+    return new
+
+
+def update_position_new(b_crop_isolated, rect, rect2, r_id, img_list, img_id, sc, pos, ref_height, rect_value, pos_new):
+    if b_crop_isolated and len(rect) > 1:
+
+        points2_all = []
+
+        for r_id2, rect_value2 in enumerate(rect2):
+            if r_id2 == r_id:
+                continue
+            points2 = rect_value2['points']
+            if points2 is None or not points2:
+                continue
+            for f_p in points2:
+                pp = [f_p['x'], f_p['y']]
+                points2_all.append(pp)
+        points2_all = np.array(points2_all)
+        log.debug("points2_all: %s", points2_all)
+        if points2_all.size:
+            def max_index(d, idx):
+                return np.argmax(d[idx])
+
+            log.debug("img_list len :%s img_id:%d r_id:%s", len(img_list), img_id, r_id)
+            points2_all = np.true_divide(points2_all, sc)
+            d = points2_all[:, 0] - pos.x1
+            idx = np.where(d < 0)[0]
+            # log.debug("idx:%s max_index:%s", idx, max_index(d, idx))
+            pos_x1other = None if idx.size == 0 else points2_all[idx[max_index(d, idx)], 0]
+
+            d = points2_all[:, 1] - pos.y1
+            idx = np.where(d < 0)[0]
+            # pos_y1other = None if not idx.any() else points2_all[idx[max_index(d, idx)], 1]
+
+            d = pos.x2 - points2_all[:, 0]
+            idx = np.where(d < 0)[0]
+            pos_x2other = None if idx.size == 0 else points2_all[idx[max_index(d, idx)], 0]
+
+            d = pos.y2 - points2_all[:, 1]
+            idx = np.where(d < 0)[0]
+            # pos_y2other = None if not idx.any() else points2_all[idx[max_index(d, idx)], 1]
+
+            if ref_height > 0:
+                delta2 = ref_height / 200 * 10
+            else:
+                delta2 = rect_value['scale'] * 10
+
+            if pos_x1other is not None:
+                pos_new.x1 = np.round(max(pos_new.x1, pos_x1other + delta2))
+
+            if pos_x2other is not None:
+                pos_new.x2 = np.round(min(pos_new.x2, pos_x2other - delta2))
+    pos_new.y1 = int(pos_new.y1)
+    pos_new.y2 = int(pos_new.y2)
+    pos_new.x1 = int(pos_new.x1)
+    pos_new.x2 = int(pos_new.x2)
+
+    log.debug("2nd new: %s", pos_new)
+    return pos_new
+
+
+def transform_annotation(points, pos, sc, rect_value):
+    # transfer annotations
+    log.debug("before transfer: %s pos.x1: %s pos.y1: %s sc: %s", points, pos.x1, pos.y1, sc)
+    for pid in points:
+        pid['x'] = pid['x'] / sc - pos.x1 + 1
+        pid['y'] = pid['y'] / sc - pos.y1 + 1
+    log.debug("after transfer: %s pos_new.x1: %s pos_new.y1: %s sc: %s", points, pos.x1, pos.y1, sc)
+    rect_value['x1'] = rect_value['x1'] / sc - pos.x1 + 1
+    rect_value['y1'] = rect_value['y1'] / sc - pos.y1 + 1
+    rect_value['x2'] = rect_value['x2'] / sc - pos.x1 + 1
+    rect_value['y2'] = rect_value['y2'] / sc - pos.y1 + 1
+
+    rect_value['objpos_x'] = rect_value['objpos_x'] / sc - pos.x1 + 1
+    rect_value['objpos_y'] = rect_value['objpos_y'] / sc - pos.y1 + 1
+
+
+def get_annotation(annolists2, f_name, rect_value, image_size, num_crops):
+    if not annolists2:
+        obj = dict()
+        obj['name'] = f_name
+        obj['imgnum'] = 1
+        obj['rect'] = rect_value
+        obj['image_size'] = image_size
+    else:
+        num_crops = num_crops + 1
+        obj = dict()
+        obj['name'] = f_name
+        obj['imgnum'] = num_crops
+        obj['rect'] = rect_value
+        obj['image_size'] = image_size
+    return obj
+
+
+def func_crop_data_train(option, img_list, img_ids, rectidxs):
+    """
+    crop train dataset
+    """
+    save_dir = option['saveDir']
+    ref_height = option['refHeight']
+    delta_crop = option['deltaCrop']
+    b_crop_isolated = option['bCropIsolated']
+    if not os.path.isdir(save_dir):
+        os.mkdir(save_dir)
+
+    annolists2 = []
+    num_crops = 1
+    num_images = 0
+
+    for img_id, alv in enumerate(img_list):
+        log.info("==> start img_id/all: %s/%s", img_id + 1, len(img_list))
+        rect = alv['rect']
+        rect2 = deepcopy(rect)
+        name = alv['name']
+        img = imread(os.path.join(option['imageDir'], name))
+        log.debug("img :%s", alv)
+        for r_id, rect_value in enumerate(rect):
+            if r_id not in rectidxs[img_id]:
+                continue
+            log.info("==> start img_id: %s/%s r_id: %s/%s", img_id + 1, len(img_list), r_id + 1, len(rect))
+            num_images = num_images + 1
+            points = rect_value['points']
+            if points is None or not points:
+                continue
+            rp = get_rect_position(points)
+            sc, img_sc, delta_x, delta_y = get_scale_and_delta(ref_height, rect_value, img, delta_crop)
+            pos = get_position(rp, sc)
+            pos_new = get_position_new(pos, delta_x, delta_y, img_sc)
+            pos_new = update_position_new(b_crop_isolated, rect, rect2, r_id, img_list, img_id, sc, pos, ref_height,
+                                          rect_value, pos_new)
+            img_crop = np.array([img_sc[pos_new.y1:pos_new.y2, pos_new.x1:pos_new.x2, 0],
+                                 img_sc[pos_new.y1:pos_new.y2, pos_new.x1:pos_new.x2, 1],
+                                 img_sc[pos_new.y1:pos_new.y2, pos_new.x1:pos_new.x2, 2]])
+
+            img_crop = img_crop.transpose((1, 2, 0))
+            # save image
+            f_name = os.path.join(
+                save_dir, 'im' + pad_zeros(img_ids[img_id], 5) + '_' + str(r_id) + '.png')
+            f_name_t = os.path.join(
+                save_dir, 'T_' + pad_zeros(img_ids[img_id], 5) + '_' + str(r_id) + '.json')
+            log.debug('file name: %s', f_name)
+            log.debug("image shape: %s", img_crop.shape)
+            # noinspection PyTypeChecker
+            imsave(f_name, img_crop)
+            image_size = [img_crop.shape[0], img_crop.shape[1]]
+
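+            # Record the scale and crop offset so crop coordinates can later be
+            # mapped back to the original image.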
+            T = sc * np.array([[1, 0, pos_new.x1], [0, 1, pos_new.y1], [0, 0, 1]])
+            with open(f_name_t, 'w') as f:
+                f.write(json.dumps(T, default=json_default))
+
+            transform_annotation(points, pos_new, sc, rect_value)
+            anno = get_annotation(annolists2, f_name, rect_value, image_size, num_crops)
+            num_crops = num_crops + 1
+            annolists2.append(anno)
+            log.info("==> finish img_id: %s/%s r_id: %s/%s ", img_id + 1, len(img_list), r_id + 1, len(rect))
+        log.info("==> finish img_id: %s/%s", img_id + 1, len(img_list))
+    return annolists2
diff --git a/research/cv/ArtTrack/src/tool/preprocess/mat2json.py b/research/cv/ArtTrack/src/tool/preprocess/mat2json.py
new file mode 100644
index 0000000000000000000000000000000000000000..6284b0fead431f27ec21ea1f88224da694248a2d
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/preprocess/mat2json.py
@@ -0,0 +1,56 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+import os
+
+import numpy as np
+import scipy.io as scio
+
+
+def mat2json(index_mat, name, dataset_json=None, output_dir=None, index_offset=-1, stdout=False):
+    """
+    select samples from a MATLAB index mat and save them in json format
+
+    Args:
+         index_mat: path to index mat
+         name: field in index mat
+         dataset_json: path to json format dataset which contains full sample
+         output_dir: output dir
+         index_offset: offset for index mat. first index in matlab is usually 1, but in python it's 0.
+         stdout: if True, print index
+    """
+    mat = scio.loadmat(index_mat)
+    ids = mat.get(name).flatten().tolist()
+    ids = [i + index_offset for i in ids]
+    if stdout:
+        print(ids)
+    out_dataset = None
+    if dataset_json is not None:
+        with open(dataset_json, 'r') as f:
+            dataset = f.read()
+        dataset = np.array(json.loads(dataset))
+        out_dataset = dataset[ids].tolist()
+
+    if output_dir is not None:
+        output_dir = os.path.abspath(output_dir)
+        os.makedirs(output_dir, exist_ok=True)
+        s = json.dumps(ids)
+        with open(os.path.join(output_dir, '%s.json' % name), 'w') as f:
+            f.write(s)
+        if out_dataset is not None:
+            s = json.dumps(out_dataset)
+            with open(os.path.join(output_dir, '%s_dataset.json' % name), 'w') as f:
+                f.write(s)
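+
+
+# A minimal usage sketch (file and field names below are placeholders, not
+# fixed by this module):
+#
+#   mat2json('mpii/index_test.mat', 'index_test',
+#            dataset_json='out/dataset.json', output_dir='out')
+#
+# writes out/index_test.json with the zero-based indices and
+# out/index_test_dataset.json with the corresponding samples.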
diff --git a/research/cv/ArtTrack/src/tool/preprocess/pairwise_stats.py b/research/cv/ArtTrack/src/tool/preprocess/pairwise_stats.py
new file mode 100644
index 0000000000000000000000000000000000000000..164d117a812947dcd1bdbdafa1461b5629552dcd
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/preprocess/pairwise_stats.py
@@ -0,0 +1,72 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import os
+
+import numpy as np
+import scipy.io
+
+from src.dataset.coco import MSCOCO
+from src.dataset.pose import Batch
+from src.tool.decorator import process_cfg
+
+
+def remap_keys(mapping):
+    return [{'key': k, 'value': v} for k, v in mapping.items()]
+
+
+def save_stats(stats, cfg):
+    mat_stats = {"graph": [], "means": [], "std_devs": []}
+    for start in range(cfg.num_joints):
+        for end in range(cfg.num_joints):
+            if start != end:
+                joint_pair = (start, end)
+                mat_stats["graph"].append([start, end])
+                mat_stats["means"].append(stats[joint_pair]["mean"])
+                mat_stats["std_devs"].append(stats[joint_pair]["std"])
+    print(mat_stats)
+    os.makedirs(os.path.dirname(cfg.pairwise_stats_fn), exist_ok=True)
+    scipy.io.savemat(cfg.pairwise_stats_fn, mat_stats)
+
+
+# Compute pairwise statistics at reference scale
+@process_cfg
+def pairwise_stats(cfg):
+    dataset = MSCOCO(cfg)
+    dataset.set_pairwise_stats_collect(True)
+
+    num_images = dataset.num_images
+    all_pairwise_differences = {}
+
+    if cfg.dataset.mirror:
+        num_images *= 2
+
+    for k in range(num_images):
+        print('processing image {}/{}'.format(k, num_images - 1))
+
+        batch = dataset.get_item(k)
+        batch_stats = batch[Batch.data_item].pairwise_stats
+        for joint_pair in batch_stats:
+            if joint_pair not in all_pairwise_differences:
+                all_pairwise_differences[joint_pair] = []
+            all_pairwise_differences[joint_pair] += batch_stats[joint_pair]
+
+    stats = {}
+    for joint_pair in all_pairwise_differences:
+        stats[joint_pair] = {}
+        stats[joint_pair]["mean"] = np.mean(all_pairwise_differences[joint_pair], axis=0)
+        stats[joint_pair]["std"] = np.std(all_pairwise_differences[joint_pair], axis=0)
+
+    save_stats(stats, cfg)
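+
+
+# The mat file written to cfg.pairwise_stats_fn holds three aligned arrays:
+# "graph" with the (start, end) joint pairs and "means"/"std_devs" with the
+# per-pair difference statistics. A quick sanity check (assuming the same cfg)
+# could be:
+#
+#   stats = scipy.io.loadmat(cfg.pairwise_stats_fn)
+#   print(stats['graph'].shape, stats['means'].shape)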
diff --git a/research/cv/ArtTrack/src/tool/preprocess/parts.json b/research/cv/ArtTrack/src/tool/preprocess/parts.json
new file mode 100644
index 0000000000000000000000000000000000000000..1f2440689be9f31d8367d3cbd5bdc042376bc9bf
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/preprocess/parts.json
@@ -0,0 +1,30 @@
+[
+    {"id":0,"pos":[0,0]},
+    {"id":1,"pos":[0,1]},
+    {"id":2,"pos":[1,1]},
+    {"id":3,"pos":[1,2]},
+    {"id":4,"pos":[2,2]},
+    {"id":5,"pos":[3,3]},
+    {"id":6,"pos":[3,4]},
+    {"id":7,"pos":[4,4]},
+    {"id":8,"pos":[4,5]},
+    {"id":9,"pos":[5,5]},
+    {"id":10,"pos":[6,7]},
+    {"id":11,"pos":[8,9]},
+    {"id":12,"pos":[10,10]},
+    {"id":13,"pos":[11,10]},
+    {"id":14,"pos":[11,11]},
+    {"id":15,"pos":[12,11]},
+    {"id":16,"pos":[12,12]},
+    {"id":17,"pos":[13,13]},
+    {"id":18,"pos":[13,14]},
+    {"id":19,"pos":[14,14]},
+    {"id":20,"pos":[14,15]},
+    {"id":21,"pos":[15,15]},
+    {"id":22,"pos":[8,8]},
+    {"id":23,"pos":[9,9]},
+    {"id":24,"pos":[6,6]},
+    {"id":25,"pos":[7,7]},
+    {"id":26,"pos":[16,16]},
+    {"id":27,"pos":[17,17]}
+]
diff --git a/research/cv/ArtTrack/src/tool/preprocess/preprocess_single.py b/research/cv/ArtTrack/src/tool/preprocess/preprocess_single.py
new file mode 100644
index 0000000000000000000000000000000000000000..c74d2569daf6b77fde9bdc1914159092868d7244
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/preprocess/preprocess_single.py
@@ -0,0 +1,163 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+import os
+import sys
+
+import numpy as np
+import scipy.io as scio
+
+from src.log import log
+from src.tool.preprocess.crop import crop_data
+from src.tool.preprocess.utils import json_default, mpii_mat2dict
+
+np.set_printoptions(threshold=sys.maxsize)
+basedir = os.path.abspath(os.path.dirname(__file__))
+
+
+def preprocess_single(dataset_dir, dataset_name, save_dir=None, image_dir=None):
+    """
+    preprocess single person dataset
+
+    Args:
+        dataset_dir: path to dataset
+        dataset_name: path to the annotation mat file, relative to dataset_dir
+        save_dir: path to cropped images
+        image_dir: path to original images
+    """
+    if save_dir is None:
+        save_dir = os.path.join(dataset_dir, 'cropped')
+    if image_dir is None:
+        image_dir = os.path.join(dataset_dir, 'images')
+
+    p = dict()
+
+    p["bTrain"] = 1
+    p["refHeight"] = 400
+    p["deltaCrop"] = 130
+    p["bSingle"] = 1
+    p["bCropIsolated"] = 1
+    p["bMulti"] = 0
+    p["bObjposOffset"] = 1
+
+    p["datasetDir"] = dataset_dir
+    p["datasetName"] = os.path.join(p["datasetDir"], dataset_name)
+
+    p["saveDir"] = save_dir
+    p["imageDir"] = image_dir
+
+    mat = scio.loadmat(p["datasetName"], struct_as_record=False)
+    p["dataset"] = mpii_mat2dict(mat)
+
+    img_list = crop_data(p)
+    p['deltaCrop'] = 65
+    p['bSingle'] = 0
+    p['bCropIsolated'] = 0
+    img_list2 = crop_data(p)
+
+    img_list = img_list + img_list2
+
+    img_list_full_name = os.path.join(p['saveDir'], 'annolist-full-h' + str(p['refHeight']) + '.json')
+    with open(img_list_full_name, 'w') as f:
+        f.write(json.dumps(img_list, default=json_default))
+
+    prepare_training_data(img_list, p['saveDir'])
+
+
+def prepare_training_data(img_list, saveDir):
+    """
+    generate final dataset file
+    """
+    zero_indexed_joints_ids = True
+    pidxs = [0, 2, 4, 5, 7, 9, 12, 14, 16, 17, 19, 21, 22, 23]
+
+    num_joints = len(pidxs)
+    with open(os.path.join(basedir, 'parts.json'), 'r') as f:
+        parts = json.load(f)
+
+    if not os.path.exists(saveDir):
+        os.makedirs(saveDir)
+
+    num_images = len(img_list)
+    channels = 3
+    dataset = []
+
+    for imgidx, imgv in enumerate(img_list):
+        if (imgidx + 1) % 100 == 0:
+            log.info('processing image %s/%s', imgidx + 1, num_images)
+
+        filename = imgv['name']
+
+        joints = np.zeros((num_joints, 3))
+        all_joints = []
+        rectv = imgv['rect']
+
+        joint_list = get_anno_joints(rectv, pidxs, parts)
+
+        n = 0
+        for j in range(0, num_joints):
+            jnt = joint_list[j, :]
+            if not np.isnan(jnt[0]):
+                joints[n, :] = np.concatenate([[j], jnt])
+                n = n + 1
+
+        joints = joints[:n, :]
+        if not zero_indexed_joints_ids:
+            joints[:, 0] = joints[:, 0] + 1
+        all_joints.append(joints)
+
+        entry = dict()
+        entry['image'] = filename
+        entry['size'] = np.concatenate([[channels], imgv['image_size']])
+        entry['joints'] = all_joints
+        dataset.append(entry)
+    os.makedirs(saveDir, exist_ok=True)
+    out_filename = os.path.join(saveDir, 'dataset.json')
+    log.debug("Generated dataset definition file:%s", out_filename)
+    with open(out_filename, 'w') as f:
+        f.write(json.dumps(dataset, default=json_default))
+
+
+def get_anno_joints(rect, pidxs, parts):
+    """
+    get annotation joints
+    """
+    num_joints = len(pidxs)
+    joints = np.full((num_joints, 2), np.nan)
+    points = rect['points']
+    for j, pidx in enumerate(pidxs):
+        annopoint_idxs = parts[pidx]['pos']
+        assert annopoint_idxs[0] == annopoint_idxs[1]
+        pt, _ = get_annopoint_by_id(points, annopoint_idxs[0])
+        if pt is not None:
+            joints[j, :] = np.array([pt['x'], pt['y']])
+    return joints
+
+
+def get_annopoint_by_id(points, idx):
+    """
+    get annotation point by id
+    """
+    point = None
+    ind = None
+    for i, v in enumerate(points):
+        if v['id'] == idx:
+            point = v
+            ind = i
+            return (point, ind)
+    return (point, ind)
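+
+
+# A minimal usage sketch (paths are placeholders):
+#
+#   preprocess_single('/data/mpii', 'mpii_human_pose_v1_u12_1.mat')
+#
+# runs crop_data twice with different crop settings, saves the cropped images
+# and their transforms under <dataset_dir>/cropped by default, and writes
+# dataset.json there via prepare_training_data.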
diff --git a/research/cv/ArtTrack/src/tool/preprocess/split.py b/research/cv/ArtTrack/src/tool/preprocess/split.py
new file mode 100644
index 0000000000000000000000000000000000000000..39db8f9419d052e3449e3e9e286bbfac9688d424
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/preprocess/split.py
@@ -0,0 +1,42 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+import os
+
+import numpy as np
+
+
+def split(path, out, test_ratio=0.2):
+    """
+    split a dataset into train and eval subsets
+
+    Args:
+        path: path to the json dataset containing all samples
+        out: output directory
+        test_ratio: fraction of samples reserved for evaluation
+    """
+    with open(path, 'r') as f:
+        dataset = json.load(f)
+        dataset = np.array(dataset)
+    np.random.seed(1256)
+    dataset_len = len(dataset)
+    shuffled_indices = np.random.permutation(dataset_len)
+    test_size = int(dataset_len * test_ratio)
+    test_indices = shuffled_indices[:test_size]
+    train_indices = shuffled_indices[test_size:]
+    with open(os.path.join(out, "train-dataset.json"), 'w') as f:
+        f.write(json.dumps(dataset[train_indices].tolist()))
+    with open(os.path.join(out, "eval-dataset.json"), 'w') as f:
+        f.write(json.dumps(dataset[test_indices].tolist()))
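+
+
+# Example (placeholder paths): split a full dataset.json into train/eval
+# subsets with a fixed seed:
+#
+#   split('out/dataset.json', 'out', test_ratio=0.2)
+#
+# writes out/train-dataset.json and out/eval-dataset.json.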
diff --git a/research/cv/ArtTrack/src/tool/preprocess/tf2ms.py b/research/cv/ArtTrack/src/tool/preprocess/tf2ms.py
new file mode 100644
index 0000000000000000000000000000000000000000..8565910e16fced100dfcab43c300ddedecf69cab
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/preprocess/tf2ms.py
@@ -0,0 +1,59 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+import os
+import sys
+
+import numpy as np
+import tensorflow.compat.v1 as tf
+from tensorflow.python.framework.errors_impl import NotFoundError
+from mindspore import Parameter
+from mindspore.train.serialization import save_checkpoint
+
+from src.log import log
+
+
+def tf2ms(tf_checkpoint, out_path, map_path):
+    """
+    convert a tensorflow checkpoint to a mindspore checkpoint
+
+    Args:
+        tf_checkpoint: path to the tensorflow checkpoint
+        out_path: output path for the mindspore checkpoint
+        map_path: path to a json file mapping tensorflow parameter names to mindspore parameter names
+    """
+    reader = tf.train.NewCheckpointReader(tf_checkpoint)
+    new_params_list = []
+    with open(map_path) as f:
+        s = f.read()
+        param = json.loads(s)
+    for k, v in param.items():
+        param_dict = {}
+        try:
+            parameter = reader.get_tensor(k)
+        except NotFoundError:
+            log.warning("not found %s skip", k)
+            continue
+        if len(parameter.shape) == 4:
+            parameter = np.transpose(parameter, axes=[3, 2, 0, 1])
+        elif len(parameter.shape) != 1:
+            log.error('unknown shape %s for %s', parameter.shape, k)
+            sys.exit(1)
+        log.info('convert %s -> %s', k, v)
+
+        param_dict['name'] = v
+        param_dict['data'] = Parameter(parameter, requires_grad=False)
+        new_params_list.append(param_dict)
+    os.makedirs(os.path.dirname(os.path.abspath(out_path)), exist_ok=True)
+    save_checkpoint(new_params_list, out_path)
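+
+
+# Example (placeholder paths): convert a TensorFlow checkpoint given a json
+# map of {tensorflow_parameter_name: mindspore_parameter_name} pairs:
+#
+#   tf2ms('pretrained/resnet_v1_101.ckpt', 'out/resnet101.ckpt',
+#         'config/tf2ms_map.json')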
diff --git a/research/cv/ArtTrack/src/tool/preprocess/utils.py b/research/cv/ArtTrack/src/tool/preprocess/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dc5eaa48fe310f7c3f6b8758a1e6563f267930c
--- /dev/null
+++ b/research/cv/ArtTrack/src/tool/preprocess/utils.py
@@ -0,0 +1,190 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import json
+
+import numpy as np
+
+from src.log import log
+
+
+def util_set_scale(img_list, ref_height=200):
+    """
+    set each rect's scale from its head size relative to ref_height
+    """
+    head_height_ratio = 1.0 / 8
+
+    for _, v in enumerate(img_list):
+        rect = getattr(v, 'rect', None)
+        if rect is not None and rect:
+            for _, rv in enumerate(rect):
+                points = getattr(rv, "points", None)
+                if points is not None and points:
+                    head_size = util_get_head_size(rv)
+                    sc = ref_height * head_height_ratio / head_size
+                    assert 100 > sc > 0.01
+                    points.scale = 1 / sc
+            v['rect'] = rect
+
+    return img_list
+
+
+def util_get_head_size(rect):
+    sc_bias = 0.6
+    return sc_bias * np.linalg.norm(np.array([rect['x2'], rect['y2']]) - np.array([rect['x1'], rect['y1']]))
+
+
+def pad_zeros(s, npad):
+    n = len(str(s))
+
+    assert n <= npad
+    return '0' * (npad - n) + str(s)
+
+
+class NumpyEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, np.integer):
+            return int(obj)
+        if isinstance(obj, np.uint8):
+            return int(obj)
+        if isinstance(obj, np.floating):
+            return float(obj)
+        return json.JSONEncoder.default(self, obj)
+
+
+def set_rect_field(rect_dict, rv, field):
+    if hasattr(rv, field):
+        rect_dict[field] = getattr(rv, field)[0, 0]
+
+
+def set_rect_scale(rect_dict, rv):
+    if hasattr(rv, 'scale'):
+        scale = rv.scale.flatten()
+        if scale.size > 0:
+            rect_dict['scale'] = scale[0]
+
+
+def set_rect_vidx(rect_dict, rv):
+    if hasattr(rv, 'vidx'):
+        rect_dict['vidx'] = rv.vidx - 1
+
+
+def set_rect_frame_sec(rect_dict, rv):
+    if hasattr(rv, 'frame_sec'):
+        rect_dict['frame_sec'] = rv.frame_sec
+
+
+def set_rect_objpos(rect_dict, rv):
+    if hasattr(rv, 'objpos'):
+        objpos = rv.objpos.flatten()
+        if rect_dict.get('scale') is not None:
+            rect_dict['objpos_x'] = objpos[0].x[0, 0]
+            rect_dict['objpos_y'] = objpos[0].y[0, 0]
+
+
+def set_rect_points_list(rect_dict, rv):
+    rect_points = getattr(rv, 'annopoints', None)
+    if rect_points is None or not rect_points:
+        points_list = []
+        points = []
+    else:
+        rect_points = rect_points[0, 0]
+        points_list = []
+        points = rect_points.point[0]
+    for f_p in points:
+        pp = {'x': f_p.x[0, 0],
+              'y': f_p.y[0, 0],
+              'id': f_p.id[0, 0],
+              }
+        if hasattr(f_p, 'is_visible'):
+            visible = f_p.is_visible.flatten()
+            if visible.size > 0:
+                pp['is_visible'] = visible[0]
+        points_list.append(pp)
+    rect_dict['points'] = points_list
+
+
+def set_act_name(act_dict, field, name):
+    if name.size > 0:
+        act_dict[field] = name[0]
+
+
+def mpii_mat2dict(mpii):
+    """
+    raw mat to dict
+    """
+    mpii = mpii['RELEASE'][0, 0]
+    mpii.annolist = mpii.annolist.flatten()
+    mpii.img_train = mpii.img_train.flatten()
+    mpii.act = mpii.act.flatten()
+    mpii.single_person = mpii.single_person.flatten()
+    mpii.video_list = mpii.video_list.flatten()
+    img_list = []
+    for imgidx, alv in enumerate(mpii.annolist):
+        img_train = mpii.img_train[imgidx]
+        act = mpii.act[imgidx]
+        single_person = mpii.single_person[imgidx].flatten()
+        name = alv.image[0, 0].name[0]
+
+        # annorect
+        rect = alv.annorect.flatten()
+        rect_list = list()
+        for _, rv in enumerate(rect):
+            rect_dict = dict()
+            set_rect_field(rect_dict, rv, 'x1')
+            set_rect_field(rect_dict, rv, 'y1')
+            set_rect_field(rect_dict, rv, 'x2')
+            set_rect_field(rect_dict, rv, 'y2')
+            set_rect_scale(rect_dict, rv)
+            set_rect_vidx(rect_dict, rv)
+            set_rect_frame_sec(rect_dict, rv)
+            set_rect_objpos(rect_dict, rv)
+            set_rect_points_list(rect_dict, rv)
+            rect_list.append(rect_dict)
+
+        single_person = [i - 1 for i in single_person]
+        act_dict = dict()
+        act_name = act.act_name.flatten()
+        cat_name = act.cat_name.flatten()
+        set_act_name(act_dict, 'act_name', act_name)
+        set_act_name(act_dict, 'cat_name', cat_name)
+        act_dict['act_id'] = act.act_id[0, 0]
+        if len(act_name) > 1 or len(cat_name) > 1:
+            log.debug("%s %s", act_name, cat_name)
+
+        value = dict()
+        value['name'] = name
+        value['rect'] = rect_list
+        value['img_train'] = img_train
+        value['single_person'] = single_person
+        value['act'] = act_dict
+        img_list.append(value)
+
+    video_list = [i[0] for i in mpii.video_list]
+    result = {"img_list": img_list,
+              "video_list": video_list
+              }
+    return result
+
+
+def json_default(obj):
+    """
+    numpy to json
+    """
+    if type(obj).__module__ == np.__name__:
+        if isinstance(obj, np.ndarray):
+            return obj.tolist()
+        return obj.item()
+    return obj
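+
+
+# json_default is meant to be passed as json.dumps(..., default=json_default)
+# so numpy scalars and arrays serialize cleanly, e.g.:
+#
+#   json.dumps({'size': np.array([3, 200, 150])}, default=json_default)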
diff --git a/research/cv/ArtTrack/train.py b/research/cv/ArtTrack/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..538dc3385d028df14e640b5d46448007d0c3d235
--- /dev/null
+++ b/research/cv/ArtTrack/train.py
@@ -0,0 +1,85 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import os
+import sys
+
+import mindspore
+import mindspore.context as ctx
+from mindspore import Model, nn
+from mindspore.communication import get_group_size, get_rank, init
+from mindspore.train.callback import CheckpointConfig, LossMonitor, ModelCheckpoint, TimeMonitor
+
+from src.args_util import command, create_arg_parser, TARGET_COCO_MULTI, TARGET_MPII_SINGLE
+from src.dataset.util import create_dataset
+from src.model.pose import PoseNet, PoseNetTotalLoss
+from src.tool.decorator import process_cfg
+
+
+@command
+def train(parser, args, cfg):
+    if args.target == TARGET_MPII_SINGLE:
+        from src.dataset import MPII
+        start_train(cfg, MPII)
+    elif args.target == TARGET_COCO_MULTI:
+        from src.dataset import MSCOCO
+        start_train(cfg, MSCOCO)
+    else:
+        parser.print_help()
+
+
+@process_cfg
+def start_train(cfg, dataset_class):
+    """
+    start train
+    """
+    ctx.set_context(**cfg.context)
+    group_size = None
+    rank_id = None
+    if hasattr(cfg, 'parallel_context') and cfg.parallel_context is not None:
+        init()
+        rank_id = get_rank()
+        group_size = get_group_size()
+        ctx.set_auto_parallel_context(device_num=group_size, **cfg.parallel_context)
+        ctx.set_auto_parallel_context(parameter_broadcast=True)
+    dataset = dataset_class(cfg)
+    dataset = create_dataset(cfg.dataset.type, dataset, cfg.dataset.shuffle, cfg.dataset.batch_size,
+                             parallel=cfg.dataset.parallel, train=True, num_shards=group_size, rank_id=rank_id)
+    net = PoseNet(cfg=cfg)
+    loss = PoseNetTotalLoss(net, cfg)
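+    # piecewise_constant_lr expects (milestone, learning_rates), so cfg.multi_step
+    # is assumed to hold [learning_rates, milestones] in that order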
+    optimizer = nn.SGD(loss.trainable_params(),
+                       learning_rate=nn.dynamic_lr.piecewise_constant_lr(cfg.multi_step[1], cfg.multi_step[0]))
+    train_net = nn.TrainOneStepCell(loss, optimizer)
+    train_net.set_train()
+    if hasattr(cfg, 'load_ckpt') and os.path.exists(cfg.load_ckpt):
+        mindspore.load_checkpoint(cfg.load_ckpt, net=train_net)
+    model = Model(train_net)
+    steps_per_epoch = dataset.get_dataset_size()
+    ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch, keep_checkpoint_max=10)
+    ckpt_dir = cfg.get('ckpt_dir', 'ckpt')
+    ckpt_dir = ckpt_dir if rank_id is None else os.path.join(ckpt_dir, 'rank_%s' % str(rank_id))
+    ckpt_cb = ModelCheckpoint(prefix=cfg.get('ckpt_prefix', 'arttrack'), directory=ckpt_dir,
+                              config=ckpt_config)
+    callbacks = [TimeMonitor(data_size=steps_per_epoch), LossMonitor(), ckpt_cb]
+    model.train(cfg.epoch, dataset, callbacks=callbacks, dataset_sink_mode=False)
+
+
+def main():
+    parser = create_arg_parser()['train']
+    args = parser.parse_args(sys.argv[1:])
+    train(parser, args)
+
+
+if __name__ == '__main__':
+    main()
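+
+# Typical invocation (the exact flags are defined in src/args_util.py; this is
+# an assumption consistent with the configs shipped in config/):
+#
+#   python train.py mpii_single --config config/mpii_train_single_gpu.yaml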