diff --git a/official/cv/nima/README.md b/official/cv/nima/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..735371e6ddba1e508603f520f102c30b447e3ebc
--- /dev/null
+++ b/official/cv/nima/README.md
@@ -0,0 +1,341 @@
+# 鐩綍
+
+<!-- TOC -->
+
+- [鐩綍](#鐩綍)
+- [ResNet鎻忚堪](#resnet鎻忚堪)
+    - [姒傝堪](#姒傝堪)
+    - [璁烘枃](#璁烘枃)
+- [妯″瀷鏋舵瀯](#妯″瀷鏋舵瀯)
+- [鏁版嵁闆哴(#鏁版嵁闆�)
+- [鐗规€(#鐗规€�)
+    - [娣峰悎绮惧害](#娣峰悎绮惧害)
+- [鐜瑕佹眰](#鐜瑕佹眰)
+- [蹇€熷叆闂╙(#蹇€熷叆闂�)
+- [鑴氭湰璇存槑](#鑴氭湰璇存槑)
+    - [鑴氭湰鍙婃牱渚嬩唬鐮乚(#鑴氭湰鍙婃牱渚嬩唬鐮�)
+    - [鑴氭湰鍙傛暟](#鑴氭湰鍙傛暟)
+    - [璁粌杩囩▼](#璁粌杩囩▼)
+        - [鐢ㄦ硶](#鐢ㄦ硶)
+            - [Ascend澶勭悊鍣ㄧ幆澧冭繍琛宂(#ascend澶勭悊鍣ㄧ幆澧冭繍琛�)
+            - [杩愯鍙傛暟鏈嶅姟鍣ㄦā寮忚缁僝(#杩愯鍙傛暟鏈嶅姟鍣ㄦā寮忚缁�)
+            - [璁粌鏃舵帹鐞哴(#璁粌鏃舵帹鐞�)
+        - [缁撴灉](#缁撴灉)
+    - [璇勪及杩囩▼](#璇勪及杩囩▼)
+        - [鐢ㄦ硶](#鐢ㄦ硶-1)
+            - [Ascend澶勭悊鍣ㄧ幆澧冭繍琛宂(#ascend澶勭悊鍣ㄧ幆澧冭繍琛�-1)
+        - [缁撴灉](#缁撴灉-1)
+    - [鎺ㄧ悊杩囩▼](#鎺ㄧ悊杩囩▼)
+        - [瀵煎嚭MindIR](#瀵煎嚭mindir)
+        - [鍦ˋscend310鎵ц鎺ㄧ悊](#鍦╝scend310鎵ц鎺ㄧ悊)
+        - [缁撴灉](#缁撴灉-2)
+- [妯″瀷鎻忚堪](#妯″瀷鎻忚堪)
+    - [鎬ц兘](#鎬ц兘)
+        - [璇勪及鎬ц兘](#璇勪及鎬ц兘)
+            - [AVA_Dataset涓婄殑ResNet50](#cifar-10涓婄殑resnet50)
+- [闅忔満鎯呭喌璇存槑](#闅忔満鎯呭喌璇存槑)
+- [ModelZoo涓婚〉](#modelzoo涓婚〉)
+
+<!-- /TOC -->
+
+# ResNet Description
+
+## Overview
+
+Residual Neural Network (ResNet) was proposed by Kaiming He and four other researchers at Microsoft Research. Using ResNet units, they successfully trained a 152-layer network and won the ILSVRC 2015 championship. ResNet achieved a top-5 error rate of 3.57% with fewer parameters than VGGNet, a remarkable result. Traditional convolutional and fully connected networks suffer, to varying degrees, from information loss, as well as from vanishing or exploding gradients that cause the training of very deep networks to fail. ResNet alleviates these problems to some extent: by passing the input directly through to the output, it preserves the integrity of the information, so the network only has to learn the residual between input and output, which simplifies the learning target and reduces its difficulty. ResNet's structure greatly accelerates the training of neural networks and substantially improves model accuracy.
+
+The following is an example of training ResNet50 on the AVA_Dataset with MindSpore. For ResNet50, see [paper 1](https://arxiv.org/pdf/1512.03385.pdf).
+
+## Papers
+
+1. [Paper](https://arxiv.org/pdf/1512.03385.pdf): Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Deep Residual Learning for Image Recognition"
+
+2. [Paper](https://arxiv.org/abs/1709.05424): H. Talebi and P. Milanfar, "NIMA: Neural Image Assessment"
+
+# Model Architecture
+
+The overall network architecture of ResNet is described in:
+[link](https://arxiv.org/pdf/1512.03385.pdf)
+
+Pre-trained model:
+[link](https://download.mindspore.cn/model_zoo/r1.3/resnet50_ascend_v130_imagenet2012_official_cv_bs256_top1acc76.97__top5acc_93.44/)
+
+# Dataset
+
+## Download the dataset and split it into training and test sets
+
+Dataset used: [AVA_Dataset](<https://github.com/mtobeiyf/ava_downloader/tree/master/AVA_dataset>)
+
+Labels used: [AVA.txt](https://github.com/mtobeiyf/ava_downloader/blob/master/AVA_dataset/AVA.txt)
+
+Once the data is ready, run the following python command to split the dataset:
+
+```text
+python ./src/dividing_label.py --config_path=~/config.yaml
+# set data_path, label_path, val_label_path and train_label_path in the config file
+```
+
+- Dataset size: 255,502 color images
+    - Training set: 229,905 images
+    - Test set: 25,597 images
+- Data format: JPEG images
+
+# Features
+
+## Mixed Precision
+
+[Mixed precision](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/enable_mixed_precision.html) training uses both single-precision and half-precision data to speed up the training of deep neural networks while preserving the accuracy that single-precision training achieves. It increases computing speed and reduces memory usage, making it possible to train larger models, or with larger batch sizes, on specific hardware.
+Taking the FP16 operator as an example, if the input data type is FP32, the MindSpore backend automatically reduces the precision to process the data. Users can enable the INFO log and search for "reduce precision" to see which operators had their precision reduced.
+
+# Environment Requirements
+
+- Hardware (Ascend/GPU)
+    - Set up the hardware environment with Ascend or GPU processors.
+- Framework
+    - [MindSpore](https://www.mindspore.cn/install/en)
+- For more information, please check the resources below:
+    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/zh-CN/master/index.html)
+    - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/master/index.html)
+
+# Quick Start
+
+After installing MindSpore via the official website, you can follow the steps below for training and evaluation:
+
+- Running on Ascend
+
+```text
+# run the training example
+python train.py --config_path=./config.yaml >train.log 2>&1 &
+
+# run distributed training
+bash ./scripts/run_train_ascend.sh ~/hccl_8p.json
+
+# run the evaluation example
+python eval.py --config_path ./config.yaml >eval.log 2>&1 &
+
+```
+
+To train the model on ModelArts, refer to the [official ModelArts guide](https://support.huaweicloud.com/modelarts/)
+and proceed as follows:
+
+```python
+# Example of running distributed training on ModelArts:
+# (1) Set "enable_modelarts=True" and "is_distributed=True" in the config.yaml file, and set the other
+#     parameters, e.g. data_path, output_path, train_data_path, val_data_path, checkpoint_path, etc.
+# (2) Set the code directory to "~/NIMA/" on the ModelArts web UI.
+# (3) Set the startup file to "~/NIMA/train.py" on the ModelArts web UI.
+# (4) Add the run parameter config_path = "~/NIMA/config.yaml" on the ModelArts web UI.
+# (5) Set the "Job log path" on the ModelArts web UI.
+# (6) Start the training job.
+
+# Example of running inference on ModelArts
+# (1) Upload the trained model to the corresponding location in the OBS bucket.
+# (2) Set "enable_modelarts=True" in the config.yaml file, and set the following parameters:
+#     data_path, val_data_path, ckpt_file
+# (3) Set the code directory to "~/NIMA/" on the ModelArts web UI.
+# (4) Set the startup file to "eval.py" on the ModelArts web UI.
+# (5) Add the run parameter config_path = "~/config.yaml" on the ModelArts web UI.
+# (6) Set the "Job log path" on the ModelArts web UI.
+# (7) Start the inference job.
+```
+
+# Script Description
+
+## Script and Sample Code
+
+```shell
+.
+├── NIMA
+  ├── README.md                # documentation
+  ├── ascend310_infer          # source code for Ascend 310 inference
+  ├── model                    # pre-trained model
+    ├── ascend.ckpt
+  ├── scripts
+    ├── run_eval.sh            # shell script for evaluation on Ascend 910
+    ├── run_infer_310.sh       # shell script for inference on Ascend 310
+    ├── run_train_ascend.sh    # shell script for training on Ascend 910
+  ├── src
+    ├── resnet.py              # backbone network architecture
+    ├── test_data.py           # generate the dataset for 310 inference
+    ├── config.py              # parameter configuration
+    ├── device_adapter.py      # device adaptation
+    ├── dividing_label.py      # dataset split
+    ├── callback.py            # callbacks
+    ├── dataset.py             # data processing
+    ├── metric.py              # loss and metrics
+  ├── eval.py                  # evaluation script
+  ├── export.py                # export checkpoint files to MindIR
+  ├── postprocess.py           # post-processing script for 310 inference
+  ├── train.py                 # training script
+  ├── AVA_train.txt            # training set labels
+  ├── AVA_test.txt             # test set labels
+
+```
+
+## Script Parameters
+
+```python
+"device_target": "Ascend"               # target device
+"batch_size": 256                       # training batch size
+"epoch_size": 50                        # total number of training epochs
+"num_parallel_workers": 16              # number of worker processes
+"learning_rate": 0.001                  # learning rate
+"momentum": 0.95                        # momentum
+"weight_decay": 0.001                   # weight decay
+"bf_crop_size": 256                     # image size before cropping
+"image_size": 224                       # image size actually fed to the network
+"train_label_path": "AVA_train.txt"     # absolute path of the training set labels
+"val_label_path": "AVA_test.txt"        # absolute path of the test set labels
+"keep_checkpoint_max": 10               # maximum number of checkpoints to keep
+"checkpoint_path": "./resnet50.ckpt"    # absolute path of the pre-trained model
+"ckpt_save_dir": "./ckpt/"              # directory for saving models
+"is_distributed": False                 # whether to use distributed training, default: False
+"enable_modelarts": False               # whether to train on ModelArts, default: False
+"output_path": "./"                     # on ModelArts, ckpt_save_dir is copied to this bucket path
+
+```
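+
+Because `src/config.py` turns every top-level key of `config.yaml` into an `argparse` flag, any of these parameters can also be overridden on the command line, for example (illustrative values):
+
+```text
+python train.py --config_path=./config.yaml --batch_size=128 --learning_rate=0.0005
+```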
+
+## 璁粌杩囩▼
+
+### 鐢ㄦ硶
+
+#### Ascend澶勭悊鍣ㄧ幆澧冭繍琛�
+
+```text
+# 鍗曟満璁粌
+python train.py --config_path=./config.yaml >train.log
+```
+
+鍙寚瀹歚config.yaml`涓殑`device_id`
+
+杩愯涓婅堪python鍛戒护鍚庯紝鎮ㄥ彲浠ラ€氳繃`train.log`鏂囦欢鏌ョ湅缁撴灉
+
+```text
+# 鍒嗗竷寮忚缁�
+Usage锛歜ash scripts/run_train_ascend.sh [RANK_TABLE_FILE] [CONFIG_PATH]
+#example: bash ./scripts/run_train_ascend.sh ~/hccl_8p.json ~/config.yaml
+```
+
+鍒嗗竷寮忚缁冮渶瑕佹彁鍓嶅垱寤篔SON鏍煎紡鐨凥CCL閰嶇疆鏂囦欢銆�
+
+鍏蜂綋鎿嶄綔锛屽弬瑙乕hccn_tools](https://gitee.com/mindspore/mindspore/tree/master/model_zoo/utils/hccl_tools)涓殑璇存槑銆�
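+
+For reference, a single-server 8-card rank table has roughly the following shape (a sketch only: the server ID and device IPs below are placeholders, and the real file should be generated with the hccn_tools scripts):
+
+```text
+{
+    "version": "1.0",
+    "server_count": "1",
+    "server_list": [{
+        "server_id": "10.0.0.1",
+        "device": [
+            {"device_id": "0", "device_ip": "192.168.100.101", "rank_id": "0"},
+            {"device_id": "1", "device_ip": "192.168.101.101", "rank_id": "1"},
+            ...one entry per card, up to "device_id": "7"...
+        ],
+        "host_nic_ip": "reserve"
+    }],
+    "status": "completed"
+}
+```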
+
+璁粌缁撴灉淇濆瓨鍦ㄧず渚嬭矾寰勪腑锛屾枃浠跺す鍚嶇О浠モ€渢rain鈥濇垨鈥渢rain_parallel鈥濆紑澶淬€傛偍鍙湪姝よ矾寰勪笅鐨勬棩蹇椾腑鎵惧埌妫€鏌ョ偣鏂囦欢浠ュ強缁撴灉銆�
+
+杩愯鍗曞崱鐢ㄤ緥鏃跺鏋滄兂鏇存崲杩愯鍗″彿锛屽彲浠ラ€氳繃閰嶇疆鐜涓缃甡device_id=x`鎴栬€呭湪context涓缃� `device_id=x`鎸囧畾鐩稿簲鐨勫崱鍙枫€�
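+
+A minimal sketch of the context variant (the same `context` API that `eval.py` uses):
+
+```python
+import mindspore.context as context
+
+# run on card 2 instead of the default card 0
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=2)
+```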
+
+### Results
+
+```text
+# distributed training results (8P)
+epoch: 1 step: 898, loss is 0.08514725
+epoch: 2 step: 898, loss is 0.072653964
+epoch: 3 step: 898, loss is 0.06939027
+epoch: 4 step: 898, loss is 0.087793864
+epoch: 5 step: 898, loss is 0.08969345
+...
+```
+
+## Evaluation Process
+
+### Usage
+
+#### Running on Ascend
+
+```text
+# run the evaluation example
+Usage: bash run_eval.sh [CONFIG_PATH]
+# example: bash scripts/run_eval.sh config.yaml
+```
+
+Simply set `data_path`, `val_data_path` and `ckpt_file` in the configuration file `config.yaml`.
+
+### Results
+
+The evaluation result is saved in the example file `eval.log`. You can find the following result in the log:
+
+```bash
+SRCC: 0.657146300995645
+```
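+
+SRCC is the Spearman rank correlation between the predicted and ground-truth mean scores. A sketch of the computation done in `eval.py`, with `pred_dist` and `gt_dist` as hypothetical stand-ins for the (N, 10) score distributions:
+
+```python
+import numpy as np
+import scipy.stats
+
+# hypothetical stand-ins for the (N, 10) predicted / ground-truth distributions
+pred_dist = np.random.dirichlet(np.ones(10), size=4)
+gt_dist = np.random.dirichlet(np.ones(10), size=4)
+
+score_values = np.arange(1, 11)                       # the ten possible scores 1..10
+pred_mean = np.sum(pred_dist * score_values, axis=1)  # mean of each predicted distribution
+gt_mean = np.sum(gt_dist * score_values, axis=1)      # mean of each ground-truth distribution
+print('SRCC:', scipy.stats.spearmanr(pred_mean, gt_mean)[0])
+```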
+
+## Inference Process
+
+### [Export MindIR](#contents)
+
+Prepare the data:
+
+```shell
+python ./src/test_data.py --config_path=config.yaml
+```
+
+Make sure the `data_path` and `val_data_path` paths are correct.
+Running this command generates the dataset for 310 inference according to `val_data_path`.
+
+Export the MindIR model:
+
+```shell
+python export.py --config_path=config.yaml >export.log
+```
+
+Simply set `ckpt_file` and `file_name` in the config file.
+
+### Inference on Ascend 310
+
+Before running inference, the MindIR file must be exported with the `export.py` script. The following shows an example of running inference with a MindIR model.
+Currently, only inference with batch_size 1 is supported.
+
+```shell
+# Ascend310 inference
+bash ./scripts/run_infer_310.sh [MODEL_PATH] [VAL_DATA_PATH] [DEVICE_ID]
+# example: bash ./scripts/run_infer_310.sh  ~/model/NIMA.mindir ~/test_data/ 0
+```
+
+- `DEVICE_ID` is optional; the default value is 0.
+
+### Results
+
+```shell
+python ./postprocess.py --config_path=config.yaml &> acc.log
+```
+
+The inference result is saved in the current execution path of the script; you can find the following accuracy result in `acc.log`.
+
+```shell
+cat acc.log
+
+SRCC: 0.6571463000995645.
+```
+
+# Model Description
+
+## Performance
+
+### Evaluation Performance
+
+#### ResNet50 on AVA_Dataset
+
+| Parameters                 | Ascend 910                                                   |
+| -------------------------- | ------------------------------------------------------------ |
+| Model version              | ResNet50                                                     |
+| Resource                   | Ascend 910; CPU 2.60GHz, 192 cores; memory 720G; OS Euler2.8 |
+| Uploaded date              | 2021-11-19                                                   |
+| MindSpore version          | 1.3.0                                                        |
+| Dataset                    | AVA_Dataset                                                  |
+| Training parameters        | epoch=50, steps per epoch=898, batch_size=256                |
+| Optimizer                  | SGD                                                          |
+| Loss function              | EmdLoss (earth mover's distance)                             |
+| Outputs                    | probability distribution                                     |
+| Loss                       | 0.05819133                                                   |
+| Speed                      | 356 ms/step (8 cards)                                        |
+| Total time                 | 174 minutes                                                  |
+| Parameters (M)             | 25.57                                                        |
+| Checkpoint for fine-tuning | 195M (.ckpt file)                                            |
+| Configuration file         | [link](https://gitee.com/mindspore/models/blob/master/official/cv/nima/config.yaml) |
+
+# Description of Random Situation
+
+`dividing_label.py` sets random.seed(10), and `train.py` likewise sets set_seed(10), as sketched below.
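+
+In code, the two settings look like this (mirroring `src/dividing_label.py` and the statement about `train.py` above):
+
+```python
+import random
+from mindspore.common import set_seed
+
+random.seed(10)   # fixes the train/test split in dividing_label.py
+set_seed(10)      # fixes MindSpore's global seed in train.py
+```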
+
+# ModelZoo Homepage
+
+Please check the official [homepage](https://gitee.com/mindspore/mindspore/tree/master/model_zoo).
diff --git a/official/cv/nima/ascend310_infer/CMakeLists.txt b/official/cv/nima/ascend310_infer/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ee3c85447340e0449ff2b70ed24f60a17e07b2b6
--- /dev/null
+++ b/official/cv/nima/ascend310_infer/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.14.1)
+project(Ascend310Infer)
+add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -std=c++17 -Werror -Wall -fPIE -Wl,--allow-shlib-undefined")
+set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR}/)
+option(MINDSPORE_PATH "mindspore install path" "")
+include_directories(${MINDSPORE_PATH})
+include_directories(${MINDSPORE_PATH}/include)
+include_directories(${PROJECT_SRC_ROOT})
+find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib)
+file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*)
+
+add_executable(main src/main.cc src/utils.cc)
+target_link_libraries(main ${MS_LIB} ${MD_LIB} gflags)
diff --git a/official/cv/nima/ascend310_infer/build.sh b/official/cv/nima/ascend310_infer/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..770a8851efade7f352039fc8665d307ae1abbb00
--- /dev/null
+++ b/official/cv/nima/ascend310_infer/build.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ ! -d out ]; then
+  mkdir out
+fi
+cd out || exit
+cmake .. \
+    -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
+make
diff --git a/official/cv/nima/ascend310_infer/inc/utils.h b/official/cv/nima/ascend310_infer/inc/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..abeb8fcbf11a042e6fefafa5868166d975e44dfb
--- /dev/null
+++ b/official/cv/nima/ascend310_infer/inc/utils.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_INFERENCE_UTILS_H_
+#define MINDSPORE_INFERENCE_UTILS_H_
+
+#include <sys/stat.h>
+#include <dirent.h>
+#include <vector>
+#include <string>
+#include <memory>
+#include "include/api/types.h"
+
+std::vector<std::string> GetAllFiles(std::string_view dirName);
+DIR *OpenDir(std::string_view dirName);
+std::string RealPath(std::string_view path);
+mindspore::MSTensor ReadFileToTensor(const std::string &file);
+int WriteResult(const std::string& imageFile, const std::vector<mindspore::MSTensor> &outputs);
+#endif
diff --git a/official/cv/nima/ascend310_infer/src/main.cc b/official/cv/nima/ascend310_infer/src/main.cc
new file mode 100644
index 0000000000000000000000000000000000000000..c6b393bb8bfef3505ebfcd33cb7e562c326e2304
--- /dev/null
+++ b/official/cv/nima/ascend310_infer/src/main.cc
@@ -0,0 +1,171 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/time.h>
+#include <gflags/gflags.h>
+#include <dirent.h>
+#include <iostream>
+#include <string>
+#include <algorithm>
+#include <iosfwd>
+#include <vector>
+#include <fstream>
+#include <sstream>
+
+#include "../inc/utils.h"
+#include "include/dataset/execute.h"
+#include "include/dataset/transforms.h"
+#include "include/dataset/vision.h"
+#include "include/dataset/vision_ascend.h"
+#include "include/api/types.h"
+#include "include/api/model.h"
+#include "include/api/serialization.h"
+#include "include/api/context.h"
+
+using mindspore::Serialization;
+using mindspore::Model;
+using mindspore::Context;
+using mindspore::Status;
+using mindspore::ModelType;
+using mindspore::Graph;
+using mindspore::GraphCell;
+using mindspore::kSuccess;
+using mindspore::MSTensor;
+using mindspore::DataType;
+using mindspore::dataset::Execute;
+using mindspore::dataset::TensorTransform;
+using mindspore::dataset::vision::Decode;
+using mindspore::dataset::vision::Resize;
+using mindspore::dataset::vision::Normalize;
+using mindspore::dataset::vision::HWC2CHW;
+using mindspore::dataset::InterpolationMode;
+using mindspore::dataset::transforms::TypeCast;
+
+
+DEFINE_string(model_path, "", "model path");
+DEFINE_string(dataset_path, ".", "dataset path");
+DEFINE_int32(input_width, 960, "input width");
+DEFINE_int32(input_height, 576, "input height");
+DEFINE_int32(device_id, 0, "device id");
+DEFINE_string(precision_mode, "allow_fp32_to_fp16", "precision mode");
+DEFINE_string(op_select_impl_mode, "", "op select impl mode");
+DEFINE_string(aipp_path, "./aipp.cfg", "aipp path");
+DEFINE_string(device_target, "Ascend310", "device target");
+
+int main(int argc, char **argv) {
+    gflags::ParseCommandLineFlags(&argc, &argv, true);
+    if (RealPath(FLAGS_model_path).empty()) {
+      std::cout << "Invalid model" << std::endl;
+      return 1;
+    }
+
+    auto context = std::make_shared<Context>();
+    auto ascend310_info = std::make_shared<mindspore::Ascend310DeviceInfo>();
+    ascend310_info->SetDeviceID(FLAGS_device_id);
+    context->MutableDeviceInfo().push_back(ascend310_info);
+
+    Graph graph;
+    Status ret = Serialization::Load(FLAGS_model_path, ModelType::kMindIR, &graph);
+    if (ret != kSuccess) {
+        std::cout << "Load model failed." << std::endl;
+        return 1;
+    }
+
+    Model model;
+    ret = model.Build(GraphCell(graph), context);
+    if (ret != kSuccess) {
+        std::cout << "ERROR: Build failed." << std::endl;
+        return 1;
+    }
+
+    std::vector<MSTensor> modelInputs = model.GetInputs();
+
+    auto all_files = GetAllFiles(FLAGS_dataset_path);
+    if (all_files.empty()) {
+        std::cout << "ERROR: no input data." << std::endl;
+        return 1;
+    }
+
+    auto decode = Decode();
+    auto resize = Resize({224, 224}, InterpolationMode::kCubic);
+    auto normalize = Normalize({0.485, 0.456, 0.406}, {0.229, 0.224, 0.225});
+    auto hwc2chw = HWC2CHW();
+    auto typeCast = TypeCast(DataType::kNumberTypeFloat32);
+
+    mindspore::dataset::Execute transformDecode(decode);
+    mindspore::dataset::Execute transform({resize, normalize, hwc2chw});
+    mindspore::dataset::Execute transformCast(typeCast);
+
+    std::map<double, double> costTime_map;
+
+    size_t size = all_files.size();
+
+    for (size_t i = 0; i < size; ++i) {
+        struct timeval start;
+        struct timeval end;
+        double startTime_ms;
+        double endTime_ms;
+        std::vector<MSTensor> inputs;
+        std::vector<MSTensor> outputs;
+        std::cout << "Start predict input files:" << all_files[i] << std::endl;
+        mindspore::MSTensor image =  ReadFileToTensor(all_files[i]);
+
+        transformDecode(image, &image);
+        std::vector<int64_t> shape = image.Shape();
+        transform(image, &image);
+        transformCast(image, &image);
+
+        inputs.emplace_back(modelInputs[0].Name(), modelInputs[0].DataType(), modelInputs[0].Shape(),
+                            image.Data().get(), image.DataSize());
+
+        gettimeofday(&start, NULL);
+        model.Predict(inputs, &outputs);
+        gettimeofday(&end, NULL);
+        std::cout << " infer result:" << all_files[i] << std::endl;
+
+        startTime_ms = (1.0 * start.tv_sec * 1000000 + start.tv_usec) / 1000;
+        endTime_ms = (1.0 * end.tv_sec * 1000000 + end.tv_usec) / 1000;
+        costTime_map.insert(std::pair<double, double>(startTime_ms, endTime_ms));
+        WriteResult(all_files[i], outputs);
+    }
+    double average = 0.0;
+    double Fps = 0.0;
+    int infer_cnt = 0;
+
+    for (auto iter = costTime_map.begin(); iter != costTime_map.end(); iter++) {
+        double diff = 0.0;
+        std::cout << "time_time_1"<< iter->second << "time_time_0 "
+                  << iter->first << iter->second - iter->first << std::endl;
+        diff = iter->second - iter->first;
+        average += diff;
+        infer_cnt++;
+    }
+    Fps = infer_cnt*1000 / average;
+    average = average / infer_cnt;
+
+    std::stringstream timeCost;
+    std::stringstream fps;
+    fps << "Image processing speed is: " << Fps << "imgs/s" << std::endl;
+    timeCost << "NN inference cost average time: "<< average << " ms of infer_count " << infer_cnt << std::endl;
+    std::cout << "NN inference cost average time: "<< average << "ms of infer_count " << infer_cnt << std::endl;
+    std::string file_name = "./time_Result" + std::string("/test_perform_static.txt");
+    std::ofstream file_stream(file_name.c_str(), std::ios::trunc);
+    file_stream << fps.str();
+    file_stream << timeCost.str();
+    file_stream.close();
+    costTime_map.clear();
+    return 0;
+}
diff --git a/official/cv/nima/ascend310_infer/src/utils.cc b/official/cv/nima/ascend310_infer/src/utils.cc
new file mode 100644
index 0000000000000000000000000000000000000000..cb1516efa436dec82d87fe7030516a8ac57b5252
--- /dev/null
+++ b/official/cv/nima/ascend310_infer/src/utils.cc
@@ -0,0 +1,134 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "inc/utils.h"
+
+#include <fstream>
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+
+using mindspore::MSTensor;
+using mindspore::DataType;
+
+std::vector<std::string> GetAllFiles(std::string_view dirName) {
+    struct dirent *filename;
+    DIR *dir = OpenDir(dirName);
+    if (dir == nullptr) {
+        return {};
+    }
+    std::vector<std::string> res;
+    while ((filename = readdir(dir)) != nullptr) {
+        std::string dName = std::string(filename->d_name);
+        if (dName == "." || dName == ".." || filename->d_type != DT_REG) {
+            continue;
+        }
+        res.emplace_back(std::string(dirName) + "/" + filename->d_name);
+    }
+    std::sort(res.begin(), res.end());
+    for (auto &f : res) {
+        std::cout << "image file: " << f << std::endl;
+    }
+    return res;
+}
+
+
+
+int WriteResult(const std::string& imageFile, const std::vector<MSTensor> &outputs) {
+    std::string file_name1 = "./time_Result" + std::string("/test.txt");
+    for (auto tensor : outputs) {
+        std::cout << "tensor name is:" << tensor.Name() << " tensor size is:" << tensor.DataSize()
+        << " tensor elements num is:" << tensor.ElementNum() << std::endl;
+        auto out_data = reinterpret_cast<const float *>(tensor.Data().get());
+        std::cout << "output data is:";
+        std::ofstream file_stream1(file_name1.c_str(), std::ios::app);
+        file_stream1 << imageFile << ":";
+        for (int j = 0; j < tensor.ElementNum() && j <= 10; j++) {
+            std::stringstream out_data_1;
+            out_data_1 << out_data[j] << " ";
+            std::cout << out_data[j];
+            file_stream1 << out_data_1.str();
+        }
+        file_stream1 << ";\n";
+        file_stream1.close();
+        std::cout << std::endl;
+    }
+    return 0;
+}
+
+mindspore::MSTensor ReadFileToTensor(const std::string &file) {
+  if (file.empty()) {
+    std::cout << "Pointer file is nullptr" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  std::ifstream ifs(file);
+  if (!ifs.good()) {
+    std::cout << "File: " << file << " is not exist" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  if (!ifs.is_open()) {
+    std::cout << "File: " << file << "open failed" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  ifs.seekg(0, std::ios::end);
+  size_t size = ifs.tellg();
+  mindspore::MSTensor buffer(file, mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, size);
+
+  ifs.seekg(0, std::ios::beg);
+  ifs.read(reinterpret_cast<char *>(buffer.MutableData()), size);
+  ifs.close();
+
+  return buffer;
+}
+
+
+DIR *OpenDir(std::string_view dirName) {
+    if (dirName.empty()) {
+        std::cout << " dirName is null ! " << std::endl;
+        return nullptr;
+    }
+    std::string realPath = RealPath(dirName);
+    struct stat s;
+    lstat(realPath.c_str(), &s);
+    if (!S_ISDIR(s.st_mode)) {
+        std::cout << "dirName is not a valid directory !" << std::endl;
+        return nullptr;
+    }
+    DIR *dir = opendir(realPath.c_str());
+    if (dir == nullptr) {
+        std::cout << "Can not open dir " << dirName << std::endl;
+        return nullptr;
+    }
+    std::cout << "Successfully opened the dir " << dirName << std::endl;
+    return dir;
+}
+
+std::string RealPath(std::string_view path) {
+    char realPathMem[PATH_MAX] = {0};
+    char *realPathRet = nullptr;
+    realPathRet = realpath(path.data(), realPathMem);
+    if (realPathRet == nullptr) {
+        std::cout << "File: " << path << " is not exist.";
+        return "";
+    }
+
+    std::string realPath(realPathMem);
+    std::cout << path << " realpath is: " << realPath << std::endl;
+    return realPath;
+}
diff --git a/official/cv/nima/config.yaml b/official/cv/nima/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0cfb48f867ad04641884ca16400e0ab63be507c4
--- /dev/null
+++ b/official/cv/nima/config.yaml
@@ -0,0 +1,46 @@
+# Builtin Configurations(DO NOT CHANGE THESE CONFIGURATIONS unless you know exactly what you are doing)
+enable_modelarts: False
+is_distributed: False
+# Path for local or modelarts
+data_path: "~/NIMA/data/"
+label_path: "~/NIMA/AVA.txt"
+output_path: "s3://~output/"
+
+# ==============================================================================
+# options
+device_target: "Ascend"
+dataset_name: "AVA_Dataset"
+batch_size: 256
+epoch_size: 50
+num_parallel_workers: 16
+learning_rate: 0.001
+momentum: 0.95
+weight_decay: 0.001
+bf_crop_size: 256
+image_size: 224
+train_label_path: "~/NIMA/AVA_train.txt"
+val_label_path: "~/NIMA/AVA_test.txt"
+keep_checkpoint_max: 10
+checkpoint_path: "~/NIMA/model/resnet50_ascend_v130_imagenet2012_official_cv_bs256_top1acc76.97__top5acc_93.44.ckpt"
+ckpt_filename: "NIMA"
+ckpt_save_dir: "./model/"
+device_id: 0
+
+# eval or export option
+val_data_path: "~/NIMA/test_data/"
+ckpt_file: "~/NIMA/model/NIMA-2_898.ckpt"
+file_name: "NIMA"
+file_format: "MINDIR"
+file_save: "./model/"
+
+---
+
+# Help description for each configuration
+enable_modelarts: "Whether training on modelarts, default: False"
+is_distributed: "Whether training use multiple cards, default: False"
+data_url: "Url for modelarts"
+train_url: "Url for modelarts"
+output_path: "The location of the output file that uses modelarts."
+data_path: "The location of the input data."
+device_target: 'Target device type'
+enable_profiling: 'Whether enable profiling while training, default: False'
diff --git a/official/cv/nima/eval.py b/official/cv/nima/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..6828aaec3ad735d7cd7952ab02497e69b176ae79
--- /dev/null
+++ b/official/cv/nima/eval.py
@@ -0,0 +1,53 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import numpy as np
+import scipy.stats
+
+from mindspore import load_checkpoint
+import mindspore.context as context
+
+from src.resnet import resnet50 as resnet
+from src.dataset import create_dataset
+from src.config import config
+
+if __name__ == "__main__":
+    args = config
+    if args.enable_modelarts:
+        import moxing as mox
+        mox.file.shift('os', 'mox')
+    config.device_num = 1
+    config.rank = config.device_id
+    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False,
+                        device_id=args.device_id, reserve_class_name_in_scope=False)
+    ds_val, steps_per_epoch_val = create_dataset(args, data_mode='val')
+    net = resnet(10)
+    load_checkpoint(args.ckpt_file, net=net)
+    total_score = []
+    total_gt = []
+    SCORE_LIST = np.array([x for x in range(1, 11)])
+    for i, (data, gt_classes) in enumerate(ds_val):
+        net.set_train(False)
+        gt_classes = gt_classes.asnumpy()
+        output = net(data)
+        output = output.asnumpy()
+        gt = np.sum(gt_classes * np.array(SCORE_LIST), axis=1)
+        score = np.sum(output * np.array(SCORE_LIST), axis=1)
+        total_score += score.tolist()
+        total_gt += gt.tolist()
+    total_score = np.array(total_score)
+    total_gt = np.array(total_gt)
+    print('mse:', np.mean(np.power((total_score-total_gt), 2)))
+    print('deal imgs is:', total_score.shape[0])
+    print('SRCC:', scipy.stats.spearmanr(total_score, total_gt)[0])
diff --git a/official/cv/nima/export.py b/official/cv/nima/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..90936bc02edcb25ed5a0e42ceac768201855cf66
--- /dev/null
+++ b/official/cv/nima/export.py
@@ -0,0 +1,29 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import os
+import numpy as np
+from mindspore.common import dtype as mstype
+from mindspore import Tensor, load_checkpoint, export
+
+from src.resnet import resnet50 as resnet
+from src.config import config
+
+if __name__ == "__main__":
+    path = config.ckpt_file
+    net = resnet(10)
+    load_checkpoint(path, net=net)
+    img = np.random.randint(0, 255, size=(1, 3, config.image_size, config.image_size))
+    img = Tensor(np.array(img), mstype.float32)
+    export(net, img, file_name=os.path.join(config.file_save, config.file_name), file_format=config.file_format)
diff --git a/official/cv/nima/postprocess.py b/official/cv/nima/postprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0589bb09c6b60b35c0e1bd7b219d1f58d4e2199
--- /dev/null
+++ b/official/cv/nima/postprocess.py
@@ -0,0 +1,43 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Evaluation for NIMA"""
+import numpy as np
+import scipy.stats
+from src.config import config
+
+
+dic = {}
+with open(config.val_label_path) as label_file:
+    for lst in label_file.readlines():
+        dic[lst.split(',')[1]] = float(lst.split(',')[-1])
+
+with open('./time_Result/test.txt') as f:
+    y_pred = f.readlines()
+scores = []
+gt = []
+SCORE_LIST = np.array([x for x in range(1, 11)])
+for i in y_pred:
+    pic = i.split(':')[0].split('/')[-1]
+    score_list = [float(j) for j in i.split(':')[1].split()[:-1]]
+    score = np.sum(np.array(score_list) * SCORE_LIST)
+    scores.append(score)
+    gt.append(dic[pic])
+scores = np.array(scores)
+gt = np.array(gt)
+result = np.sum((scores > 5) & (gt > 5)) + np.sum((scores <= 5) & (gt <= 5))
+print('mse:', np.mean(np.power((scores - gt), 2)))
+print('acc: ', result/gt.shape[0])
+print('SRCC: ', scipy.stats.spearmanr(scores, gt)[0])
diff --git a/official/cv/nima/scripts/run_eval.sh b/official/cv/nima/scripts/run_eval.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d54f66b1597412a879ddd6efcc2b903fc7964ee9
--- /dev/null
+++ b/official/cv/nima/scripts/run_eval.sh
@@ -0,0 +1,32 @@
+#! /bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [[ $# -ne 1 ]]; then
+    echo "Usage: bash run_eval.sh [CONFIG_PATH]
+    CONFIG_PATH Must Be Provided!"
+exit 1
+fi
+
+if [ ! -f $1 ]
+then
+    echo "error: CONFIG_PATH=$1 is not a file"
+exit 1
+fi
+
+config_path=$1
+echo 'config_path:' $config_path
+
+python eval.py --config_path=$config_path &> eval.log
diff --git a/official/cv/nima/scripts/run_infer_310.sh b/official/cv/nima/scripts/run_infer_310.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b44f16f37b5aa121f631a32b0fa0f8f8cccc6fb4
--- /dev/null
+++ b/official/cv/nima/scripts/run_infer_310.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [[ $# -lt 2 || $# -gt 3 ]]; then
+    echo "Usage: bash run_infer_310.sh [MODEL_PATH] [VAL_DATA_PATH] [DEVICE_ID]
+    DEVICE_ID is optional, it can be set by environment variable device_id, otherwise the value is zero"
+exit 1
+fi
+
+if [ ! -f $1 ]
+then
+    echo "error: MODEL_PATH=$1 is not a file"
+exit 1
+fi
+
+if [ ! -d $2 ]
+then
+    echo "error: DATA_PATH=$2 is not a folder"
+exit 1
+fi
+
+get_real_path(){
+  if [ "${1:0:1}" == "/" ]; then
+    echo "$1"
+  else
+    echo "$(realpath -m $PWD/$1)"
+  fi
+}
+
+model=$(get_real_path $1)
+data_path=$(get_real_path $2)
+
+if [ $# == 3 ]; then
+    device_id=$3
+elif [ $# == 2 ]; then
+    if [ -z $device_id ]; then
+        device_id=0
+    else
+        device_id=$device_id
+    fi
+fi
+
+echo $model
+echo $data_path
+echo $device_id
+
+export ASCEND_HOME=/usr/local/Ascend/
+if [ -d ${ASCEND_HOME}/ascend-toolkit ]; then
+    export PATH=$ASCEND_HOME/fwkacllib/bin:$ASCEND_HOME/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/atc/bin:$PATH
+    export LD_LIBRARY_PATH=$ASCEND_HOME/fwkacllib/lib64:/usr/local/lib:$ASCEND_HOME/ascend-toolkit/latest/atc/lib64:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export TBE_IMPL_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe
+    export PYTHONPATH=$ASCEND_HOME/fwkacllib/python/site-packages:${TBE_IMPL_PATH}:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp
+else
+    export PATH=$ASCEND_HOME/fwkacllib/bin:$ASCEND_HOME/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH
+    export LD_LIBRARY_PATH=$ASCEND_HOME/fwkacllib/lib64:/usr/local/lib:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export PYTHONPATH=$ASCEND_HOME/fwkacllib/python/site-packages:$ASCEND_HOME/atc/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/opp
+fi
+
+function compile_app()
+{
+    cd ./ascend310_infer || exit
+    if [ -f "Makefile" ]; then
+        make clean
+    fi
+    sh build.sh &> build.log
+
+    if [ $? -ne 0 ]; then
+        echo "compile app code failed"
+        exit 1
+    fi
+    cd - || exit
+}
+
+function infer()
+{
+    if [ -d time_Result ]; then
+        rm -rf ./time_Result
+    fi
+    mkdir time_Result
+    ./ascend310_infer/out/main --model_path=$model --dataset_path=$data_path --device_id=$device_id &> infer.log
+
+    if [ $? -ne 0 ]; then
+        echo "execute inference failed"
+        exit 1
+    fi
+}
+
+compile_app
+infer
diff --git a/official/cv/nima/scripts/run_train_ascend.sh b/official/cv/nima/scripts/run_train_ascend.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0b623b886dd7ee9f77f75cd29702bcae819221dd
--- /dev/null
+++ b/official/cv/nima/scripts/run_train_ascend.sh
@@ -0,0 +1,71 @@
+#! /bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [[ $# -lt 1 || $# -gt 2 ]]; then
+    echo "Usage: sh run_train_ascend.sh [RANK_TABLE_FILE] [CONFIG_PATH]
+    Single-card training is used by default."
+exit 1
+fi
+EXECUTE_PATH=$(pwd)
+if [ ! -f $1 ]
+then
+echo "error: RANK_TABLE_FILE=$1 is not a file"
+exit 1
+fi
+if [ $# == 1 ]; then
+    config_path="${EXECUTE_PATH}/config.yaml"
+    echo "config path is : ${config_path}"
+else
+    config_path="$(realpath $2)"
+    echo "config path is : ${config_path}"
+    if [ ! -f $2 ]
+    then
+        echo "error: CONFIG_PATH=$2 is not a file"
+    exit 1
+    fi
+fi
+
+export DEVICE_NUM=8
+export RANK_SIZE=8
+PATH1=$(realpath $1)
+export RANK_TABLE_FILE=$PATH1
+echo "RANK_TABLE_FILE=${PATH1}"
+
+export SERVER_ID=0
+rank_start=$((DEVICE_NUM * SERVER_ID))
+
+cpus=`cat /proc/cpuinfo| grep "processor"| wc -l`
+avg=`expr $cpus \/ $DEVICE_NUM`
+gap=`expr $avg \- 1`
+
+for((i=0; i<${DEVICE_NUM}; i++))
+do
+    start=`expr $i \* $avg`
+    end=`expr $start \+ $gap`
+    cmdopt=$start"-"$end
+    export DEVICE_ID=$i
+    export RANK_ID=$((rank_start + i))
+    rm -rf ./train_parallel$i
+    mkdir ./train_parallel$i
+    cp -r ./src ./train_parallel$i
+    cp -r ./*.yaml ./train_parallel$i
+    cp ./train.py ./train_parallel$i
+    echo "start training for rank $RANK_ID, device $DEVICE_ID"
+    cd ./train_parallel$i ||exit
+    env > env.log
+    taskset -c $cmdopt python train.py --config_path=$config_path > log.log 2>&1 &
+    cd ..
+done
diff --git a/official/cv/nima/src/callback.py b/official/cv/nima/src/callback.py
new file mode 100644
index 0000000000000000000000000000000000000000..72fca78e44c9ea641284c22c51c74b6f388f6190
--- /dev/null
+++ b/official/cv/nima/src/callback.py
@@ -0,0 +1,31 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+from mindspore.train.callback import Callback
+
+class EvalCallBack(Callback):
+    def __init__(self, model, eval_dataset, eval_per_epoch, epoch_per_eval):
+        self.model = model
+        self.eval_dataset = eval_dataset
+        self.eval_per_epoch = eval_per_epoch
+        self.epoch_per_eval = epoch_per_eval
+
+    def epoch_end(self, run_context):
+        cb_param = run_context.original_args()
+        cur_epoch = cb_param.cur_epoch_num
+        if cur_epoch % self.eval_per_epoch == 0:
+            acc = self.model.eval(self.eval_dataset, dataset_sink_mode=False)
+            self.epoch_per_eval["epoch"].append(cur_epoch)
+            self.epoch_per_eval["spearman"].append(acc["spearman"])
+            print('val_acc', acc)
diff --git a/official/cv/nima/src/config.py b/official/cv/nima/src/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5ff16d1c1f5db4a2e789a88e67c68198029e32
--- /dev/null
+++ b/official/cv/nima/src/config.py
@@ -0,0 +1,130 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""Parse arguments"""
+
+import os
+import ast
+import argparse
+from pprint import pprint, pformat
+import yaml
+
+class Config:
+    """
+    Configuration namespace. Convert dictionary to members.
+    """
+    def __init__(self, cfg_dict):
+        for k, v in cfg_dict.items():
+            if isinstance(v, (list, tuple)):
+                setattr(self, k, [Config(x) if isinstance(x, dict) else x for x in v])
+            else:
+                setattr(self, k, Config(v) if isinstance(v, dict) else v)
+
+    def __str__(self):
+        return pformat(self.__dict__)
+
+    def __repr__(self):
+        return self.__str__()
+
+
+def parse_cli_to_yaml(parser, cfg, helper=None, choices=None, cfg_path="../config.yaml"):
+    """
+    Parse command line arguments to the configuration according to the default yaml.
+
+    Args:
+        parser: Parent parser.
+        cfg: Base configuration.
+        helper: Helper description.
+        cfg_path: Path to the default yaml config.
+    """
+    parser = argparse.ArgumentParser(description="[REPLACE THIS at config.py]",
+                                     parents=[parser])
+    helper = {} if helper is None else helper
+    choices = {} if choices is None else choices
+    for item in cfg:
+        if not isinstance(cfg[item], list) and not isinstance(cfg[item], dict):
+            help_description = helper[item] if item in helper else "Please reference to {}".format(cfg_path)
+            choice = choices[item] if item in choices else None
+            if isinstance(cfg[item], bool):
+                parser.add_argument("--" + item, type=ast.literal_eval, default=cfg[item], choices=choice,
+                                    help=help_description)
+            else:
+                parser.add_argument("--" + item, type=type(cfg[item]), default=cfg[item], choices=choice,
+                                    help=help_description)
+    args = parser.parse_args()
+    return args
+
+
+def parse_yaml(yaml_path):
+    """
+    Parse the yaml config file.
+
+    Args:
+        yaml_path: Path to the yaml config.
+    """
+    with open(yaml_path, 'r') as fin:
+        try:
+            cfgs = yaml.load_all(fin.read(), Loader=yaml.FullLoader)
+            cfgs = [x for x in cfgs]
+            if len(cfgs) == 1:
+                cfg_helper = {}
+                cfg = cfgs[0]
+                cfg_choices = {}
+            elif len(cfgs) == 2:
+                cfg, cfg_helper = cfgs
+                cfg_choices = {}
+            elif len(cfgs) == 3:
+                cfg, cfg_helper, cfg_choices = cfgs
+            else:
+                raise ValueError("At most 3 docs (config, description for help, choices) are supported in config yaml")
+            print(cfg_helper)
+        except:
+            raise ValueError("Failed to parse yaml")
+    return cfg, cfg_helper, cfg_choices
+
+
+def merge(args, cfg):
+    """
+    Merge the base config from yaml file and command line arguments.
+
+    Args:
+        args: Command line arguments.
+        cfg: Base configuration.
+    """
+    args_var = vars(args)
+    for item in args_var:
+        cfg[item] = args_var[item]
+    return cfg
+
+
+def get_config():
+    """
+    Get Config according to the yaml file and cli arguments.
+    """
+    parser = argparse.ArgumentParser(description="default name", add_help=False)
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+    parser.add_argument("--config_path", type=str, default=os.path.join(current_dir, "../config.yaml"),
+                        help="Config file path")
+    path_args, _ = parser.parse_known_args()
+    if path_args.config_path.startswith('s3://'):
+        import moxing as mox
+        mox.file.shift('os', 'mox')
+    default, helper, choices = parse_yaml(path_args.config_path)
+    pprint(default)
+    args = parse_cli_to_yaml(parser=parser, cfg=default, helper=helper, choices=choices, cfg_path=path_args.config_path)
+    final_config = merge(args, default)
+    return Config(final_config)
+
+config = get_config()
diff --git a/official/cv/nima/src/dataset.py b/official/cv/nima/src/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..be1be6b7f9418a2fda175c166dc914023cac60a8
--- /dev/null
+++ b/official/cv/nima/src/dataset.py
@@ -0,0 +1,94 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import os
+import numpy as np
+
+import mindspore
+import mindspore.dataset as ds
+from mindspore.dataset.vision import Inter
+from mindspore.dataset.vision import c_transforms as v_ct
+from mindspore.dataset.transforms import c_transforms as t_ct
+
+
+class Dataset:
+    def __init__(self, image_list, label_list):
+        super(Dataset, self).__init__()
+        self.imgs = image_list
+        self.labels = label_list
+
+    def __getitem__(self, index):
+        with open(self.imgs[index], 'rb') as f:
+            img_ = f.read()
+            img = np.frombuffer(img_, np.uint8)
+        return img, self.labels[index]
+
+    def __len__(self):
+        return len(self.imgs)
+
+def score_lst(lst):
+    lst = np.array(lst).astype(int)
+    res = lst / sum(lst)
+    return res
+
+def create_dataset(args, data_mode='train'):
+    mean = [0.485, 0.456, 0.406]
+    std = [0.229, 0.224, 0.225]
+    rank_id = args.rank
+    rank_size = args.device_num
+    if data_mode == 'train':
+        with open(args.train_label_path, 'r') as f:
+            datafile = f.readlines()
+        transform_img = t_ct.Compose([
+            v_ct.Decode(),
+            v_ct.Resize([args.bf_crop_size, args.bf_crop_size], Inter.BICUBIC),
+            v_ct.RandomCrop(args.image_size),
+            v_ct.RandomHorizontalFlip(prob=0.5),
+            v_ct.Normalize(mean=mean, std=std),
+            v_ct.HWC2CHW()])
+    else:
+        with open(args.val_label_path, 'r') as f:
+            datafile = f.readlines()
+        transform_img = t_ct.Compose([
+            v_ct.Decode(),
+            v_ct.Resize([args.image_size, args.image_size], Inter.BICUBIC),
+            v_ct.RandomHorizontalFlip(prob=0.5),
+            v_ct.Normalize(mean=mean, std=std),
+            v_ct.HWC2CHW()])
+    transform_label = t_ct.TypeCast(mindspore.dtype.float32)
+
+    save_image_list = [os.path.join(args.data_path, i.split(',')[1]) for i in datafile]
+    save_label_list = [score_lst(i.split(',')[2:12]) for i in datafile]
+    dataset = Dataset(save_image_list, save_label_list)
+    if data_mode == 'train':
+        if rank_size == 1:
+            de_dataset = ds.GeneratorDataset(dataset, column_names=["image", "label"],
+                                             shuffle=True, num_parallel_workers=args.num_parallel_workers)
+        else:
+            de_dataset = ds.GeneratorDataset(dataset, column_names=["image", "label"],
+                                             shuffle=True, num_parallel_workers=args.num_parallel_workers,
+                                             num_shards=rank_size, shard_id=rank_id)
+        drop_remainder = True
+    else:
+        de_dataset = ds.GeneratorDataset(dataset, column_names=["image", "label"],
+                                         shuffle=False, num_parallel_workers=args.num_parallel_workers)
+        drop_remainder = False
+    de_dataset = de_dataset.map(input_columns="image", operations=transform_img)
+    de_dataset = de_dataset.map(input_columns="label", operations=transform_label)
+
+    de_dataset = de_dataset.batch(args.batch_size,
+                                  drop_remainder=drop_remainder)
+    de_dataset = de_dataset.repeat(1)
+    steps_per_epoch = de_dataset.get_dataset_size()
+    return de_dataset, steps_per_epoch
diff --git a/official/cv/nima/src/device_adapter.py b/official/cv/nima/src/device_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..891618e748b1b66d94a399a354bfdbf2cb447ee5
--- /dev/null
+++ b/official/cv/nima/src/device_adapter.py
@@ -0,0 +1,37 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import os
+
+def get_device_id():
+    device_id = os.getenv('DEVICE_ID', '0')
+    return int(device_id)
+
+
+def get_device_num():
+    device_num = os.getenv('RANK_SIZE', '1')
+    return int(device_num)
+
+
+def _get_rank_info():
+    """
+    get rank size and rank id
+    """
+    rank_size = int(os.environ.get("RANK_SIZE", 1))
+
+    if rank_size > 1:
+        from mindspore.communication.management import get_rank, get_group_size
+        rank_size = get_group_size()
+        rank_id = get_rank()
+    else:
+        rank_size = rank_id = None
+    return rank_size, rank_id
diff --git a/official/cv/nima/src/dividing_label.py b/official/cv/nima/src/dividing_label.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6394b2bd292adf7a18669156922336151145f6f
--- /dev/null
+++ b/official/cv/nima/src/dividing_label.py
@@ -0,0 +1,41 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import os
+import random
+import pandas as pd
+from config import config
+random.seed(10)
+
+if config.enable_modelarts:
+    import moxing as mox
+    mox.file.shift('os', 'mox')
+pic_names = os.listdir(config.data_path)
+dic = []
+with open(config.label_path) as f:
+    for line in f:
+        name = line.split()[1]+'.jpg'
+        lst = map(int, line.split()[2:12])
+        lst = list(lst)
+        score = round(sum([(i+1)*j for i, j in enumerate(lst)])/sum(lst), 7)
+        dic.append([name]+line.split()[2:12]+[score])
+df = pd.DataFrame(dic)
+df_new = df[df[0].isin(pic_names)].copy()
+df_new.reset_index(drop=True, inplace=True)
+test_img = random.sample(pic_names, 25597)
+
+test_label = df_new[df_new[0].isin(test_img)].copy()
+train_label = df_new[~df_new[0].isin(test_img)].copy()
+test_label.to_csv(config.val_label_path, header=0)
+train_label.to_csv(config.train_label_path, header=0)
diff --git a/official/cv/nima/src/metric.py b/official/cv/nima/src/metric.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4b3c9169c6f1733bba66c2b6ff789205fdcf2ae
--- /dev/null
+++ b/official/cv/nima/src/metric.py
@@ -0,0 +1,70 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+import time
+import numpy as np
+import scipy.stats
+
+import mindspore.nn as nn
+import mindspore.ops as P
+from mindspore.train.callback import Callback
+
+class EmdLoss(nn.Cell):
+    def __init__(self):
+        super(EmdLoss, self).__init__()
+        self.square = P.Square()
+        self.reduce_mean = P.ReduceMean()
+        self.cumsum = P.CumSum()
+        self.sqrt = P.Sqrt()
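+    # EMD between two discrete score distributions: take the cumulative sum
+    # (CDF) of each distribution along the score axis, then return the RMSE
+    # between the two CDFs, averaged over the batch.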
+    def construct(self, data, label):
+        data = self.cumsum(data, 1)
+        label = self.cumsum(label, 1)
+        diff = data - label
+        emd = self.sqrt(self.reduce_mean(self.square(diff), 1))
+        return self.reduce_mean(emd)
+
+class PrintFps(Callback):
+    def __init__(self, train_data_num, start_time, end_time):
+        self.train_data_num = train_data_num
+        self.start_time = start_time
+        self.end_time = end_time
+    def epoch_begin(self, run_context):
+        self.start_time = time.time()
+    def epoch_end(self, run_context):
+        self.end_time = time.time()
+        cb_param = run_context.original_args()
+        cur_epoch = cb_param.cur_epoch_num
+        fps = self.train_data_num / (self.end_time - self.start_time)
+        loss = cb_param.net_outputs
+        used_time = self.end_time - self.start_time
+        print("Epoch:{} ,used time is:{:.2f}, fps: {:.2f}imgs/sec".format(cur_epoch, used_time, fps))
+        print('Step_end loss is', loss)
+
+class spearman(nn.Accuracy):
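+    """Spearman rank correlation (SRCC) between ground-truth and predicted mean scores."""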
+
+    def clear(self):
+        # Reused from nn.Accuracy, but these lists hold the ground-truth and
+        # predicted mean scores of every sample seen since the last clear().
+        self._correct_num = []
+        self._total_num = []
+
+    def update(self, *inputs):
+        y_pred = self._convert_data(inputs[0])
+        y = self._convert_data(inputs[1])
+        # Collapse each 10-bucket distribution to its mean score on the 1-10 scale.
+        score_list = [list(range(1, 11))] * inputs[0].shape[0]
+        gt = np.sum(y * np.array(score_list), axis=1)
+        score = np.sum(y_pred * np.array(score_list), axis=1)
+        self._correct_num += gt.tolist()
+        self._total_num += score.tolist()
+
+    def eval(self):
+        # scipy.stats.spearmanr returns (correlation, p-value).
+        return scipy.stats.spearmanr(self._correct_num, self._total_num)
diff --git a/official/cv/nima/src/resnet.py b/official/cv/nima/src/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..c95997f23a1b3c4a24bb29b0e05ec431c103a841
--- /dev/null
+++ b/official/cv/nima/src/resnet.py
@@ -0,0 +1,445 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""ResNet."""
+import math
+import numpy as np
+from scipy.stats import truncnorm
+import mindspore.nn as nn
+import mindspore.common.dtype as mstype
+from mindspore.ops import operations as P
+from mindspore.ops import functional as F
+from mindspore.common.tensor import Tensor
+
+def conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
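+    """Truncated-normal variance-scaling initializer for convolution weights."""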
+    fan_in = in_channel * kernel_size * kernel_size
+    scale = 1.0
+    scale /= max(1., fan_in)
+    # 0.87962566... is the stddev of a standard normal truncated to [-2, 2];
+    # dividing by it restores unit variance after truncation.
+    stddev = (scale ** 0.5) / .87962566103423978
+    mu, sigma = 0, stddev
+    weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size)
+    weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size))
+    return Tensor(weight, dtype=mstype.float32)
+
+
+def _weight_variable(shape, factor=0.01):
+    init_value = np.random.randn(*shape).astype(np.float32) * factor
+    return Tensor(init_value)
+
+
+def calculate_gain(nonlinearity, param=None):
+    """calculate_gain"""
+    linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
+    res = 0
+    if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
+        res = 1
+    elif nonlinearity == 'tanh':
+        res = 5.0 / 3
+    elif nonlinearity == 'relu':
+        res = math.sqrt(2.0)
+    elif nonlinearity == 'leaky_relu':
+        if param is None:
+            neg_slope = 0.01
+        elif not isinstance(param, bool) and isinstance(param, (int, float)):
+            neg_slope = param
+        else:
+            raise ValueError("neg_slope {} not a valid number".format(param))
+        res = math.sqrt(2.0 / (1 + neg_slope ** 2))
+    else:
+        raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
+    return res
+
+
+def _calculate_fan_in_and_fan_out(tensor):
+    """_calculate_fan_in_and_fan_out"""
+    dimensions = len(tensor)
+    if dimensions < 2:
+        raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
+    if dimensions == 2:  # Linear
+        fan_in = tensor[1]
+        fan_out = tensor[0]
+    else:
+        num_input_fmaps = tensor[1]
+        num_output_fmaps = tensor[0]
+        receptive_field_size = 1
+        if dimensions > 2:
+            receptive_field_size = tensor[2] * tensor[3]
+        fan_in = num_input_fmaps * receptive_field_size
+        fan_out = num_output_fmaps * receptive_field_size
+    return fan_in, fan_out
+
+
+def _calculate_correct_fan(tensor, mode):
+    mode = mode.lower()
+    valid_modes = ['fan_in', 'fan_out']
+    if mode not in valid_modes:
+        raise ValueError("Unsupported mode {}, please use one of {}".format(mode, valid_modes))
+    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
+    return fan_in if mode == 'fan_in' else fan_out
+
+
+def kaiming_normal(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'):
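+    """He (Kaiming) normal initializer: N(0, std) with std = gain / sqrt(fan)."""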
+    fan = _calculate_correct_fan(inputs_shape, mode)
+    gain = calculate_gain(nonlinearity, a)
+    std = gain / math.sqrt(fan)
+    return np.random.normal(0, std, size=inputs_shape).astype(np.float32)
+
+
+def kaiming_uniform(inputs_shape, a=0., mode='fan_in', nonlinearity='leaky_relu'):
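+    """He (Kaiming) uniform initializer: U(-b, b) with b = sqrt(3) * gain / sqrt(fan)."""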
+    fan = _calculate_correct_fan(inputs_shape, mode)
+    gain = calculate_gain(nonlinearity, a)
+    std = gain / math.sqrt(fan)
+    bound = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
+    return np.random.uniform(-bound, bound, size=inputs_shape).astype(np.float32)
+
+
+def _conv3x3(in_channel, out_channel, stride=1, use_se=False, res_base=False):
+    if use_se:
+        weight = conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=3)
+    else:
+        weight_shape = (out_channel, in_channel, 3, 3)
+        weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
+    if res_base:
+        return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,
+                         padding=1, pad_mode='pad', weight_init=weight)
+    return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,
+                     padding=0, pad_mode='same', weight_init=weight)
+
+
+def _conv1x1(in_channel, out_channel, stride=1, use_se=False, res_base=False):
+    if use_se:
+        weight = conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=1)
+    else:
+        weight_shape = (out_channel, in_channel, 1, 1)
+        weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
+    if res_base:
+        return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,
+                         padding=0, pad_mode='pad', weight_init=weight)
+    return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,
+                     padding=0, pad_mode='same', weight_init=weight)
+
+
+def _conv7x7(in_channel, out_channel, stride=1, use_se=False, res_base=False):
+    if use_se:
+        weight = conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=7)
+    else:
+        weight_shape = (out_channel, in_channel, 7, 7)
+        weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
+    if res_base:
+        return nn.Conv2d(in_channel, out_channel,
+                         kernel_size=7, stride=stride, padding=3, pad_mode='pad', weight_init=weight)
+    return nn.Conv2d(in_channel, out_channel,
+                     kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=weight)
+
+
+def _bn(channel, res_base=False):
+    if res_base:
+        return nn.BatchNorm2d(channel, eps=1e-5, momentum=0.1,
+                              gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
+    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
+                          gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
+
+
+def _bn_last(channel):
+    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
+                          gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1)
+
+
+def _fc(in_channel, out_channel, use_se=False):
+    if use_se:
+        weight = np.random.normal(loc=0, scale=0.01, size=out_channel * in_channel)
+        weight = Tensor(np.reshape(weight, (out_channel, in_channel)), dtype=mstype.float32)
+    else:
+        weight_shape = (out_channel, in_channel)
+        weight = Tensor(kaiming_uniform(weight_shape, a=math.sqrt(5)))
+    return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
+
+
+class ResidualBlock(nn.Cell):
+    """
+    ResNet V1 residual block definition.
+
+    Args:
+        in_channel (int): Input channel.
+        out_channel (int): Output channel.
+        stride (int): Stride size for the first convolutional layer. Default: 1.
+        use_se (bool): Enable SE-ResNet50 net. Default: False.
+        se_block(bool): Use se block in SE-ResNet50 net. Default: False.
+
+    Returns:
+        Tensor, output tensor.
+
+    Examples:
+        >>> ResidualBlock(3, 256, stride=2)
+    """
+    expansion = 4
+
+    def __init__(self,
+                 in_channel,
+                 out_channel,
+                 stride=1,
+                 use_se=False, se_block=False):
+        super(ResidualBlock, self).__init__()
+        self.stride = stride
+        self.use_se = use_se
+        self.se_block = se_block
+        channel = out_channel // self.expansion
+        self.conv1 = _conv1x1(in_channel, channel, stride=1, use_se=self.use_se)
+        self.bn1 = _bn(channel)
+        if self.use_se and self.stride != 1:
+            self.e2 = nn.SequentialCell([_conv3x3(channel, channel, stride=1, use_se=True), _bn(channel),
+                                         nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')])
+        else:
+            self.conv2 = _conv3x3(channel, channel, stride=stride, use_se=self.use_se)
+            self.bn2 = _bn(channel)
+
+        self.conv3 = _conv1x1(channel, out_channel, stride=1, use_se=self.use_se)
+        self.bn3 = _bn(out_channel)
+        if self.se_block:
+            self.se_global_pool = P.ReduceMean(keep_dims=False)
+            self.se_dense_0 = _fc(out_channel, int(out_channel / 4), use_se=self.use_se)
+            self.se_dense_1 = _fc(int(out_channel / 4), out_channel, use_se=self.use_se)
+            self.se_sigmoid = nn.Sigmoid()
+            self.se_mul = P.Mul()
+        self.relu = nn.ReLU()
+
+        self.down_sample = False
+
+        if stride != 1 or in_channel != out_channel:
+            self.down_sample = True
+        self.down_sample_layer = None
+
+        if self.down_sample:
+            if self.use_se:
+                if stride == 1:
+                    self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel,
+                                                                         stride, use_se=self.use_se), _bn(out_channel)])
+                else:
+                    self.down_sample_layer = nn.SequentialCell([nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'),
+                                                                _conv1x1(in_channel, out_channel, 1,
+                                                                         use_se=self.use_se), _bn(out_channel)])
+            else:
+                self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
+                                                                     use_se=self.use_se), _bn(out_channel)])
+
+    def construct(self, x):
+        identity = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.relu(out)
+        if self.use_se and self.stride != 1:
+            out = self.e2(out)
+        else:
+            out = self.conv2(out)
+            out = self.bn2(out)
+            out = self.relu(out)
+        out = self.conv3(out)
+        out = self.bn3(out)
+        if self.se_block:
+            out_se = out
+            out = self.se_global_pool(out, (2, 3))
+            out = self.se_dense_0(out)
+            out = self.relu(out)
+            out = self.se_dense_1(out)
+            out = self.se_sigmoid(out)
+            out = F.reshape(out, F.shape(out) + (1, 1))
+            out = self.se_mul(out, out_se)
+
+        if self.down_sample:
+            identity = self.down_sample_layer(identity)
+
+        out = out + identity
+        out = self.relu(out)
+
+        return out
+
+
+class ResNet(nn.Cell):
+    """
+    ResNet architecture.
+
+    Args:
+        block (Cell): Block for network.
+        layer_nums (list): Numbers of block in different layers.
+        in_channels (list): Input channel in each layer.
+        out_channels (list): Output channel in each layer.
+        strides (list):  Stride size in each layer.
+        num_classes (int): The number of classes that the training images belong to.
+        use_se (bool): Enable SE-ResNet50 net. Default: False.
+        se_block(bool): Use se block in SE-ResNet50 net in layer 3 and layer 4. Default: False.
+        res_base (bool): Enable parameter setting of resnet18. Default: False.
+
+    Returns:
+        Tensor, output tensor.
+
+    Examples:
+        >>> ResNet(ResidualBlock,
+        >>>        [3, 4, 6, 3],
+        >>>        [64, 256, 512, 1024],
+        >>>        [256, 512, 1024, 2048],
+        >>>        [1, 2, 2, 2],
+        >>>        10)
+    """
+
+    def __init__(self,
+                 block,
+                 layer_nums,
+                 in_channels,
+                 out_channels,
+                 strides,
+                 num_classes,
+                 use_se=False,
+                 res_base=False):
+        super(ResNet, self).__init__()
+
+        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
+            raise ValueError("The lengths of layer_nums, in_channels and out_channels must all be 4!")
+        self.use_se = use_se
+        self.res_base = res_base
+        self.se_block = self.use_se
+
+        if self.use_se:
+            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)
+            self.bn1_0 = _bn(32)
+            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)
+            self.bn1_1 = _bn(32)
+            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)
+        else:
+            self.conv1 = _conv7x7(3, 64, stride=2, res_base=self.res_base)
+        self.bn1 = _bn(64, self.res_base)
+        self.relu = P.ReLU()
+
+        if self.res_base:
+            self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1)))
+            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
+        else:
+            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
+
+        self.layer1 = self._make_layer(block,
+                                       layer_nums[0],
+                                       in_channel=in_channels[0],
+                                       out_channel=out_channels[0],
+                                       stride=strides[0],
+                                       use_se=self.use_se)
+        self.layer2 = self._make_layer(block,
+                                       layer_nums[1],
+                                       in_channel=in_channels[1],
+                                       out_channel=out_channels[1],
+                                       stride=strides[1],
+                                       use_se=self.use_se)
+        self.layer3 = self._make_layer(block,
+                                       layer_nums[2],
+                                       in_channel=in_channels[2],
+                                       out_channel=out_channels[2],
+                                       stride=strides[2],
+                                       use_se=self.use_se,
+                                       se_block=self.se_block)
+        self.layer4 = self._make_layer(block,
+                                       layer_nums[3],
+                                       in_channel=in_channels[3],
+                                       out_channel=out_channels[3],
+                                       stride=strides[3],
+                                       use_se=self.use_se,
+                                       se_block=self.se_block)
+
+        self.mean = P.ReduceMean(keep_dims=True)
+        self.flatten = nn.Flatten()
+        self.end_point = _fc(out_channels[3], 1000, use_se=self.use_se)
+        # Score-distribution head: num_classes-way softmax over the 1000-d features.
+        self.out = nn.SequentialCell([nn.Dense(1000, num_classes),  # nn.Dropout(0.75) left disabled
+                                      nn.Softmax()])
+
+    def _make_layer(self, block, layer_num, in_channel, out_channel, stride, use_se=False, se_block=False):
+        """
+        Make stage network of ResNet.
+
+        Args:
+            block (Cell): Resnet block.
+            layer_num (int): Layer number.
+            in_channel (int): Input channel.
+            out_channel (int): Output channel.
+            stride (int): Stride size for the first convolutional layer.
+            use_se (bool): Enable SE-ResNet50 net. Default: False.
+            se_block(bool): Use se block in SE-ResNet50 net. Default: False.
+        Returns:
+            SequentialCell, the output layer.
+
+        Examples:
+            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)
+        """
+        layers = []
+
+        resnet_block = block(in_channel, out_channel, stride=stride, use_se=use_se)
+        layers.append(resnet_block)
+        if se_block:
+            for _ in range(1, layer_num - 1):
+                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)
+                layers.append(resnet_block)
+            resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se, se_block=se_block)
+            layers.append(resnet_block)
+        else:
+            for _ in range(1, layer_num):
+                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)
+                layers.append(resnet_block)
+        return nn.SequentialCell(layers)
+
+    def construct(self, x):
+        if self.use_se:
+            x = self.conv1_0(x)
+            x = self.bn1_0(x)
+            x = self.relu(x)
+            x = self.conv1_1(x)
+            x = self.bn1_1(x)
+            x = self.relu(x)
+            x = self.conv1_2(x)
+        else:
+            x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        if self.res_base:
+            x = self.pad(x)
+        c1 = self.maxpool(x)
+
+        c2 = self.layer1(c1)
+        c3 = self.layer2(c2)
+        c4 = self.layer3(c3)
+        c5 = self.layer4(c4)
+
+        out = self.mean(c5, (2, 3))
+        out = self.flatten(out)
+        out = self.end_point(out)
+        out = self.out(out)
+
+        return out
+
+
+def resnet50(class_num=10):
+    """
+    Get ResNet50 neural network.
+
+    Args:
+        class_num (int): Class number.
+
+    Returns:
+        Cell, cell instance of ResNet50 neural network.
+
+    Examples:
+        >>> net = resnet50(10)
+    """
+    return ResNet(ResidualBlock,
+                  [3, 4, 6, 3],
+                  [64, 256, 512, 1024],
+                  [256, 512, 1024, 2048],
+                  [1, 2, 2, 2],
+                  class_num)
diff --git a/official/cv/nima/src/test_data.py b/official/cv/nima/src/test_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..50bfb2d48360e65173045bd10e6da20869a499d8
--- /dev/null
+++ b/official/cv/nima/src/test_data.py
@@ -0,0 +1,25 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
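+"""Copy the held-out test images listed in val_label_path into val_data_path."""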
+import os
+import shutil
+from config import config
+
+if not os.path.exists(config.val_data_path):  # create the test image directory
+    os.makedirs(config.val_data_path)
+with open(config.val_label_path) as f:  # test label CSV written by dividing_label.py
+    for info in f:
+        # CSV columns: index, image name, ten rating counts, mean score.
+        pic_name = info.split(',')[1]
+        shutil.copy(os.path.join(config.data_path, pic_name),
+                    os.path.join(config.val_data_path, pic_name))
diff --git a/official/cv/nima/train.py b/official/cv/nima/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bc1012f6ffb26c1a96e16523ff3105f4a41f04a
--- /dev/null
+++ b/official/cv/nima/train.py
@@ -0,0 +1,109 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
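+"""NIMA training entry: fine-tune an ImageNet-pretrained ResNet50 with EMD loss."""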
+import os
+import time
+
+import mindspore.nn as nn
+from mindspore import Model
+import mindspore.context as context
+from mindspore.common import set_seed
+from mindspore.context import ParallelMode
+from mindspore.communication.management import init
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor
+
+from src.config import config
+from src.callback import EvalCallBack
+from src.dataset import create_dataset
+from src.resnet import resnet50 as resnet
+from src.metric import EmdLoss, PrintFps, spearman
+from src.device_adapter import get_device_id, get_device_num, _get_rank_info
+
+def train_net(model, args):
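+    """Build the train/val datasets and run model.train with checkpoint, timing, fps and eval callbacks."""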
+    if args.is_distributed:
+        args.device_num, args.rank = _get_rank_info()
+    else:
+        args.device_num = 1
+        args.rank = args.device_id
+    ds_train, steps_per_epoch_train = create_dataset(args, data_mode='train')
+    ds_val, _ = create_dataset(args, data_mode='val')
+    print('steps_per_epoch_train', steps_per_epoch_train, 'epoch_size', args.epoch_size)
+    config_ck = CheckpointConfig(save_checkpoint_steps=steps_per_epoch_train,
+                                 keep_checkpoint_max=args.keep_checkpoint_max)
+    ckpoint_cb = ModelCheckpoint(prefix=args.ckpt_filename, directory=args.ckpt_save_dir, config=config_ck)
+    eval_per_epoch = 1
+    print("============== Starting Training ==============")
+    epoch_per_eval = {"epoch": [], "spearman": []}
+    eval_cb = EvalCallBack(model, ds_val, eval_per_epoch, epoch_per_eval)
+    train_data_num = steps_per_epoch_train * args.batch_size
+    init_time = time.time()
+    fps = PrintFps(train_data_num, init_time, init_time)
+    time_cb = TimeMonitor(train_data_num)
+    dataset_sink_mode = args.device_target != "CPU"
+    model.train(args.epoch_size, ds_train, callbacks=[ckpoint_cb, time_cb, fps, eval_cb],
+                dataset_sink_mode=dataset_sink_mode, sink_size=steps_per_epoch_train)
+
+if __name__ == "__main__":
+    if not os.path.exists(config.ckpt_save_dir):
+        os.makedirs(config.ckpt_save_dir)
+    if config.enable_modelarts:
+        import moxing as mox
+        mox.file.shift('os', 'mox')
+    context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
+    if config.is_distributed:
+        device_num = get_device_num()
+        # Split the global batch size evenly across devices.
+        config.batch_size = int(config.batch_size / device_num)
+        device_id = get_device_id()
+        context.set_context(device_id=device_id)
+        context.reset_auto_parallel_context()
+        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
+                                          gradients_mean=True)
+        init()
+    else:
+        device_num = 1
+        device_id = config.device_id
+        context.set_context(device_id=device_id)
+    print('batch_size:', config.batch_size, 'workers:', config.num_parallel_workers)
+    print('device_id', device_id, 'device_num', device_num)
+    set_seed(10)
+
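+    # NIMA predicts a distribution over 10 score buckets, hence 10 output classes.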
+    net = resnet(10)
+    param_dict = load_checkpoint(config.checkpoint_path)
+    # Load the pretrained backbone, skipping optimizer state ('moments.*') and
+    # the ImageNet classification head ('end_point.*'), which is re-initialized.
+    param_dict_new = {}
+    for key, values in param_dict.items():
+        if key.startswith('moments.') or key.startswith('end_point'):
+            continue
+        param_dict_new[key] = values
+    load_param_into_net(net, param_dict_new, strict_load=False)
+    # loss
+    criterion = EmdLoss()
+    # opt
+    learning_rate = config.learning_rate
+    momentum = config.momentum
+    weight_decay = config.weight_decay
+    opt = nn.SGD(params=net.trainable_params(), learning_rate=learning_rate,
+                 momentum=momentum, weight_decay=weight_decay)
+    # Construct model
+    metrics = {'spearman': spearman()}
+    model = Model(net, criterion, opt, metrics=metrics)
+    # Train
+    train_net(model, config)
+    if config.enable_modelarts:
+        # Copy checkpoints from the local cache back to the ModelArts output path.
+        for file in os.listdir(config.ckpt_save_dir):
+            mox.file.copy(os.path.join(config.ckpt_save_dir, file),
+                          os.path.join(config.output_path, 'Ascend_{}P_'.format(device_num) + file))