diff --git a/research/audio/jasper/README-CN.md b/research/audio/jasper/README-CN.md
index 467684eb6880d4f0a8c1246bf6b8edac0a879767..5159e1636fd2b3357d80704fa66a21e70b70f70b 100644
--- a/research/audio/jasper/README-CN.md
+++ b/research/audio/jasper/README-CN.md
@@ -12,16 +12,17 @@
  - [File Description and Usage](#file-description-and-usage)
    - [Code Directory Structure](#code-directory-structure)
    - [Model Parameters](#model-parameters)
-    - [Training and Inference Process](#training-and-inference-process)
-    - [Export](#Export)
+    - [Training and Evaluation](#training-and-evaluation)
+    - [Export and Inference](#export-and-inference)
  - [Performance](#performance)
    - [Training Performance](#training-performance)
    - [Inference Performance](#inference-performance)
+  - [FAQ](#FAQ)
  - [ModelZoo Homepage](#modelzoo-homepage)
 
## [Jasper Description](#contents)

-Jasper is an end-to-end speech recognition model trained with CTC loss. The Jasper model uses only 1D convolutions, batch normalization, ReLU, dropout, and residual connections. Training and evaluation are supported on CPU and GPU.
+Jasper is an end-to-end speech recognition model trained with CTC loss. The Jasper model uses only 1D convolutions, batch normalization, ReLU, dropout, and residual connections. Training and evaluation are supported on CPU, GPU, and Ascend.

[Paper](https://arxiv.org/pdf/1904.03288v3.pdf): Jason Li, et al. Jasper: An End-to-End Convolutional Neural Acoustic Model.
 
@@ -53,8 +54,8 @@ test-other.tar.gz [328M] (test set, noisy)
 
## [Environment Requirements](#contents)

-Hardware (GPU)
-  GPU processor
+Hardware
+  GPU processor or Ascend processor
Framework
  [MindSpore](https://www.mindspore.cn/install/en)
For more information, please check the resources below:
@@ -77,6 +78,8 @@ test-other.tar.gz [328M] (test set, noisy)
        │  README.md                        //English readme
        │  requirements.txt                 //required libraries
        │  train.py                         //training script
+        │  preprocess.py                    //inference preprocessing
+        │  postprocess.py                   //inference postprocessing
        │
        ├─scripts
        │      download_librispeech.sh      //script to download the dataset
@@ -86,6 +89,12 @@ test-other.tar.gz [328M] (test set, noisy)
        │      run_eval_gpu.sh              //GPU evaluation
        │      run_standalone_train_cpu.sh  //single-card CPU training
        │      run_standalone_train_gpu.sh  //single-card GPU training
+        │      run_distribute_train_ascend.sh  //8-card Ascend-910 training
+        │      run_standalone_train_ascend.sh  //single-card Ascend-910 training
+        │      run_eval_ascend.sh           //Ascend-910 evaluation
+        │      run_infer_310.sh             //Ascend-310 inference
+        │
+        ├─ascend310_infer                   //Ascend-310 inference code
        │
        ├─src
        │      audio.py                     //data processing code
@@ -159,27 +168,26 @@ checkpoint parameters
    keep_checkpoint_max          maximum number of checkpoints to keep; older checkpoints are deleted, default is 10
 ```
 
-## [璁粌鍜屾帹鐞嗚繃绋媇(#contents)
+## [璁粌鍜岃瘎浼癩(#contents)
 
 ### 璁粌
 
 ```text
-杩愯: train.py   [--use_pretrained USE_PRETRAINED]
-                 [--pre_trained_model_path PRE_TRAINED_MODEL_PATH]
+杩愯: train.py   [--pre_trained_model_path PRE_TRAINED_MODEL_PATH]
                  [--is_distributed IS_DISTRIBUTED]
-                 [--bidirectional BIDIRECTIONAL]
                  [--device_target DEVICE_TARGET]
+                 [--device_id DEVICE_ID]
 鍙傛暟:
     --pre_trained_model_path    棰勫厛璁粌鐨勬ā鍨嬫枃浠惰矾寰勶紝榛樿涓�''
     --is_distributed            澶氬崱璁粌锛岄粯璁や负False
-    --device_target             杩愯浠g爜鐨勮澶囷細"GPU" | 鈥淐PU鈥濓紝榛樿涓�"GPU"
+    --device_target             杩愯浠g爜鐨勮澶囷細"GPU" | 鈥淐PU鈥� | 鈥淎scend鈥濓紝榛樿涓�"GPU"
+    --device_target             杩愯浠g爜鐨勮澶嘔D锛屼娇鐢ˋscend璁粌鏃剁敤鍒帮紝榛樿涓�0
 ```
 
-### Inference
+### Evaluation

```text
-usage: eval.py   [--bidirectional BIDIRECTIONAL]
-                [--pretrain_ckpt PRETRAIN_CKPT]
+usage: eval.py   [--pretrain_ckpt PRETRAIN_CKPT]
                [--device_target DEVICE_TARGET]

options:
@@ -244,9 +252,18 @@ bash ./scripts/run_standalone_train_cpu.sh
# distributed training on GPU
 bash ./scripts/run_distribute_train_gpu.sh
 
+# standalone training on Ascend
+bash ./scripts/run_standalone_train_ascend.sh [DEVICE_ID]
+
+# distributed training on Ascend
+bash ./scripts/run_distribute_train_ascend.sh [RANK_SIZE] [BEGIN] [RANK_TABLE_FILE]
+# where RANK_SIZE is the number of cards, BEGIN is the first device id, and RANK_TABLE_FILE is the path of the distributed configuration file
+
```

-Inference:
+For example, you can run 8-card Ascend training like this: `bash ./scripts/run_distribute_train_ascend.sh 8 0 ./hccl_config/hccl_8.json`
+
+Evaluation:
 
 ```shell
 
@@ -256,8 +273,53 @@ bash ./scripts/run_eval_cpu.sh [PATH_CHECKPOINT]
# evaluation on GPU
 bash ./scripts/run_eval_gpu.sh [DEVICE_ID] [PATH_CHECKPOINT]
 
+# evaluation on Ascend
+bash ./scripts/run_eval_ascend.sh [DEVICE_ID] [PATH_CHECKPOINT]
+
+```
+
+## [Export and Inference](#contents)
+
+### Export
+
+Before inference, export the MindIR file and prepare the trained ckpt file.
+
+```shell
+# run export on Ascend 910
+DEVICE_ID=[DEVICE_ID] python export.py --pre_trained_model_path [CKPT_FILE] --device_target 'Ascend'
+```
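+
+Optionally, you can sanity-check the exported file before copying it to the Ascend 310 machine. Below is a minimal sketch (not part of the repo scripts), assuming a MindSpore environment and the exported file name `jasper_graph.mindir`:
+
+```python
+# load the exported MindIR graph and wrap it as a cell that can run prediction
+import mindspore as ms
+
+graph = ms.load("jasper_graph.mindir")
+net = ms.nn.GraphCell(graph)
+```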
+
+### Inference
+
+Inference must run on Ascend 310. Put the exported `jasper_graph.mindir` file and `jasper_variables/` in the current path, store the processed test set (prepared on Ascend 910) together with the corresponding json files, and modify the inference "DataConfig" in `src/config.py`. In addition, inference uses a third-party decoder, so pytorch needs to be installed in the environment.
+
+```shell
+# inference config
+"DataConfig": {
+    "Data_dir": '/home/dataset/LibriSpeech',
+    "test_manifest": ['/home/dataset/LibriSpeech/librispeech-test-clean-wav.json'],
+},
+"batch_size_infer": 1,
+# for preprocess
+"result_path": "./preprocess_Result",
+# for postprocess
+"result_dir": "./result_Files",
+"post_out": "./infer_output.txt"
+
 ```
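+
+These fields map to `infer_config` in `src/config.py`; edit them there before running preprocessing and inference.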
 
+Run the inference script:
+
+```shell
+bash scripts/run_infer_310.sh [MINDIR_PATH] [NEED_PREPROCESS] [DEVICE_ID]
+```
+
+- `MINDIR_PATH` the mindir file
+- `NEED_PREPROCESS` whether preprocessing is needed: set 'y' to run it, 'n' to skip it
+- `DEVICE_ID` is optional, default is 0
+
+After inference, you can view the results in `infer_output.txt`.
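+The postprocess step also computes and prints the average WER of `infer_output.txt` against the reference transcripts in `target.txt`.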
+
## [Performance](#contents)

### [Training and Test Performance Analysis](#contents)
@@ -267,16 +329,16 @@ bash ./scripts/run_eval_gpu.sh [DEVICE_ID] [PATH_CHECKPOINT]
| Parameters                 | Jasper                                                      |
| -------------------------- | ---------------------------------------------------------------|
| Resource                   | NV SMX2 V100-32G              |
-| Uploaded Date              | 2/7/2022 (month/day/year)                                    |
+| Uploaded Date              | 7/2/2022 (month/day/year)                                    |
| MindSpore Version          | 1.8.0                                                        |
| Dataset                    | LibriSpeech                                                 |
| Training Parameters        | 8p, epoch=440, steps=1088 * epoch, batch_size = 64, lr=3e-4 |
| Optimizer                  | Adam                                                           |
| Loss Function              | CTCLoss                                |
| Outputs                    | probability                                                    |
-| Loss                       | 0.2-0.7                                                        |
-| Speed                      | 8p, 2.7 s/step                              |
-| Total training time        | 8p: around 194 h                          |
+| Loss                       | 32-33                                                        |
+| Speed                      | GPU: 8p, 2.7 s/step; Ascend: 8p, 1.7 s/step    |
+| Total training time        | GPU: 8p, around 194 h; Ascend: 8p, around 220 h  |
| Checkpoint size            | 991M (.ckpt file)                                              |
| Scripts                    | [Jasper script](https://gitee.com/mindspore/models/tree/master/research/audio/jasper) |
 
@@ -289,11 +351,20 @@ bash ./scripts/run_eval_gpu.sh [DEVICE_ID] [PATH_CHECKPOINT]
| MindSpore Version   | 1.8.0                                                         |
| Dataset             | LibriSpeech                         |
| batch_size          | 64                                                         |
-| Outputs             | probability                       |
-| Accuracy (noise-free) | 8p: WER: 5.754, CER: 2.151 |
-| Accuracy (noisy)      | 8p: WER: 19.213, CER: 9.393 |
+| Outputs             | error rate                       |
+| Accuracy (noise-free) | GPU: 8p, WER: 5.754, CER: 2.151; Ascend: 8p, WER: 4.597, CER: 1.544 |
+| Accuracy (noisy)      | GPU: 8p, WER: 19.213, CER: 9.393; Ascend: 8p, WER: 12.871, CER: 5.618 |
| Model for inference | 330M (.mindir file)                                              |
 
+## [FAQ](#contents)
+
+Q1. How do I resume training after an interruption?  
+A1. Pass the ckpt file saved at the interruption as the pre_path parameter (`--pre_trained_model_path`) of the training script; training then resumes from that checkpoint.  
+Q2. After training, how do I choose the ckpt file for inference?  
+A2. Check the training logs and pick the ckpt file that corresponds to the lowest loss.  
+Q3. How should the constant TRAIN_INPUT_PAD_LENGTH in src/dataset.py, src/model.py and src/model_test.py be set?  
+A3. It is the length of the training input after padding; increasing it moderately trades longer training time for a lower error rate.
+
## [ModelZoo Homepage](#contents)

 [ModelZoo Homepage](https://gitee.com/mindspore/models).
diff --git a/research/audio/jasper/README.md b/research/audio/jasper/README.md
index 948c0e70608ef3ca6f30296e2f03c1bad4bb50c4..5f837163934d786c4882286fd66de4935bf36ce2 100644
--- a/research/audio/jasper/README.md
+++ b/research/audio/jasper/README.md
@@ -7,16 +7,17 @@
   - [Script Description](#script-description)
    - [Script and Sample Code](#script-and-sample-code)
     - [Script Parameters](#script-parameters)
-    - [Training and eval Process](#training-process)
-    - [Export](#Export)
+    - [Training and Eval](#training-and-eval)
+    - [Export and Infer](#export-and-infer)
   - [Performance](#performance)
     - [Training Performance](#training-performance)
     - [Inference Performance](#inference-performance)
+  - [FAQ](#FAQ)
   - [ModelZoo Homepage](#modelzoo-homepage)
 
 ## [Jasper Description](#contents)
 
-Jasper is an end-to-end speech recognition model which is trained with CTC loss. The Jasper model uses only 1D convolutions, batch normalization, ReLU, dropout, and residual connections. We support training and evaluation on CPU and GPU.
+Jasper is an end-to-end speech recognition model which is trained with CTC loss. The Jasper model uses only 1D convolutions, batch normalization, ReLU, dropout, and residual connections. We support training and evaluation on CPU, GPU, and Ascend.
 
 [Paper](https://arxiv.org/pdf/1904.03288v3.pdf): Jason Li, et al. Jasper: An End-to-End Convolutional Neural Acoustic Model.
 
@@ -46,8 +47,8 @@ Data format: wav and txt files
 
 ## [Environment Requirements](#contents)
 
-Hardware (GPU)
-  Prepare hardware environment with GPU processor.
+Hardware
+  Prepare hardware environment with GPU processor or Ascend processor.
 Framework
   [MindSpore](https://www.mindspore.cn/install/en)
For more information, please check the resources below:
@@ -70,6 +71,8 @@ For more information, please check the resources below:
        │  README.md                        //English readme
        │  requirements.txt                 //required library file
        │  train.py                         //train file
+        │  preprocess.py                    //inference preprocess
+        │  postprocess.py                   //inference postprocess
        │
        ├─scripts
        │      download_librispeech.sh      //download data
@@ -79,6 +82,12 @@ For more information, please check the resources below:
        │      run_eval_gpu.sh              //GPU evaluate
        │      run_standalone_train_cpu.sh  //one CPU train
        │      run_standalone_train_gpu.sh  //one GPU train
+        │      run_distribute_train_ascend.sh  //8 Ascend-910 cards training
+        │      run_standalone_train_ascend.sh  //single Ascend-910 training
+        │      run_eval_ascend.sh           //Ascend-910 evaluation
+        │      run_infer_310.sh             //Ascend-310 inference
+        │
+        ├─ascend310_infer                   //Ascend-310 inference code
        │
        ├─src
        │      audio.py                     //preprocess data
@@ -113,16 +122,16 @@ For more information, please check the resources below锛�
 #### Training
 
 ```text
-usage: train.py  [--use_pretrained USE_PRETRAINED]
-                 [--pre_trained_model_path PRE_TRAINED_MODEL_PATH]
+usage: train.py  [--pre_trained_model_path PRE_TRAINED_MODEL_PATH]
                  [--is_distributed IS_DISTRIBUTED]
-                 [--bidirectional BIDIRECTIONAL]
                  [--device_target DEVICE_TARGET]
+                 [--device_id DEVICE_ID]
options:
    --pre_trained_model_path    pretrained checkpoint path, default is ''
    --is_distributed            distributed training, default is False
-    is True. Currently, only bidirectional model is implemented
-    --device_target             device where the code will be implemented: "GPU" | "CPU", default is "GPU"
+    --device_target             device where the code will be implemented: "GPU" | "CPU" | "Ascend", default is "GPU"
+    --device_id                 device id, used when device_target is Ascend, default is 0
 ```
 
 #### Evaluation
@@ -135,7 +144,7 @@ usage: eval.py  [--bidirectional BIDIRECTIONAL]
 options:
     --bidirectional              whether to use bidirectional RNN, default is True. Currently, only bidirectional model is implemented
     --pretrain_ckpt              saved checkpoint path, default is ''
-    --device_target              device where the code will be implemented: "GPU" | "CPU", default is "GPU"
+    --device_target              device where the code will be implemented: "GPU" | "CPU" | "Ascend", default is "GPU"
 ```
 
 #### Options and Parameters
@@ -144,7 +153,7 @@ Parameters for training and evaluation can be set in file `config.py`
 
 ```text
 config for training.
-    epochs                       number of training epoch, default is 70
+    epochs                       number of training epochs, default is 440
 ```
 
 ```text
@@ -183,7 +192,7 @@ config for checkpoint.
     keep_checkpoint_max          max number of checkpoints to save, delete older checkpoints, default is 10
 ```
 
-## [Training and Eval process](#contents)
+## [Training and Eval](#contents)
 
 Before training, the dataset should be processed.
 
@@ -245,8 +254,16 @@ bash ./scripts/run_standalone_train_cpu.sh
 # distributed training gpu
 bash ./scripts/run_distribute_train_gpu.sh
 
+# standalone training on Ascend
+bash ./scripts/run_standalone_train_ascend.sh [DEVICE_ID]
+
+# distributed training on Ascend
+bash ./scripts/run_distribute_train_ascend.sh [RANK_SIZE] [BEGIN] [RANK_TABLE_FILE]
+# where RANK_SIZE is the number of cards, BEGIN is the first device id, and RANK_TABLE_FILE is the path of the distributed configuration file
+
 ```
 
+For example, you can run distributed training on 8 Ascend cards like this: `bash ./scripts/run_distribute_train_ascend.sh 8 0 ./hccl_config/hccl_8.json`.
+
The following script is used to evaluate the model. Note that only the greedy decoder is supported now. Before running the script:
 
 ```shell
@@ -257,8 +274,53 @@ bash ./scripts/run_eval_cpu.sh [PATH_CHECKPOINT]
 # eval on gpu
 bash ./scripts/run_eval_gpu.sh [DEVICE_ID] [PATH_CHECKPOINT]
 
+# eval on Ascend
+bash ./scripts/run_eval_ascend.sh [DEVICE_ID] [PATH_CHECKPOINT]
+
+```
+
+## [Export and Infer](#contents)
+
+### Export
+
+Before inference, you need to export the MindIR file and prepare the trained checkpoint.
+
+```shell
+# export on Ascend 910
+DEVICE_ID=[DEVICE_ID] python export.py --pre_trained_model_path [CKPT_FILE] --device_target 'Ascend'
+```
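+
+Optionally, you can sanity-check the exported file before copying it to the Ascend 310 machine. Below is a minimal sketch (not part of the repo scripts), assuming a MindSpore environment and the exported file name `jasper_graph.mindir`:
+
+```python
+# load the exported MindIR graph and wrap it as a cell that can run prediction
+import mindspore as ms
+
+graph = ms.load("jasper_graph.mindir")
+net = ms.nn.GraphCell(graph)
+```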
+
+### Infer
+
+Inference must be executed on Ascend 310. Put the exported `jasper_graph.mindir` file and `jasper_variables/` in the current path, and modify "DataConfig" in `src/config.py`. Note that only the greedy decoder is supported now, and inference relies on a third-party decoder, so you need to install pytorch in your environment before running the script.
+
+```shell
+# Inference config
+"DataConfig": {
+    "Data_dir": '/home/dataset/LibriSpeech',
+    "test_manifest": ['/home/dataset/LibriSpeech/librispeech-test-clean-wav.json'],
+},
+"batch_size_infer": 1,
+# for preprocess
+"result_path": "./preprocess_Result",
+# for postprocess
+"result_dir": "./result_Files",
+"post_out": "./infer_output.txt"
+
+```
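+
+These fields map to `infer_config` in `src/config.py`; edit them there before running preprocessing and inference.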
+
+Run the inference script:
+
+```shell
+bash scripts/run_infer_310.sh [MINDIR_PATH] [NEED_PREPROCESS] [DEVICE_ID]
 ```
 
+- `MINDIR_PATH` the mindir file.
+- `NEED_PREPROCESS` whether preprocessing is needed; its value is 'y' or 'n'.
+- `DEVICE_ID` is optional, default value is 0.
+
+After inference, you can view the result in `infer_output.txt`.
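+The postprocess step also computes and prints the average WER of `infer_output.txt` against the reference transcripts in `target.txt`.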
+
 ## [Model Description](#contents)
 
 ### [Performance](#contents)
@@ -268,16 +330,16 @@ bash ./scripts/run_eval_gpu.sh [DEVICE_ID] [PATH_CHECKPOINT]
 | Parameters           | Jasper                                                       |
 | -------------------- | ------------------------------------------------------------ |
 | Resource             | NV SMX2 V100-32G                                             |
-| uploaded Date        | 2/7/2022 (month/day/year)                                    |
+| uploaded Date        | 7/2/2022 (month/day/year)                                    |
 | MindSpore Version    | 1.8.0                                                        |
 | Dataset              | LibriSpeech                                                  |
| Training Parameters  | 8p, epoch=440, steps=1088 * epoch, batch_size = 64, lr=3e-4  |
 | Optimizer            | Adam                                                         |
 | Loss Function        | CTCLoss                                                      |
 | outputs              | probability                                                  |
-| Loss                 | 0.2-0.7                                                      |
-| Speed                | 8p 2.7s/step                                                 |
-| Total time: training | 8p: around 194 h;                                            |
+| Loss                 | 32-33                                                        |
+| Speed                | GPU: 8p, 2.7 s/step; Ascend: 8p, 1.7 s/step                  |
+| Total time: training | GPU: 8p, around 194 h; Ascend: 8p, around 220 h              |
 | Checkpoint           | 991M (.ckpt file)                                            |
 | Scripts              | [Jasper script](https://gitee.com/mindspore/models/tree/master/research/audio/jasper) |
 
@@ -291,10 +353,19 @@ bash ./scripts/run_eval_gpu.sh [DEVICE_ID] [PATH_CHECKPOINT]
 | Dataset             | LibriSpeech                |
 | batch_size          | 64                         |
 | outputs             | probability                |
-| Accuracy(dev-clean) | 8p: WER: 5.754  CER: 2.151 |
-| Accuracy(dev-other) | 8p: WER: 19.213 CER: 9.393 |
+| Accuracy(dev-clean) | GPU: 8p, WER: 5.754, CER: 2.151; Ascend: 8p, WER: 4.597, CER: 1.544 |
+| Accuracy(dev-other) | GPU: 8p, WER: 19.213, CER: 9.393; Ascend: 8p, WER: 12.871, CER: 5.618 |
 | Model for inference | 330M (.mindir file)        |
 
+## [FAQ](#contents)
+
+Q1. How do I resume training after an interruption?  
+A1. Pass the ckpt file saved at the interruption as the "pre_path" parameter (`--pre_trained_model_path`) of the training script; training then resumes from that checkpoint.  
+Q2. After training, how do I choose the ckpt file for inference?  
+A2. Check the training logs and pick the checkpoint whose loss is the lowest.  
+Q3. How to set the TRAIN_INPUT_PAD_LENGTH constant in src/dataset.py, src/model.py and src/model_test.py?  
+A3. TRAIN_INPUT_PAD_LENGTH is the length of the input data after padding. Increasing it moderately can reduce WER at the cost of longer training time.  
+
 ## [ModelZoo Homepage](#contents)
 
  Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/research/audio/jasper/ascend310_infer/CMakeLists.txt b/research/audio/jasper/ascend310_infer/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ee3c85447340e0449ff2b70ed24f60a17e07b2b6
--- /dev/null
+++ b/research/audio/jasper/ascend310_infer/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.14.1)
+project(Ascend310Infer)
+add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -std=c++17 -Werror -Wall -fPIE -Wl,--allow-shlib-undefined")
+set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR}/)
+option(MINDSPORE_PATH "mindspore install path" "")
+include_directories(${MINDSPORE_PATH})
+include_directories(${MINDSPORE_PATH}/include)
+include_directories(${PROJECT_SRC_ROOT})
+find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib)
+file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*)
+
+add_executable(main src/main.cc src/utils.cc)
+target_link_libraries(main ${MS_LIB} ${MD_LIB} gflags)
diff --git a/research/audio/jasper/ascend310_infer/build.sh b/research/audio/jasper/ascend310_infer/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c12df0d7958d0f01f250ef01ebd7782a4193bd89
--- /dev/null
+++ b/research/audio/jasper/ascend310_infer/build.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+if [ -d out ]; then
+    rm -rf out
+fi
+
+mkdir out
+cd out || exit
+
+if [ -f "Makefile" ]; then
+  make clean
+fi
+
+cmake .. \
+    -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
+make
+
diff --git a/research/audio/jasper/ascend310_infer/inc/utils.h b/research/audio/jasper/ascend310_infer/inc/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..be13a766754d505b3bdd73369d098590142c0ed5
--- /dev/null
+++ b/research/audio/jasper/ascend310_infer/inc/utils.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_INFERENCE_UTILS_H_
+#define MINDSPORE_INFERENCE_UTILS_H_
+
+#include <sys/stat.h>
+#include <dirent.h>
+#include <vector>
+#include <string>
+#include <memory>
+#include "include/api/types.h"
+
+std::vector<std::string> GetAllFiles(std::string_view dirName);
+DIR *OpenDir(std::string_view dirName);
+std::string RealPath(std::string_view path);
+mindspore::MSTensor ReadFileToTensor(const std::string &file);
+int WriteResult(const std::string& imageFile, const std::vector<mindspore::MSTensor> &outputs);
+#endif
diff --git a/research/audio/jasper/ascend310_infer/src/main.cc b/research/audio/jasper/ascend310_infer/src/main.cc
new file mode 100644
index 0000000000000000000000000000000000000000..eebe35ba2ce06602a894a3d2742d6c54b56dc414
--- /dev/null
+++ b/research/audio/jasper/ascend310_infer/src/main.cc
@@ -0,0 +1,137 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <sys/time.h>
+#include <gflags/gflags.h>
+#include <dirent.h>
+#include <iostream>
+#include <string>
+#include <algorithm>
+#include <iosfwd>
+#include <vector>
+#include <fstream>
+#include <sstream>
+#include <map>
+
+#include "include/api/model.h"
+#include "include/api/context.h"
+#include "include/api/types.h"
+#include "include/api/serialization.h"
+#include "include/dataset/execute.h"
+#include "include/dataset/vision.h"
+#include "inc/utils.h"
+
+using mindspore::Context;
+using mindspore::Serialization;
+using mindspore::Model;
+using mindspore::Status;
+using mindspore::MSTensor;
+using mindspore::dataset::Execute;
+using mindspore::ModelType;
+using mindspore::GraphCell;
+using mindspore::kSuccess;
+
+DEFINE_string(mindir_path, "", "mindir path");
+DEFINE_string(input0_path, ".", "input0 path");
+DEFINE_string(input1_path, ".", "input1 path");
+DEFINE_int32(device_id, 0, "device id");
+
+int main(int argc, char **argv) {
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+  if (RealPath(FLAGS_mindir_path).empty()) {
+    std::cout << "Invalid mindir" << std::endl;
+    return 1;
+  }
+
+  auto context = std::make_shared<Context>();
+  auto ascend310 = std::make_shared<mindspore::Ascend310DeviceInfo>();
+  ascend310->SetDeviceID(FLAGS_device_id);
+  ascend310->SetPrecisionMode("allow_fp32_to_fp16");
+  ascend310->SetOpSelectImplMode("high_precision");
+  context->MutableDeviceInfo().push_back(ascend310);
+  mindspore::Graph graph;
+  Serialization::Load(FLAGS_mindir_path, ModelType::kMindIR, &graph);
+
+  Model model;
+  Status ret = model.Build(GraphCell(graph), context);
+  if (ret != kSuccess) {
+    std::cout << "ERROR: Build failed." << std::endl;
+    return 1;
+  }
+
+  std::vector<MSTensor> model_inputs = model.GetInputs();
+  if (model_inputs.empty()) {
+    std::cout << "Invalid model, inputs is empty." << std::endl;
+    return 1;
+  }
+
+  auto input0_files = GetAllFiles(FLAGS_input0_path);
+  auto input1_files = GetAllFiles(FLAGS_input1_path);
+
+  if (input0_files.empty() || input1_files.empty()) {
+    std::cout << "ERROR: input data empty." << std::endl;
+    return 1;
+  }
+
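+  // each entry maps one inference's start time (ms) to its end time (ms);
+  // the loop after the prediction loop averages these to report latency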
+  std::map<double, double> costTime_map;
+  size_t size = input0_files.size();
+
+  for (size_t i = 0; i < size; ++i) {
+    struct timeval start = {0};
+    struct timeval end = {0};
+    double startTimeMs;
+    double endTimeMs;
+    std::vector<MSTensor> inputs;
+    std::vector<MSTensor> outputs;
+    std::cout << "Start predict input files:" << input0_files[i] << std::endl;
+
+    auto input0 = ReadFileToTensor(input0_files[i]);
+    auto input1 = ReadFileToTensor(input1_files[i]);
+    inputs.emplace_back(model_inputs[0].Name(), model_inputs[0].DataType(), model_inputs[0].Shape(),
+                        input0.Data().get(), input0.DataSize());
+    inputs.emplace_back(model_inputs[1].Name(), model_inputs[1].DataType(), model_inputs[1].Shape(),
+                        input1.Data().get(), input1.DataSize());
+
+    gettimeofday(&start, nullptr);
+    ret = model.Predict(inputs, &outputs);
+    gettimeofday(&end, nullptr);
+    if (ret != kSuccess) {
+      std::cout << "Predict " << input0_files[i] << " failed." << std::endl;
+      return 1;
+    }
+    startTimeMs = (1.0 * start.tv_sec * 1000000 + start.tv_usec) / 1000;
+    endTimeMs = (1.0 * end.tv_sec * 1000000 + end.tv_usec) / 1000;
+    costTime_map.insert(std::pair<double, double>(startTimeMs, endTimeMs));
+    WriteResult(input0_files[i], outputs);
+  }
+  double average = 0.0;
+  int inferCount = 0;
+
+  for (auto iter = costTime_map.begin(); iter != costTime_map.end(); iter++) {
+    double diff = 0.0;
+    diff = iter->second - iter->first;
+    average += diff;
+    inferCount++;
+  }
+  average = average / inferCount;
+  std::stringstream timeCost;
+  timeCost << "NN inference cost average time: "<< average << " ms of infer_count " << inferCount << std::endl;
+  std::cout << "NN inference cost average time: "<< average << "ms of infer_count " << inferCount << std::endl;
+  std::string fileName = "./time_Result" + std::string("/test_perform_static.txt");
+  std::ofstream fileStream(fileName.c_str(), std::ios::trunc);
+  fileStream << timeCost.str();
+  fileStream.close();
+  costTime_map.clear();
+  return 0;
+}
diff --git a/research/audio/jasper/ascend310_infer/src/utils.cc b/research/audio/jasper/ascend310_infer/src/utils.cc
new file mode 100644
index 0000000000000000000000000000000000000000..ea51538fe064b621e47a8081a1821823a2dec1ca
--- /dev/null
+++ b/research/audio/jasper/ascend310_infer/src/utils.cc
@@ -0,0 +1,142 @@
+/**
+ * Copyright 2022 Huawei Technologies Co., Ltd
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <algorithm>
+#include <iostream>
+#include <climits>
+#include <cstdlib>
+#include "inc/utils.h"
+
+using mindspore::MSTensor;
+using mindspore::DataType;
+
+std::vector<std::string> GetAllFiles(std::string_view dirName) {
+  struct dirent *filename;
+  DIR *dir = OpenDir(dirName);
+  if (dir == nullptr) {
+    return {};
+  }
+  std::vector<std::string> res;
+  while ((filename = readdir(dir)) != nullptr) {
+    std::string dName = std::string(filename->d_name);
+    if (dName == "." || dName == ".." || filename->d_type != DT_REG) {
+      continue;
+    }
+    res.emplace_back(std::string(dirName) + "/" + filename->d_name);
+  }
+  std::sort(res.begin(), res.end());
+  for (auto &f : res) {
+    std::cout << "input file: " << f << std::endl;
+  }
+  return res;
+}
+
+int WriteResult(const std::string& imageFile, const std::vector<MSTensor> &outputs) {
+  std::string homePath = "./result_Files";
+  const int INVALID_POINTER = -1;
+  const int ERROR = -2;
+  for (size_t i = 0; i < outputs.size(); ++i) {
+    size_t outputSize;
+    std::shared_ptr<const void> netOutput;
+    netOutput = outputs[i].Data();
+    outputSize = outputs[i].DataSize();
+    int pos = imageFile.rfind('/');
+    std::string fileName(imageFile, pos + 1);
+    fileName.replace(fileName.find('.'), fileName.size() - fileName.find('.'), '_' + std::to_string(i) + ".bin");
+    std::string outFileName = homePath + "/" + fileName;
+    FILE * outputFile = fopen(outFileName.c_str(), "wb");
+    if (outputFile == nullptr) {
+        std::cout << "open result file " << outFileName << " failed" << std::endl;
+        return INVALID_POINTER;
+    }
+    size_t size = fwrite(netOutput.get(), sizeof(char), outputSize, outputFile);
+    if (size != outputSize) {
+        fclose(outputFile);
+        outputFile = nullptr;
+        std::cout << "write result file " << outFileName << " failed, write size[" << size <<
+            "] is smaller than output size[" << outputSize << "], maybe the disk is full." << std::endl;
+        return ERROR;
+    }
+    fclose(outputFile);
+    outputFile = nullptr;
+  }
+  return 0;
+}
+
+mindspore::MSTensor ReadFileToTensor(const std::string &file) {
+  if (file.empty()) {
+    std::cout << "Pointer file is nullptr" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  std::ifstream ifs(file);
+  if (!ifs.good()) {
+    std::cout << "File: " << file << " is not exist" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  if (!ifs.is_open()) {
+    std::cout << "File: " << file << "open failed" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  ifs.seekg(0, std::ios::end);
+  size_t size = ifs.tellg();
+  mindspore::MSTensor buffer(file, mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, size);
+
+  ifs.seekg(0, std::ios::beg);
+  ifs.read(reinterpret_cast<char *>(buffer.MutableData()), size);
+  ifs.close();
+
+  return buffer;
+}
+
+
+DIR *OpenDir(std::string_view dirName) {
+  if (dirName.empty()) {
+    std::cout << " dirName is null ! " << std::endl;
+    return nullptr;
+  }
+  std::string realPath = RealPath(dirName);
+  struct stat s;
+  lstat(realPath.c_str(), &s);
+  if (!S_ISDIR(s.st_mode)) {
+    std::cout << "dirName is not a valid directory !" << std::endl;
+    return nullptr;
+  }
+  DIR *dir;
+  dir = opendir(realPath.c_str());
+  if (dir == nullptr) {
+    std::cout << "Can not open dir " << dirName << std::endl;
+    return nullptr;
+  }
+  std::cout << "Successfully opened the dir " << dirName << std::endl;
+  return dir;
+}
+
+std::string RealPath(std::string_view path) {
+  char realPathMem[PATH_MAX] = {0};
+  char *realPathRet = nullptr;
+  realPathRet = realpath(path.data(), realPathMem);
+
+  if (realPathRet == nullptr) {
+    std::cout << "File: " << path << " is not exist.";
+    return "";
+  }
+
+  std::string realPath(realPathMem);
+  std::cout << path << " realpath is: " << realPath << std::endl;
+  return realPath;
+}
diff --git a/research/audio/jasper/eval.py b/research/audio/jasper/eval.py
index 21503542afd47f241ec0f9dd46c3439cdca0ea30..e76c4e15a7e805caaac6e10ecb32141d91f74613 100644
--- a/research/audio/jasper/eval.py
+++ b/research/audio/jasper/eval.py
@@ -18,6 +18,7 @@ Eval for Japer
 """
 import argparse
 import json
+import os
 import pickle
 import numpy as np
 from src.config import eval_config, symbols, encoder_kw, decoder_kw
@@ -30,13 +31,20 @@ from mindspore.train.serialization import load_checkpoint, load_param_into_net
 parser = argparse.ArgumentParser(description='jasper evaluation')
 parser.add_argument('--pretrain_ckpt', type=str,
                     default='./checkpoint/ckpt_0/jasper10.ckpt', help='Pretrained checkpoint path')
-parser.add_argument('--device_target', type=str, default="GPU", choices=("GPU", "CPU"),
+parser.add_argument('--device_target', type=str, default="GPU", choices=("GPU", "Ascend", "CPU"),
-                    help='Device target, support GPU and CPU, Default: GPU')
+                    help='Device target, support GPU, CPU and Ascend, Default: GPU')
 args = parser.parse_args()
 
 if __name__ == '__main__':
-    context.set_context(mode=context.GRAPH_MODE,
-                        device_target=args.device_target, save_graphs=False)
+    if args.device_target == "Ascend":
+        device_id = int(os.getenv('DEVICE_ID', '0'))
+        context.set_context(mode=context.GRAPH_MODE,
+                            device_id=device_id,
+                            device_target=args.device_target,
+                            save_graphs=False)
+    else:
+        context.set_context(mode=context.GRAPH_MODE,
+                            device_target=args.device_target, save_graphs=False)
     config = eval_config
     with open(config.DataConfig.labels_path) as label_file:
         labels = json.load(label_file)
diff --git a/research/audio/jasper/export.py b/research/audio/jasper/export.py
index 58e6d9ae1d7ac3943ffed654fc5cfd7e2d4f6c0e..268cbd18c63938bc51f2e07165d7ae4036ffd2c2 100644
--- a/research/audio/jasper/export.py
+++ b/research/audio/jasper/export.py
@@ -19,17 +19,16 @@ export checkpoint file to mindir model
 import json
 import argparse
 import numpy as np
-import mindspore as ms
 from mindspore import context, Tensor
 from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
 from src.config import train_config, encoder_kw, decoder_kw
-from src.model import Jasper
+from src.model_test import Jasper, PredictWithSoftmax
 
 parser = argparse.ArgumentParser(
     description='Export DeepSpeech model to Mindir')
 parser.add_argument('--pre_trained_model_path', type=str,
                     default='', help=' existed checkpoint path')
-parser.add_argument('--device_target', type=str, default="GPU", choices=("GPU", "CPU"),
+parser.add_argument('--device_target', type=str, default="GPU", choices=("GPU", "CPU", "Ascend"),
-                    help='Device target, support GPU and CPU, Default: GPU')
+                    help='Device target, support GPU, CPU and Ascend, Default: GPU')
 args = parser.parse_args()
 
@@ -40,8 +39,8 @@ if __name__ == '__main__':
     with open(config.DataConfig.labels_path) as label_file:
         labels = json.load(label_file)
 
-    jasper_net = Jasper(encoder_kw=encoder_kw,
-                        decoder_kw=decoder_kw).to_float(ms.float16)
+    jasper_net = PredictWithSoftmax(
+        Jasper(encoder_kw=encoder_kw, decoder_kw=decoder_kw))
 
     param_dict = load_checkpoint(args.pre_trained_model_path)
     load_param_into_net(jasper_net, param_dict)
diff --git a/research/audio/jasper/postprocess.py b/research/audio/jasper/postprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..40e97d490d5932b2755c92aa10e3f0dfb4dce754
--- /dev/null
+++ b/research/audio/jasper/postprocess.py
@@ -0,0 +1,85 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Jasper postprocess"""
+import os
+import numpy as np
+from src.config import infer_config, symbols
+from src.decoder import GreedyDecoder
+import mindspore
+from mindspore import Tensor
+
+
+def format_text(s):
+    # drop the final token and any trailing filler tokens that greedy decoding
+    # tends to emit at the padded tail of the audio
+    s = s.strip().split()
+    s.pop()
+    while s and s[-1] in ('e', 'ee', 'eee', 'a', 'o'):
+        s.pop()
+    return ' '.join(s)
+
+
+def compute_wer(s_file, t_file, decoder):
+    total_wer, num_tokens = 0, 0
+    with open(s_file, 'r', encoding='utf-8') as s, open(t_file, 'r', encoding='utf-8') as t:
+        for trans, refer in zip(s, t):
+            wer_inst = decoder.wer(trans.strip(), refer.strip())
+            total_wer += wer_inst
+            num_tokens += len(refer.strip().split())
+    wer = float(total_wer) / num_tokens
+    return wer
+
+
+def generate_output():
+    '''
+    Generate output and write to file.
+    '''
+    config = infer_config
+    if config.LMConfig.decoder_type == 'greedy':
+        decoder = GreedyDecoder(labels=symbols, blank_index=len(symbols) - 1)
+    else:
+        raise NotImplementedError("Only greedy decoder is supported now")
+
+    # get model out from .bin files
+    predictions = []
+    file_num = int(len(os.listdir(config.result_dir)) / 2)
+    for i in range(file_num):
+        out = "jasper_bs_" + str(
+            config.batch_size_infer) + "_" + str(i) + "_0.bin"
+        out_size = "jasper_bs_" + str(
+            config.batch_size_infer) + "_" + str(i) + "_1.bin"
+        out = np.fromfile(os.path.join(config.result_dir, out),
+                          np.float32).reshape(1, -1, 29)
+        out_size = np.fromfile(os.path.join(config.result_dir, out_size),
+                               np.int32).reshape(-1)
+        predictions.append([out, out_size])
+
+    # decode and write to file
+    f = open(config.post_out, 'w')
+    for out, _ in predictions:
+        out = Tensor(out, dtype=mindspore.float32)
+        decoded_output, _ = decoder.decode(out)
+        for d_output in decoded_output:
+            transcript = d_output[0].lower()
+            f.write(format_text(transcript) + '\n')
+
+    f.close()
+    print("Finished inference.")
+    print("You can see the result in {}.".format(config.post_out))
+    wer = compute_wer(config.post_out, 'target.txt', decoder)
+    print('The average WER: ', wer)
+
+
+if __name__ == "__main__":
+    generate_output()
diff --git a/research/audio/jasper/preprocess.py b/research/audio/jasper/preprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a912eda929584b2e85c3c4e10d15b261872c219
--- /dev/null
+++ b/research/audio/jasper/preprocess.py
@@ -0,0 +1,65 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""310 data_processing"""
+import os
+from src.dataset import create_eval_dataset
+from src.config import infer_config, symbols
+from src.decoder import GreedyDecoder
+import numpy as np
+
+
+def preprocess_data():
+    config = infer_config
+
+    ds = create_eval_dataset(data_dir=config.DataConfig.Data_dir,
+                             manifest_filepath=config.DataConfig.test_manifest,
+                             labels=symbols,
+                             batch_size=config.batch_size_infer,
+                             train_mode=False)
+
+    target_decoder = GreedyDecoder(symbols, blank_index=len(symbols)-1)
+
+    feature_path = os.path.join(config.result_path, "00_data")
+    length_path = os.path.join(config.result_path, "01_data")
+    os.makedirs(feature_path)
+    os.makedirs(length_path)
+
+    with open('target.txt', 'w', encoding='utf-8') as f:
+        for i, data in enumerate(ds.create_dict_iterator(output_numpy=True)):
+            file_name = "jasper_bs_" + str(
+                config.batch_size_infer) + "_" + str(i) + ".bin"
+            data['inputs'].tofile(os.path.join(feature_path, file_name))
+            data['input_length'].tofile(os.path.join(length_path, file_name))
+
+            target_indices, targets = data['target_indices'], data['targets']
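+            # 'targets' holds the flattened labels of the whole batch and
+            # target_indices[:, 0] marks which utterance each label belongs to;
+            # split the flat array back into one label list per utterance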
+            split_targets = []
+            start, count, last_id = 0, 0, 0
+            for j in range(np.shape(targets)[0]):
+                if target_indices[j, 0] == last_id:
+                    count += 1
+                else:
+                    split_targets.append(list(targets[start:count]))
+                    last_id += 1
+                    start = count
+                    count += 1
+            split_targets.append(list(targets[start:]))
+            target_strings = target_decoder.convert_to_strings(split_targets)
+            f.write(' '.join(target_strings[0]) + '\n')
+
+    print("=" * 20, "export bin files finished", "=" * 20)
+
+
+if __name__ == '__main__':
+    preprocess_data()
diff --git a/research/audio/jasper/scripts/run_distribute_train_ascend.sh b/research/audio/jasper/scripts/run_distribute_train_ascend.sh
new file mode 100644
index 0000000000000000000000000000000000000000..30a36c1d40fd39d9eb884371ae033ec659706f76
--- /dev/null
+++ b/research/audio/jasper/scripts/run_distribute_train_ascend.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# -lt 3 ]; then
+    echo "Usage: bash scripts/run_distributed_train_ascend.sh 
+    [RANK_SIZE] [BEGIN] [RANK_TABLE_FILE]"
+exit 1
+fi
+
+log_dir="./log"
+
+if [ ! -d $log_dir ]; then
+    mkdir log
+fi
+
+RANK_SIZE=$1  # num of cards
+BEGIN=$2      # begin of the device, default 0
+export RANK_TABLE_FILE=$3
+export RANK_SIZE=$RANK_SIZE
+
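+# optional: set pre_path to a checkpoint file to resume training from it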
+pre_path=
+
+for((i=$BEGIN;i<RANK_SIZE+BEGIN;i++))
+do
+    let rank=$i-$BEGIN
+    export RANK_ID=$rank
+    export DEVICE_ID=$i
+    echo "start training for rank $rank, device $DEVICE_ID"
+    if [ ! $pre_path ]; then
+        python train.py --is_distributed --device_target 'Ascend' > log/distributed-train.log.$i 2>&1 &
+    else
+        python train.py --pre_trained_model_path $pre_path --is_distributed --device_target 'Ascend' > log/distributed-train.log.$i 2>&1 &
+    fi
+done
diff --git a/research/audio/jasper/scripts/run_eval_ascend.sh b/research/audio/jasper/scripts/run_eval_ascend.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c00ce147cca3cf751a97b1c5275391eca5879840
--- /dev/null
+++ b/research/audio/jasper/scripts/run_eval_ascend.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# -lt 2 ]; then
+    echo "Usage: bash scripts/run_eval_ascend.sh 
+    [PRETRAIN_CKPT] [DEVICE_ID]"
+exit 1
+fi
+
+log_dir="./eval_out"
+
+if [ ! -d $log_dir ]; then
+    mkdir eval_out
+fi
+
+DEVICE_ID=$1
+PRETRAIN_CKPT=$2
+
+DEVICE_ID=$DEVICE_ID python eval.py --device_target 'Ascend' --pretrain_ckpt $PRETRAIN_CKPT > eval_out/eval.log 2>&1 &
diff --git a/research/audio/jasper/scripts/run_eval_gpu.sh b/research/audio/jasper/scripts/run_eval_gpu.sh
index fc4edfe349893db8f941623b06ec048b894b9b29..aaac2dd593d729107ddb067528676f0c5d52b385 100644
--- a/research/audio/jasper/scripts/run_eval_gpu.sh
+++ b/research/audio/jasper/scripts/run_eval_gpu.sh
@@ -13,8 +13,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
+
 DEVICE_ID=$1
 PATH_CHECKPOINT=$2
 export CUDA_VISIBLE_DEVICES=$DEVICE_ID
 python ./eval.py --pretrain_ckpt $PATH_CHECKPOINT \
---device_target 'GPU' > eval.log 2>&1 &
\ No newline at end of file
+--device_target 'GPU' > eval.log 2>&1 &
diff --git a/research/audio/jasper/scripts/run_infer_310.sh b/research/audio/jasper/scripts/run_infer_310.sh
new file mode 100644
index 0000000000000000000000000000000000000000..aa153eb15f00b032b8a08e5730e065753bf4ee5b
--- /dev/null
+++ b/research/audio/jasper/scripts/run_infer_310.sh
@@ -0,0 +1,119 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [[ $# -lt 2 || $# -gt 3 ]]; then
+  echo "Usage: bash ./scripts/run_infer_310.sh [MODEL_PATH] [NEED_PREPROCESS] [DEVICE_ID]
+    NEED_PREPROCESS means weather need preprocess or not, it's value is 'y' or 'n'.
+    DEVICE_ID is optional, it can be set by environment variable device_id, otherwise the value is zero"
+  exit 1
+fi
+
+get_real_path() {
+  if [ -z "$1" ]; then
+    echo ""
+  elif [ "${1:0:1}" == "/" ]; then
+    echo "$1"
+  else
+    echo "$(realpath -m $PWD/$1)"
+  fi
+}
+
+model=$(get_real_path $1)
+if [ "$2" == "y" ] || [ "$2" == "n" ]; then
+  need_preprocess=$2
+else
+  echo "weather need preprocess or not, it's value must be in [y, n]"
+  exit 1
+fi
+
+device_id=0
+if [ $# == 3 ]; then
+  device_id=$3
+fi
+
+echo "mindir name: "$model
+echo "need preprocess: "$need_preprocess
+echo "device id: "$device_id
+
+export ASCEND_HOME=/usr/local/Ascend/
+if [ -d ${ASCEND_HOME}/ascend-toolkit ]; then
+    export PATH=$ASCEND_HOME/fwkacllib/bin:$ASCEND_HOME/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/atc/bin:$PATH
+    export LD_LIBRARY_PATH=$ASCEND_HOME/fwkacllib/lib64:/usr/local/lib:$ASCEND_HOME/ascend-toolkit/latest/atc/lib64:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export TBE_IMPL_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe
+    export PYTHONPATH=$ASCEND_HOME/fwkacllib/python/site-packages:${TBE_IMPL_PATH}:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp
+else
+    export ASCEND_HOME=/usr/local/Ascend/latest/
+    export PATH=$ASCEND_HOME/fwkacllib/bin:$ASCEND_HOME/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH
+    export LD_LIBRARY_PATH=$ASCEND_HOME/fwkacllib/lib64:/usr/local/lib:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export PYTHONPATH=$ASCEND_HOME/fwkacllib/python/site-packages:$ASCEND_HOME/atc/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/opp
+fi
+
+function preprocess_data() {
+  if [ -d preprocess_Result ]; then
+    rm -rf ./preprocess_Result
+  fi
+  mkdir preprocess_Result
+  python ./preprocess.py
+}
+
+function compile_app() {
+  cd ./ascend310_infer || exit
+  bash build.sh &>build.log
+}
+
+function infer() {
+  cd - || exit
+  if [ -d result_Files ]; then
+    rm -rf ./result_Files
+  fi
+  if [ -d time_Result ]; then
+    rm -rf ./time_Result
+  fi
+  mkdir result_Files
+  mkdir time_Result
+
+  ./ascend310_infer/out/main --mindir_path=$model --input0_path=./preprocess_Result/00_data --input1_path=./preprocess_Result/01_data --device_id=$device_id &> infer.log
+
+}
+
+function cal_acc() {
+  python ./postprocess.py &> acc.log
+}
+
+if [ $need_preprocess == "y" ]; then
+  preprocess_data
+  if [ $? -ne 0 ]; then
+    echo "preprocess dataset failed"
+    exit 1
+  fi
+fi
+compile_app
+if [ $? -ne 0 ]; then
+  echo "compile app code failed"
+  exit 1
+fi
+infer
+if [ $? -ne 0 ]; then
+  echo " execute inference failed"
+  exit 1
+fi
+cal_acc
+if [ $? -ne 0 ]; then
+  echo "calculate accuracy failed"
+  exit 1
+fi
diff --git a/research/audio/jasper/scripts/run_standalone_train_ascend.sh b/research/audio/jasper/scripts/run_standalone_train_ascend.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6db0e70b748295ac4c86a8eb5498b284d26ec7ec
--- /dev/null
+++ b/research/audio/jasper/scripts/run_standalone_train_ascend.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ $# -lt 1 ]; then
+    echo "Usage: bash scripts/run_standalone_train_ascend.sh [DEVICE_ID]"
+exit 1
+fi
+
+log_dir="./log"
+
+if [ ! -d $log_dir ]; then
+    mkdir log
+fi
+
+DEVICE_ID=$1
+
+python train.py --device_target 'Ascend' --device_id $DEVICE_ID > log/train_standalone.log 2>&1 &
diff --git a/research/audio/jasper/scripts/run_standalone_train_cpu.sh b/research/audio/jasper/scripts/run_standalone_train_cpu.sh
index ac83a8ad060e22eb8ec3a94ee15cc122278bad02..9ff50b46a1b5edd8c68be3535051dd0e15886d45 100644
--- a/research/audio/jasper/scripts/run_standalone_train_cpu.sh
+++ b/research/audio/jasper/scripts/run_standalone_train_cpu.sh
@@ -15,4 +15,3 @@
 # ============================================================================
 
 python ./train.py --device_target 'CPU' > train.log 2>&1 &
-
diff --git a/research/audio/jasper/scripts/run_standalone_train_gpu.sh b/research/audio/jasper/scripts/run_standalone_train_gpu.sh
index 4782ab3513504ec09456dffc51d16ad6e317af4d..92ac191a47d91bb5bfbd08f6d86630f25edf2878 100644
--- a/research/audio/jasper/scripts/run_standalone_train_gpu.sh
+++ b/research/audio/jasper/scripts/run_standalone_train_gpu.sh
@@ -16,4 +16,3 @@
 
 DEVICE_ID=$1
 CUDA_VISIBLE_DEVICES=$DEVICE_ID python ./train.py --device_target 'GPU' > train.log 2>&1 &
-
diff --git a/research/audio/jasper/src/config.py b/research/audio/jasper/src/config.py
index 05985f92953481482bdf0fc0a94319786f6ec7f8..4a76e10ee3e0d6a826d973730c50f1f2467b2edd 100644
--- a/research/audio/jasper/src/config.py
+++ b/research/audio/jasper/src/config.py
@@ -27,7 +27,7 @@ train_config = ed({
 
     "TrainingConfig": {
         "epochs": 440,
-        "loss_scale": 128.0
+        "loss_scale": 128.0,
     },
 
     "DataConfig": {
@@ -112,6 +112,32 @@ eval_config = ed({
 
 })
 
+infer_config = ed({
+    "DataConfig": {
+        "Data_dir":
+        '/home/dataset/LibriSpeech',
+        "test_manifest":
+        ['/home/dataset/LibriSpeech/librispeech-test-clean-wav.json'],
+    },
+    "LMConfig": {
+        "decoder_type": "greedy",
+        "lm_path": './3-gram.pruned.3e-7.arpa',
+        "top_paths": 1,
+        "alpha": 1.818182,
+        "beta": 0,
+        "cutoff_top_n": 40,
+        "cutoff_prob": 1.0,
+        "beam_width": 1024,
+        "lm_workers": 4
+    },
+    "batch_size_infer": 1,
+    # for preprocess
+    "result_path": "./preprocess_Result",
+    # for postprocess
+    "result_dir": "./result_Files",
+    "post_out": "./infer_output.txt"
+})
+
 
 def default_args(klass):
     sig = inspect.signature(klass.__init__)
diff --git a/research/audio/jasper/src/dataset.py b/research/audio/jasper/src/dataset.py
index 21719b70dc067f6e2c8a1ec04fb52c433738591c..fd210c8a3b7c02a332c2a1af53a7b614f7979a65 100644
--- a/research/audio/jasper/src/dataset.py
+++ b/research/audio/jasper/src/dataset.py
@@ -23,7 +23,7 @@ import mindspore.dataset.engine as de
 from src.audio import AudioSegment, SpeedPerturbation
 from src.text import _clean_text, punctuation_map
 
-TRAIN_INPUT_PAD_LENGTH = 1300
+TRAIN_INPUT_PAD_LENGTH = 1500
 TRAIN_LABEL_PAD_LENGTH = 360
 TEST_INPUT_PAD_LENGTH = 3500
 
diff --git a/research/audio/jasper/src/decoder.py b/research/audio/jasper/src/decoder.py
index 541095512293ec71a94a1624d786532b8434d7db..b893e4133464f738615d9fc0879adfb0e69fdfeb 100644
--- a/research/audio/jasper/src/decoder.py
+++ b/research/audio/jasper/src/decoder.py
@@ -121,7 +121,8 @@ class GreedyDecoder(Decoder):
 
     def decode(self, probs, sizes=None):
         probs = probs.asnumpy()
-        sizes = sizes.asnumpy()
+        if sizes is not None:
+            sizes = sizes.asnumpy()
 
         max_probs = np.argmax(probs, axis=-1)
         strings, offsets = self.convert_to_strings(
diff --git a/research/audio/jasper/src/greedydecoder.py b/research/audio/jasper/src/greedydecoder.py
index 3393d97898d93c34b3f58fc74733c0da851fa559..c2d75ab5ed1eca628c75f03c401d176cf795f228 100644
--- a/research/audio/jasper/src/greedydecoder.py
+++ b/research/audio/jasper/src/greedydecoder.py
@@ -45,7 +45,8 @@ class MSGreedyDecoder(GreedyDecoder):
 
     def decode(self, probs, sizes=None):
         probs = probs.asnumpy()
-        sizes = sizes.asnumpy()
+        if sizes is not None:
+            sizes = sizes.asnumpy()
 
         max_probs = np.argmax(probs, axis=-1)
         strings, offsets = self.convert_to_strings(max_probs, sizes, remove_repetitions=True, return_offsets=True)
diff --git a/research/audio/jasper/src/model.py b/research/audio/jasper/src/model.py
index 2767c968f35e01d2d84de8aec7443cb9dd6cbf37..f1294543cc44b503af3bbd7d50d9173c5eaa1e44 100644
--- a/research/audio/jasper/src/model.py
+++ b/research/audio/jasper/src/model.py
@@ -30,7 +30,7 @@ activations = {
     "elu": nn.ELU,
 }
 
-TRAIN_INPUT_PAD_LENGTH = 1300
+TRAIN_INPUT_PAD_LENGTH = 1500
 TRAIN_LABEL_PAD_LENGTH = 350
 TEST_INPUT_PAD_LENGTH = 3500
 
@@ -289,9 +289,13 @@ class NetWithLossClass(nn.Cell):
     NetWithLossClass definition
     """
 
-    def __init__(self, network):
+    def __init__(self, network, ascend=False):
         super(NetWithLossClass, self).__init__(auto_prefix=False)
-        self.loss = P.CTCLoss(ctc_merge_repeated=True)
+        if ascend:
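+            # ignore_longer_outputs_than_inputs lets CTCLoss skip samples whose
+            # target sequence is longer than the model output instead of raising an error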
+            self.loss = P.CTCLoss(ctc_merge_repeated=True,
+                                  ignore_longer_outputs_than_inputs=True)
+        else:
+            self.loss = P.CTCLoss(ctc_merge_repeated=True)
         self.network = network
         self.ReduceMean_false = P.ReduceMean(keep_dims=False)
         self.squeeze_op = P.Squeeze(0)
diff --git a/research/audio/jasper/src/model_test.py b/research/audio/jasper/src/model_test.py
index 2ee3c95b02b0770e39b8db77baf39988c4685bbc..b34f859f7fe0ace97045a6275038adfbda4d67a8 100644
--- a/research/audio/jasper/src/model_test.py
+++ b/research/audio/jasper/src/model_test.py
@@ -27,7 +27,7 @@ activations = {
     "elu": nn.ELU,
 }
 
-TRAIN_INPUT_PAD_LENGTH = 1300
+TRAIN_INPUT_PAD_LENGTH = 1500
 TRAIN_LABEL_PAD_LENGTH = 350
 TEST_INPUT_PAD_LENGTH = 3500
 
diff --git a/research/audio/jasper/train.py b/research/audio/jasper/train.py
index 7fd8daa509f62678adc99b95be91882dfbfe715d..cdbe88391b6b95e13142ffed8b892e82db6ed355 100644
--- a/research/audio/jasper/train.py
+++ b/research/audio/jasper/train.py
@@ -42,8 +42,9 @@ parser.add_argument('--pre_trained_model_path', type=str,
                     default='', help='Pretrained checkpoint path')
 parser.add_argument('--is_distributed', action="store_true",
                     default=False, help='Distributed training')
-parser.add_argument('--device_target', type=str, default="GPU", choices=("GPU", "CPU"),
+parser.add_argument('--device_target', type=str, default="GPU", choices=("GPU", "CPU", "Ascend"),
-                    help='Device target, support GPU and CPU, Default: GPU')
+                    help='Device target, support GPU, CPU and Ascend, Default: GPU')
+parser.add_argument('--device_id', type=int, default=0, help='Device ID')
 args = parser.parse_args()
 
 ms.set_seed(1)
@@ -56,15 +57,20 @@ if __name__ == '__main__':
     data_sink = False
     context.set_context(mode=context.GRAPH_MODE,
                         device_target=args.device_target, save_graphs=False)
-    if args.device_target == "GPU":
-        context.set_context(enable_graph_kernel=False)
+
     if args.is_distributed:
         init()
+        context.reset_auto_parallel_context()
+        context.set_auto_parallel_context(
+            parallel_mode=ParallelMode.DATA_PARALLEL,
+            device_num=get_group_size(),
+            gradients_mean=True)
         rank_id = get_rank()
         group_size = get_group_size()
-        context.reset_auto_parallel_context()
-        context.set_auto_parallel_context(device_num=get_group_size(), parallel_mode=ParallelMode.DATA_PARALLEL,
-                                          gradients_mean=True)
+    else:
+        if args.device_target == "Ascend":
+            device_id = int(args.device_id)
+            context.set_context(device_id=device_id)
 
     with open(config.DataConfig.labels_path) as label_file:
         labels = json.load(label_file)
@@ -81,7 +87,7 @@ if __name__ == '__main__':
     jasper_net = Jasper(encoder_kw=encoder_kw,
                         decoder_kw=decoder_kw).to_float(ms.float16)
 
-    loss_net = NetWithLossClass(jasper_net)
+    loss_net = NetWithLossClass(jasper_net, ascend=(args.device_target == "Ascend"))
     init_weights(loss_net)
     weights = ParameterTuple(jasper_net.trainable_params())
     optimizer = AdamWeightDecay(weights, learning_rate=lr, eps=config.OptimConfig.epsilon, weight_decay=1e-3)