diff --git a/official/cv/MCNN/infer/Dockerfile b/official/cv/MCNN/infer/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..e413a3d0274e444b03fd1d72af160025327eb6a6
--- /dev/null
+++ b/official/cv/MCNN/infer/Dockerfile
@@ -0,0 +1,24 @@
+ARG FROM_IMAGE_NAME
+FROM ${FROM_IMAGE_NAME}
+
+# Configure the proxy used during the image build
+ENV http_proxy="http://192.168.88.254:8080"
+ENV https_proxy="http://192.168.88.254:8080"
+
+# Add the users and user group; use the current user's username and ID
+RUN useradd -d /home/hwMindX -u 9000 -m -s /bin/bash hwMindX && \
+    useradd -d /home/HwHiAiUser -u 1000 -m -s /bin/bash HwHiAiUser && \
+    useradd -d /home/sjtu_liu -u 1001 -m -s /bin/bash sjtu_liu -g HwHiAiUser && \
+    usermod -a -G HwHiAiUser hwMindX
+# Add a Python symbolic link
+RUN ln -s  /usr/local/python3.7.5/bin/python3.7 /usr/bin/python
+# 瀹夎鐩稿叧渚濊禆鍖咃紝鏍规嵁瀹為檯妯″瀷渚濊禆淇敼
+RUN apt-get update && \
+    apt-get install libglib2.0-dev -y || \
+    rm -rf /var/lib/dpkg/info && \
+    mkdir /var/lib/dpkg/info && \
+    apt-get install libglib2.0-dev dos2unix -y && \
+    pip install pytest-runner==5.3.0
+# 瀹夎Python渚濊禆鍖咃紝鏍规嵁瀹為檯妯″瀷渚濊禆淇敼
+COPY requirements.txt .
+RUN pip3.7 install -r requirements.txt
\ No newline at end of file
diff --git a/official/cv/MCNN/infer/README.md b/official/cv/MCNN/infer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5eb3bcebc8cfde7862f540647146dfea6796dd31
--- /dev/null
+++ b/official/cv/MCNN/infer/README.md
@@ -0,0 +1,658 @@
+# MCNN Model Deliverables - Zhongzhi (Crowd Intelligence)
+
+## Basic Deliverable Information
+
+**Publisher**: Huawei
+
+**Application Domain**: Crowd Counting
+
+**Version**: 1.1
+
+**Modified**: 2020.12.4
+
+**Size**: 501 KB (ckpt) / 541 KB (air) / 916 KB (om)
+
+**Framework**: MindSpore
+
+**Model Format**: ckpt/air/om
+
+**Precision**: Mixed/FP16
+
+**Processor**: Ascend 910 / Ascend 310
+
+**Categories**: Released
+
+Released (release model): recommended for use on Ascend; supports the end-to-end training-to-inference workflow.
+
+[Class A: Research; Class B: performance < 1.8x uses Official, > 1.8x uses Benchmark; Class C: Released]
+
+**Description**: MCNN is a multi-column convolutional neural network that can accurately estimate the number of people in a single image taken from almost any viewpoint.
+
+## Overview
+
+### Brief Description
+
+MCNN consists of three parallel CNN columns whose filters have local receptive fields of different sizes. For simplicity, apart from the size and number of filters, all columns use the same structure (conv-pooling-conv-pooling). Max pooling is applied over each 2x2 region, and ReLU is used as the activation function because of its good performance for CNNs.
+
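+A compressed MindSpore-style sketch of this three-column layout is shown below. It is an illustration only (two conv-pooling stages per column and abbreviated channel counts), not the delivered network definition; see the model source for the exact layer layout.
+
+``` python
+import mindspore.nn as nn
+import mindspore.ops as ops
+
+class MCNNSketch(nn.Cell):
+    """Three parallel columns with large/medium/small receptive fields, fused by a 1x1 conv."""
+    def __init__(self):
+        super().__init__()
+        self.branch_large = nn.SequentialCell([      # large receptive field: 9x9 then 7x7 filters
+            nn.Conv2d(1, 16, 9, pad_mode='same'), nn.ReLU(), nn.MaxPool2d(2, 2),
+            nn.Conv2d(16, 32, 7, pad_mode='same'), nn.ReLU(), nn.MaxPool2d(2, 2)])
+        self.branch_medium = nn.SequentialCell([     # medium receptive field: 7x7 then 5x5 filters
+            nn.Conv2d(1, 20, 7, pad_mode='same'), nn.ReLU(), nn.MaxPool2d(2, 2),
+            nn.Conv2d(20, 40, 5, pad_mode='same'), nn.ReLU(), nn.MaxPool2d(2, 2)])
+        self.branch_small = nn.SequentialCell([      # small receptive field: 5x5 then 3x3 filters
+            nn.Conv2d(1, 24, 5, pad_mode='same'), nn.ReLU(), nn.MaxPool2d(2, 2),
+            nn.Conv2d(24, 48, 3, pad_mode='same'), nn.ReLU(), nn.MaxPool2d(2, 2)])
+        self.concat = ops.Concat(axis=1)
+        self.fuse = nn.Conv2d(32 + 40 + 48, 1, 1, pad_mode='same')  # 1x1 conv -> density map
+
+    def construct(self, x):                          # x: N x 1 x H x W grayscale image
+        features = self.concat((self.branch_large(x), self.branch_medium(x), self.branch_small(x)))
+        return self.fuse(features)                   # density map at 1/4 resolution (two 2x2 pools)
+```
+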
+- Reference paper:
+
+    [Yingying Zhang, Desen Zhou, Siqin Chen, Shenghua Gao, Yi Ma. Single-Image Crowd Counting via Multi-Column Convolutional Neural Network](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Zhang_Single-Image_Crowd_Counting_CVPR_2016_paper.pdf)
+
+- Reference implementation: [https://github.com/svishwa/crowdcount-mcnn](https://github.com/svishwa/crowdcount-mcnn)
+
+To obtain the code at a specific commit_id via Git:
+
+``` python
+git clone {repository_url}        # clone the repository
+cd {repository_name}              # enter the model's code repository directory
+git checkout {branch}             # switch to the corresponding branch
+git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+cd {code_path}                    # enter the directory of the model code; not needed if the repository contains only this model
+```
+
+### Default Configuration
+
+- 璁粌闆嗛澶勭悊 :
+
+    鍘熷鏁版嵁闆嗙殑姣忓紶鍥剧墖灏哄鍚勪笉鐩稿悓
+
+    棰勫鐞嗗仛鐨勪簨鎯呮槸灏嗚繖浜涘浘鐗囩殑闀垮閮借皟鏁村埌256
+
+- 娴嬭瘯闆嗛澶勭悊 :
+
+    鍘熷娴嬭瘯闆嗙殑姣忓紶鍥剧墖灏哄鍚勪笉鐩稿悓
+
+    棰勫鐞嗗仛鐨勪簨鎯呮槸灏嗚繖浜涘浘鐗囩殑闀垮閮借皟鏁村埌1024锛堜负浜嗗拰om妯″瀷瀵瑰簲锛�
+
+- 璁粌瓒呭弬 :
+
+    Batch size : 1
+
+    Learning rate : 0.000028
+
+    Momentum : 0.0
+
+    Epoch_size : 800
+
+    Buffer_size : 1000
+
+    Save_checkpoint_steps : 1
+
+    Keep_checkpoint_max : 10
+
+    Air_name : "mcnn"
+
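+The sketch below is not one of the delivered scripts; it shows how a grayscale test image can be brought to the fixed 1024x1024 OM input by zero-padding, mirroring `infer/mxbase/src/Mcnn.cpp::PadImage`, and it assumes the image is no larger than 1024 pixels on either side.
+
+``` python
+import cv2
+import numpy as np
+
+def pad_to_1024(img_path):
+    """Center a grayscale test image in a 1024x1024 zero canvas and return an NCHW float32 array."""
+    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)    # MCNN takes single-channel input
+    h, w = img.shape
+    canvas = np.zeros((1024, 1024), dtype=np.float32)   # fixed network input size
+    top, left = 512 - h // 2, 512 - w // 2               # center the original image
+    canvas[top:top + h, left:left + w] = img
+    return canvas[np.newaxis, np.newaxis, :, :]          # shape: 1 x 1 x 1024 x 1024
+```
+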
+### 鏀寔鐗规€�
+
+鏀寔鐨勭壒鎬у寘鎷細1銆佸垎甯冨紡骞惰璁粌銆�
+
+### Distributed Training
+
+MindSpore supports data parallelism and auto parallelism. Auto parallelism is a distributed parallel mode in which MindSpore fuses data parallelism, model parallelism, and hybrid parallelism; it automatically builds a cost model and selects a parallel mode for the user. A related code example:
+
+``` python
+context.set_auto_parallel_context(device_num=args_opt.device_num, parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
+```
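+
+A minimal sketch of how this call is typically combined with communication initialization (assuming the MindSpore 1.x API on an Ascend backend):
+
+``` python
+from mindspore import context
+from mindspore.communication.management import init, get_group_size
+from mindspore.context import ParallelMode
+
+context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+init()                                    # initialize HCCL communication across devices
+device_num = get_group_size()
+context.set_auto_parallel_context(device_num=device_num,
+                                  parallel_mode=ParallelMode.DATA_PARALLEL,
+                                  gradients_mean=True)
+```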
+
+## Preparation
+
+### Inference Environment Preparation
+
+- 纭欢鐜銆佸紑鍙戠幆澧冨拰杩愯鐜鍑嗗璇峰弬瑙乕銆奀ANN 杞欢瀹夎鎸囧崡](https://support.huawei.com/enterprise/zh/ascend-computing/cann-pid-251168373?category=installation-upgrade)銆嬨€�
+- 瀹夸富鏈轰笂闇€瑕佸畨瑁匘ocker骞剁櫥褰昜Ascend Hub涓績](https://ascendhub.huawei.com/#/home)鑾峰彇闀滃儚銆�
+
+    褰撳墠妯″瀷鏀寔鐨勯暅鍍忓垪琛ㄥ涓嬭〃鎵€绀恒€�
+
+    **琛� 1**  闀滃儚鍒楄〃
+
+    | Image name | Image version | Matching CANN version |
+    | ---------- | ------------- | --------------------- |
+    | ARM/x86: [infer-modelzoo](https://ascendhub.huawei.com/#/detail/infer-modelzoo) | 21.0.2 | [5.0.2](https://www.hiascend.com/software/cann/commercial) |
+
+### Source Code Description
+
+Scripts and sample code:
+
+``` python
+/MCNN
+├── infer                          # new files for the MindX high-performance pre-trained model
+│   ├── convert                    # OM model conversion script
+│   │   └── convert_om.sh
+│   ├── model                      # stores the model
+│   ├── test_data                  # stores the dataset
+│   ├── mxbase                     # inference based on mxBase
+│   │   ├── src
+│   │   │   ├── Mcnn.cpp
+│   │   │   ├── Mcnn.h
+│   │   │   └── main.cpp
+│   │   ├── output                 # output directory for results
+│   │   ├── CMakeLists.txt
+│   │   └── build.sh
+│   ├── sdk                        # inference based on the sdk.run package; same layout for the C++ implementation
+│   │   ├── out                    # output directory for results
+│   │   ├── main.py
+│   │   ├── mcnn.pipeline
+│   │   └── run.sh
+│   └── docker_start_infer.sh      # script to start the container
+```
+
+## Inference
+
+- **[Prepare Inference Data](#prepare-inference-data)**
+
+- **[Model Conversion](#model-conversion)**
+
+- **[mxBase Inference](#mxbase-inference)**
+
+- **[MindX SDK Inference](#mindx-sdk-inference)**
+
+### Prepare Inference Data
+
+鍑嗗妯″瀷杞崲鍜屾ā鍨嬫帹鐞嗘墍闇€鐩綍鍙婃暟鎹€�
+
+1. Download the source code package.
+
+    Click "Download Model Script" and "Download Model" to download the required packages.
+
+2. Upload the source code to any directory on the inference server and unzip it (for example, "/home/MCNN"). The files obtained in the "Download Model Script" step need a format conversion; run the following commands.
+
+    ```
+    # run on the inference server
+    unzip MCNN_Mindspore_{version}_code.zip
+    cd {code_unzip_path}/MCNN_MindSpore_{version}_code/infer && dos2unix `find .`
+    ```
+
+3. Prepare the data.
+
+    Since all subsequent inference runs inside the container, the images, dataset, model files, and code used for inference must be placed under the same data path. The examples below use "/home/MCNN".
+
+    ```
+    ..
+    /MCNN
+    ├── infer                          # new files for the MindX high-performance pre-trained model
+    │   ├── convert                    # OM model conversion script
+    │   │   └── convert_om.sh
+    │   ├── model                      # stores the model
+    │   ├── test_data                  # stores the dataset
+    │   ├── mxbase                     # inference based on mxBase
+    │   │   ├── src
+    │   │   │   ├── Mcnn.cpp
+    │   │   │   ├── Mcnn.h
+    │   │   │   └── main.cpp
+    │   │   ├── output                 # output directory for results
+    │   │   ├── CMakeLists.txt
+    │   │   └── build.sh
+    │   ├── sdk                        # inference based on the sdk.run package; same layout for the C++ implementation
+    │   │   ├── out                    # output directory for results
+    │   │   ├── main.py
+    │   │   ├── mcnn.pipeline
+    │   │   └── run.sh
+    │   └── docker_start_infer.sh      # script to start the container
+    ```
+
+    The AIR model can be generated by conversion after "model training", or obtained via "Download Model".
+
+    Place the test_data part of the ShanghaiTech Part A dataset under the "/MCNN/infer/" directory.
+
+    Dataset link: https://pan.baidu.com/s/185jBeL91R85OUcbeARP9Sg  Extraction code: 5q9v
+
+4. 鍑嗗mxbase鐨勮緭鍑虹洰褰�/output銆傝鐩綍涓嬪瓨鍌ㄦ帹鐞嗙殑涓棿缁撴灉銆�
+
+    ```
+    # run on the inference server
+    cd MCNN_Mindspore_{version}_code/infer/mxbase/
+    mkdir output
+    ```
+
+5. 鍚姩瀹瑰櫒銆�
+
+    Enter the "infer" directory and run the following command to start the container.
+
+    bash docker_start_infer.sh docker_image data_path
+
+    **Table 2**  Parameter description
+
+    | Parameter    | Description                                             |
+    | ------------ | ------------------------------------------------------- |
+    | docker_image | Name of the inference image; fill in the actual value.  |
+    | data_path    | Path of the dataset.                                    |
+
+    鍚姩瀹瑰櫒鏃朵細灏嗘帹鐞嗚姱鐗囧拰鏁版嵁璺緞鎸傝浇鍒板鍣ㄤ腑銆傚彲鏍规嵁闇€瑕侀€氳繃淇敼**docker\_start\_infer.sh**鐨刣evice鏉ユ寚瀹氭寕杞界殑鎺ㄧ悊鑺墖銆�
+
+    ```
+    docker run -it \
+      --device=/dev/davinci0 \        # modify the mounted NPU device as needed
+      --device=/dev/davinci_manager \
+
+    # Note: the MindX SDK development kit (mxManufacture) is already installed in the base image, at "/usr/local/sdk_home".
+    ```
+
+### Model Conversion
+
+1. Prepare the model file.
+
+    Create a model directory under infer and place mcnn.air in it.
+
+    ```
+    cd MCNN_Mindspore_{version}_code/infer/
+    mkdir model
+    ```
+
+    Place mcnn.air under infer/model/.
+
+2. Convert the model.
+
+    Enter the "infer/convert" directory to perform the conversion, and configure the relevant parameters in the **convert_om.sh** script.
+
+    ```
+    atc \
+                    --model=${model} \
+                    --output=${output} \
+                    --soc_version=${soc_version} \
+                    --input_shape=${input_shape} \
+                    --framework=1 \
+                    --input_format=NCHW
+    ```
+
+    杞崲鍛戒护濡備笅銆�
+
+    ```
+    bash convert_om.sh --model=[model_path] --output=[output_model_name]
+    # model_path is the path of the AIR model; output_model_name is the name of the new model
+    # For example:
+    bash convert_om.sh --model=../model/mcnn.air --output=../model/mcnn
+    ```
+
+    **Table 1**  Parameter description
+
+    | Parameter         | Description                                                                  |
+    | ----------------- | ---------------------------------------------------------------------------- |
+    | model_path        | Path of the AIR file.                                                        |
+    | output_model_name | Name of the generated OM file; the conversion script appends the .om suffix. |
+
+### mxBase Inference
+
+鍦ㄥ鍣ㄥ唴鐢╩xBase杩涜鎺ㄧ悊銆�
+
+1. Build the project.
+
+    Enter the /mxbase directory and run:
+
+    ```
+    bash build.sh
+    ```
+
+2. Run the inference service.
+
+    ```
+    ./Mcnn [model_path] [data_path] [label_path] [output_path]
+    # model_path is the OM model path; data_path is the image part of the dataset; label_path is the label part of the dataset; output_path is the output directory
+    # For example:
+    ./Mcnn ../model/mcnn.om ../test_data/images/ ../test_data/ground_truth_csv/ ./output
+    ```
+
+3. Check the results.
+
+    Fields in the inference output:
+
+    datasize: size of the input data
+
+    output_size: size of the output data
+
+    output0_datatype: data type of the output
+
+    output0_shape: shape of the output
+
+    output0_bytesize: byte size of the output
+
+    These are followed by each test image's id, predicted count, and ground-truth count.
+
+    Finally, the accuracy over the whole test set is printed; a sketch of the computation follows.
+
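+The numpy sketch below illustrates the accuracy computation performed by the mxBase post-processing in this delivery: the predicted count is the sum of the output density map, the ground-truth count is the sum of all values in the corresponding CSV file, and MAE/MSE are aggregated over the test set. File loading and exact aggregation may differ in the delivered code.
+
+``` python
+import numpy as np
+
+def count_from_density_map(density_map):
+    return float(np.sum(density_map))            # predicted head count for one image
+
+def count_from_gt_csv(csv_path):
+    # The ground-truth CSV stores a density map; summing all its values gives the true count.
+    return float(np.sum(np.loadtxt(csv_path, delimiter=",")))
+
+def mae_mse(pred_counts, gt_counts):
+    diff = np.asarray(pred_counts) - np.asarray(gt_counts)
+    return np.mean(np.abs(diff)), np.mean(diff ** 2)
+```
+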
+### MindX SDK Inference
+
+1. Check the environment.
+
+    纭繚mcnn.om妯″瀷鍦�/MCNN/infer/model涓嬨€�
+
+2. 淇敼閰嶇疆鏂囦欢銆�
+
+    1. 淇敼pipeline鏂囦欢銆�
+
+    ```
+    {
+    "mcnn_opencv": {
+        "appsrc0": {
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "appsrc0",
+                "modelPath": "../model/mcnn.om", //姝ゅ鏄綘鐨勬ā鍨嬪瓨鏀捐矾寰�
+                "waitingTime": "2000"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "factory": "appsink"
+        }
+    }
+    }
+    ```
+
+3. Turn on the performance statistics switch: set the "enable_ps" parameter to true and the "ps_interval_time" parameter to 6.
+
+    vim ${MX_SDK_HOME}/config/sdk.conf
+
+    ```
+    # Mindx SDK configuration file
+    # whether to enable performance statistics, default is false [dynamic config]
+    enable_ps=true
+    ...
+    ps_interval_time=6
+    ...
+    ```
+
+4. Run the inference service.
+
+    1. Run inference.
+
+        Enter the /sdk directory and run the following command:
+
+        ```
+        bash run.sh [input_dir]  [gt_dir]
+        # input_dir is the image part of the dataset; gt_dir is the label part of the dataset
+        # For example:
+        bash run.sh ../test_data/images/ ../test_data/ground_truth_csv/
+        ```
+
+    2. View the inference results.
+
+5. Run the accuracy and performance tests.
+
+6. View the performance statistics in the log directory "${MX_SDK_HOME}/logs/".
+
+    ```
+    performance-statistics.log.e2e.xxx
+    performance-statistics.log.plugin.xxx
+    performance-statistics.log.tpr.xxx
+    ```
+
+    The e2e logs record the end-to-end time, and the plugin logs record the per-plugin time.
+
+## Applying the Model on ModelArts
+
+- **[Create an OBS Bucket](#create-an-obs-bucket)**
+
+- **[Create an Algorithm (for MindSpore and TensorFlow)](#create-an-algorithm-for-mindspore-and-tensorflow)**
+
+- **[Create a Training Job](#create-a-training-job)**
+
+- **[View Training Job Logs](#view-training-job-logs)**
+
+- **[Transfer Learning](#transfer-learning)**
+
+### Create an OBS Bucket
+
+1. Log in to the [OBS management console](https://storage.huaweicloud.com/obs) and create an OBS bucket. For details, see the [Creating a Bucket](https://support.huaweicloud.com/usermanual-obs/obs_03_0306.html) section. For example, create an OBS bucket named "MCNN".
+
+The bucket must be created in the same region as ModelArts. For example, if ModelArts is in the CN North-Beijing4 region, select CN North-Beijing4 when creating the bucket in OBS.
+
+Create folders for storing the data. For details, see the [Creating a Folder](https://support.huaweicloud.com/usermanual-obs/obs_03_0316.html) section. For example, in the MCNN project of the created OBS bucket, create the data, LOG, and train_output folders.
+
+Directory structure:
+
+- MCNN: directory for the training scripts
+- data: directory for the training dataset
+- LOG: directory for the training logs
+- train_output: stores the training ckpt files and the frozen AIR model (in the result folder under output)
+
+Upload the ShanghaiTech Part A dataset to the "data" directory.
+
+Dataset link: https://pan.baidu.com/s/185jBeL91R85OUcbeARP9Sg  Extraction code: 5q9v
+
+Note: start_train.py in the modelarts directory must be moved to the root directory before training.
+
+### Create an Algorithm (for MindSpore and TensorFlow)
+
+1. Log in to the [ModelArts management console](https://console.huaweicloud.com/modelarts) with your Huawei Cloud account and choose "Algorithm Management" in the left navigation pane.
+2. On the "My Algorithms" page, click "Create" in the upper left corner to open the "Create Algorithm" page.
+3. On the "Create Algorithm" page, fill in the relevant parameters and click "Submit".
+    1. Set the basic algorithm information.
+
+    2. Set "Creation Mode" to "Custom Script".
+
+        Set "AI Engine", "Code Directory", and "Boot File" according to your algorithm code. The selected AI engine must match the framework the algorithm code is written with; for example, if the code is written with MindSpore, select MindSpore when creating the algorithm.
+
+        **Table 1**
+
+        | Parameter                 | Description |
+        | ------------------------- | ----------- |
+        | AI Engine                 | Ascend-Powered-Engine, mindspore_1.3.0-cann_5.0.2 |
+        | Code Directory            | OBS path where the algorithm code is stored. Upload the training scripts, e.g. /mcnn/MCNN/ |
+        | Boot File                 | Python script that starts the training, e.g. /mcnn/MCNN/start_train.py. Notice: the start.py boot script in the modelArts/ directory must be copied to the root directory. |
+        | Input Data Configuration  | Code path parameter: data_url |
+        | Output Data Configuration | Code path parameter: train_url |
+
+    3. 濉啓瓒呭弬鏁般€�
+
+        Click "Add Hyperparameter" and add the hyperparameters manually. Configure the command-line parameter values according to the logic of your algorithm code, and make sure the parameter names match those used in the code; multiple parameters can be added. A minimal argument-parsing sketch follows this section.
+
+        **Table 2** _Hyperparameter description_
+
+        | Parameter  | Type  | Default  | Required | Description |
+        | ---------- | ----- | -------- | -------- | ----------- |
+        | batch_size | int   | 1        | No       | batch_size of the training set. |
+        | lr         | float | 0.000028 | No       | Learning rate for training; with 8-card training the learning rate is 0.000028. |
+        | momentum   | float | 0        | No       | Momentum used during training. |
+        | epoch_size | int   | 800      | No       | Total number of training epochs. |
+
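+A minimal argument-parsing sketch is shown below. It is hypothetical (the delivered boot script may parse its arguments differently) and only illustrates how the hyperparameters and the ModelArts data_url/train_url paths listed above map to command-line options.
+
+``` python
+import argparse
+
+parser = argparse.ArgumentParser(description="MCNN training on ModelArts (illustrative sketch)")
+parser.add_argument("--data_url", type=str, required=True, help="OBS path of the training data")
+parser.add_argument("--train_url", type=str, required=True, help="OBS path for the training output")
+parser.add_argument("--batch_size", type=int, default=1)
+parser.add_argument("--lr", type=float, default=0.000028)
+parser.add_argument("--momentum", type=float, default=0.0)
+parser.add_argument("--epoch_size", type=int, default=800)
+args = parser.parse_args()
+```
+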
+### Create a Training Job
+
+1. Log in to ModelArts.
+2. Create a training job.
+
+1. Log in to the [ModelArts management console](https://console.huaweicloud.com/modelarts) with your Huawei Cloud account and choose "Training Management > Training Jobs (New)" in the left navigation pane; the "Training Jobs" list is displayed by default.
+2. Click "Create Training Job" to open the "Create Training Job" page, and fill in the training job parameters on that page.
+
+    1. 濉啓鍩烘湰淇℃伅銆�
+
+        The basic information includes "Name" and "Description".
+
+    2. 濉啓浣滀笟鍙傛暟銆�
+
+        These include key information such as the data source and the algorithm source. This step describes only some of the training job parameters; for details about the other parameters, see "Training Management (New)" in the [ModelArts AI Engineer User Guide](https://support.huaweicloud.com/modelarts/index.html).
+
+        **Creating a training job for MindSpore and TensorFlow**
+
+        **Table 1**  Parameter description
+
+        | Parameter       | Sub-parameter | Description |
+        | --------------- | ------------- | ----------- |
+        | Algorithm       | My Algorithms | Select the "My Algorithms" tab and choose the algorithm created above. If no algorithm has been created yet, click "Create" to open the Create Algorithm page; for details, see "Create an Algorithm". |
+        | Training Input  | Data Source   | Select the directory on OBS where the dataset is stored. |
+        | Training Output | Model Output  | Select the storage location (OBS path) for the training results; preferably choose an empty directory as the training output path. |
+        | Specification   | -             | Ascend: 1*Ascend 910 (32 GB) \| ARM: 24 cores, 96 GB |
+        | Job Log Path    | -             | Set the directory for storing the training logs. Make sure the selected OBS directory has read and write permissions. |
+
+3. Click "Submit" to finish creating the training job.
+
+    璁粌浣滀笟涓€鑸渶瑕佽繍琛屼竴娈垫椂闂达紝鏍规嵁鎮ㄩ€夋嫨鐨勬暟鎹噺鍜岃祫婧愪笉鍚岋紝璁粌鏃堕棿灏嗚€楁椂鍑犲垎閽熷埌鍑犲崄鍒嗛挓涓嶇瓑銆�
+
+### View Training Job Logs
+
+1. In the ModelArts management console, choose "Training Management > Training Jobs (New)" in the left navigation pane; the "Training Jobs" list is displayed by default.
+2. In the training job list, click a job name to view the details of that job.
+
+    The details include the job's basic information, training parameters, log details, and resource usage.
+
+    The generated ckpt and AIR models can be found in the train_output folder of the OBS bucket.
diff --git a/official/cv/MCNN/infer/convert/convert_om.sh b/official/cv/MCNN/infer/convert/convert_om.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c0b209e928d2233929ede9a77cd46dbbba0cc887
--- /dev/null
+++ b/official/cv/MCNN/infer/convert/convert_om.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+soc_version=Ascend310
+input_shape="x:1,1,1024,1024"
+# 甯姪淇℃伅锛屼笉闇€瑕佷慨鏀�
+if [[ $1 == --help || $1 == -h ]];then
+    echo"usage:bash ./ATC_AIR_2_OM.sh <args>"
+    echo " "
+    echo "parameter explain:"
+    echo "--model                  set model place, e.g. --model=/home/mcnn.air"
+    echo "--output                 set the name and place of OM model, e.g. --output=/home/HwHiAiUser/fixmatch310_tune4"
+    echo "--soc_version            set the soc_version, default: --soc_version=Ascend310"
+    echo "--input_shape            set the input node and shape, default: --input_shape=x:1,1,1024,1024"
+    echo "-h/--help                show help message"
+    exit 1
+fi
+
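+# Parse --key=value arguments; ${para#*=} keeps only the text after '='.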
+for para in "$@"
+do
+    if [[ $para == --model* ]];then
+        model=`echo ${para#*=}`
+    elif [[ $para == --output* ]];then
+        output=`echo ${para#*=}`
+    elif [[ $para == --soc_version* ]];then
+        soc_version=`echo ${para#*=}`
+    elif [[ $para == --input_shape* ]];then
+        input_shape=`echo ${para#*=}`
+    fi
+done
+
+if [[ $model  == "" ]];then
+   echo "[Error] para \"model \" must be config"
+   exit 1
+fi
+
+
+if [[ $output  == "" ]];then
+   echo "[Error] para \"output \" must be config"
+   exit 1
+fi
+
+atc \
+                    --model=${model} \
+                    --output=${output} \
+                    --soc_version=${soc_version} \
+                    --input_shape=${input_shape} \
+                    --framework=1 \
+                    --input_format=NCHW
\ No newline at end of file
diff --git a/official/cv/MCNN/infer/docker_start_infer.sh b/official/cv/MCNN/infer/docker_start_infer.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e25760deaf85985e879203642656cfa7c306619d
--- /dev/null
+++ b/official/cv/MCNN/infer/docker_start_infer.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+docker_image=$1
+data_path=$2
+
+function show_help() {
+    echo "Usage: docker_start.sh docker_image data_path"
+}
+
+function param_check() {
+    if [ -z "${docker_image}" ]; then
+        echo "please input docker_image"
+        show_help
+        exit 1
+    fi
+
+    if [ -z "${data_path}" ]; then
+        echo "please input data_path"
+        show_help
+        exit 1
+    fi
+}
+
+param_check
+
+docker run -it \
+  --device=/dev/davinci0 \
+  --device=/dev/davinci_manager \
+  --device=/dev/devmm_svm \
+  --device=/dev/hisi_hdc \
+  -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+  -v ${data_path}:${data_path} \
+  ${docker_image} \
+  /bin/bash
diff --git a/official/cv/MCNN/infer/mxbase/CMakeLists.txt b/official/cv/MCNN/infer/mxbase/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ff0685d87eb4793f5560e7a4e9ef197fe970f3e8
--- /dev/null
+++ b/official/cv/MCNN/infer/mxbase/CMakeLists.txt
@@ -0,0 +1,48 @@
+cmake_minimum_required(VERSION 3.14.0)
+project(mcnn)
+set(TARGET_MAIN Mcnn)
+
+add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+add_definitions(-Dgoogle=mindxsdk_private)
+add_compile_options(-std=c++11 -fPIE -fstack-protector-all -fPIC -Wall)
+add_link_options(-Wl,-z,relro,-z,now,-z,noexecstack -s -pie)
+
+if(NOT DEFINED ENV{ASCEND_HOME})
+    message(FATAL_ERROR "please define environment variable:ASCEND_HOME")
+endif()
+
+if(NOT DEFINED ENV{ARCH_PATTERN})
+    message(WARNING "please define environment variable:ARCH_PATTERN")
+endif()
+
+set(ACL_LIB_PATH $ENV{ASCEND_HOME}/nnrt/latest/acllib)
+set(ACL_INC_DIR ${ACL_LIB_PATH}/include)
+set(ACL_LIB_DIR ${ACL_LIB_PATH}/lib64)
+set(MXBASE_ROOT_DIR $ENV{MX_SDK_HOME})
+set(MXBASE_INC ${MXBASE_ROOT_DIR}/include)
+set(MXBASE_LIB_DIR ${MXBASE_ROOT_DIR}/lib)
+set(MXBASE_POST_LIB_DIR ${MXBASE_ROOT_DIR}/lib/modelpostprocessors)
+set(MXBASE_POST_PROCESS_DIR ${MXBASE_ROOT_DIR}/include/MxBase/postprocess/include)
+
+if(NOT DEFINED ENV{MXSDK_OPENSOURCE_DIR})
+    message(WARNING "please define environment variable:MXSDK_OPENSOURCE_DIR")
+endif()
+
+set(OPENSOURCE_DIR $ENV{MXSDK_OPENSOURCE_DIR})
+
+include_directories(src)
+include_directories(${ACL_INC_DIR})
+include_directories(${OPENSOURCE_DIR}/include)
+include_directories(${OPENSOURCE_DIR}/include/opencv4)
+
+
+include_directories(${MXBASE_INC})
+include_directories(${MXBASE_POST_PROCESS_DIR})
+
+link_directories(${ACL_LIB_DIR})
+link_directories(${OPENSOURCE_DIR}/lib)
+link_directories(${MXBASE_LIB_DIR})
+link_directories(${MXBASE_POST_LIB_DIR})
+
+
+add_executable(${TARGET_MAIN} src/main.cpp src/Mcnn.cpp)
+
+target_link_libraries(${TARGET_MAIN} glog cpprest mxbase opencv_world)
diff --git a/official/cv/MCNN/infer/mxbase/build.sh b/official/cv/MCNN/infer/mxbase/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5c2b02fccb384c5d9b014e8d20ff2a620180ede8
--- /dev/null
+++ b/official/cv/MCNN/infer/mxbase/build.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+path_cur=$(dirname $0)
+
+
+function check_env()
+{
+    # set ASCEND_VERSION to ascend-toolkit/latest when it was not specified by user
+    if [ ! "${ASCEND_VERSION}" ]; then
+        export ASCEND_VERSION=ascend-toolkit/latest
+        echo "Set ASCEND_VERSION to the default value: ${ASCEND_VERSION}"
+    else
+        echo "ASCEND_VERSION is set to ${ASCEND_VERSION} by user"
+    fi
+
+    if [ ! "${ARCH_PATTERN}" ]; then
+        # set ARCH_PATTERN to ./ when it was not specified by user
+        export ARCH_PATTERN=./
+        echo "ARCH_PATTERN is set to the default value: ${ARCH_PATTERN}"
+    else
+        echo "ARCH_PATTERN is set to ${ARCH_PATTERN} by user"
+    fi
+}
+
+function build_mcnn()
+{
+    cd $path_cur
+    rm -rf build
+    mkdir -p build
+    cd build
+    cmake ..
+    make
+    ret=$?
+    if [ ${ret} -ne 0 ]; then
+        echo "Failed to build srgan."
+        exit ${ret}
+    fi
+    mv Mcnn ../
+}
+export ASCEND_VERSION=nnrt/latest
+export ARCH_PATTERN=.
+export MXSDK_OPENSOURCE_DIR=${MX_SDK_HOME}/opensource
+export LD_LIBRARY_PATH="${MX_SDK_HOME}/lib/plugins:${MX_SDK_HOME}/opensource/lib64:${MX_SDK_HOME}/lib:${MX_SDK_HOME}/lib/modelpostprocessors:${MX_SDK_HOME}/opensource/lib:${LD_LIBRARY_PATH}"
+check_env
+build_mcnn
\ No newline at end of file
diff --git a/official/cv/MCNN/infer/mxbase/src/Mcnn.cpp b/official/cv/MCNN/infer/mxbase/src/Mcnn.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8c05e4a6103b2e0e7fd6018e4a06d9faa44e398f
--- /dev/null
+++ b/official/cv/MCNN/infer/mxbase/src/Mcnn.cpp
@@ -0,0 +1,315 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============================================================================
+ */
+
+#include "Mcnn.h"
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <string>
+#include <vector>
+#include <cstdlib>
+#include <cmath>
+#include <chrono>
+#include "MxBase/DeviceManager/DeviceManager.h"
+#include "MxBase/Log/Log.h"
+#include <opencv2/opencv.hpp>
+
+
+using MxBase::TensorDesc;
+using MxBase::TensorBase;
+using MxBase::MemoryData;
+using MxBase::MemoryHelper;
+using MxBase::TENSOR_DTYPE_FLOAT32;
+using MxBase::DynamicInfo;
+using MxBase::DynamicType;
+
+void PrintTensorShape(const std::vector<MxBase::TensorDesc> &tensorDescVec, const std::string &tensorName) {
+    LogInfo << "The shape of " << tensorName << " is as follows:";
+    for (size_t i = 0; i < tensorDescVec.size(); ++i) {
+        LogInfo << "  Tensor " << i << ":";
+        for (size_t j = 0; j < tensorDescVec[i].tensorDims.size(); ++j) {
+            LogInfo << "   dim: " << j << ": " << tensorDescVec[i].tensorDims[j];
+        }
+    }
+}
+
+void PrintInputShape(const std::vector<MxBase::TensorBase> &input) {
+    MxBase::TensorBase img = input[0];
+    LogInfo << "  -------------------------input0 ";
+    LogInfo << img.GetDataType();
+    LogInfo << img.GetShape()[0] << ", " << img.GetShape()[1] \
+    << ", "  << img.GetShape()[2] << ", " << img.GetShape()[3];
+    LogInfo << img.GetSize();
+}
+
+APP_ERROR Mcnn::Init(const InitParam &initParam) {
+    deviceId_ = initParam.deviceId;
+    APP_ERROR ret = MxBase::DeviceManager::GetInstance()->InitDevices();
+    if (ret != APP_ERR_OK) {
+        LogError << "Init devices failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = MxBase::TensorContext::GetInstance()->SetContext(initParam.deviceId);
+    if (ret != APP_ERR_OK) {
+        LogError << "Set context failed, ret=" << ret << ".";
+        return ret;
+    }
+    dvppWrapper_ = std::make_shared<MxBase::DvppWrapper>();
+    ret = dvppWrapper_->Init();
+    if (ret != APP_ERR_OK) {
+        LogError << "DvppWrapper init failed, ret=" << ret << ".";
+        return ret;
+    }
+    model_ = std::make_shared<MxBase::ModelInferenceProcessor>();
+    ret = model_->Init(initParam.modelPath, modelDesc_);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInferenceProcessor init failed, ret=" << ret << ".";
+        return ret;
+    }
+    srPath_ = initParam.srPath;
+    gtPath_ = initParam.gtPath;
+    PrintTensorShape(modelDesc_.inputTensors, "Model Input Tensors");
+    PrintTensorShape(modelDesc_.outputTensors, "Model Output Tensors");
+
+
+    return APP_ERR_OK;
+}
+
+APP_ERROR Mcnn::DeInit() {
+    dvppWrapper_->DeInit();
+    model_->DeInit();
+
+    MxBase::DeviceManager::GetInstance()->DestroyDevices();
+
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR Mcnn::ReadImage(const std::string &imgPath, cv::Mat *imageMat) {
+    *imageMat = cv::imread(imgPath, 0);
+    return APP_ERR_OK;
+}
+
+
+std::string Trim(const std::string &str) {
+    std::string str_new = str;
+    // Strip trailing whitespace first so the index from find_last_not_of stays valid.
+    str_new.erase(str_new.find_last_not_of(" \t\r\n") + 1);
+    str_new.erase(0, str_new.find_first_not_of(" \t\r\n"));
+    return str_new;
+}
+
+
+float ReadCsv(std::string csvName) {
+    std::ifstream fin(csvName);
+    std::string line;
+    float num = 0;
+    while (getline(fin, line)) {
+        std::istringstream sin(line);
+        std::vector<std::string> fields;
+        std::string field;
+        int len = 0;
+        while (getline(sin, field, ',')) {
+            len++;
+            fields.push_back(field);
+        }
+        for (int i = 0; i < len; i++) {
+            std::string name = Trim(fields[i]);
+            float num_float = std::stof(name);
+            num = num + num_float;
+        }
+    }
+    return num;
+}
+
+
+APP_ERROR Mcnn::PadImage(const cv::Mat &imageMat, cv::Mat *imgPad) {
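+    // Center the original grayscale image inside a fixed 1024x1024 zero-filled canvas.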
+    size_t W_o = imageMat.cols, H_o = imageMat.rows;
+    size_t W_b = 512 - W_o / 2;
+    size_t H_b = 512 - H_o / 2;
+    size_t W_e = W_b + W_o;
+    size_t H_e = H_b + H_o;
+    for (size_t h = 0; h < 1024; h++) {
+        for (size_t w = 0; w < 1024; w++) {
+            if (H_b <= h && h < H_e && W_b <= w && w < W_e) {
+                imgPad->at<uchar>(h, w) = imageMat.at<uchar>(h - H_b, w - W_b);
+            } else {
+                imgPad->at<uchar>(h, w) = 0;
+            }
+        }
+    }
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR Mcnn::CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase *tensorBase) {
+    uint32_t dataSize = 1;
+    for (size_t i = 0; i < modelDesc_.inputTensors.size(); ++i) {
+        std::vector <uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.inputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t) modelDesc_.inputTensors[i].tensorDims[j]);
+        }
+        for (uint32_t s = 0; s < shape.size(); ++s) {
+            dataSize *= shape[s];
+        }
+    }
+    APP_ERROR ret = PadImage(imageMat, &imgPad_);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Img pad error";
+        return ret;
+    }
+    // mat NHWC to NCHW
+    size_t H = 1024, W = 1024, C = 1;
+    LogInfo << "dataSize:" << dataSize;
+    dataSize = dataSize * 4;
+    int id;
+
+    for (size_t c = 0; c < C; c++) {
+        for (size_t h = 0; h < H; h++) {
+            for (size_t w = 0; w < W; w++) {
+                id = (C - c - 1) * (H * W) + h * W + w;
+                mat_data_[id] = static_cast<float>(imgPad_.at<uchar>(h, w));
+            }
+        }
+    }
+
+    MemoryData memoryDataDst(dataSize, MemoryData::MEMORY_DEVICE, deviceId_);
+    MemoryData memoryDataSrc(reinterpret_cast<void *>(&mat_data_[0]), dataSize, MemoryData::MEMORY_HOST_MALLOC);
+    ret = MemoryHelper::MxbsMallocAndCopy(memoryDataDst, memoryDataSrc);
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "Memory malloc failed.";
+        return ret;
+    }
+    std::vector <uint32_t> shape = {1, 1, 1024, 1024};
+    *tensorBase = MxBase::TensorBase(memoryDataDst, false, shape, TENSOR_DTYPE_FLOAT32);
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR Mcnn::Inference(const std::vector<MxBase::TensorBase> &inputs,
+                           std::vector<MxBase::TensorBase> *outputs) {
+    auto dtypes = model_->GetOutputDataType();
+    for (size_t i = 0; i < modelDesc_.outputTensors.size(); ++i) {
+        std::vector<uint32_t> shape = {};
+        for (size_t j = 0; j < modelDesc_.outputTensors[i].tensorDims.size(); ++j) {
+            shape.push_back((uint32_t)modelDesc_.outputTensors[i].tensorDims[j]);
+        }
+        MxBase::TensorBase tensor(shape, dtypes[i], MemoryData::MemoryType::MEMORY_DEVICE, deviceId_);
+        APP_ERROR ret = TensorBase::TensorBaseMalloc(tensor);
+        if (ret != APP_ERR_OK) {
+            LogError << "TensorBaseMalloc failed, ret=" << ret << ".";
+            return ret;
+        }
+        outputs->push_back(tensor);
+    }
+    MxBase::DynamicInfo dynamicInfo = {};
+    dynamicInfo.dynamicType = DynamicType::STATIC_BATCH;
+    dynamicInfo.batchSize = 1;
+
+    APP_ERROR ret = model_->ModelInference(inputs, *outputs, dynamicInfo);
+    if (ret != APP_ERR_OK) {
+        LogError << "ModelInference failed, ret=" << ret << ".";
+        return ret;
+    }
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR Mcnn::PostProcess(std::vector<MxBase::TensorBase> outputs, const std::string &imgName) {
+    LogInfo << "output_size:" << outputs.size();
+    LogInfo <<  "output0_datatype:" << outputs[0].GetDataType();
+    LogInfo << "output0_shape:" << outputs[0].GetShape()[0] << ", " \
+    << outputs[0].GetShape()[1] << ", "  << outputs[0].GetShape()[2] << ", " << outputs[0].GetShape()[3];
+    LogInfo << "output0_bytesize:"  << outputs[0].GetByteSize();
+
+    APP_ERROR ret = outputs[0].ToHost();
+    if (ret != APP_ERR_OK) {
+        LogError << GetError(ret) << "tohost fail.";
+        return ret;
+    }
+    float *outputPtr = reinterpret_cast<float *>(outputs[0].GetBuffer());
+
+    size_t H = 1024 / 4, W = 1024 / 4, C = 1;
+
+    float tmpNum;
+    float pre = 0;
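+    // Sum the predicted density map (1/4 of the input resolution); the total is the estimated crowd count.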
+    for (size_t c = 0; c < C; c++) {
+        for (size_t h = 0; h < H; h++) {
+            for (size_t w = 0; w < W; w++) {
+                tmpNum = static_cast<float>(*(outputPtr + (C - c - 1) * (H * W) + h * W + w));
+                pre = pre + tmpNum;
+            }
+        }
+    }
+    std::string imgName2 = imgName;
+    int len = imgName.length();
+    imgName2[len-3] = 'c';
+    imgName2[len-2] = 's';
+    imgName2[len-1] = 'v';
+    std::string gtName = gtPath_;
+    gtName.append(imgName2);
+    LogInfo << gtName;
+    LogInfo << "pre:" << pre;
+    float gt_count = ReadCsv(gtName);
+    LogInfo << "gt:" << gt_count;
+    mae += fabs(gt_count - pre);
+    mse = mse + (gt_count - pre) * (gt_count - pre);
+    LogInfo << "mae:" << fabs(gt_count - pre);
+    return APP_ERR_OK;
+}
+
+
+APP_ERROR Mcnn::Process(const std::string &imgPath, const std::string &imgName) {
+    LogInfo << imgName;
+    cv::Mat imageMat;
+    APP_ERROR ret = ReadImage(imgPath, &imageMat);
+
+    if (ret != APP_ERR_OK) {
+        LogError << "ReadImage failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    size_t o_img_W = imageMat.cols, o_img_H = imageMat.rows, o_img_C = imageMat.channels();
+    LogInfo << o_img_C << "," << o_img_H << "," << o_img_W;
+
+
+    std::vector<MxBase::TensorBase> inputs = {};
+    std::vector<MxBase::TensorBase> outputs = {};
+
+    MxBase::TensorBase tensorBase;
+    ret = CVMatToTensorBase(imageMat, &tensorBase);
+    cv::imwrite(srPath_ + "/" + imgName, imgPad_);
+    if (ret != APP_ERR_OK) {
+        LogError << "CVMatToTensorBase failed, ret=" << ret << ".";
+        return ret;
+    }
+    inputs.push_back(tensorBase);
+
+    auto startTime = std::chrono::high_resolution_clock::now();
+    ret = Inference(inputs, &outputs);
+    auto endTime = std::chrono::high_resolution_clock::now();
+    double costMs = std::chrono::duration<double, std::milli>(endTime - startTime).count();  // save time
+    inferCostTimeMilliSec += costMs;
+    if (ret != APP_ERR_OK) {
+        LogError << "Inference failed, ret=" << ret << ".";
+        return ret;
+    }
+    ret = PostProcess(outputs, imgName);
+    if (ret != APP_ERR_OK) {
+        LogError << "PostProcess failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    return APP_ERR_OK;
+}
diff --git a/official/cv/MCNN/infer/mxbase/src/Mcnn.h b/official/cv/MCNN/infer/mxbase/src/Mcnn.h
new file mode 100644
index 0000000000000000000000000000000000000000..75182defcd38405c3b3176edaf8651d1d44e0923
--- /dev/null
+++ b/official/cv/MCNN/infer/mxbase/src/Mcnn.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============================================================================
+ */
+
+#ifndef MCNN_H
+#define MCNN_H
+#include <dirent.h>
+#include <memory>
+#include <vector>
+#include <map>
+#include <string>
+#include <fstream>
+#include <iostream>
+#include <opencv2/opencv.hpp>
+#include "MxBase/Log/Log.h"
+#include "MxBase/DvppWrapper/DvppWrapper.h"
+#include "MxBase/ModelInfer/ModelInferenceProcessor.h"
+#include "MxBase/Tensor/TensorContext/TensorContext.h"
+#include "MxBase/DeviceManager/DeviceManager.h"
+
+struct InitParam {
+    uint32_t deviceId;
+    bool checkTensor;
+    std::string modelPath;
+    std::string srPath;
+    std::string gtPath;
+};
+
+
+class Mcnn {
+ public:
+    APP_ERROR Init(const InitParam &initParam);
+    APP_ERROR DeInit();
+    APP_ERROR ReadImage(const std::string &imgPath, cv::Mat *imageMat);
+    APP_ERROR PadImage(const cv::Mat &img, cv::Mat *imgPad);
+    APP_ERROR CVMatToTensorBase(const cv::Mat &imageMat, MxBase::TensorBase *tensorBase);
+    APP_ERROR Inference(const std::vector<MxBase::TensorBase> &inputs, std::vector<MxBase::TensorBase> *outputs);
+    APP_ERROR PostProcess(std::vector<MxBase::TensorBase> outputs, const std::string &imgName);
+    APP_ERROR Process(const std::string &imgPath, const std::string &imgName);
+    // get infer time
+    double GetInferCostMilliSec() const {return inferCostTimeMilliSec;}
+    double GetPSNR() const {return psnr_;}
+    float getmae() const {return mae;}
+    float getmse() const {return mse;}
+
+ private:
+    std::shared_ptr<MxBase::DvppWrapper> dvppWrapper_;
+    std::shared_ptr<MxBase::ModelInferenceProcessor> model_;
+    std::string srPath_;
+    std::string gtPath_;
+    MxBase::ModelDesc modelDesc_;
+    double_t psnr_ = 0;
+    uint32_t deviceId_ = 0;
+    // infer time
+    double inferCostTimeMilliSec = 0.0;
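+    // buffer of 1024 * 1024 floats used when building the model input tensor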
+    float *mat_data_ = new float[1048576];
+    cv::Mat imgPad_ = cv::Mat(1024, 1024, CV_32FC1);
+    float mae = 0.0;
+    float mse = 0.0;
+};
+
+
+#endif  // MCNN_H
diff --git a/official/cv/MCNN/infer/mxbase/src/main.cpp b/official/cv/MCNN/infer/mxbase/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..059505d8d789bb033c2d70b9bb942122f4d1bfb6
--- /dev/null
+++ b/official/cv/MCNN/infer/mxbase/src/main.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============================================================================
+ */
+
+#include "Mcnn.h"
+
+
+APP_ERROR ScanImages(const std::string &path, std::vector<std::string> *imgFiles) {
+    DIR *dirPtr = opendir(path.c_str());
+    if (dirPtr == nullptr) {
+        LogError << "opendir failed. dir:" << path;
+        return APP_ERR_INTERNAL_ERROR;
+    }
+    dirent *direntPtr = nullptr;
+    while ((direntPtr = readdir(dirPtr)) != nullptr) {
+        std::string fileName = direntPtr->d_name;
+        if (fileName == "." || fileName == "..")
+            continue;
+
+        imgFiles->emplace_back(fileName);
+    }
+    closedir(dirPtr);
+    return APP_ERR_OK;
+}
+
+
+int main(int argc, char* argv[]) {
+    if (argc <= 4) {
+        LogWarn << "Usage: './Mcnn [model_path] [data_path] [label_path] [output_path]'.";
+        return APP_ERR_OK;
+    }
+
+    const std::string modelPath = argv[1];
+    std::string inputPath = argv[2];
+    std::string gtPath = argv[3];
+    std::string srPath = argv[4];
+
+    InitParam initParam = {};
+    initParam.deviceId = 0;
+    initParam.checkTensor = true;
+    initParam.modelPath = modelPath;
+    initParam.srPath = srPath;
+    initParam.gtPath = gtPath;
+
+    auto mcnn = std::make_shared<Mcnn>();
+    APP_ERROR ret = mcnn->Init(initParam);
+    if (ret != APP_ERR_OK) {
+        LogError << "mcnn init failed, ret=" << ret << ".";
+        return ret;
+    }
+
+    std::vector<std::string> imgFilePaths;
+    ret = ScanImages(inputPath, &imgFilePaths);
+    if (ret != APP_ERR_OK) {
+        LogError << "mcnn lq img scan error, ret=" << ret << ".";
+        return ret;
+    }
+
+    auto startTime = std::chrono::high_resolution_clock::now();
+
+    int totalNum = 0;
+
+    std::sort(imgFilePaths.begin(), imgFilePaths.end());
+    for (const auto &imgFile : imgFilePaths) {
+        LogInfo << imgFile;
+    }
+
+    for (auto &imgFile : imgFilePaths) {
+        LogInfo << totalNum;
+        ret = mcnn->Process(inputPath+'/'+imgFile, imgFile);
+        ++totalNum;
+        if (ret != APP_ERR_OK) {
+            LogError << "mcnn process failed, ret=" << ret << ".";
+            mcnn->DeInit();
+            return ret;
+        }
+    }
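+    // MAE and MSE over all images; MSE follows the crowd-counting convention (root of the mean squared error).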
+    float mae = mcnn->getmae()/totalNum;
+    float mse = sqrt(mcnn->getmse()/totalNum);
+    LogInfo << "mae:" << mae;
+    LogInfo << "mse:" << mse;
+    auto endTime = std::chrono::high_resolution_clock::now();
+    mcnn->DeInit();
+    double costMilliSecs = std::chrono::duration<double, std::milli>(endTime - startTime).count();
+    double fps = 1000.0 * imgFilePaths.size() / mcnn->GetInferCostMilliSec();
+    LogInfo << "[Process Delay] cost: " << costMilliSecs << " ms\tfps: " << fps << " imgs/sec";
+    return APP_ERR_OK;
+}
diff --git a/official/cv/MCNN/infer/sdk/main.py b/official/cv/MCNN/infer/sdk/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd13dc952ac3f979aaf6189871e2ddc50c1137b6
--- /dev/null
+++ b/official/cv/MCNN/infer/sdk/main.py
@@ -0,0 +1,141 @@
+# coding=utf-8
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+import datetime
+import os
+import sys
+import numpy as np
+import cv2
+
+from StreamManagerApi import StreamManagerApi
+from StreamManagerApi import MxDataInput
+from StreamManagerApi import StringVector
+from StreamManagerApi import MxProtobufIn
+from StreamManagerApi import InProtobufVector
+import MxpiDataType_pb2 as MxpiDataType
+
+if __name__ == '__main__':
+    # init stream manager
+    stream_manager_api = StreamManagerApi()
+    ret = stream_manager_api.InitManager()
+    if ret != 0:
+        print("Failed to init Stream manager, ret=%s" % str(ret))
+        exit()
+
+    # create streams by pipeline config file
+    with open("./mcnn.pipeline", 'rb') as f:
+        pipelineStr = f.read()
+    ret = stream_manager_api.CreateMultipleStreams(pipelineStr)
+    if ret != 0:
+        print("Failed to create Stream, ret=%s" % str(ret))
+        exit()
+
+    # Construct the input of the stream
+    data_input = MxDataInput()
+
+    dir_name = sys.argv[1]
+    gt_name = sys.argv[2]
+
+    file_list = os.listdir(dir_name)
+    file_list.sort()
+    mae = 0
+    mse = 0
+    img_num = 0
+    start_time = datetime.datetime.now()
+    for file_name in file_list:
+        file_path = os.path.join(dir_name, file_name)
+        gt_path = os.path.join(gt_name, file_name[:-3] + 'csv')
+        if not (file_name.lower().endswith(".jpg") or file_name.lower().endswith(".jpeg") \
+                or file_name.lower().endswith(".png")):
+            continue
+
+        stream_name = b'mcnn_opencv'
+        in_plugin_id = 0
+        input_key = 'appsrc0'
+
+        img = cv2.imread(file_path, 0)
+        img = img.astype(np.float32, copy=False)
+        ht = img.shape[0]
+        wd = img.shape[1]
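+        # pad the grayscale image symmetrically to the fixed 1024 x 1024 input size expected by the om model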
+        hang_left = 512 - int(ht / 2)
+        hang_right = 1024 - hang_left - ht
+        lie_left = 512 - int(wd / 2)
+        lie_right = 1024 - lie_left - wd
+        img = np.pad(img, ((hang_left, hang_right), (lie_left, lie_right)), 'constant')
+
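+        # wrap the padded image in a MxpiTensorPackageList protobuf for the appsrc0 input plugin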
+        img = img.reshape((1, 1, 1024, 1024))
+        tensor_list = MxpiDataType.MxpiTensorPackageList()
+        tensor_pkg = tensor_list.tensorPackageVec.add()
+
+        tensor_vec = tensor_pkg.tensorVec.add()
+        tensor_vec.memType = 0
+        tensor_vec.tensorShape.extend(img.shape)
+        tensor_vec.tensorDataType = 0
+        tensor_vec.dataStr = img.tobytes()
+        tensor_vec.tensorDataSize = img.nbytes
+        buf_type = b"MxTools.MxpiTensorPackageList"
+
+        protobuf = MxProtobufIn()
+        protobuf.key = input_key.encode("utf-8")
+        protobuf.type = buf_type
+        protobuf.protobuf = tensor_list.SerializeToString()
+        protobuf_vec = InProtobufVector()
+        protobuf_vec.push_back(protobuf)
+        err_code = stream_manager_api.SendProtobuf(stream_name, in_plugin_id, protobuf_vec)
+        if err_code != 0:
+            print(
+                "Failed to send data to stream, stream_name(%s), plugin_id(%s), element_name(%s), "
+                "buf_type(%s), err_code(%s)." % (stream_name, in_plugin_id, input_key, buf_type, err_code))
+
+        keys = [b"mxpi_tensorinfer0",]
+        keyVec = StringVector()
+        for key in keys:
+            keyVec.push_back(key)
+        infer_result = stream_manager_api.GetProtobuf(stream_name, 0, keyVec)
+        if infer_result.size() == 0:
+            print("infer_result is null")
+            exit()
+
+        if infer_result[0].errorCode != 0:
+            print("GetProtobuf error. errorCode=%d" % (
+                infer_result[0].errorCode))
+            exit()
+
+        TensorList = MxpiDataType.MxpiTensorPackageList()
+        TensorList.ParseFromString(infer_result[0].messageBuf)
+        data = np.frombuffer(TensorList.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)
+
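+        # the ground-truth density map sums to the true crowd count; the model output sums to the estimate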
+        den = np.loadtxt(open(gt_path, "rb"), delimiter=",", skiprows=0)
+        den = den.astype(np.float32, copy=False)
+        gt_count = np.sum(den)
+        et_count = np.sum(data)
+        mae += abs(gt_count - et_count)
+        mse += (gt_count - et_count) * (gt_count - et_count)
+        img_num += 1
+        print(file_path, "True value:", gt_count, "predictive value:", et_count)
+
+    mae = mae / img_num
+    mse = np.sqrt(mse / img_num)
+    end_time = datetime.datetime.now()
+    print("*********************************************")
+    print("Final accuracy of the project:")
+    print('MAE:', mae, '  MSE:', mse)
+    print("*********************************************")
+    print("Overall project performance:")
+    print(img_num / (end_time - start_time).total_seconds(), "images/second")
+
+    # destroy streams
+    stream_manager_api.DestroyAllStreams()
diff --git a/official/cv/MCNN/infer/sdk/mcnn.pipeline b/official/cv/MCNN/infer/sdk/mcnn.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..6214003074b94cb8267c53fd8800b54dc6b84d41
--- /dev/null
+++ b/official/cv/MCNN/infer/sdk/mcnn.pipeline
@@ -0,0 +1,20 @@
+{
+    "mcnn_opencv": {
+        "appsrc0": {
+            "factory": "appsrc",
+            "next": "mxpi_tensorinfer0"
+        },
+        "mxpi_tensorinfer0": {
+            "props": {
+                "dataSource": "appsrc0",
+                "modelPath": "../model/mcnn.om",
+                "waitingTime": "2000"
+            },
+            "factory": "mxpi_tensorinfer",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "factory": "appsink"
+        }
+    }
+}
\ No newline at end of file
diff --git a/official/cv/MCNN/infer/sdk/run.sh b/official/cv/MCNN/infer/sdk/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fe3f2c190d07b43305090365a2bf3fa5aff04457
--- /dev/null
+++ b/official/cv/MCNN/infer/sdk/run.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+input_dir=$1
+gt_dir=$2
+
+set -e
+
+
+# Simple log helper functions
+info() { echo -e "\033[1;34m[INFO ][MxStream] $1\033[1;37m" ; }
+warn() { echo >&2 -e "\033[1;31m[WARN ][MxStream] $1\033[1;37m" ; }
+
+
+# Set PYTHONPATH so that StreamManagerApi.py can be imported
+export PYTHONPATH=$PYTHONPATH:${MX_SDK_HOME}/python
+
+python3.7 main.py "$input_dir" "$gt_dir"
+exit 0
\ No newline at end of file
diff --git a/official/cv/MCNN/modelarts/start_train.py b/official/cv/MCNN/modelarts/start_train.py
new file mode 100644
index 0000000000000000000000000000000000000000..62eb9ec5460e5037a4a41e8fc930d98291435fec
--- /dev/null
+++ b/official/cv/MCNN/modelarts/start_train.py
@@ -0,0 +1,146 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+######################## train mcnn example ########################
+train mcnn and get network model files(.ckpt) :
+python train.py
+"""
+import os
+import argparse
+import ast
+import numpy as np
+import mindspore.nn as nn
+import mindspore
+from mindspore.context import ParallelMode
+from mindspore import context, Tensor
+from mindspore.communication.management import init, get_rank
+from mindspore.train.callback import LossMonitor, TimeMonitor
+from mindspore.train.serialization import export, load_checkpoint
+from mindspore.train import Model
+from src.data_loader import ImageDataLoader
+from src.config import crowd_cfg as cfg
+from src.dataset import create_dataset
+from src.mcnn import MCNN
+from src.generator_lr import get_lr_sha
+from src.Mcnn_Callback import mcnn_callback
+
+parser = argparse.ArgumentParser(description='MindSpore MCNN Example')
+parser.add_argument('--run_offline', type=ast.literal_eval,
+                    default=False, help='whether to run offline (True) or on ModelArts (False)')
+parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'],
+                    help='device where the code will be implemented (default: Ascend)')
+parser.add_argument("--batch_size", type=int, default=1, help="batch size")
+parser.add_argument("--lr", type=float, default=cfg['lr'], help="batch size")
+parser.add_argument("--momentum", type=float, default=cfg['momentum'], help="batch size")
+parser.add_argument("--epoch_size", type=int, default=cfg['epoch_size'], help="batch size")
+parser.add_argument("--input_size", type=int, default=1024, help="batch size")
+parser.add_argument('--ckpt_path', type=str, default="./train_output", help='Location of ckpt.')
+
+parser.add_argument('--data_url', default=None, help='Location of data.')
+parser.add_argument('--train_url', default=None, help='Location of training outputs.')
+
+parser.add_argument('--train_path', default='../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/train',
+                    help='Location of training images.')
+parser.add_argument('--train_gt_path',
+                    default='../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/train_den',
+                    help='Location of training ground-truth density maps.')
+parser.add_argument('--val_path', default='../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/val',
+                    help='Location of validation images.')
+parser.add_argument('--val_gt_path', default='../MCNN/data/formatted_trainval/shanghaitech_part_A_patches_9/val_den',
+                    help='Location of validation ground-truth density maps.')
+args = parser.parse_args()
+rand_seed = 64678
+np.random.seed(rand_seed)
+
+if __name__ == "__main__":
+    device_num = int(os.getenv("RANK_SIZE", '1'))
+    device_id = int(os.getenv("DEVICE_ID", '0'))
+
+    print("device_id:", device_id)
+    print("device_num:", device_num)
+    device_target = args.device_target
+    context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
+    context.set_context(save_graphs=False)
+    print(args.data_url)
+
+    if device_target == "GPU":
+        context.set_context(enable_graph_kernel=True)
+        device_id = 0
+        if device_num > 1:
+            init()
+            context.reset_auto_parallel_context()
+            context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
+                                              gradients_mean=True)
+            device_id = get_rank()
+    elif device_target == "Ascend":
+        context.set_context(device_id=device_id)
+
+        if device_num > 1:
+            context.reset_auto_parallel_context()
+            context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
+                                              gradients_mean=True)
+            init()
+    else:
+        raise ValueError("Unsupported platform.")
+    if args.run_offline:
+        local_data1_url = args.train_path
+        local_data2_url = args.train_gt_path
+        local_data3_url = args.val_path
+        local_data4_url = args.val_gt_path
+    else:
+        import moxing as mox
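+        # on ModelArts, copy the dataset from OBS (data_url) into the local /cache directories before training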
+        local_data1_url = '/cache/train_path'
+        local_data2_url = '/cache/train_gt_path'
+        local_data3_url = '/cache/val_path'
+        local_data4_url = '/cache/val_gt_path'
+
+        args.train_path = os.path.join(args.data_url, "formatted_trainval/shanghaitech_part_A_patches_9/train")
+        args.train_gt_path = os.path.join(args.data_url, "formatted_trainval/shanghaitech_part_A_patches_9/train_den")
+        args.val_path = os.path.join(args.data_url, "formatted_trainval/shanghaitech_part_A_patches_9/val")
+        args.val_gt_path = os.path.join(args.data_url, "formatted_trainval/shanghaitech_part_A_patches_9/val_den")
+        args.ckpt_path = args.train_url
+        mox.file.copy_parallel(src_url=args.train_path, dst_url=local_data1_url) # pcl
+        mox.file.copy_parallel(src_url=args.train_gt_path, dst_url=local_data2_url) # pcl
+        mox.file.copy_parallel(src_url=args.val_path, dst_url=local_data3_url) # pcl
+        mox.file.copy_parallel(src_url=args.val_gt_path, dst_url=local_data4_url) # pcl
+
+    data_loader = ImageDataLoader(local_data1_url, local_data2_url, shuffle=True, gt_downsample=True, pre_load=True)
+    data_loader_val = ImageDataLoader(local_data3_url, local_data4_url,
+                                      shuffle=False, gt_downsample=True, pre_load=True)
+    ds_train = create_dataset(data_loader, target=args.device_target)
+    ds_val = create_dataset(data_loader_val, target=args.device_target, train=False)
+
+    ds_train = ds_train.batch(args.batch_size)
+    ds_val = ds_val.batch(1)
+
+    network = MCNN()
+    net_loss = nn.MSELoss(reduction='mean')
+    lr = Tensor(get_lr_sha(0, args.lr, args.epoch_size, ds_train.get_dataset_size()))
+    net_opt = nn.Adam(list(filter(lambda p: p.requires_grad, network.get_parameters())), learning_rate=lr)
+
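+    # amp_level="O2" enables mixed-precision training on Ascend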
+    model = Model(network, net_loss, net_opt, amp_level="O2")
+
+    print("============== Starting Training ==============")
+    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
+    eval_callback = mcnn_callback(network, ds_val, args.run_offline, args.ckpt_path)
+    model.train(args.epoch_size, ds_train, callbacks=[time_cb, eval_callback, LossMonitor(1)])
+    if not args.run_offline:
+        mox.file.copy_parallel(src_url='/cache/train_output', dst_url=args.ckpt_path)
+        # on ModelArts the best checkpoint is saved under /cache/train_output
+        best_ckpt = '/cache/train_output/best.ckpt'
+    else:
+        best_ckpt = os.path.join(args.ckpt_path, 'best.ckpt')
+    load_checkpoint(best_ckpt, net=network)
+    inputs = Tensor(np.ones([args.batch_size, 1, args.input_size, args.input_size]), mindspore.float32)
+    export(network, inputs, file_name="mcnn", file_format="AIR")
+    print("MCNN exported")
+    if not args.run_offline:
+        mox.file.copy(src_url='mcnn.air', dst_url=os.path.join(args.train_url, 'mcnn.air'))
diff --git a/official/cv/MCNN/scripts/docker_start.sh b/official/cv/MCNN/scripts/docker_start.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b1bfbb37c3dba0324536837af4f1b68ef02b1f73
--- /dev/null
+++ b/official/cv/MCNN/scripts/docker_start.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+docker_image=$1
+data_dir=$2
+model_dir=$3
+
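+# map the Ascend davinci devices and the driver into the container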
+docker run -it --ipc=host \
+               --device=/dev/davinci0 \
+               --device=/dev/davinci1 \
+               --device=/dev/davinci2 \
+               --device=/dev/davinci3 \
+               --device=/dev/davinci4 \
+               --device=/dev/davinci5 \
+               --device=/dev/davinci6 \
+               --device=/dev/davinci7 \
+               --device=/dev/davinci_manager \
+               --device=/dev/devmm_svm --device=/dev/hisi_hdc \
+               -v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
+               -v /usr/local/Ascend/add-ons/:/usr/local/Ascend/add-ons/ \
+               -v ${model_dir}:${model_dir} \
+               -v ${data_dir}:${data_dir}  \
+               -v ~/ascend/log/npu/conf/slog/slog.conf:/var/log/npu/conf/slog/slog.conf \
+               -v ~/ascend/log/npu/slog/:/var/log/npu/slog -v ~/ascend/log/npu/profiling/:/var/log/npu/profiling \
+               -v ~/ascend/log/npu/dump/:/var/log/npu/dump -v ~/ascend/log/npu/:/usr/slog ${docker_image} \
+               /bin/bash
\ No newline at end of file