diff --git a/.jenkins/check/config/whitelizard.txt b/.jenkins/check/config/whitelizard.txt
index 934d28a6a7be265acac026ab25076b15c54be201..daafb517679d03173de3ef64fde58b0d3b7e55da 100644
--- a/.jenkins/check/config/whitelizard.txt
+++ b/.jenkins/check/config/whitelizard.txt
@@ -37,5 +37,21 @@ models/research/cvtmodel/regnet/src/regnet_y_32gf.py:__init__
 models/research/cvtmodel/regnet/src/regnet_x_8gf.py:__init__
 models/research/cvtmodel/regnet/src/regnet_x_32gf.py:__init__
 models/research/cvtmodel/regnet/src/regnet_x_16gf.py:__init__
-
+models/research/cvtmodel/hrnet/src/hrnet_w30.py:__init__
+models/research/cvtmodel/hrnet/src/hrnet_w30.py:construct
+models/research/cvtmodel/hrnet/src/hrnet_w48.py:__init__
+models/research/cvtmodel/hrnet/src/hrnet_w48.py:construct
+models/research/cvtmodel/hrnet/src/hrnet_w64.py:__init__
+models/research/cvtmodel/hrnet/src/hrnet_w64.py:construct
+models/research/cvtmodel/hrnet/src/hrnet_w40.py:__init__
+models/research/cvtmodel/hrnet/src/hrnet_w40.py:construct
+models/research/cvtmodel/hrnet/src/hrnet_w18_small.py:__init__
+models/research/cvtmodel/resnet_ipl/src/resnetrs200.py:__init__
+models/research/cvtmodel/resnet_ipl/src/gernet_l.py:__init__
+models/research/cvtmodel/resnet_ipl/src/resnet51q.py:__init__
+models/research/cvtmodel/resnet_ipl/src/seresnet152d.py:__init__
+models/research/cvtmodel/resnet_ipl/src/resnest101e.py:__init__
+models/research/cvtmodel/resnet_ipl/src/resnet26t.py:__init__
+models/research/cvtmodel/resnet_ipl/src/resnet101d.py:__init__
+models/research/cvtmodel/resnet_ipl/src/resnetrs50.py:__init__
 
diff --git a/research/cvtmodel/hrnet/README_CN.md b/research/cvtmodel/hrnet/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..f68a83b1368d023d57e7ac5f8c0a1860611f7201
--- /dev/null
+++ b/research/cvtmodel/hrnet/README_CN.md
@@ -0,0 +1,132 @@
+# Contents
+
+<!-- TOC -->
+
+- [Contents](#contents)
+- [HRNet Description](#hrnet-description)
+- [Model Architecture](#model-architecture)
+- [Dataset](#dataset)
+- [Environment Requirements](#environment-requirements)
+- [Script Description](#script-description)
+    - [Script and Sample Code](#script-and-sample-code)
+    - [Export Process](#export-process)
+        - [Export](#export)
+    - [Inference Process](#inference-process)
+        - [Inference](#inference)
+- [ModelZoo Homepage](#modelzoo-homepage)
+
+<!-- /TOC -->
+
+# HRNet Description
+
+The HRNet series is a family of image-classification networks extended from HRNet. A description of the models is available at (http://rwightman.github.io/pytorch-image-models/models/).
+This repository takes the model files provided by torch, converts them into MindSpore ckpt files with the MindConverter tool, and runs full inference to verify the accuracy of the converted checkpoints.
+
+# Model Architecture
+
+HRNet supports five variants: Hrnet_w18_small, Hrnet_w30, Hrnet_w40, Hrnet_w48, and Hrnet_w64.
+
+# Dataset
+
+Dataset used by HRNet: ImageNet
+
+The default dataset configuration is as follows (a minimal sketch of these preprocessing steps follows the list):
+
+- Test dataset preprocessing:
+    - Input image size (Hrnet_w18_small): 224\*224 (resize the image to 256\*256, then crop the central region)
+    - Input image size (Hrnet_w30): 224\*224 (resize the image to 256\*256, then crop the central region)
+    - Input image size (Hrnet_w40): 224\*224 (resize the image to 256\*256, then crop the central region)
+    - Input image size (Hrnet_w48): 224\*224 (resize the image to 256\*256, then crop the central region)
+    - Input image size (Hrnet_w64): 224\*224 (resize the image to 256\*256, then crop the central region)
+    - Normalize the input images with the mean and standard deviation
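+
+The sketch below (not part of the released scripts; names are illustrative) shows how this evaluation-time preprocessing could be expressed with MindSpore dataset transforms; the shipped Ascend 310 pipeline in `ascend310_infer/src/main.cc` applies the equivalent steps in C++.
+
+```python
+import mindspore.dataset as ds
+import mindspore.dataset.vision.c_transforms as C
+
+def build_eval_dataset(data_path, batch_size=1):
+    """Illustrative eval pipeline: decode -> resize 256 -> center-crop 224 -> normalize -> HWC2CHW."""
+    dataset = ds.ImageFolderDataset(data_path, shuffle=False)
+    transforms = [
+        C.Decode(),
+        C.Resize((256, 256)),                        # scale to 256x256
+        C.CenterCrop(224),                           # keep the central 224x224 region
+        C.Normalize(mean=[123.675, 116.28, 103.53],  # ImageNet mean/std, matching main.cc
+                    std=[58.395, 57.12, 57.375]),
+        C.HWC2CHW(),
+    ]
+    dataset = dataset.map(operations=transforms, input_columns="image")
+    return dataset.batch(batch_size)
+```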
+
+# Environment Requirements
+
+- Hardware (Ascend/GPU)
+    - Prepare the hardware environment with an Ascend or GPU processor.
+- Framework
+    - [MindSpore](https://www.mindspore.cn/install)
+- For more information, please check the resources below:
+    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/zh-CN/master/index.html)
+    - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/master/index.html)
+
+# Script Description
+
+## Script and Sample Code
+
+```shell
+├── model_zoo
+    ├── README_CN.md                     // descriptions of all models
+    ├── hrnet
+        ├── README_CN.md                 // description of the HRNet series
+        ├── ascend310_infer              // source code for Ascend 310 inference
+        ├── scripts
+        │   ├── run_infer_310.sh         // shell script for Ascend 310 inference
+        ├── src
+        │   ├── hrnet_w18_small.py       // hrnet_w18_small model definition
+        │   ├── hrnet_w30.py             // hrnet_w30 model definition
+        │   ├── hrnet_w40.py             // hrnet_w40 model definition
+        │   ├── hrnet_w48.py             // hrnet_w48 model definition
+        │   ├── hrnet_w64.py             // hrnet_w64 model definition
+        ├── export.py                    // export script
+        ├── preprocess.py                // data preprocessing script
+        ├── postprocess.py               // Ascend 310 inference post-processing script
+```
+
+## Export Process
+
+### Export
+
+```shell
+python export.py --backbone [BACKBONE] --ckpt_path [CKPT_PATH] --device_target [DEVICE_TARGET] --device_id 0 --file_format [EXPORT_FORMAT] --file_name [FILE_NAME]
+```
+
+`BACKBONE` must be one of hrnet_w18_small, hrnet_w30, hrnet_w40, hrnet_w48, hrnet_w64; `EXPORT_FORMAT` should be set to "MINDIR".
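+
+For reference, the sketch below condenses what `export.py` in this directory does (the backbone import is shown for hrnet_w30; the other variants follow the same pattern, and the checkpoint path is a placeholder):
+
+```python
+import numpy as np
+import mindspore as ms
+from mindspore import Tensor
+from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
+
+from src.hrnet_w30 import MindSporeModel   # selected by --backbone in export.py
+
+net = MindSporeModel()
+load_param_into_net(net, load_checkpoint("hrnet_w30.ckpt"))   # placeholder ckpt path
+
+# a dummy 1x3x224x224 input fixes the exported input shape
+input_arr = Tensor(np.zeros([1, 3, 224, 224]), ms.float32)
+export(net, input_arr, file_name="hrnet_w30", file_format="MINDIR")
+```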
+
+## Inference Process
+
+### Inference
+
+The model must be exported before running inference. MINDIR models can be exported in any environment.
+
+```shell
+# Ascend 310 inference
+bash run_infer_310.sh [MINDIR_PATH] [BACKBONE] [DATA_PATH] [DEVICE_ID]
+```
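+
+`run_infer_310.sh` invokes `postprocess.py`, which reads each raw output file in `result_Files` back as a 1x1000 logit vector and accumulates top-1/top-5 counts. A minimal sketch of that decoding step (the file name is a placeholder):
+
+```python
+import numpy as np
+
+# each inference output is dumped as float32 logits for the 1000 ImageNet classes
+logits = np.fromfile("result_Files/ILSVRC2012_val_00000001_0.bin", dtype=np.float32).reshape(1, 1000)
+top1 = int(np.argmax(logits, axis=-1))   # predicted class index
+top5 = np.argsort(logits)[:, -5:]        # five highest-scoring classes
+```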
+
+Note: the HRNet series networks use the ImageNet dataset.
+
+The inference results are saved in the current directory, and results like the following can be found in the acc.log file.
+The result of the Hrnet_w18_small network on ImageNet is as follows:
+
+  ```log
+  Eval: top1_correct=35778, tot=50000, acc=71.56%
+  ```
+
+The result of the Hrnet_w30 network on ImageNet is as follows:
+
+  ```log
+  Eval: top1_correct=38987, tot=50000, acc=77.97%
+  ```
+
+The result of the Hrnet_w40 network on ImageNet is as follows:
+
+  ```log
+  Eval: top1_correct=39251, tot=50000, acc=78.5%
+  ```
+
+The result of the Hrnet_w48 network on ImageNet is as follows:
+
+  ```log
+  Eval: top1_correct=39447, tot=50000, acc=78.89%
+  ```
+
+The result of the Hrnet_w64 network on ImageNet is as follows:
+
+  ```log
+  Eval: top1_correct=39547, tot=50000, acc=79.09%
+  ```
+
+# ModelZoo Homepage
+
+Please check the official [homepage](https://gitee.com/mindspore/models).
diff --git a/research/cvtmodel/hrnet/ascend310_infer/CMakeLists.txt b/research/cvtmodel/hrnet/ascend310_infer/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ee3c85447340e0449ff2b70ed24f60a17e07b2b6
--- /dev/null
+++ b/research/cvtmodel/hrnet/ascend310_infer/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.14.1)
+project(Ascend310Infer)
+add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -std=c++17 -Werror -Wall -fPIE -Wl,--allow-shlib-undefined")
+set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR}/)
+option(MINDSPORE_PATH "mindspore install path" "")
+include_directories(${MINDSPORE_PATH})
+include_directories(${MINDSPORE_PATH}/include)
+include_directories(${PROJECT_SRC_ROOT})
+find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib)
+file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*)
+
+add_executable(main src/main.cc src/utils.cc)
+target_link_libraries(main ${MS_LIB} ${MD_LIB} gflags)
diff --git a/research/cvtmodel/hrnet/ascend310_infer/build.sh b/research/cvtmodel/hrnet/ascend310_infer/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..770a8851efade7f352039fc8665d307ae1abbb00
--- /dev/null
+++ b/research/cvtmodel/hrnet/ascend310_infer/build.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ ! -d out ]; then
+  mkdir out
+fi
+cd out || exit
+cmake .. \
+    -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
+make
diff --git a/research/cvtmodel/hrnet/ascend310_infer/inc/utils.h b/research/cvtmodel/hrnet/ascend310_infer/inc/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..efebe03a8c1179f5a1f9d5f7ee07e0352a9937c6
--- /dev/null
+++ b/research/cvtmodel/hrnet/ascend310_infer/inc/utils.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_INFERENCE_UTILS_H_
+#define MINDSPORE_INFERENCE_UTILS_H_
+
+#include <sys/stat.h>
+#include <dirent.h>
+#include <vector>
+#include <string>
+#include <memory>
+#include "include/api/types.h"
+
+std::vector<std::string> GetAllFiles(std::string_view dirName);
+DIR *OpenDir(std::string_view dirName);
+std::string RealPath(std::string_view path);
+mindspore::MSTensor ReadFileToTensor(const std::string &file);
+int WriteResult(const std::string& imageFile, const std::vector<mindspore::MSTensor> &outputs);
+#endif
diff --git a/research/cvtmodel/hrnet/ascend310_infer/src/main.cc b/research/cvtmodel/hrnet/ascend310_infer/src/main.cc
new file mode 100644
index 0000000000000000000000000000000000000000..f91b4d3cad950a06c0a87b43ef407e31640406c8
--- /dev/null
+++ b/research/cvtmodel/hrnet/ascend310_infer/src/main.cc
@@ -0,0 +1,155 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/time.h>
+#include <gflags/gflags.h>
+#include <dirent.h>
+#include <iostream>
+#include <string>
+#include <algorithm>
+#include <iosfwd>
+#include <vector>
+#include <fstream>
+#include <sstream>
+
+#include "../inc/utils.h"
+#include "include/dataset/execute.h"
+#include "include/dataset/transforms.h"
+#include "include/dataset/vision.h"
+#include "include/dataset/vision_ascend.h"
+#include "include/api/types.h"
+#include "include/api/model.h"
+#include "include/api/serialization.h"
+#include "include/api/context.h"
+
+using mindspore::Serialization;
+using mindspore::Model;
+using mindspore::Context;
+using mindspore::Status;
+using mindspore::ModelType;
+using mindspore::Graph;
+using mindspore::GraphCell;
+using mindspore::kSuccess;
+using mindspore::MSTensor;
+using mindspore::DataType;
+using mindspore::dataset::Execute;
+using mindspore::dataset::TensorTransform;
+using mindspore::dataset::InterpolationMode;
+using mindspore::dataset::vision::Decode;
+using mindspore::dataset::vision::Resize;
+using mindspore::dataset::vision::CenterCrop;
+using mindspore::dataset::vision::Normalize;
+using mindspore::dataset::vision::HWC2CHW;
+
+using mindspore::dataset::transforms::TypeCast;
+
+
+DEFINE_string(model_path, "", "model path");
+DEFINE_string(dataset_path, ".", "dataset path");
+DEFINE_int32(device_id, 0, "device id");
+
+int main(int argc, char **argv) {
+    gflags::ParseCommandLineFlags(&argc, &argv, true);
+    if (RealPath(FLAGS_model_path).empty()) {
+      std::cout << "Invalid model" << std::endl;
+      return 1;
+    }
+
+    auto context = std::make_shared<Context>();
+    auto ascend310_info = std::make_shared<mindspore::Ascend310DeviceInfo>();
+    ascend310_info->SetDeviceID(FLAGS_device_id);
+    context->MutableDeviceInfo().push_back(ascend310_info);
+
+    Graph graph;
+    Status ret = Serialization::Load(FLAGS_model_path, ModelType::kMindIR, &graph);
+    if (ret != kSuccess) {
+        std::cout << "Load model failed." << std::endl;
+        return 1;
+    }
+
+    Model model;
+    ret = model.Build(GraphCell(graph), context);
+    if (ret != kSuccess) {
+        std::cout << "ERROR: Build failed." << std::endl;
+        return 1;
+    }
+
+    std::vector<MSTensor> modelInputs = model.GetInputs();
+
+    auto all_files = GetAllFiles(FLAGS_dataset_path);
+    if (all_files.empty()) {
+        std::cout << "ERROR: no input data." << std::endl;
+        return 1;
+    }
+
+    std::shared_ptr<TensorTransform> decode(new Decode());
+    std::shared_ptr<TensorTransform> resizeArea(new Resize({256, 256}, InterpolationMode::kArea));
+    std::shared_ptr<TensorTransform> centerCrop(new CenterCrop({224, 224}));
+    std::shared_ptr<TensorTransform> normImageNet(new Normalize({123.675, 116.28, 103.53}, {58.395, 57.12, 57.375}));
+    std::shared_ptr<TensorTransform> hwc2chw(new HWC2CHW());
+
+    mindspore::dataset::Execute transformArea({decode, resizeArea, centerCrop, normImageNet, hwc2chw});
+
+    std::map<double, double> costTime_map;
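+    // costTime_map: inference start time (ms) -> end time (ms); used below to compute the average latency.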
+
+    size_t size = all_files.size();
+    for (size_t i = 0; i < size; ++i) {
+        struct timeval start;
+        struct timeval end;
+        double startTime_ms;
+        double endTime_ms;
+        std::vector<MSTensor> inputs;
+        std::vector<MSTensor> outputs;
+
+        std::cout << "Start predict input files:" << all_files[i] << std::endl;
+        mindspore::MSTensor image =  ReadFileToTensor(all_files[i]);
+        transformArea(image, &image);
+        inputs.emplace_back(modelInputs[0].Name(), modelInputs[0].DataType(), modelInputs[0].Shape(),
+                            image.Data().get(), image.DataSize());
+
+        gettimeofday(&start, NULL);
+        ret = model.Predict(inputs, &outputs);
+        gettimeofday(&end, NULL);
+        if (ret != kSuccess) {
+            std::cout << "Predict " << all_files[i] << " failed." << std::endl;
+            return 1;
+        }
+
+        startTime_ms = (1.0 * start.tv_sec * 1000000 + start.tv_usec) / 1000;
+        endTime_ms = (1.0 * end.tv_sec * 1000000 + end.tv_usec) / 1000;
+        costTime_map.insert(std::pair<double, double>(startTime_ms, endTime_ms));
+        int rst = WriteResult(all_files[i], outputs);
+        if (rst != 0) {
+            std::cout << "write result failed." << std::endl;
+            return rst;
+        }
+    }
+    double average = 0.0;
+    int inferCount = 0;
+
+    for (auto iter = costTime_map.begin(); iter != costTime_map.end(); iter++) {
+        double diff = 0.0;
+        diff = iter->second - iter->first;
+        average += diff;
+        inferCount++;
+    }
+    average = average / inferCount;
+    std::stringstream timeCost;
+    timeCost << "NN inference cost average time: "<< average << " ms of infer_count " << inferCount << std::endl;
+    std::cout << "NN inference cost average time: "<< average << "ms of infer_count " << inferCount << std::endl;
+    std::string fileName = "./time_Result" + std::string("/test_perform_static.txt");
+    std::ofstream fileStream(fileName.c_str(), std::ios::trunc);
+    fileStream << timeCost.str();
+    fileStream.close();
+    costTime_map.clear();
+    return 0;
+}
diff --git a/research/cvtmodel/hrnet/ascend310_infer/src/utils.cc b/research/cvtmodel/hrnet/ascend310_infer/src/utils.cc
new file mode 100644
index 0000000000000000000000000000000000000000..653b1de44962614ac77d44481f8d1a7bde52caaf
--- /dev/null
+++ b/research/cvtmodel/hrnet/ascend310_infer/src/utils.cc
@@ -0,0 +1,160 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "inc/utils.h"
+
+#include <fstream>
+#include <algorithm>
+#include <iostream>
+
+using mindspore::MSTensor;
+using mindspore::DataType;
+
+std::vector<std::string> GetAllFiles(std::string_view dirName) {
+    struct dirent *filename;
+    DIR *dir = OpenDir(dirName);
+    if (dir == nullptr) {
+        return {};
+    }
+    std::vector<std::string> dirs;
+    std::vector<std::string> files;
+    while ((filename = readdir(dir)) != nullptr) {
+        std::string dName = std::string(filename->d_name);
+        if (dName == "." || dName == "..") {
+            continue;
+        } else if (filename->d_type == DT_DIR) {
+            dirs.emplace_back(std::string(dirName) + "/" + filename->d_name);
+        } else if (filename->d_type == DT_REG) {
+            files.emplace_back(std::string(dirName) + "/" + filename->d_name);
+        } else {
+            continue;
+        }
+    }
+    closedir(dir);
+
+    for (auto d : dirs) {
+        dir = OpenDir(d);
+        if (dir == nullptr) {
+            continue;
+        }
+        while ((filename = readdir(dir)) != nullptr) {
+            std::string dName = std::string(filename->d_name);
+            if (dName == "." || dName == ".." || filename->d_type != DT_REG) {
+                continue;
+            }
+            files.emplace_back(std::string(d) + "/" + filename->d_name);
+        }
+        closedir(dir);
+    }
+    std::sort(files.begin(), files.end());
+    for (auto &f : files) {
+        std::cout << "image file: " << f << std::endl;
+    }
+    return files;
+}
+
+int WriteResult(const std::string& imageFile, const std::vector<MSTensor> &outputs) {
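+    // Each network output i is written as ./result_Files/<image basename>_<i>.bin.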
+    std::string homePath = "./result_Files";
+    const int INVALID_POINTER = -1;
+    const int ERROR = -2;
+    for (size_t i = 0; i < outputs.size(); ++i) {
+        size_t outputSize;
+        std::shared_ptr<const void> netOutput;
+        netOutput = outputs[i].Data();
+        outputSize = outputs[i].DataSize();
+        int pos = imageFile.rfind('/');
+        std::string fileName(imageFile, pos + 1);
+        fileName.replace(fileName.find('.'), fileName.size() - fileName.find('.'), '_' + std::to_string(i) + ".bin");
+        std::string outFileName = homePath + "/" + fileName;
+        FILE * outputFile = fopen(outFileName.c_str(), "wb");
+        if (outputFile == nullptr) {
+            std::cout << "open result file " << outFileName << " failed" << std::endl;
+            return INVALID_POINTER;
+        }
+        size_t size = fwrite(netOutput.get(), sizeof(char), outputSize, outputFile);
+        if (size != outputSize) {
+            fclose(outputFile);
+            outputFile = nullptr;
+            std::cout << "write result file " << outFileName << " failed, write size[" << size <<
+                "] is smaller than output size[" << outputSize << "], maybe the disk is full." << std::endl;
+            return ERROR;
+        }
+        fclose(outputFile);
+        outputFile = nullptr;
+    }
+    return 0;
+}
+
+mindspore::MSTensor ReadFileToTensor(const std::string &file) {
+  if (file.empty()) {
+    std::cout << "Pointer file is nullptr" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  std::ifstream ifs(file);
+  if (!ifs.good()) {
+    std::cout << "File: " << file << " is not exist" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  if (!ifs.is_open()) {
+    std::cout << "File: " << file << "open failed" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  ifs.seekg(0, std::ios::end);
+  size_t size = ifs.tellg();
+  mindspore::MSTensor buffer(file, mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, size);
+
+  ifs.seekg(0, std::ios::beg);
+  ifs.read(reinterpret_cast<char *>(buffer.MutableData()), size);
+  ifs.close();
+
+  return buffer;
+}
+
+
+DIR *OpenDir(std::string_view dirName) {
+    if (dirName.empty()) {
+        std::cout << " dirName is null ! " << std::endl;
+        return nullptr;
+    }
+    std::string realPath = RealPath(dirName);
+    struct stat s;
+    if (lstat(realPath.c_str(), &s) != 0 || !S_ISDIR(s.st_mode)) {
+        std::cout << "dirName is not a valid directory !" << std::endl;
+        std::cout << "dirName is not a valid directory !" << std::endl;
+        return nullptr;
+    }
+    DIR *dir;
+    dir = opendir(realPath.c_str());
+    if (dir == nullptr) {
+        std::cout << "Can not open dir " << dirName << std::endl;
+        return nullptr;
+    }
+    std::cout << "Successfully opened the dir " << dirName << std::endl;
+    return dir;
+}
+
+std::string RealPath(std::string_view path) {
+    char realPathMem[PATH_MAX] = {0};
+    char *realPathRet = nullptr;
+    realPathRet = realpath(path.data(), realPathMem);
+
+    if (realPathRet == nullptr) {
+        std::cout << "File: " << path << " is not exist.";
+        return "";
+    }
+
+    std::string realPath(realPathMem);
+    std::cout << path << " realpath is: " << realPath << std::endl;
+    return realPath;
+}
diff --git a/research/cvtmodel/hrnet/export.py b/research/cvtmodel/hrnet/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..493acfc7f45dca6e439dabb7887d5b648c203932
--- /dev/null
+++ b/research/cvtmodel/hrnet/export.py
@@ -0,0 +1,63 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""export checkpoint file into air, onnx, mindir models
+   Suggest run as python export.py --ckpt_path [ckpt_path] --file_name [file_name]
+                                   --file_format [file format]
+"""
+import argparse
+import numpy as np
+import mindspore as ms
+from mindspore import context, Tensor
+from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
+
+parser = argparse.ArgumentParser(description='export checkpoint file into MINDIR or AIR model')
+parser.add_argument("--backbone", type=str, required=True, default="hrnet_w18_small", help="model backbone")
+parser.add_argument("--ckpt_path", type=str, required=True, help="checkpoint file path")
+parser.add_argument("--file_name", type=str, default="hrnet_w18_small", help="file name")
+parser.add_argument("--file_format", type=str, default="MINDIR", choices=["MINDIR", "AIR"], help="file format")
+parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU"], help="device target")
+parser.add_argument("--device_id", type=int, default=0, help="device target")
+args = parser.parse_args()
+
+context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
+
+def model_export():
+    '''export main function'''
+    if args.device_target == "Ascend":
+        context.set_context(device_id=args.device_id)
+
+    image_size = 224
+    if args.backbone == "hrnet_w18_small":
+        from src.hrnet_w18_small import MindSporeModel
+    elif args.backbone == "hrnet_w30":
+        from src.hrnet_w30 import MindSporeModel
+    elif args.backbone == "hrnet_w40":
+        from src.hrnet_w40 import MindSporeModel
+    elif args.backbone == "hrnet_w48":
+        from src.hrnet_w48 import MindSporeModel
+    elif args.backbone == "hrnet_w64":
+        from src.hrnet_w64 import MindSporeModel
+
+    net = MindSporeModel()
+
+    param_dict = load_checkpoint(args.ckpt_path)
+    load_param_into_net(net, param_dict)
+
+    input_arr = Tensor(np.zeros([1, 3, image_size, image_size]), ms.float32)
+    export(net, input_arr, file_name=args.file_name, file_format=args.file_format)
+
+if __name__ == '__main__':
+    model_export()
diff --git a/research/cvtmodel/hrnet/postprocess.py b/research/cvtmodel/hrnet/postprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..192881adbaa3c3deb0ba5f2e11f4cf911df3f63b
--- /dev/null
+++ b/research/cvtmodel/hrnet/postprocess.py
@@ -0,0 +1,81 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''post process for 310 inference'''
+import os
+import argparse
+import numpy as np
+
+parser = argparse.ArgumentParser(description='post process for 310 inference')
+parser.add_argument("--dataset", type=str, default="imagenet", help="result file path")
+parser.add_argument("--result_path", type=str, required=True, help="result file path")
+parser.add_argument("--label_file", type=str, required=True, help="label file")
+args = parser.parse_args()
+
+def get_top5_acc(top5_arg, gt_class):
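+    '''Count how many ground-truth labels appear among their top-5 predictions.'''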
+    sub_count = 0
+    for top5, gt in zip(top5_arg, gt_class):
+        if gt in top5:
+            sub_count += 1
+    return sub_count
+
+def read_label(label_file):
+    '''read label file'''
+    with open(label_file, "r") as f:
+        lines = f.readlines()
+
+    img_label = {}
+    for line in lines:
+        img_id = line.split(":")[0]
+        label = line.split(":")[1]
+        img_label[img_id] = label
+
+    return img_label
+
+def cal_acc(dataset, result_path, label_file):
+    '''main acc calculation function'''
+    img_label = read_label(label_file)
+
+    img_tot = 0
+    top1_correct = 0
+    top5_correct = 0
+
+    files = os.listdir(result_path)
+    for file in files:
+        full_file_path = os.path.join(result_path, file)
+        if os.path.isfile(full_file_path):
+            result = np.fromfile(full_file_path, dtype=np.float32).reshape(1, 1000)
+            gt_classes = int(img_label[file[:-6]])
+
+            top1_output = np.argmax(result, (-1))
+            top5_output = np.argsort(result)[:, -5:]
+
+            t1_correct = np.equal(top1_output, gt_classes).sum()
+            top1_correct += t1_correct
+            top5_correct += get_top5_acc(top5_output, [gt_classes])
+            img_tot += 1
+
+    results = [[top1_correct], [top5_correct], [img_tot]]
+
+    results = np.array(results)
+    top1_correct = results[0, 0]
+    top5_correct = results[1, 0]
+    img_tot = results[2, 0]
+    acc1 = 100.0 * top1_correct / img_tot
+    acc5 = 100.0 * top5_correct / img_tot
+    print('Eval: top1_correct={}, tot={}, acc={:.2f}%'.format(top1_correct, img_tot, acc1))
+    print('Eval: top5_correct={}, tot={}, acc={:.2f}%'.format(top5_correct, img_tot, acc5))
+
+if __name__ == "__main__":
+    cal_acc(args.dataset, args.result_path, args.label_file)
diff --git a/research/cvtmodel/hrnet/preprocess.py b/research/cvtmodel/hrnet/preprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef53c220ef47609d17ab0600ff47ca6a743325be
--- /dev/null
+++ b/research/cvtmodel/hrnet/preprocess.py
@@ -0,0 +1,47 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""preprocess"""
+import os
+import argparse
+
+parser = argparse.ArgumentParser('preprocess')
+parser.add_argument('--data_path', type=str, default='', help='eval data dir')
+args = parser.parse_args()
+
+def create_label(result_path, dir_path):
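+    '''Write one "<image id>:<class index>" line per image to imagenet_label.txt; class indices follow the sorted order of the class folders.'''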
+    print("[WARNING] Create imagenet label. Currently only use for Imagenet2012!")
+    text_path = os.path.join(result_path, "imagenet_label.txt")
+    dirs = os.listdir(dir_path)
+    file_list = []
+    for file in dirs:
+        file_list.append(file)
+    file_list = sorted(file_list)
+    total = 0
+    img_label = {}
+    text_file = open(text_path, 'a')
+    for i, file_dir in enumerate(file_list):
+        files = os.listdir(os.path.join(dir_path, file_dir))
+        for f in files:
+            img_label[f.split('.')[0]] = i
+            line = f.split('.')[0] + ":" + str(i)
+            text_file.write(line)
+            text_file.write('\n')
+        total += len(files)
+    text_file.close()
+    print("[INFO] Completed! Total {} data.".format(total))
+
+if __name__ == "__main__":
+    create_label('./preprocess_Result/', args.data_path)
diff --git a/research/cvtmodel/hrnet/scripts/run_infer_310.sh b/research/cvtmodel/hrnet/scripts/run_infer_310.sh
new file mode 100644
index 0000000000000000000000000000000000000000..1082abb5bb1d4ab167bf336d96f1fb3cb74204d5
--- /dev/null
+++ b/research/cvtmodel/hrnet/scripts/run_infer_310.sh
@@ -0,0 +1,113 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [[ $# -lt 3 || $# -gt 4 ]]; then
+    echo "Usage: bash run_infer_310.sh [MINDIR_PATH] [BACKBONE] [DATA_PATH] [DEVICE_ID]
+    DEVICE_ID is optional, default value is zero"
+exit 1
+fi
+
+get_real_path(){
+  if [ "${1:0:1}" == "/" ]; then
+    echo "$1"
+  else
+    echo "$(realpath -m $PWD/$1)"
+  fi
+}
+
+model=$(get_real_path $1)
+backbone=$2
+data_path=$(get_real_path $3)
+device_id=0
+
+if [ $# == 4 ]; then
+    device_id=$4
+fi
+
+echo $model
+echo $backbone
+echo $data_path
+echo $device_id
+
+export ASCEND_HOME=/usr/local/Ascend/
+if [ -d ${ASCEND_HOME}/ascend-toolkit ]; then
+    export PATH=$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/atc/bin:$PATH
+    export LD_LIBRARY_PATH=/usr/local/lib:$ASCEND_HOME/ascend-toolkit/latest/atc/lib64:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export TBE_IMPL_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe
+    export PYTHONPATH=${TBE_IMPL_PATH}:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp
+else
+    export PATH=$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH
+    export LD_LIBRARY_PATH=/usr/local/lib:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export PYTHONPATH=$ASCEND_HOME/atc/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/opp
+fi
+
+function preprocess_data()
+{
+    if [ -d preprocess_Result ]; then
+        rm -rf ./preprocess_Result
+    fi
+    mkdir preprocess_Result
+    python ../preprocess.py --data_path=$data_path
+}
+
+function compile_app()
+{
+    cd ../ascend310_infer || exit
+    if [ -f "Makefile" ]; then
+        make clean
+    fi
+    sh build.sh &> build.log
+
+    if [ $? -ne 0 ]; then
+        echo "compile app code failed"
+        exit 1
+    fi
+    cd - || exit
+}
+
+function infer()
+{
+    if [ -d result_Files ]; then
+        rm -rf ./result_Files
+    fi
+     if [ -d time_Result ]; then
+        rm -rf ./time_Result
+    fi
+    mkdir result_Files
+    mkdir time_Result
+    ../ascend310_infer/out/main --model_path=$model --dataset_path=$data_path --device_id=$device_id &> infer.log
+
+    if [ $? -ne 0 ]; then
+        echo "execute inference failed"
+        exit 1
+    fi
+}
+
+function cal_acc()
+{
+    python ../postprocess.py --label_file=./preprocess_Result/imagenet_label.txt --result_path=result_Files &> acc.log
+    if [ $? -ne 0 ]; then
+        echo "calculate accuracy failed"
+        exit 1
+    fi
+}
+
+preprocess_data
+compile_app
+infer
+cal_acc
diff --git a/research/cvtmodel/hrnet/src/hrnet_w18_small.py b/research/cvtmodel/hrnet/src/hrnet_w18_small.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e173fabeb2b9b3376fc48cf3e21c077c2bcea9f
--- /dev/null
+++ b/research/cvtmodel/hrnet/src/hrnet_w18_small.py
@@ -0,0 +1,683 @@
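+# Network definition for hrnet_w18_small, converted from the torch model file with the
+# MindConverter tool (see README_CN.md); module and layer names follow the converter's
+# auto-generated naming scheme.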
+import mindspore.ops as P
+from mindspore import nn
+
+
+class Module3(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode):
+        super(Module3, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
+class Module10(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode, module3_0_conv2d_0_in_channels, module3_0_conv2d_0_out_channels,
+                 module3_0_conv2d_0_kernel_size, module3_0_conv2d_0_stride, module3_0_conv2d_0_padding,
+                 module3_0_conv2d_0_pad_mode, module3_1_conv2d_0_in_channels, module3_1_conv2d_0_out_channels,
+                 module3_1_conv2d_0_kernel_size, module3_1_conv2d_0_stride, module3_1_conv2d_0_padding,
+                 module3_1_conv2d_0_pad_mode):
+        super(Module10, self).__init__()
+        self.module3_0 = Module3(conv2d_0_in_channels=module3_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module3_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module3_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module3_0_conv2d_0_stride,
+                                 conv2d_0_padding=module3_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module3_0_conv2d_0_pad_mode)
+        self.module3_1 = Module3(conv2d_0_in_channels=module3_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module3_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module3_1_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module3_1_conv2d_0_stride,
+                                 conv2d_0_padding=module3_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module3_1_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module3_0_opt = self.module3_0(x)
+        module3_1_opt = self.module3_1(module3_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module3_1_opt)
+        return opt_conv2d_0
+
+
+class Module1(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_5_in_channels, conv2d_5_out_channels, conv2d_7_in_channels, conv2d_7_out_channels):
+        super(Module1, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_4 = nn.ReLU()
+        self.conv2d_5 = nn.Conv2d(in_channels=conv2d_5_in_channels,
+                                  out_channels=conv2d_5_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_6 = nn.ReLU()
+        self.conv2d_7 = nn.Conv2d(in_channels=conv2d_7_in_channels,
+                                  out_channels=conv2d_7_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_9 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_add_3 = P.Add()(opt_conv2d_2, x)
+        opt_relu_4 = self.relu_4(opt_add_3)
+        opt_conv2d_5 = self.conv2d_5(opt_relu_4)
+        opt_relu_6 = self.relu_6(opt_conv2d_5)
+        opt_conv2d_7 = self.conv2d_7(opt_relu_6)
+        opt_add_8 = P.Add()(opt_conv2d_7, opt_relu_4)
+        opt_relu_9 = self.relu_9(opt_add_8)
+        return opt_relu_9
+
+
+class Module8(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, resizenearestneighbor_1_size):
+        super(Module8, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.resizenearestneighbor_1 = P.ResizeNearestNeighbor(size=resizenearestneighbor_1_size, align_corners=False)
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_resizenearestneighbor_1 = self.resizenearestneighbor_1(opt_conv2d_0)
+        return opt_resizenearestneighbor_1
+
+
+class Module6(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module3_0_conv2d_0_in_channels,
+                 module3_0_conv2d_0_out_channels, module3_0_conv2d_0_kernel_size, module3_0_conv2d_0_stride,
+                 module3_0_conv2d_0_padding, module3_0_conv2d_0_pad_mode):
+        super(Module6, self).__init__()
+        self.module3_0 = Module3(conv2d_0_in_channels=module3_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module3_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module3_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module3_0_conv2d_0_stride,
+                                 conv2d_0_padding=module3_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module3_0_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module3_0_opt = self.module3_0(x)
+        opt_conv2d_0 = self.conv2d_0(module3_0_opt)
+        return opt_conv2d_0
+
+
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=64,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module3_0 = Module3(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module10_0 = Module10(conv2d_0_in_channels=32,
+                                   conv2d_0_out_channels=128,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module3_0_conv2d_0_in_channels=64,
+                                   module3_0_conv2d_0_out_channels=32,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=32,
+                                   module3_1_conv2d_0_out_channels=32,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(1, 1),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.conv2d_5 = nn.Conv2d(in_channels=64,
+                                  out_channels=128,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_11 = nn.ReLU()
+        self.module3_1 = Module3(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=16,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module3_2 = Module3(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=32,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module1_0 = Module1(conv2d_0_in_channels=16,
+                                 conv2d_0_out_channels=16,
+                                 conv2d_2_in_channels=16,
+                                 conv2d_2_out_channels=16,
+                                 conv2d_5_in_channels=16,
+                                 conv2d_5_out_channels=16,
+                                 conv2d_7_in_channels=16,
+                                 conv2d_7_out_channels=16)
+        self.module1_1 = Module1(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=32,
+                                 conv2d_2_in_channels=32,
+                                 conv2d_2_out_channels=32,
+                                 conv2d_5_in_channels=32,
+                                 conv2d_5_out_channels=32,
+                                 conv2d_7_in_channels=32,
+                                 conv2d_7_out_channels=32)
+        self.module8_0 = Module8(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=16,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_44 = nn.ReLU()
+        self.conv2d_36 = nn.Conv2d(in_channels=16,
+                                   out_channels=32,
+                                   kernel_size=(3, 3),
+                                   stride=(2, 2),
+                                   padding=(1, 1, 1, 1),
+                                   pad_mode="pad",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_40 = nn.ReLU()
+        self.module3_3 = Module3(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module1_2 = Module1(conv2d_0_in_channels=16,
+                                 conv2d_0_out_channels=16,
+                                 conv2d_2_in_channels=16,
+                                 conv2d_2_out_channels=16,
+                                 conv2d_5_in_channels=16,
+                                 conv2d_5_out_channels=16,
+                                 conv2d_7_in_channels=16,
+                                 conv2d_7_out_channels=16)
+        self.module1_3 = Module1(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=32,
+                                 conv2d_2_in_channels=32,
+                                 conv2d_2_out_channels=32,
+                                 conv2d_5_in_channels=32,
+                                 conv2d_5_out_channels=32,
+                                 conv2d_7_in_channels=32,
+                                 conv2d_7_out_channels=32)
+        self.module1_4 = Module1(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_2_in_channels=64,
+                                 conv2d_2_out_channels=64,
+                                 conv2d_5_in_channels=64,
+                                 conv2d_5_out_channels=64,
+                                 conv2d_7_in_channels=64,
+                                 conv2d_7_out_channels=64)
+        self.module8_1 = Module8(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=16,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module8_2 = Module8(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=16,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_91 = nn.ReLU()
+        self.conv2d_78 = nn.Conv2d(in_channels=16,
+                                   out_channels=32,
+                                   kernel_size=(3, 3),
+                                   stride=(2, 2),
+                                   padding=(1, 1, 1, 1),
+                                   pad_mode="pad",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.module8_3 = Module8(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=32,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_92 = nn.ReLU()
+        self.module6_0 = Module6(conv2d_0_in_channels=16,
+                                 conv2d_0_out_channels=64,
+                                 module3_0_conv2d_0_in_channels=16,
+                                 module3_0_conv2d_0_out_channels=16,
+                                 module3_0_conv2d_0_kernel_size=(3, 3),
+                                 module3_0_conv2d_0_stride=(2, 2),
+                                 module3_0_conv2d_0_padding=(1, 1, 1, 1),
+                                 module3_0_conv2d_0_pad_mode="pad")
+        self.conv2d_74 = nn.Conv2d(in_channels=32,
+                                   out_channels=64,
+                                   kernel_size=(3, 3),
+                                   stride=(2, 2),
+                                   padding=(1, 1, 1, 1),
+                                   pad_mode="pad",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_96 = nn.ReLU()
+        self.module3_4 = Module3(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module1_5 = Module1(conv2d_0_in_channels=16,
+                                 conv2d_0_out_channels=16,
+                                 conv2d_2_in_channels=16,
+                                 conv2d_2_out_channels=16,
+                                 conv2d_5_in_channels=16,
+                                 conv2d_5_out_channels=16,
+                                 conv2d_7_in_channels=16,
+                                 conv2d_7_out_channels=16)
+        self.module1_6 = Module1(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=32,
+                                 conv2d_2_in_channels=32,
+                                 conv2d_2_out_channels=32,
+                                 conv2d_5_in_channels=32,
+                                 conv2d_5_out_channels=32,
+                                 conv2d_7_in_channels=32,
+                                 conv2d_7_out_channels=32)
+        self.module1_7 = Module1(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_2_in_channels=64,
+                                 conv2d_2_out_channels=64,
+                                 conv2d_5_in_channels=64,
+                                 conv2d_5_out_channels=64,
+                                 conv2d_7_in_channels=64,
+                                 conv2d_7_out_channels=64)
+        self.module1_8 = Module1(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_5_in_channels=128,
+                                 conv2d_5_out_channels=128,
+                                 conv2d_7_in_channels=128,
+                                 conv2d_7_out_channels=128)
+        self.module8_4 = Module8(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=16,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module8_5 = Module8(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=16,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module8_6 = Module8(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=16,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_174 = nn.ReLU()
+        self.conv2d_133 = nn.Conv2d(in_channels=16,
+                                    out_channels=32,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module8_7 = Module8(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=32,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.module8_8 = Module8(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=32,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_175 = nn.ReLU()
+        self.module6_1 = Module6(conv2d_0_in_channels=16,
+                                 conv2d_0_out_channels=64,
+                                 module3_0_conv2d_0_in_channels=16,
+                                 module3_0_conv2d_0_out_channels=16,
+                                 module3_0_conv2d_0_kernel_size=(3, 3),
+                                 module3_0_conv2d_0_stride=(2, 2),
+                                 module3_0_conv2d_0_padding=(1, 1, 1, 1),
+                                 module3_0_conv2d_0_pad_mode="pad")
+        self.conv2d_137 = nn.Conv2d(in_channels=32,
+                                    out_channels=64,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module8_9 = Module8(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=64,
+                                 resizenearestneighbor_1_size=(14, 14))
+        self.relu_176 = nn.ReLU()
+        self.module10_1 = Module10(conv2d_0_in_channels=16,
+                                   conv2d_0_out_channels=128,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module3_0_conv2d_0_in_channels=16,
+                                   module3_0_conv2d_0_out_channels=16,
+                                   module3_0_conv2d_0_kernel_size=(3, 3),
+                                   module3_0_conv2d_0_stride=(2, 2),
+                                   module3_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_0_conv2d_0_pad_mode="pad",
+                                   module3_1_conv2d_0_in_channels=16,
+                                   module3_1_conv2d_0_out_channels=16,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(2, 2),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.module6_2 = Module6(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=128,
+                                 module3_0_conv2d_0_in_channels=32,
+                                 module3_0_conv2d_0_out_channels=32,
+                                 module3_0_conv2d_0_kernel_size=(3, 3),
+                                 module3_0_conv2d_0_stride=(2, 2),
+                                 module3_0_conv2d_0_padding=(1, 1, 1, 1),
+                                 module3_0_conv2d_0_pad_mode="pad")
+        self.conv2d_149 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_184 = nn.ReLU()
+        self.module10_2 = Module10(conv2d_0_in_channels=32,
+                                   conv2d_0_out_channels=128,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module3_0_conv2d_0_in_channels=16,
+                                   module3_0_conv2d_0_out_channels=32,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=32,
+                                   module3_1_conv2d_0_out_channels=32,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(1, 1),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.conv2d_179 = nn.Conv2d(in_channels=16,
+                                    out_channels=128,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_206 = nn.ReLU()
+        self.module10_3 = Module10(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module3_0_conv2d_0_in_channels=32,
+                                   module3_0_conv2d_0_out_channels=64,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=64,
+                                   module3_1_conv2d_0_out_channels=64,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(1, 1),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.conv2d_181 = nn.Conv2d(in_channels=32,
+                                    out_channels=256,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_207 = nn.ReLU()
+        self.module3_5 = Module3(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module10_4 = Module10(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module3_0_conv2d_0_in_channels=64,
+                                   module3_0_conv2d_0_out_channels=128,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=128,
+                                   module3_1_conv2d_0_out_channels=128,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(1, 1),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.conv2d_183 = nn.Conv2d(in_channels=64,
+                                    out_channels=512,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_208 = nn.ReLU()
+        self.module3_6 = Module3(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module10_5 = Module10(conv2d_0_in_channels=256,
+                                   conv2d_0_out_channels=1024,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module3_0_conv2d_0_in_channels=128,
+                                   module3_0_conv2d_0_out_channels=256,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=256,
+                                   module3_1_conv2d_0_out_channels=256,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(1, 1),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.conv2d_189 = nn.Conv2d(in_channels=128,
+                                    out_channels=1024,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_211 = nn.ReLU()
+        self.module3_7 = Module3(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=1024,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module3_8 = Module3(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=2048,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid")
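+        # classification head: 7x7 average pooling, flatten, and a 2048 -> 1000 fully connected layer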
+        self.avgpool2d_222 = nn.AvgPool2d(kernel_size=(7, 7))
+        self.flatten_223 = nn.Flatten()
+        self.dense_224 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
+    def construct(self, input_1):
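+        # forward pass: parallel-resolution branches are repeatedly fused with element-wise adds and ReLU before the classification head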
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module3_0_opt = self.module3_0(opt_relu_1)
+        module10_0_opt = self.module10_0(module3_0_opt)
+        opt_conv2d_5 = self.conv2d_5(module3_0_opt)
+        opt_add_10 = P.Add()(module10_0_opt, opt_conv2d_5)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module3_1_opt = self.module3_1(opt_relu_11)
+        module3_2_opt = self.module3_2(opt_relu_11)
+        module1_0_opt = self.module1_0(module3_1_opt)
+        module1_1_opt = self.module1_1(module3_2_opt)
+        module8_0_opt = self.module8_0(module1_1_opt)
+        opt_add_41 = P.Add()(module1_0_opt, module8_0_opt)
+        opt_relu_44 = self.relu_44(opt_add_41)
+        opt_conv2d_36 = self.conv2d_36(module1_0_opt)
+        opt_add_38 = P.Add()(opt_conv2d_36, module1_1_opt)
+        opt_relu_40 = self.relu_40(opt_add_38)
+        module3_3_opt = self.module3_3(opt_relu_40)
+        module1_2_opt = self.module1_2(opt_relu_44)
+        module1_3_opt = self.module1_3(opt_relu_40)
+        module1_4_opt = self.module1_4(module3_3_opt)
+        module8_1_opt = self.module8_1(module1_3_opt)
+        opt_add_82 = P.Add()(module1_2_opt, module8_1_opt)
+        module8_2_opt = self.module8_2(module1_4_opt)
+        opt_add_88 = P.Add()(opt_add_82, module8_2_opt)
+        opt_relu_91 = self.relu_91(opt_add_88)
+        opt_conv2d_78 = self.conv2d_78(module1_2_opt)
+        opt_add_83 = P.Add()(opt_conv2d_78, module1_3_opt)
+        module8_3_opt = self.module8_3(module1_4_opt)
+        opt_add_89 = P.Add()(opt_add_83, module8_3_opt)
+        opt_relu_92 = self.relu_92(opt_add_89)
+        module6_0_opt = self.module6_0(module1_2_opt)
+        opt_conv2d_74 = self.conv2d_74(module1_3_opt)
+        opt_add_90 = P.Add()(module6_0_opt, opt_conv2d_74)
+        opt_add_93 = P.Add()(opt_add_90, module1_4_opt)
+        opt_relu_96 = self.relu_96(opt_add_93)
+        module3_4_opt = self.module3_4(opt_relu_96)
+        module1_5_opt = self.module1_5(opt_relu_91)
+        module1_6_opt = self.module1_6(opt_relu_92)
+        module1_7_opt = self.module1_7(opt_relu_96)
+        module1_8_opt = self.module1_8(module3_4_opt)
+        module8_4_opt = self.module8_4(module1_6_opt)
+        opt_add_152 = P.Add()(module1_5_opt, module8_4_opt)
+        module8_5_opt = self.module8_5(module1_7_opt)
+        opt_add_162 = P.Add()(opt_add_152, module8_5_opt)
+        module8_6_opt = self.module8_6(module1_8_opt)
+        opt_add_170 = P.Add()(opt_add_162, module8_6_opt)
+        opt_relu_174 = self.relu_174(opt_add_170)
+        opt_conv2d_133 = self.conv2d_133(module1_5_opt)
+        opt_add_141 = P.Add()(opt_conv2d_133, module1_6_opt)
+        module8_7_opt = self.module8_7(module1_7_opt)
+        opt_add_163 = P.Add()(opt_add_141, module8_7_opt)
+        module8_8_opt = self.module8_8(module1_8_opt)
+        opt_add_171 = P.Add()(opt_add_163, module8_8_opt)
+        opt_relu_175 = self.relu_175(opt_add_171)
+        module6_1_opt = self.module6_1(module1_5_opt)
+        opt_conv2d_137 = self.conv2d_137(module1_6_opt)
+        opt_add_157 = P.Add()(module6_1_opt, opt_conv2d_137)
+        opt_add_164 = P.Add()(opt_add_157, module1_7_opt)
+        module8_9_opt = self.module8_9(module1_8_opt)
+        opt_add_172 = P.Add()(opt_add_164, module8_9_opt)
+        opt_relu_176 = self.relu_176(opt_add_172)
+        module10_1_opt = self.module10_1(module1_5_opt)
+        module6_2_opt = self.module6_2(module1_6_opt)
+        opt_add_169 = P.Add()(module10_1_opt, module6_2_opt)
+        opt_conv2d_149 = self.conv2d_149(module1_7_opt)
+        opt_add_173 = P.Add()(opt_add_169, opt_conv2d_149)
+        opt_add_177 = P.Add()(opt_add_173, module1_8_opt)
+        opt_relu_184 = self.relu_184(opt_add_177)
+        module10_2_opt = self.module10_2(opt_relu_174)
+        opt_conv2d_179 = self.conv2d_179(opt_relu_174)
+        opt_add_202 = P.Add()(module10_2_opt, opt_conv2d_179)
+        opt_relu_206 = self.relu_206(opt_add_202)
+        module10_3_opt = self.module10_3(opt_relu_175)
+        opt_conv2d_181 = self.conv2d_181(opt_relu_175)
+        opt_add_203 = P.Add()(module10_3_opt, opt_conv2d_181)
+        opt_relu_207 = self.relu_207(opt_add_203)
+        module3_5_opt = self.module3_5(opt_relu_206)
+        opt_add_213 = P.Add()(opt_relu_207, module3_5_opt)
+        module10_4_opt = self.module10_4(opt_relu_176)
+        opt_conv2d_183 = self.conv2d_183(opt_relu_176)
+        opt_add_204 = P.Add()(module10_4_opt, opt_conv2d_183)
+        opt_relu_208 = self.relu_208(opt_add_204)
+        module3_6_opt = self.module3_6(opt_add_213)
+        opt_add_216 = P.Add()(opt_relu_208, module3_6_opt)
+        module10_5_opt = self.module10_5(opt_relu_184)
+        opt_conv2d_189 = self.conv2d_189(opt_relu_184)
+        opt_add_209 = P.Add()(module10_5_opt, opt_conv2d_189)
+        opt_relu_211 = self.relu_211(opt_add_209)
+        module3_7_opt = self.module3_7(opt_add_216)
+        opt_add_219 = P.Add()(opt_relu_211, module3_7_opt)
+        module3_8_opt = self.module3_8(opt_add_219)
+        opt_avgpool2d_222 = self.avgpool2d_222(module3_8_opt)
+        opt_flatten_223 = self.flatten_223(opt_avgpool2d_222)
+        opt_dense_224 = self.dense_224(opt_flatten_223)
+        return opt_dense_224
diff --git a/research/cvtmodel/hrnet/src/hrnet_w30.py b/research/cvtmodel/hrnet/src/hrnet_w30.py
new file mode 100644
index 0000000000000000000000000000000000000000..087309c5188f5cf93b78d197e953990966534d30
--- /dev/null
+++ b/research/cvtmodel/hrnet/src/hrnet_w30.py
@@ -0,0 +1,1522 @@
+import mindspore.ops as P
+from mindspore import nn
+
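+# Building blocks (Module5, Module15, Module0, Module16, Module7, Module11) are defined first;
+# MindSporeModel assembles the full HRNet-W30 network from these blocks.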
+
+class Module5(nn.Cell):
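+    """Configurable Conv2d followed by ReLU."""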
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode):
+        super(Module5, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
+class Module15(nn.Cell):
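+    """Two Module5 (conv + ReLU) blocks followed by a Conv2d with no activation;
+    used as the main path of a bottleneck residual unit."""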
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode, module5_0_conv2d_0_in_channels, module5_0_conv2d_0_out_channels,
+                 module5_0_conv2d_0_kernel_size, module5_0_conv2d_0_stride, module5_0_conv2d_0_padding,
+                 module5_0_conv2d_0_pad_mode, module5_1_conv2d_0_in_channels, module5_1_conv2d_0_out_channels,
+                 module5_1_conv2d_0_kernel_size, module5_1_conv2d_0_stride, module5_1_conv2d_0_padding,
+                 module5_1_conv2d_0_pad_mode):
+        super(Module15, self).__init__()
+        self.module5_0 = Module5(conv2d_0_in_channels=module5_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_0_conv2d_0_stride,
+                                 conv2d_0_padding=module5_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_0_conv2d_0_pad_mode)
+        self.module5_1 = Module5(conv2d_0_in_channels=module5_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_1_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_1_conv2d_0_stride,
+                                 conv2d_0_padding=module5_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_1_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module5_0_opt = self.module5_0(x)
+        module5_1_opt = self.module5_1(module5_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module5_1_opt)
+        return opt_conv2d_0
+
+
+class Module0(nn.Cell):
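+    """Two basic residual blocks: each applies conv -> ReLU -> conv,
+    adds the block input, then applies ReLU."""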
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_5_in_channels, conv2d_5_out_channels, conv2d_7_in_channels, conv2d_7_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_4 = nn.ReLU()
+        self.conv2d_5 = nn.Conv2d(in_channels=conv2d_5_in_channels,
+                                  out_channels=conv2d_5_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_6 = nn.ReLU()
+        self.conv2d_7 = nn.Conv2d(in_channels=conv2d_7_in_channels,
+                                  out_channels=conv2d_7_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_9 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_add_3 = P.Add()(opt_conv2d_2, x)
+        opt_relu_4 = self.relu_4(opt_add_3)
+        opt_conv2d_5 = self.conv2d_5(opt_relu_4)
+        opt_relu_6 = self.relu_6(opt_conv2d_5)
+        opt_conv2d_7 = self.conv2d_7(opt_relu_6)
+        opt_add_8 = P.Add()(opt_conv2d_7, opt_relu_4)
+        opt_relu_9 = self.relu_9(opt_add_8)
+        return opt_relu_9
+
+
+class Module16(nn.Cell):
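+    """Stacks two Module0 blocks, i.e. four basic residual blocks in sequence."""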
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_5_in_channels, module0_0_conv2d_5_out_channels,
+                 module0_0_conv2d_7_in_channels, module0_0_conv2d_7_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_5_in_channels, module0_1_conv2d_5_out_channels, module0_1_conv2d_7_in_channels,
+                 module0_1_conv2d_7_out_channels):
+        super(Module16, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_5_in_channels=module0_0_conv2d_5_in_channels,
+                                 conv2d_5_out_channels=module0_0_conv2d_5_out_channels,
+                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels)
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_5_in_channels=module0_1_conv2d_5_in_channels,
+                                 conv2d_5_out_channels=module0_1_conv2d_5_out_channels,
+                                 conv2d_7_in_channels=module0_1_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_1_conv2d_7_out_channels)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        module0_1_opt = self.module0_1(module0_0_opt)
+        return module0_1_opt
+
+
+class Module7(nn.Cell):
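+    """1x1 Conv2d followed by nearest-neighbour resizing to a fixed spatial size;
+    upsamples a lower-resolution branch so it can be fused with a higher-resolution one."""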
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, resizenearestneighbor_1_size):
+        super(Module7, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.resizenearestneighbor_1 = P.ResizeNearestNeighbor(size=resizenearestneighbor_1_size, align_corners=False)
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_resizenearestneighbor_1 = self.resizenearestneighbor_1(opt_conv2d_0)
+        return opt_resizenearestneighbor_1
+
+
+class Module11(nn.Cell):
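+    """Module5 (conv + ReLU) followed by a stride-2 3x3 Conv2d; downsamples a branch before fusion."""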
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module5_0_conv2d_0_in_channels,
+                 module5_0_conv2d_0_out_channels, module5_0_conv2d_0_kernel_size, module5_0_conv2d_0_stride,
+                 module5_0_conv2d_0_padding, module5_0_conv2d_0_pad_mode):
+        super(Module11, self).__init__()
+        self.module5_0 = Module5(conv2d_0_in_channels=module5_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_0_conv2d_0_stride,
+                                 conv2d_0_padding=module5_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_0_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module5_0_opt = self.module5_0(x)
+        opt_conv2d_0 = self.conv2d_0(module5_0_opt)
+        return opt_conv2d_0
+
+
+class MindSporeModel(nn.Cell):
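+    """HRNet-W30 network assembled from the modules above: a two-convolution stem, a bottleneck stage,
+    and multi-resolution stages whose 30/60/120/240-channel branches are repeatedly exchanged and fused."""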
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
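+        # stem: two stride-2 3x3 convolutions (3 -> 64 -> 64 channels)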
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=64,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module5_0 = Module5(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_0 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_5 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_11 = nn.ReLU()
+        self.module15_1 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_18 = nn.ReLU()
+        self.module15_2 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_25 = nn.ReLU()
+        self.module15_3 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_32 = nn.ReLU()
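+        # transition to two branches: 30 channels (full resolution) and 60 channels (stride-2)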
+        self.module5_1 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=30,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module5_2 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=60,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_0 = Module16(module0_0_conv2d_0_in_channels=30,
+                                   module0_0_conv2d_0_out_channels=30,
+                                   module0_0_conv2d_2_in_channels=30,
+                                   module0_0_conv2d_2_out_channels=30,
+                                   module0_0_conv2d_5_in_channels=30,
+                                   module0_0_conv2d_5_out_channels=30,
+                                   module0_0_conv2d_7_in_channels=30,
+                                   module0_0_conv2d_7_out_channels=30,
+                                   module0_1_conv2d_0_in_channels=30,
+                                   module0_1_conv2d_0_out_channels=30,
+                                   module0_1_conv2d_2_in_channels=30,
+                                   module0_1_conv2d_2_out_channels=30,
+                                   module0_1_conv2d_5_in_channels=30,
+                                   module0_1_conv2d_5_out_channels=30,
+                                   module0_1_conv2d_7_in_channels=30,
+                                   module0_1_conv2d_7_out_channels=30)
+        self.module16_1 = Module16(module0_0_conv2d_0_in_channels=60,
+                                   module0_0_conv2d_0_out_channels=60,
+                                   module0_0_conv2d_2_in_channels=60,
+                                   module0_0_conv2d_2_out_channels=60,
+                                   module0_0_conv2d_5_in_channels=60,
+                                   module0_0_conv2d_5_out_channels=60,
+                                   module0_0_conv2d_7_in_channels=60,
+                                   module0_0_conv2d_7_out_channels=60,
+                                   module0_1_conv2d_0_in_channels=60,
+                                   module0_1_conv2d_0_out_channels=60,
+                                   module0_1_conv2d_2_in_channels=60,
+                                   module0_1_conv2d_2_out_channels=60,
+                                   module0_1_conv2d_5_in_channels=60,
+                                   module0_1_conv2d_5_out_channels=60,
+                                   module0_1_conv2d_7_in_channels=60,
+                                   module0_1_conv2d_7_out_channels=60)
+        self.module7_0 = Module7(conv2d_0_in_channels=60,
+                                 conv2d_0_out_channels=30,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_85 = nn.ReLU()
+        self.conv2d_77 = nn.Conv2d(in_channels=30,
+                                   out_channels=60,
+                                   kernel_size=(3, 3),
+                                   stride=(2, 2),
+                                   padding=(1, 1, 1, 1),
+                                   pad_mode="pad",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_81 = nn.ReLU()
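+        # transition adds a third branch: 120 channels (stride-2 conv from the 60-channel branch)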
+        self.module5_3 = Module5(conv2d_0_in_channels=60,
+                                 conv2d_0_out_channels=120,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_2 = Module16(module0_0_conv2d_0_in_channels=30,
+                                   module0_0_conv2d_0_out_channels=30,
+                                   module0_0_conv2d_2_in_channels=30,
+                                   module0_0_conv2d_2_out_channels=30,
+                                   module0_0_conv2d_5_in_channels=30,
+                                   module0_0_conv2d_5_out_channels=30,
+                                   module0_0_conv2d_7_in_channels=30,
+                                   module0_0_conv2d_7_out_channels=30,
+                                   module0_1_conv2d_0_in_channels=30,
+                                   module0_1_conv2d_0_out_channels=30,
+                                   module0_1_conv2d_2_in_channels=30,
+                                   module0_1_conv2d_2_out_channels=30,
+                                   module0_1_conv2d_5_in_channels=30,
+                                   module0_1_conv2d_5_out_channels=30,
+                                   module0_1_conv2d_7_in_channels=30,
+                                   module0_1_conv2d_7_out_channels=30)
+        self.module16_3 = Module16(module0_0_conv2d_0_in_channels=60,
+                                   module0_0_conv2d_0_out_channels=60,
+                                   module0_0_conv2d_2_in_channels=60,
+                                   module0_0_conv2d_2_out_channels=60,
+                                   module0_0_conv2d_5_in_channels=60,
+                                   module0_0_conv2d_5_out_channels=60,
+                                   module0_0_conv2d_7_in_channels=60,
+                                   module0_0_conv2d_7_out_channels=60,
+                                   module0_1_conv2d_0_in_channels=60,
+                                   module0_1_conv2d_0_out_channels=60,
+                                   module0_1_conv2d_2_in_channels=60,
+                                   module0_1_conv2d_2_out_channels=60,
+                                   module0_1_conv2d_5_in_channels=60,
+                                   module0_1_conv2d_5_out_channels=60,
+                                   module0_1_conv2d_7_in_channels=60,
+                                   module0_1_conv2d_7_out_channels=60)
+        self.module16_4 = Module16(module0_0_conv2d_0_in_channels=120,
+                                   module0_0_conv2d_0_out_channels=120,
+                                   module0_0_conv2d_2_in_channels=120,
+                                   module0_0_conv2d_2_out_channels=120,
+                                   module0_0_conv2d_5_in_channels=120,
+                                   module0_0_conv2d_5_out_channels=120,
+                                   module0_0_conv2d_7_in_channels=120,
+                                   module0_0_conv2d_7_out_channels=120,
+                                   module0_1_conv2d_0_in_channels=120,
+                                   module0_1_conv2d_0_out_channels=120,
+                                   module0_1_conv2d_2_in_channels=120,
+                                   module0_1_conv2d_2_out_channels=120,
+                                   module0_1_conv2d_5_in_channels=120,
+                                   module0_1_conv2d_5_out_channels=120,
+                                   module0_1_conv2d_7_in_channels=120,
+                                   module0_1_conv2d_7_out_channels=120)
+        self.module7_1 = Module7(conv2d_0_in_channels=60,
+                                 conv2d_0_out_channels=30,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_2 = Module7(conv2d_0_in_channels=120,
+                                 conv2d_0_out_channels=30,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_162 = nn.ReLU()
+        self.conv2d_149 = nn.Conv2d(in_channels=30,
+                                    out_channels=60,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_3 = Module7(conv2d_0_in_channels=120,
+                                 conv2d_0_out_channels=60,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_163 = nn.ReLU()
+        self.module11_0 = Module11(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=120,
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_145 = nn.Conv2d(in_channels=60,
+                                    out_channels=120,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_167 = nn.ReLU()
+        self.module16_5 = Module16(module0_0_conv2d_0_in_channels=30,
+                                   module0_0_conv2d_0_out_channels=30,
+                                   module0_0_conv2d_2_in_channels=30,
+                                   module0_0_conv2d_2_out_channels=30,
+                                   module0_0_conv2d_5_in_channels=30,
+                                   module0_0_conv2d_5_out_channels=30,
+                                   module0_0_conv2d_7_in_channels=30,
+                                   module0_0_conv2d_7_out_channels=30,
+                                   module0_1_conv2d_0_in_channels=30,
+                                   module0_1_conv2d_0_out_channels=30,
+                                   module0_1_conv2d_2_in_channels=30,
+                                   module0_1_conv2d_2_out_channels=30,
+                                   module0_1_conv2d_5_in_channels=30,
+                                   module0_1_conv2d_5_out_channels=30,
+                                   module0_1_conv2d_7_in_channels=30,
+                                   module0_1_conv2d_7_out_channels=30)
+        self.module16_6 = Module16(module0_0_conv2d_0_in_channels=60,
+                                   module0_0_conv2d_0_out_channels=60,
+                                   module0_0_conv2d_2_in_channels=60,
+                                   module0_0_conv2d_2_out_channels=60,
+                                   module0_0_conv2d_5_in_channels=60,
+                                   module0_0_conv2d_5_out_channels=60,
+                                   module0_0_conv2d_7_in_channels=60,
+                                   module0_0_conv2d_7_out_channels=60,
+                                   module0_1_conv2d_0_in_channels=60,
+                                   module0_1_conv2d_0_out_channels=60,
+                                   module0_1_conv2d_2_in_channels=60,
+                                   module0_1_conv2d_2_out_channels=60,
+                                   module0_1_conv2d_5_in_channels=60,
+                                   module0_1_conv2d_5_out_channels=60,
+                                   module0_1_conv2d_7_in_channels=60,
+                                   module0_1_conv2d_7_out_channels=60)
+        self.module16_7 = Module16(module0_0_conv2d_0_in_channels=120,
+                                   module0_0_conv2d_0_out_channels=120,
+                                   module0_0_conv2d_2_in_channels=120,
+                                   module0_0_conv2d_2_out_channels=120,
+                                   module0_0_conv2d_5_in_channels=120,
+                                   module0_0_conv2d_5_out_channels=120,
+                                   module0_0_conv2d_7_in_channels=120,
+                                   module0_0_conv2d_7_out_channels=120,
+                                   module0_1_conv2d_0_in_channels=120,
+                                   module0_1_conv2d_0_out_channels=120,
+                                   module0_1_conv2d_2_in_channels=120,
+                                   module0_1_conv2d_2_out_channels=120,
+                                   module0_1_conv2d_5_in_channels=120,
+                                   module0_1_conv2d_5_out_channels=120,
+                                   module0_1_conv2d_7_in_channels=120,
+                                   module0_1_conv2d_7_out_channels=120)
+        self.module7_4 = Module7(conv2d_0_in_channels=60,
+                                 conv2d_0_out_channels=30,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_5 = Module7(conv2d_0_in_channels=120,
+                                 conv2d_0_out_channels=30,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_243 = nn.ReLU()
+        self.conv2d_225 = nn.Conv2d(in_channels=30,
+                                    out_channels=60,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_6 = Module7(conv2d_0_in_channels=120,
+                                 conv2d_0_out_channels=60,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_244 = nn.ReLU()
+        self.module11_1 = Module11(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=120,
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_228 = nn.Conv2d(in_channels=60,
+                                    out_channels=120,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_245 = nn.ReLU()
+        self.module16_8 = Module16(module0_0_conv2d_0_in_channels=30,
+                                   module0_0_conv2d_0_out_channels=30,
+                                   module0_0_conv2d_2_in_channels=30,
+                                   module0_0_conv2d_2_out_channels=30,
+                                   module0_0_conv2d_5_in_channels=30,
+                                   module0_0_conv2d_5_out_channels=30,
+                                   module0_0_conv2d_7_in_channels=30,
+                                   module0_0_conv2d_7_out_channels=30,
+                                   module0_1_conv2d_0_in_channels=30,
+                                   module0_1_conv2d_0_out_channels=30,
+                                   module0_1_conv2d_2_in_channels=30,
+                                   module0_1_conv2d_2_out_channels=30,
+                                   module0_1_conv2d_5_in_channels=30,
+                                   module0_1_conv2d_5_out_channels=30,
+                                   module0_1_conv2d_7_in_channels=30,
+                                   module0_1_conv2d_7_out_channels=30)
+        self.module16_9 = Module16(module0_0_conv2d_0_in_channels=60,
+                                   module0_0_conv2d_0_out_channels=60,
+                                   module0_0_conv2d_2_in_channels=60,
+                                   module0_0_conv2d_2_out_channels=60,
+                                   module0_0_conv2d_5_in_channels=60,
+                                   module0_0_conv2d_5_out_channels=60,
+                                   module0_0_conv2d_7_in_channels=60,
+                                   module0_0_conv2d_7_out_channels=60,
+                                   module0_1_conv2d_0_in_channels=60,
+                                   module0_1_conv2d_0_out_channels=60,
+                                   module0_1_conv2d_2_in_channels=60,
+                                   module0_1_conv2d_2_out_channels=60,
+                                   module0_1_conv2d_5_in_channels=60,
+                                   module0_1_conv2d_5_out_channels=60,
+                                   module0_1_conv2d_7_in_channels=60,
+                                   module0_1_conv2d_7_out_channels=60)
+        self.module16_10 = Module16(module0_0_conv2d_0_in_channels=120,
+                                    module0_0_conv2d_0_out_channels=120,
+                                    module0_0_conv2d_2_in_channels=120,
+                                    module0_0_conv2d_2_out_channels=120,
+                                    module0_0_conv2d_5_in_channels=120,
+                                    module0_0_conv2d_5_out_channels=120,
+                                    module0_0_conv2d_7_in_channels=120,
+                                    module0_0_conv2d_7_out_channels=120,
+                                    module0_1_conv2d_0_in_channels=120,
+                                    module0_1_conv2d_0_out_channels=120,
+                                    module0_1_conv2d_2_in_channels=120,
+                                    module0_1_conv2d_2_out_channels=120,
+                                    module0_1_conv2d_5_in_channels=120,
+                                    module0_1_conv2d_5_out_channels=120,
+                                    module0_1_conv2d_7_in_channels=120,
+                                    module0_1_conv2d_7_out_channels=120)
+        self.module7_7 = Module7(conv2d_0_in_channels=60,
+                                 conv2d_0_out_channels=30,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_8 = Module7(conv2d_0_in_channels=120,
+                                 conv2d_0_out_channels=30,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_324 = nn.ReLU()
+        self.conv2d_306 = nn.Conv2d(in_channels=30,
+                                    out_channels=60,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_9 = Module7(conv2d_0_in_channels=120,
+                                 conv2d_0_out_channels=60,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_322 = nn.ReLU()
+        self.module11_2 = Module11(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=120,
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_309 = nn.Conv2d(in_channels=60,
+                                    out_channels=120,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_326 = nn.ReLU()
+        self.module16_11 = Module16(module0_0_conv2d_0_in_channels=30,
+                                    module0_0_conv2d_0_out_channels=30,
+                                    module0_0_conv2d_2_in_channels=30,
+                                    module0_0_conv2d_2_out_channels=30,
+                                    module0_0_conv2d_5_in_channels=30,
+                                    module0_0_conv2d_5_out_channels=30,
+                                    module0_0_conv2d_7_in_channels=30,
+                                    module0_0_conv2d_7_out_channels=30,
+                                    module0_1_conv2d_0_in_channels=30,
+                                    module0_1_conv2d_0_out_channels=30,
+                                    module0_1_conv2d_2_in_channels=30,
+                                    module0_1_conv2d_2_out_channels=30,
+                                    module0_1_conv2d_5_in_channels=30,
+                                    module0_1_conv2d_5_out_channels=30,
+                                    module0_1_conv2d_7_in_channels=30,
+                                    module0_1_conv2d_7_out_channels=30)
+        self.module16_12 = Module16(module0_0_conv2d_0_in_channels=60,
+                                    module0_0_conv2d_0_out_channels=60,
+                                    module0_0_conv2d_2_in_channels=60,
+                                    module0_0_conv2d_2_out_channels=60,
+                                    module0_0_conv2d_5_in_channels=60,
+                                    module0_0_conv2d_5_out_channels=60,
+                                    module0_0_conv2d_7_in_channels=60,
+                                    module0_0_conv2d_7_out_channels=60,
+                                    module0_1_conv2d_0_in_channels=60,
+                                    module0_1_conv2d_0_out_channels=60,
+                                    module0_1_conv2d_2_in_channels=60,
+                                    module0_1_conv2d_2_out_channels=60,
+                                    module0_1_conv2d_5_in_channels=60,
+                                    module0_1_conv2d_5_out_channels=60,
+                                    module0_1_conv2d_7_in_channels=60,
+                                    module0_1_conv2d_7_out_channels=60)
+        self.module16_13 = Module16(module0_0_conv2d_0_in_channels=120,
+                                    module0_0_conv2d_0_out_channels=120,
+                                    module0_0_conv2d_2_in_channels=120,
+                                    module0_0_conv2d_2_out_channels=120,
+                                    module0_0_conv2d_5_in_channels=120,
+                                    module0_0_conv2d_5_out_channels=120,
+                                    module0_0_conv2d_7_in_channels=120,
+                                    module0_0_conv2d_7_out_channels=120,
+                                    module0_1_conv2d_0_in_channels=120,
+                                    module0_1_conv2d_0_out_channels=120,
+                                    module0_1_conv2d_2_in_channels=120,
+                                    module0_1_conv2d_2_out_channels=120,
+                                    module0_1_conv2d_5_in_channels=120,
+                                    module0_1_conv2d_5_out_channels=120,
+                                    module0_1_conv2d_7_in_channels=120,
+                                    module0_1_conv2d_7_out_channels=120)
+        self.module7_10 = Module7(conv2d_0_in_channels=60,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_11 = Module7(conv2d_0_in_channels=120,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_402 = nn.ReLU()
+        self.conv2d_388 = nn.Conv2d(in_channels=30,
+                                    out_channels=60,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_12 = Module7(conv2d_0_in_channels=120,
+                                  conv2d_0_out_channels=60,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_403 = nn.ReLU()
+        self.module11_3 = Module11(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=120,
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_386 = nn.Conv2d(in_channels=60,
+                                    out_channels=120,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_407 = nn.ReLU()
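+        # Transition creating the fourth, 240-channel branch (stride-2 3x3 convolution).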
+        self.module5_4 = Module5(conv2d_0_in_channels=120,
+                                 conv2d_0_out_channels=240,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_14 = Module16(module0_0_conv2d_0_in_channels=30,
+                                    module0_0_conv2d_0_out_channels=30,
+                                    module0_0_conv2d_2_in_channels=30,
+                                    module0_0_conv2d_2_out_channels=30,
+                                    module0_0_conv2d_5_in_channels=30,
+                                    module0_0_conv2d_5_out_channels=30,
+                                    module0_0_conv2d_7_in_channels=30,
+                                    module0_0_conv2d_7_out_channels=30,
+                                    module0_1_conv2d_0_in_channels=30,
+                                    module0_1_conv2d_0_out_channels=30,
+                                    module0_1_conv2d_2_in_channels=30,
+                                    module0_1_conv2d_2_out_channels=30,
+                                    module0_1_conv2d_5_in_channels=30,
+                                    module0_1_conv2d_5_out_channels=30,
+                                    module0_1_conv2d_7_in_channels=30,
+                                    module0_1_conv2d_7_out_channels=30)
+        self.module16_15 = Module16(module0_0_conv2d_0_in_channels=60,
+                                    module0_0_conv2d_0_out_channels=60,
+                                    module0_0_conv2d_2_in_channels=60,
+                                    module0_0_conv2d_2_out_channels=60,
+                                    module0_0_conv2d_5_in_channels=60,
+                                    module0_0_conv2d_5_out_channels=60,
+                                    module0_0_conv2d_7_in_channels=60,
+                                    module0_0_conv2d_7_out_channels=60,
+                                    module0_1_conv2d_0_in_channels=60,
+                                    module0_1_conv2d_0_out_channels=60,
+                                    module0_1_conv2d_2_in_channels=60,
+                                    module0_1_conv2d_2_out_channels=60,
+                                    module0_1_conv2d_5_in_channels=60,
+                                    module0_1_conv2d_5_out_channels=60,
+                                    module0_1_conv2d_7_in_channels=60,
+                                    module0_1_conv2d_7_out_channels=60)
+        self.module16_16 = Module16(module0_0_conv2d_0_in_channels=120,
+                                    module0_0_conv2d_0_out_channels=120,
+                                    module0_0_conv2d_2_in_channels=120,
+                                    module0_0_conv2d_2_out_channels=120,
+                                    module0_0_conv2d_5_in_channels=120,
+                                    module0_0_conv2d_5_out_channels=120,
+                                    module0_0_conv2d_7_in_channels=120,
+                                    module0_0_conv2d_7_out_channels=120,
+                                    module0_1_conv2d_0_in_channels=120,
+                                    module0_1_conv2d_0_out_channels=120,
+                                    module0_1_conv2d_2_in_channels=120,
+                                    module0_1_conv2d_2_out_channels=120,
+                                    module0_1_conv2d_5_in_channels=120,
+                                    module0_1_conv2d_5_out_channels=120,
+                                    module0_1_conv2d_7_in_channels=120,
+                                    module0_1_conv2d_7_out_channels=120)
+        self.module16_17 = Module16(module0_0_conv2d_0_in_channels=240,
+                                    module0_0_conv2d_0_out_channels=240,
+                                    module0_0_conv2d_2_in_channels=240,
+                                    module0_0_conv2d_2_out_channels=240,
+                                    module0_0_conv2d_5_in_channels=240,
+                                    module0_0_conv2d_5_out_channels=240,
+                                    module0_0_conv2d_7_in_channels=240,
+                                    module0_0_conv2d_7_out_channels=240,
+                                    module0_1_conv2d_0_in_channels=240,
+                                    module0_1_conv2d_0_out_channels=240,
+                                    module0_1_conv2d_2_in_channels=240,
+                                    module0_1_conv2d_2_out_channels=240,
+                                    module0_1_conv2d_5_in_channels=240,
+                                    module0_1_conv2d_5_out_channels=240,
+                                    module0_1_conv2d_7_in_channels=240,
+                                    module0_1_conv2d_7_out_channels=240)
+        self.module7_13 = Module7(conv2d_0_in_channels=60,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_14 = Module7(conv2d_0_in_channels=120,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_15 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_525 = nn.ReLU()
+        self.conv2d_484 = nn.Conv2d(in_channels=30,
+                                    out_channels=60,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_16 = Module7(conv2d_0_in_channels=120,
+                                  conv2d_0_out_channels=60,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_17 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=60,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_526 = nn.ReLU()
+        self.module11_4 = Module11(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=120,
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_488 = nn.Conv2d(in_channels=60,
+                                    out_channels=120,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_18 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=120,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_527 = nn.ReLU()
+        self.module15_4 = Module15(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=240,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=30,
+                                   module5_1_conv2d_0_out_channels=30,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_5 = Module11(conv2d_0_in_channels=60,
+                                   conv2d_0_out_channels=240,
+                                   module5_0_conv2d_0_in_channels=60,
+                                   module5_0_conv2d_0_out_channels=60,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_500 = nn.Conv2d(in_channels=120,
+                                    out_channels=240,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_532 = nn.ReLU()
+        self.module16_18 = Module16(module0_0_conv2d_0_in_channels=30,
+                                    module0_0_conv2d_0_out_channels=30,
+                                    module0_0_conv2d_2_in_channels=30,
+                                    module0_0_conv2d_2_out_channels=30,
+                                    module0_0_conv2d_5_in_channels=30,
+                                    module0_0_conv2d_5_out_channels=30,
+                                    module0_0_conv2d_7_in_channels=30,
+                                    module0_0_conv2d_7_out_channels=30,
+                                    module0_1_conv2d_0_in_channels=30,
+                                    module0_1_conv2d_0_out_channels=30,
+                                    module0_1_conv2d_2_in_channels=30,
+                                    module0_1_conv2d_2_out_channels=30,
+                                    module0_1_conv2d_5_in_channels=30,
+                                    module0_1_conv2d_5_out_channels=30,
+                                    module0_1_conv2d_7_in_channels=30,
+                                    module0_1_conv2d_7_out_channels=30)
+        self.module16_19 = Module16(module0_0_conv2d_0_in_channels=60,
+                                    module0_0_conv2d_0_out_channels=60,
+                                    module0_0_conv2d_2_in_channels=60,
+                                    module0_0_conv2d_2_out_channels=60,
+                                    module0_0_conv2d_5_in_channels=60,
+                                    module0_0_conv2d_5_out_channels=60,
+                                    module0_0_conv2d_7_in_channels=60,
+                                    module0_0_conv2d_7_out_channels=60,
+                                    module0_1_conv2d_0_in_channels=60,
+                                    module0_1_conv2d_0_out_channels=60,
+                                    module0_1_conv2d_2_in_channels=60,
+                                    module0_1_conv2d_2_out_channels=60,
+                                    module0_1_conv2d_5_in_channels=60,
+                                    module0_1_conv2d_5_out_channels=60,
+                                    module0_1_conv2d_7_in_channels=60,
+                                    module0_1_conv2d_7_out_channels=60)
+        self.module16_20 = Module16(module0_0_conv2d_0_in_channels=120,
+                                    module0_0_conv2d_0_out_channels=120,
+                                    module0_0_conv2d_2_in_channels=120,
+                                    module0_0_conv2d_2_out_channels=120,
+                                    module0_0_conv2d_5_in_channels=120,
+                                    module0_0_conv2d_5_out_channels=120,
+                                    module0_0_conv2d_7_in_channels=120,
+                                    module0_0_conv2d_7_out_channels=120,
+                                    module0_1_conv2d_0_in_channels=120,
+                                    module0_1_conv2d_0_out_channels=120,
+                                    module0_1_conv2d_2_in_channels=120,
+                                    module0_1_conv2d_2_out_channels=120,
+                                    module0_1_conv2d_5_in_channels=120,
+                                    module0_1_conv2d_5_out_channels=120,
+                                    module0_1_conv2d_7_in_channels=120,
+                                    module0_1_conv2d_7_out_channels=120)
+        self.module16_21 = Module16(module0_0_conv2d_0_in_channels=240,
+                                    module0_0_conv2d_0_out_channels=240,
+                                    module0_0_conv2d_2_in_channels=240,
+                                    module0_0_conv2d_2_out_channels=240,
+                                    module0_0_conv2d_5_in_channels=240,
+                                    module0_0_conv2d_5_out_channels=240,
+                                    module0_0_conv2d_7_in_channels=240,
+                                    module0_0_conv2d_7_out_channels=240,
+                                    module0_1_conv2d_0_in_channels=240,
+                                    module0_1_conv2d_0_out_channels=240,
+                                    module0_1_conv2d_2_in_channels=240,
+                                    module0_1_conv2d_2_out_channels=240,
+                                    module0_1_conv2d_5_in_channels=240,
+                                    module0_1_conv2d_5_out_channels=240,
+                                    module0_1_conv2d_7_in_channels=240,
+                                    module0_1_conv2d_7_out_channels=240)
+        self.module7_19 = Module7(conv2d_0_in_channels=60,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_20 = Module7(conv2d_0_in_channels=120,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_21 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_647 = nn.ReLU()
+        self.conv2d_609 = nn.Conv2d(in_channels=30,
+                                    out_channels=60,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_22 = Module7(conv2d_0_in_channels=120,
+                                  conv2d_0_out_channels=60,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_23 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=60,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_644 = nn.ReLU()
+        self.module11_6 = Module11(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=120,
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_613 = nn.Conv2d(in_channels=60,
+                                    out_channels=120,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_24 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=120,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_649 = nn.ReLU()
+        self.module15_5 = Module15(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=240,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=30,
+                                   module5_1_conv2d_0_out_channels=30,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_7 = Module11(conv2d_0_in_channels=60,
+                                   conv2d_0_out_channels=240,
+                                   module5_0_conv2d_0_in_channels=60,
+                                   module5_0_conv2d_0_out_channels=60,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_617 = nn.Conv2d(in_channels=120,
+                                    out_channels=240,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_658 = nn.ReLU()
+        self.module16_22 = Module16(module0_0_conv2d_0_in_channels=30,
+                                    module0_0_conv2d_0_out_channels=30,
+                                    module0_0_conv2d_2_in_channels=30,
+                                    module0_0_conv2d_2_out_channels=30,
+                                    module0_0_conv2d_5_in_channels=30,
+                                    module0_0_conv2d_5_out_channels=30,
+                                    module0_0_conv2d_7_in_channels=30,
+                                    module0_0_conv2d_7_out_channels=30,
+                                    module0_1_conv2d_0_in_channels=30,
+                                    module0_1_conv2d_0_out_channels=30,
+                                    module0_1_conv2d_2_in_channels=30,
+                                    module0_1_conv2d_2_out_channels=30,
+                                    module0_1_conv2d_5_in_channels=30,
+                                    module0_1_conv2d_5_out_channels=30,
+                                    module0_1_conv2d_7_in_channels=30,
+                                    module0_1_conv2d_7_out_channels=30)
+        self.module16_23 = Module16(module0_0_conv2d_0_in_channels=60,
+                                    module0_0_conv2d_0_out_channels=60,
+                                    module0_0_conv2d_2_in_channels=60,
+                                    module0_0_conv2d_2_out_channels=60,
+                                    module0_0_conv2d_5_in_channels=60,
+                                    module0_0_conv2d_5_out_channels=60,
+                                    module0_0_conv2d_7_in_channels=60,
+                                    module0_0_conv2d_7_out_channels=60,
+                                    module0_1_conv2d_0_in_channels=60,
+                                    module0_1_conv2d_0_out_channels=60,
+                                    module0_1_conv2d_2_in_channels=60,
+                                    module0_1_conv2d_2_out_channels=60,
+                                    module0_1_conv2d_5_in_channels=60,
+                                    module0_1_conv2d_5_out_channels=60,
+                                    module0_1_conv2d_7_in_channels=60,
+                                    module0_1_conv2d_7_out_channels=60)
+        self.module16_24 = Module16(module0_0_conv2d_0_in_channels=120,
+                                    module0_0_conv2d_0_out_channels=120,
+                                    module0_0_conv2d_2_in_channels=120,
+                                    module0_0_conv2d_2_out_channels=120,
+                                    module0_0_conv2d_5_in_channels=120,
+                                    module0_0_conv2d_5_out_channels=120,
+                                    module0_0_conv2d_7_in_channels=120,
+                                    module0_0_conv2d_7_out_channels=120,
+                                    module0_1_conv2d_0_in_channels=120,
+                                    module0_1_conv2d_0_out_channels=120,
+                                    module0_1_conv2d_2_in_channels=120,
+                                    module0_1_conv2d_2_out_channels=120,
+                                    module0_1_conv2d_5_in_channels=120,
+                                    module0_1_conv2d_5_out_channels=120,
+                                    module0_1_conv2d_7_in_channels=120,
+                                    module0_1_conv2d_7_out_channels=120)
+        self.module16_25 = Module16(module0_0_conv2d_0_in_channels=240,
+                                    module0_0_conv2d_0_out_channels=240,
+                                    module0_0_conv2d_2_in_channels=240,
+                                    module0_0_conv2d_2_out_channels=240,
+                                    module0_0_conv2d_5_in_channels=240,
+                                    module0_0_conv2d_5_out_channels=240,
+                                    module0_0_conv2d_7_in_channels=240,
+                                    module0_0_conv2d_7_out_channels=240,
+                                    module0_1_conv2d_0_in_channels=240,
+                                    module0_1_conv2d_0_out_channels=240,
+                                    module0_1_conv2d_2_in_channels=240,
+                                    module0_1_conv2d_2_out_channels=240,
+                                    module0_1_conv2d_5_in_channels=240,
+                                    module0_1_conv2d_5_out_channels=240,
+                                    module0_1_conv2d_7_in_channels=240,
+                                    module0_1_conv2d_7_out_channels=240)
+        self.module7_25 = Module7(conv2d_0_in_channels=60,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_26 = Module7(conv2d_0_in_channels=120,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_27 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=30,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_768 = nn.ReLU()
+        self.conv2d_733 = nn.Conv2d(in_channels=30,
+                                    out_channels=60,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_28 = Module7(conv2d_0_in_channels=120,
+                                  conv2d_0_out_channels=60,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_29 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=60,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_769 = nn.ReLU()
+        self.module11_8 = Module11(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=120,
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_729 = nn.Conv2d(in_channels=60,
+                                    out_channels=120,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_30 = Module7(conv2d_0_in_channels=240,
+                                  conv2d_0_out_channels=120,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_770 = nn.ReLU()
+        self.module15_6 = Module15(conv2d_0_in_channels=30,
+                                   conv2d_0_out_channels=240,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=30,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=30,
+                                   module5_1_conv2d_0_out_channels=30,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_9 = Module11(conv2d_0_in_channels=60,
+                                   conv2d_0_out_channels=240,
+                                   module5_0_conv2d_0_in_channels=60,
+                                   module5_0_conv2d_0_out_channels=60,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_740 = nn.Conv2d(in_channels=120,
+                                    out_channels=240,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_782 = nn.ReLU()
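+        # Head layers: each branch (30/60/120/240 channels) is expanded to
+        # 128/256/512/1024 channels by a Module15 block plus a parallel 1x1 projection.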
+        self.module15_7 = Module15(conv2d_0_in_channels=32,
+                                   conv2d_0_out_channels=128,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=30,
+                                   module5_0_conv2d_0_out_channels=32,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=32,
+                                   module5_1_conv2d_0_out_channels=32,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_773 = nn.Conv2d(in_channels=30,
+                                    out_channels=128,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_800 = nn.ReLU()
+        self.module15_8 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=60,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_775 = nn.Conv2d(in_channels=60,
+                                    out_channels=256,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_801 = nn.ReLU()
+        self.module5_5 = Module5(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_9 = Module15(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=120,
+                                   module5_0_conv2d_0_out_channels=128,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=128,
+                                   module5_1_conv2d_0_out_channels=128,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_777 = nn.Conv2d(in_channels=120,
+                                    out_channels=512,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_802 = nn.ReLU()
+        self.module5_6 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_10 = Module15(conv2d_0_in_channels=256,
+                                    conv2d_0_out_channels=1024,
+                                    conv2d_0_kernel_size=(1, 1),
+                                    conv2d_0_stride=(1, 1),
+                                    conv2d_0_padding=0,
+                                    conv2d_0_pad_mode="valid",
+                                    module5_0_conv2d_0_in_channels=240,
+                                    module5_0_conv2d_0_out_channels=256,
+                                    module5_0_conv2d_0_kernel_size=(1, 1),
+                                    module5_0_conv2d_0_stride=(1, 1),
+                                    module5_0_conv2d_0_padding=0,
+                                    module5_0_conv2d_0_pad_mode="valid",
+                                    module5_1_conv2d_0_in_channels=256,
+                                    module5_1_conv2d_0_out_channels=256,
+                                    module5_1_conv2d_0_kernel_size=(3, 3),
+                                    module5_1_conv2d_0_stride=(1, 1),
+                                    module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                    module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_787 = nn.Conv2d(in_channels=240,
+                                    out_channels=1024,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_807 = nn.ReLU()
+        self.module5_7 = Module5(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=1024,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module5_8 = Module5(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=2048,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid")
+        self.avgpool2d_817 = nn.AvgPool2d(kernel_size=(7, 7))
+        self.flatten_818 = nn.Flatten()
+        self.dense_819 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
+    def construct(self, input_1):
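+        # Forward pass: parallel branches (30/60/120/240 channels) are repeatedly fused by
+        # nearest-neighbor upsampling (Module7) and strided 3x3 convolutions before the
+        # classification head.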
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module5_0_opt = self.module5_0(opt_relu_1)
+        module15_0_opt = self.module15_0(module5_0_opt)
+        opt_conv2d_5 = self.conv2d_5(module5_0_opt)
+        opt_add_10 = P.Add()(module15_0_opt, opt_conv2d_5)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module15_1_opt = self.module15_1(opt_relu_11)
+        opt_add_17 = P.Add()(module15_1_opt, opt_relu_11)
+        opt_relu_18 = self.relu_18(opt_add_17)
+        module15_2_opt = self.module15_2(opt_relu_18)
+        opt_add_24 = P.Add()(module15_2_opt, opt_relu_18)
+        opt_relu_25 = self.relu_25(opt_add_24)
+        module15_3_opt = self.module15_3(opt_relu_25)
+        opt_add_31 = P.Add()(module15_3_opt, opt_relu_25)
+        opt_relu_32 = self.relu_32(opt_add_31)
+        module5_1_opt = self.module5_1(opt_relu_32)
+        module5_2_opt = self.module5_2(opt_relu_32)
+        module16_0_opt = self.module16_0(module5_1_opt)
+        module16_1_opt = self.module16_1(module5_2_opt)
+        module7_0_opt = self.module7_0(module16_1_opt)
+        opt_add_82 = P.Add()(module16_0_opt, module7_0_opt)
+        opt_relu_85 = self.relu_85(opt_add_82)
+        opt_conv2d_77 = self.conv2d_77(module16_0_opt)
+        opt_add_79 = P.Add()(opt_conv2d_77, module16_1_opt)
+        opt_relu_81 = self.relu_81(opt_add_79)
+        module5_3_opt = self.module5_3(opt_relu_81)
+        module16_2_opt = self.module16_2(opt_relu_85)
+        module16_3_opt = self.module16_3(opt_relu_81)
+        module16_4_opt = self.module16_4(module5_3_opt)
+        module7_1_opt = self.module7_1(module16_3_opt)
+        opt_add_153 = P.Add()(module16_2_opt, module7_1_opt)
+        module7_2_opt = self.module7_2(module16_4_opt)
+        opt_add_159 = P.Add()(opt_add_153, module7_2_opt)
+        opt_relu_162 = self.relu_162(opt_add_159)
+        opt_conv2d_149 = self.conv2d_149(module16_2_opt)
+        opt_add_154 = P.Add()(opt_conv2d_149, module16_3_opt)
+        module7_3_opt = self.module7_3(module16_4_opt)
+        opt_add_160 = P.Add()(opt_add_154, module7_3_opt)
+        opt_relu_163 = self.relu_163(opt_add_160)
+        module11_0_opt = self.module11_0(module16_2_opt)
+        opt_conv2d_145 = self.conv2d_145(module16_3_opt)
+        opt_add_161 = P.Add()(module11_0_opt, opt_conv2d_145)
+        opt_add_164 = P.Add()(opt_add_161, module16_4_opt)
+        opt_relu_167 = self.relu_167(opt_add_164)
+        module16_5_opt = self.module16_5(opt_relu_162)
+        module16_6_opt = self.module16_6(opt_relu_163)
+        module16_7_opt = self.module16_7(opt_relu_167)
+        module7_4_opt = self.module7_4(module16_6_opt)
+        opt_add_236 = P.Add()(module16_5_opt, module7_4_opt)
+        module7_5_opt = self.module7_5(module16_7_opt)
+        opt_add_240 = P.Add()(opt_add_236, module7_5_opt)
+        opt_relu_243 = self.relu_243(opt_add_240)
+        opt_conv2d_225 = self.conv2d_225(module16_5_opt)
+        opt_add_230 = P.Add()(opt_conv2d_225, module16_6_opt)
+        module7_6_opt = self.module7_6(module16_7_opt)
+        opt_add_241 = P.Add()(opt_add_230, module7_6_opt)
+        opt_relu_244 = self.relu_244(opt_add_241)
+        module11_1_opt = self.module11_1(module16_5_opt)
+        opt_conv2d_228 = self.conv2d_228(module16_6_opt)
+        opt_add_239 = P.Add()(module11_1_opt, opt_conv2d_228)
+        opt_add_242 = P.Add()(opt_add_239, module16_7_opt)
+        opt_relu_245 = self.relu_245(opt_add_242)
+        module16_8_opt = self.module16_8(opt_relu_243)
+        module16_9_opt = self.module16_9(opt_relu_244)
+        module16_10_opt = self.module16_10(opt_relu_245)
+        module7_7_opt = self.module7_7(module16_9_opt)
+        opt_add_318 = P.Add()(module16_8_opt, module7_7_opt)
+        module7_8_opt = self.module7_8(module16_10_opt)
+        opt_add_321 = P.Add()(opt_add_318, module7_8_opt)
+        opt_relu_324 = self.relu_324(opt_add_321)
+        opt_conv2d_306 = self.conv2d_306(module16_8_opt)
+        opt_add_312 = P.Add()(opt_conv2d_306, module16_9_opt)
+        module7_9_opt = self.module7_9(module16_10_opt)
+        opt_add_319 = P.Add()(opt_add_312, module7_9_opt)
+        opt_relu_322 = self.relu_322(opt_add_319)
+        module11_2_opt = self.module11_2(module16_8_opt)
+        opt_conv2d_309 = self.conv2d_309(module16_9_opt)
+        opt_add_320 = P.Add()(module11_2_opt, opt_conv2d_309)
+        opt_add_323 = P.Add()(opt_add_320, module16_10_opt)
+        opt_relu_326 = self.relu_326(opt_add_323)
+        module16_11_opt = self.module16_11(opt_relu_324)
+        module16_12_opt = self.module16_12(opt_relu_322)
+        module16_13_opt = self.module16_13(opt_relu_326)
+        module7_10_opt = self.module7_10(module16_12_opt)
+        opt_add_395 = P.Add()(module16_11_opt, module7_10_opt)
+        module7_11_opt = self.module7_11(module16_13_opt)
+        opt_add_399 = P.Add()(opt_add_395, module7_11_opt)
+        opt_relu_402 = self.relu_402(opt_add_399)
+        opt_conv2d_388 = self.conv2d_388(module16_11_opt)
+        opt_add_393 = P.Add()(opt_conv2d_388, module16_12_opt)
+        module7_12_opt = self.module7_12(module16_13_opt)
+        opt_add_400 = P.Add()(opt_add_393, module7_12_opt)
+        opt_relu_403 = self.relu_403(opt_add_400)
+        module11_3_opt = self.module11_3(module16_11_opt)
+        opt_conv2d_386 = self.conv2d_386(module16_12_opt)
+        opt_add_401 = P.Add()(module11_3_opt, opt_conv2d_386)
+        opt_add_404 = P.Add()(opt_add_401, module16_13_opt)
+        opt_relu_407 = self.relu_407(opt_add_404)
+        module5_4_opt = self.module5_4(opt_relu_407)
+        module16_14_opt = self.module16_14(opt_relu_402)
+        module16_15_opt = self.module16_15(opt_relu_403)
+        module16_16_opt = self.module16_16(opt_relu_407)
+        module16_17_opt = self.module16_17(module5_4_opt)
+        module7_13_opt = self.module7_13(module16_15_opt)
+        opt_add_503 = P.Add()(module16_14_opt, module7_13_opt)
+        module7_14_opt = self.module7_14(module16_16_opt)
+        opt_add_513 = P.Add()(opt_add_503, module7_14_opt)
+        module7_15_opt = self.module7_15(module16_17_opt)
+        opt_add_521 = P.Add()(opt_add_513, module7_15_opt)
+        opt_relu_525 = self.relu_525(opt_add_521)
+        opt_conv2d_484 = self.conv2d_484(module16_14_opt)
+        opt_add_492 = P.Add()(opt_conv2d_484, module16_15_opt)
+        module7_16_opt = self.module7_16(module16_16_opt)
+        opt_add_514 = P.Add()(opt_add_492, module7_16_opt)
+        module7_17_opt = self.module7_17(module16_17_opt)
+        opt_add_522 = P.Add()(opt_add_514, module7_17_opt)
+        opt_relu_526 = self.relu_526(opt_add_522)
+        module11_4_opt = self.module11_4(module16_14_opt)
+        opt_conv2d_488 = self.conv2d_488(module16_15_opt)
+        opt_add_508 = P.Add()(module11_4_opt, opt_conv2d_488)
+        opt_add_515 = P.Add()(opt_add_508, module16_16_opt)
+        module7_18_opt = self.module7_18(module16_17_opt)
+        opt_add_523 = P.Add()(opt_add_515, module7_18_opt)
+        opt_relu_527 = self.relu_527(opt_add_523)
+        module15_4_opt = self.module15_4(module16_14_opt)
+        module11_5_opt = self.module11_5(module16_15_opt)
+        opt_add_520 = P.Add()(module15_4_opt, module11_5_opt)
+        opt_conv2d_500 = self.conv2d_500(module16_16_opt)
+        opt_add_524 = P.Add()(opt_add_520, opt_conv2d_500)
+        opt_add_528 = P.Add()(opt_add_524, module16_17_opt)
+        opt_relu_532 = self.relu_532(opt_add_528)
+        module16_18_opt = self.module16_18(opt_relu_525)
+        module16_19_opt = self.module16_19(opt_relu_526)
+        module16_20_opt = self.module16_20(opt_relu_527)
+        module16_21_opt = self.module16_21(opt_relu_532)
+        module7_19_opt = self.module7_19(module16_19_opt)
+        opt_add_631 = P.Add()(module16_18_opt, module7_19_opt)
+        module7_20_opt = self.module7_20(module16_20_opt)
+        opt_add_639 = P.Add()(opt_add_631, module7_20_opt)
+        module7_21_opt = self.module7_21(module16_21_opt)
+        opt_add_643 = P.Add()(opt_add_639, module7_21_opt)
+        opt_relu_647 = self.relu_647(opt_add_643)
+        opt_conv2d_609 = self.conv2d_609(module16_18_opt)
+        opt_add_619 = P.Add()(opt_conv2d_609, module16_19_opt)
+        module7_22_opt = self.module7_22(module16_20_opt)
+        opt_add_633 = P.Add()(opt_add_619, module7_22_opt)
+        module7_23_opt = self.module7_23(module16_21_opt)
+        opt_add_640 = P.Add()(opt_add_633, module7_23_opt)
+        opt_relu_644 = self.relu_644(opt_add_640)
+        module11_6_opt = self.module11_6(module16_18_opt)
+        opt_conv2d_613 = self.conv2d_613(module16_19_opt)
+        opt_add_637 = P.Add()(module11_6_opt, opt_conv2d_613)
+        opt_add_641 = P.Add()(opt_add_637, module16_20_opt)
+        module7_24_opt = self.module7_24(module16_21_opt)
+        opt_add_645 = P.Add()(opt_add_641, module7_24_opt)
+        opt_relu_649 = self.relu_649(opt_add_645)
+        module15_5_opt = self.module15_5(module16_18_opt)
+        module11_7_opt = self.module11_7(module16_19_opt)
+        opt_add_646 = P.Add()(module15_5_opt, module11_7_opt)
+        opt_conv2d_617 = self.conv2d_617(module16_20_opt)
+        opt_add_650 = P.Add()(opt_add_646, opt_conv2d_617)
+        opt_add_654 = P.Add()(opt_add_650, module16_21_opt)
+        opt_relu_658 = self.relu_658(opt_add_654)
+        module16_22_opt = self.module16_22(opt_relu_647)
+        module16_23_opt = self.module16_23(opt_relu_644)
+        module16_24_opt = self.module16_24(opt_relu_649)
+        module16_25_opt = self.module16_25(opt_relu_658)
+        module7_25_opt = self.module7_25(module16_23_opt)
+        opt_add_745 = P.Add()(module16_22_opt, module7_25_opt)
+        module7_26_opt = self.module7_26(module16_24_opt)
+        opt_add_752 = P.Add()(opt_add_745, module7_26_opt)
+        module7_27_opt = self.module7_27(module16_25_opt)
+        opt_add_764 = P.Add()(opt_add_752, module7_27_opt)
+        opt_relu_768 = self.relu_768(opt_add_764)
+        opt_conv2d_733 = self.conv2d_733(module16_22_opt)
+        opt_add_742 = P.Add()(opt_conv2d_733, module16_23_opt)
+        module7_28_opt = self.module7_28(module16_24_opt)
+        opt_add_753 = P.Add()(opt_add_742, module7_28_opt)
+        module7_29_opt = self.module7_29(module16_25_opt)
+        opt_add_765 = P.Add()(opt_add_753, module7_29_opt)
+        opt_relu_769 = self.relu_769(opt_add_765)
+        module11_8_opt = self.module11_8(module16_22_opt)
+        opt_conv2d_729 = self.conv2d_729(module16_23_opt)
+        opt_add_757 = P.Add()(module11_8_opt, opt_conv2d_729)
+        opt_add_762 = P.Add()(opt_add_757, module16_24_opt)
+        module7_30_opt = self.module7_30(module16_25_opt)
+        opt_add_766 = P.Add()(opt_add_762, module7_30_opt)
+        opt_relu_770 = self.relu_770(opt_add_766)
+        module15_6_opt = self.module15_6(module16_22_opt)
+        module11_9_opt = self.module11_9(module16_23_opt)
+        opt_add_767 = P.Add()(module15_6_opt, module11_9_opt)
+        opt_conv2d_740 = self.conv2d_740(module16_24_opt)
+        opt_add_771 = P.Add()(opt_add_767, opt_conv2d_740)
+        opt_add_778 = P.Add()(opt_add_771, module16_25_opt)
+        opt_relu_782 = self.relu_782(opt_add_778)
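+        # Head: expand each branch, add its 1x1 projection, then progressively
+        # downsample and sum the branches into a single 1024-channel feature map.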
+        module15_7_opt = self.module15_7(opt_relu_768)
+        opt_conv2d_773 = self.conv2d_773(opt_relu_768)
+        opt_add_796 = P.Add()(module15_7_opt, opt_conv2d_773)
+        opt_relu_800 = self.relu_800(opt_add_796)
+        module15_8_opt = self.module15_8(opt_relu_769)
+        opt_conv2d_775 = self.conv2d_775(opt_relu_769)
+        opt_add_797 = P.Add()(module15_8_opt, opt_conv2d_775)
+        opt_relu_801 = self.relu_801(opt_add_797)
+        module5_5_opt = self.module5_5(opt_relu_800)
+        opt_add_808 = P.Add()(opt_relu_801, module5_5_opt)
+        module15_9_opt = self.module15_9(opt_relu_770)
+        opt_conv2d_777 = self.conv2d_777(opt_relu_770)
+        opt_add_798 = P.Add()(module15_9_opt, opt_conv2d_777)
+        opt_relu_802 = self.relu_802(opt_add_798)
+        module5_6_opt = self.module5_6(opt_add_808)
+        opt_add_811 = P.Add()(opt_relu_802, module5_6_opt)
+        module15_10_opt = self.module15_10(opt_relu_782)
+        opt_conv2d_787 = self.conv2d_787(opt_relu_782)
+        opt_add_805 = P.Add()(module15_10_opt, opt_conv2d_787)
+        opt_relu_807 = self.relu_807(opt_add_805)
+        module5_7_opt = self.module5_7(opt_add_811)
+        opt_add_814 = P.Add()(opt_relu_807, module5_7_opt)
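+        # Classifier: 1x1 convolution to 2048 channels, 7x7 average pooling, flatten,
+        # and a dense layer producing 1000 class logits.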
+        module5_8_opt = self.module5_8(opt_add_814)
+        opt_avgpool2d_817 = self.avgpool2d_817(module5_8_opt)
+        opt_flatten_818 = self.flatten_818(opt_avgpool2d_817)
+        opt_dense_819 = self.dense_819(opt_flatten_818)
+        return opt_dense_819
diff --git a/research/cvtmodel/hrnet/src/hrnet_w40.py b/research/cvtmodel/hrnet/src/hrnet_w40.py
new file mode 100644
index 0000000000000000000000000000000000000000..04e9478587dc1f4ee992483adc30499b9d47d2ad
--- /dev/null
+++ b/research/cvtmodel/hrnet/src/hrnet_w40.py
@@ -0,0 +1,1522 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
+class Module5(nn.Cell):
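+    """Conv2d with configurable kernel, stride, and padding, followed by ReLU."""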
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode):
+        super(Module5, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
+class Module15(nn.Cell):
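+    """Two Module5 (Conv-ReLU) blocks followed by a projection Conv2d without activation."""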
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode, module5_0_conv2d_0_in_channels, module5_0_conv2d_0_out_channels,
+                 module5_0_conv2d_0_kernel_size, module5_0_conv2d_0_stride, module5_0_conv2d_0_padding,
+                 module5_0_conv2d_0_pad_mode, module5_1_conv2d_0_in_channels, module5_1_conv2d_0_out_channels,
+                 module5_1_conv2d_0_kernel_size, module5_1_conv2d_0_stride, module5_1_conv2d_0_padding,
+                 module5_1_conv2d_0_pad_mode):
+        super(Module15, self).__init__()
+        self.module5_0 = Module5(conv2d_0_in_channels=module5_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_0_conv2d_0_stride,
+                                 conv2d_0_padding=module5_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_0_conv2d_0_pad_mode)
+        self.module5_1 = Module5(conv2d_0_in_channels=module5_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_1_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_1_conv2d_0_stride,
+                                 conv2d_0_padding=module5_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_1_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module5_0_opt = self.module5_0(x)
+        module5_1_opt = self.module5_1(module5_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module5_1_opt)
+        return opt_conv2d_0
+
+
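+# Two chained residual units built from 3x3 convolutions: each unit is conv-ReLU-conv,
+# an add with the unit input, then ReLU.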
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_5_in_channels, conv2d_5_out_channels, conv2d_7_in_channels, conv2d_7_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_4 = nn.ReLU()
+        self.conv2d_5 = nn.Conv2d(in_channels=conv2d_5_in_channels,
+                                  out_channels=conv2d_5_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_6 = nn.ReLU()
+        self.conv2d_7 = nn.Conv2d(in_channels=conv2d_7_in_channels,
+                                  out_channels=conv2d_7_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_9 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_add_3 = P.Add()(opt_conv2d_2, x)
+        opt_relu_4 = self.relu_4(opt_add_3)
+        opt_conv2d_5 = self.conv2d_5(opt_relu_4)
+        opt_relu_6 = self.relu_6(opt_conv2d_5)
+        opt_conv2d_7 = self.conv2d_7(opt_relu_6)
+        opt_add_8 = P.Add()(opt_conv2d_7, opt_relu_4)
+        opt_relu_9 = self.relu_9(opt_add_8)
+        return opt_relu_9
+
+
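+# Two Module0 blocks applied back to back, i.e. four residual units of a single branch.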
+class Module16(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_5_in_channels, module0_0_conv2d_5_out_channels,
+                 module0_0_conv2d_7_in_channels, module0_0_conv2d_7_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_5_in_channels, module0_1_conv2d_5_out_channels, module0_1_conv2d_7_in_channels,
+                 module0_1_conv2d_7_out_channels):
+        super(Module16, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_5_in_channels=module0_0_conv2d_5_in_channels,
+                                 conv2d_5_out_channels=module0_0_conv2d_5_out_channels,
+                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels)
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_5_in_channels=module0_1_conv2d_5_in_channels,
+                                 conv2d_5_out_channels=module0_1_conv2d_5_out_channels,
+                                 conv2d_7_in_channels=module0_1_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_1_conv2d_7_out_channels)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        module0_1_opt = self.module0_1(module0_0_opt)
+        return module0_1_opt
+
+
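+# Fusion path from a lower-resolution branch: a 1x1 convolution to change the channel count,
+# then nearest-neighbour upsampling to the target spatial size.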
+class Module7(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, resizenearestneighbor_1_size):
+        super(Module7, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.resizenearestneighbor_1 = P.ResizeNearestNeighbor(size=resizenearestneighbor_1_size, align_corners=False)
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_resizenearestneighbor_1 = self.resizenearestneighbor_1(opt_conv2d_0)
+        return opt_resizenearestneighbor_1
+
+
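+# Fusion path towards a lower-resolution branch: a Module5 conv-ReLU block followed by a
+# stride-2 3x3 convolution.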
+class Module11(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module5_0_conv2d_0_in_channels,
+                 module5_0_conv2d_0_out_channels, module5_0_conv2d_0_kernel_size, module5_0_conv2d_0_stride,
+                 module5_0_conv2d_0_padding, module5_0_conv2d_0_pad_mode):
+        super(Module11, self).__init__()
+        self.module5_0 = Module5(conv2d_0_in_channels=module5_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_0_conv2d_0_stride,
+                                 conv2d_0_padding=module5_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_0_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module5_0_opt = self.module5_0(x)
+        opt_conv2d_0 = self.conv2d_0(module5_0_opt)
+        return opt_conv2d_0
+
+
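+# HRNet-W40 network assembled from the modules above: a stride-4 stem, then parallel
+# 40/80/160/320-channel branches that are repeatedly fused across resolutions.
+# Minimal usage sketch (assumes `import mindspore as ms` and `import numpy as np`;
+# the checkpoint name is illustrative only):
+#   net = MindSporeModel()
+#   ms.load_param_into_net(net, ms.load_checkpoint("hrnet_w40.ckpt"))
+#   out = net(ms.Tensor(np.zeros((1, 3, 224, 224), np.float32)))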
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=64,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module5_0 = Module5(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_0 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_5 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_11 = nn.ReLU()
+        self.module15_1 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_18 = nn.ReLU()
+        self.module15_2 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_25 = nn.ReLU()
+        self.module15_3 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_32 = nn.ReLU()
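+        # Transition to the two-branch stage: map the 256-channel stem features to a
+        # 40-channel branch and a stride-2 80-channel branch.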
+        self.module5_1 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=40,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module5_2 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=80,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_0 = Module16(module0_0_conv2d_0_in_channels=40,
+                                   module0_0_conv2d_0_out_channels=40,
+                                   module0_0_conv2d_2_in_channels=40,
+                                   module0_0_conv2d_2_out_channels=40,
+                                   module0_0_conv2d_5_in_channels=40,
+                                   module0_0_conv2d_5_out_channels=40,
+                                   module0_0_conv2d_7_in_channels=40,
+                                   module0_0_conv2d_7_out_channels=40,
+                                   module0_1_conv2d_0_in_channels=40,
+                                   module0_1_conv2d_0_out_channels=40,
+                                   module0_1_conv2d_2_in_channels=40,
+                                   module0_1_conv2d_2_out_channels=40,
+                                   module0_1_conv2d_5_in_channels=40,
+                                   module0_1_conv2d_5_out_channels=40,
+                                   module0_1_conv2d_7_in_channels=40,
+                                   module0_1_conv2d_7_out_channels=40)
+        self.module16_1 = Module16(module0_0_conv2d_0_in_channels=80,
+                                   module0_0_conv2d_0_out_channels=80,
+                                   module0_0_conv2d_2_in_channels=80,
+                                   module0_0_conv2d_2_out_channels=80,
+                                   module0_0_conv2d_5_in_channels=80,
+                                   module0_0_conv2d_5_out_channels=80,
+                                   module0_0_conv2d_7_in_channels=80,
+                                   module0_0_conv2d_7_out_channels=80,
+                                   module0_1_conv2d_0_in_channels=80,
+                                   module0_1_conv2d_0_out_channels=80,
+                                   module0_1_conv2d_2_in_channels=80,
+                                   module0_1_conv2d_2_out_channels=80,
+                                   module0_1_conv2d_5_in_channels=80,
+                                   module0_1_conv2d_5_out_channels=80,
+                                   module0_1_conv2d_7_in_channels=80,
+                                   module0_1_conv2d_7_out_channels=80)
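+        # Cross-resolution fusion: module7_0 upsamples the 80-channel branch to 56x56 for the
+        # 40-channel branch, while conv2d_77 downsamples the 40-channel branch to 80 channels.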
+        self.module7_0 = Module7(conv2d_0_in_channels=80,
+                                 conv2d_0_out_channels=40,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_85 = nn.ReLU()
+        self.conv2d_77 = nn.Conv2d(in_channels=40,
+                                   out_channels=80,
+                                   kernel_size=(3, 3),
+                                   stride=(2, 2),
+                                   padding=(1, 1, 1, 1),
+                                   pad_mode="pad",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_81 = nn.ReLU()
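+        # Transition to the three-branch stage: a stride-2 convolution derives a 160-channel
+        # branch from the 80-channel branch.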
+        self.module5_3 = Module5(conv2d_0_in_channels=80,
+                                 conv2d_0_out_channels=160,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_2 = Module16(module0_0_conv2d_0_in_channels=40,
+                                   module0_0_conv2d_0_out_channels=40,
+                                   module0_0_conv2d_2_in_channels=40,
+                                   module0_0_conv2d_2_out_channels=40,
+                                   module0_0_conv2d_5_in_channels=40,
+                                   module0_0_conv2d_5_out_channels=40,
+                                   module0_0_conv2d_7_in_channels=40,
+                                   module0_0_conv2d_7_out_channels=40,
+                                   module0_1_conv2d_0_in_channels=40,
+                                   module0_1_conv2d_0_out_channels=40,
+                                   module0_1_conv2d_2_in_channels=40,
+                                   module0_1_conv2d_2_out_channels=40,
+                                   module0_1_conv2d_5_in_channels=40,
+                                   module0_1_conv2d_5_out_channels=40,
+                                   module0_1_conv2d_7_in_channels=40,
+                                   module0_1_conv2d_7_out_channels=40)
+        self.module16_3 = Module16(module0_0_conv2d_0_in_channels=80,
+                                   module0_0_conv2d_0_out_channels=80,
+                                   module0_0_conv2d_2_in_channels=80,
+                                   module0_0_conv2d_2_out_channels=80,
+                                   module0_0_conv2d_5_in_channels=80,
+                                   module0_0_conv2d_5_out_channels=80,
+                                   module0_0_conv2d_7_in_channels=80,
+                                   module0_0_conv2d_7_out_channels=80,
+                                   module0_1_conv2d_0_in_channels=80,
+                                   module0_1_conv2d_0_out_channels=80,
+                                   module0_1_conv2d_2_in_channels=80,
+                                   module0_1_conv2d_2_out_channels=80,
+                                   module0_1_conv2d_5_in_channels=80,
+                                   module0_1_conv2d_5_out_channels=80,
+                                   module0_1_conv2d_7_in_channels=80,
+                                   module0_1_conv2d_7_out_channels=80)
+        self.module16_4 = Module16(module0_0_conv2d_0_in_channels=160,
+                                   module0_0_conv2d_0_out_channels=160,
+                                   module0_0_conv2d_2_in_channels=160,
+                                   module0_0_conv2d_2_out_channels=160,
+                                   module0_0_conv2d_5_in_channels=160,
+                                   module0_0_conv2d_5_out_channels=160,
+                                   module0_0_conv2d_7_in_channels=160,
+                                   module0_0_conv2d_7_out_channels=160,
+                                   module0_1_conv2d_0_in_channels=160,
+                                   module0_1_conv2d_0_out_channels=160,
+                                   module0_1_conv2d_2_in_channels=160,
+                                   module0_1_conv2d_2_out_channels=160,
+                                   module0_1_conv2d_5_in_channels=160,
+                                   module0_1_conv2d_5_out_channels=160,
+                                   module0_1_conv2d_7_in_channels=160,
+                                   module0_1_conv2d_7_out_channels=160)
+        self.module7_1 = Module7(conv2d_0_in_channels=80,
+                                 conv2d_0_out_channels=40,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_2 = Module7(conv2d_0_in_channels=160,
+                                 conv2d_0_out_channels=40,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_162 = nn.ReLU()
+        self.conv2d_149 = nn.Conv2d(in_channels=40,
+                                    out_channels=80,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_3 = Module7(conv2d_0_in_channels=160,
+                                 conv2d_0_out_channels=80,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_163 = nn.ReLU()
+        self.module11_0 = Module11(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=160,
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_145 = nn.Conv2d(in_channels=80,
+                                    out_channels=160,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_167 = nn.ReLU()
+        self.module16_5 = Module16(module0_0_conv2d_0_in_channels=40,
+                                   module0_0_conv2d_0_out_channels=40,
+                                   module0_0_conv2d_2_in_channels=40,
+                                   module0_0_conv2d_2_out_channels=40,
+                                   module0_0_conv2d_5_in_channels=40,
+                                   module0_0_conv2d_5_out_channels=40,
+                                   module0_0_conv2d_7_in_channels=40,
+                                   module0_0_conv2d_7_out_channels=40,
+                                   module0_1_conv2d_0_in_channels=40,
+                                   module0_1_conv2d_0_out_channels=40,
+                                   module0_1_conv2d_2_in_channels=40,
+                                   module0_1_conv2d_2_out_channels=40,
+                                   module0_1_conv2d_5_in_channels=40,
+                                   module0_1_conv2d_5_out_channels=40,
+                                   module0_1_conv2d_7_in_channels=40,
+                                   module0_1_conv2d_7_out_channels=40)
+        self.module16_6 = Module16(module0_0_conv2d_0_in_channels=80,
+                                   module0_0_conv2d_0_out_channels=80,
+                                   module0_0_conv2d_2_in_channels=80,
+                                   module0_0_conv2d_2_out_channels=80,
+                                   module0_0_conv2d_5_in_channels=80,
+                                   module0_0_conv2d_5_out_channels=80,
+                                   module0_0_conv2d_7_in_channels=80,
+                                   module0_0_conv2d_7_out_channels=80,
+                                   module0_1_conv2d_0_in_channels=80,
+                                   module0_1_conv2d_0_out_channels=80,
+                                   module0_1_conv2d_2_in_channels=80,
+                                   module0_1_conv2d_2_out_channels=80,
+                                   module0_1_conv2d_5_in_channels=80,
+                                   module0_1_conv2d_5_out_channels=80,
+                                   module0_1_conv2d_7_in_channels=80,
+                                   module0_1_conv2d_7_out_channels=80)
+        self.module16_7 = Module16(module0_0_conv2d_0_in_channels=160,
+                                   module0_0_conv2d_0_out_channels=160,
+                                   module0_0_conv2d_2_in_channels=160,
+                                   module0_0_conv2d_2_out_channels=160,
+                                   module0_0_conv2d_5_in_channels=160,
+                                   module0_0_conv2d_5_out_channels=160,
+                                   module0_0_conv2d_7_in_channels=160,
+                                   module0_0_conv2d_7_out_channels=160,
+                                   module0_1_conv2d_0_in_channels=160,
+                                   module0_1_conv2d_0_out_channels=160,
+                                   module0_1_conv2d_2_in_channels=160,
+                                   module0_1_conv2d_2_out_channels=160,
+                                   module0_1_conv2d_5_in_channels=160,
+                                   module0_1_conv2d_5_out_channels=160,
+                                   module0_1_conv2d_7_in_channels=160,
+                                   module0_1_conv2d_7_out_channels=160)
+        self.module7_4 = Module7(conv2d_0_in_channels=80,
+                                 conv2d_0_out_channels=40,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_5 = Module7(conv2d_0_in_channels=160,
+                                 conv2d_0_out_channels=40,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_243 = nn.ReLU()
+        self.conv2d_225 = nn.Conv2d(in_channels=40,
+                                    out_channels=80,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_6 = Module7(conv2d_0_in_channels=160,
+                                 conv2d_0_out_channels=80,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_244 = nn.ReLU()
+        self.module11_1 = Module11(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=160,
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_228 = nn.Conv2d(in_channels=80,
+                                    out_channels=160,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_245 = nn.ReLU()
+        self.module16_8 = Module16(module0_0_conv2d_0_in_channels=40,
+                                   module0_0_conv2d_0_out_channels=40,
+                                   module0_0_conv2d_2_in_channels=40,
+                                   module0_0_conv2d_2_out_channels=40,
+                                   module0_0_conv2d_5_in_channels=40,
+                                   module0_0_conv2d_5_out_channels=40,
+                                   module0_0_conv2d_7_in_channels=40,
+                                   module0_0_conv2d_7_out_channels=40,
+                                   module0_1_conv2d_0_in_channels=40,
+                                   module0_1_conv2d_0_out_channels=40,
+                                   module0_1_conv2d_2_in_channels=40,
+                                   module0_1_conv2d_2_out_channels=40,
+                                   module0_1_conv2d_5_in_channels=40,
+                                   module0_1_conv2d_5_out_channels=40,
+                                   module0_1_conv2d_7_in_channels=40,
+                                   module0_1_conv2d_7_out_channels=40)
+        self.module16_9 = Module16(module0_0_conv2d_0_in_channels=80,
+                                   module0_0_conv2d_0_out_channels=80,
+                                   module0_0_conv2d_2_in_channels=80,
+                                   module0_0_conv2d_2_out_channels=80,
+                                   module0_0_conv2d_5_in_channels=80,
+                                   module0_0_conv2d_5_out_channels=80,
+                                   module0_0_conv2d_7_in_channels=80,
+                                   module0_0_conv2d_7_out_channels=80,
+                                   module0_1_conv2d_0_in_channels=80,
+                                   module0_1_conv2d_0_out_channels=80,
+                                   module0_1_conv2d_2_in_channels=80,
+                                   module0_1_conv2d_2_out_channels=80,
+                                   module0_1_conv2d_5_in_channels=80,
+                                   module0_1_conv2d_5_out_channels=80,
+                                   module0_1_conv2d_7_in_channels=80,
+                                   module0_1_conv2d_7_out_channels=80)
+        self.module16_10 = Module16(module0_0_conv2d_0_in_channels=160,
+                                    module0_0_conv2d_0_out_channels=160,
+                                    module0_0_conv2d_2_in_channels=160,
+                                    module0_0_conv2d_2_out_channels=160,
+                                    module0_0_conv2d_5_in_channels=160,
+                                    module0_0_conv2d_5_out_channels=160,
+                                    module0_0_conv2d_7_in_channels=160,
+                                    module0_0_conv2d_7_out_channels=160,
+                                    module0_1_conv2d_0_in_channels=160,
+                                    module0_1_conv2d_0_out_channels=160,
+                                    module0_1_conv2d_2_in_channels=160,
+                                    module0_1_conv2d_2_out_channels=160,
+                                    module0_1_conv2d_5_in_channels=160,
+                                    module0_1_conv2d_5_out_channels=160,
+                                    module0_1_conv2d_7_in_channels=160,
+                                    module0_1_conv2d_7_out_channels=160)
+        self.module7_7 = Module7(conv2d_0_in_channels=80,
+                                 conv2d_0_out_channels=40,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_8 = Module7(conv2d_0_in_channels=160,
+                                 conv2d_0_out_channels=40,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_324 = nn.ReLU()
+        self.conv2d_306 = nn.Conv2d(in_channels=40,
+                                    out_channels=80,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_9 = Module7(conv2d_0_in_channels=160,
+                                 conv2d_0_out_channels=80,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_322 = nn.ReLU()
+        self.module11_2 = Module11(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=160,
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_309 = nn.Conv2d(in_channels=80,
+                                    out_channels=160,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_326 = nn.ReLU()
+        self.module16_11 = Module16(module0_0_conv2d_0_in_channels=40,
+                                    module0_0_conv2d_0_out_channels=40,
+                                    module0_0_conv2d_2_in_channels=40,
+                                    module0_0_conv2d_2_out_channels=40,
+                                    module0_0_conv2d_5_in_channels=40,
+                                    module0_0_conv2d_5_out_channels=40,
+                                    module0_0_conv2d_7_in_channels=40,
+                                    module0_0_conv2d_7_out_channels=40,
+                                    module0_1_conv2d_0_in_channels=40,
+                                    module0_1_conv2d_0_out_channels=40,
+                                    module0_1_conv2d_2_in_channels=40,
+                                    module0_1_conv2d_2_out_channels=40,
+                                    module0_1_conv2d_5_in_channels=40,
+                                    module0_1_conv2d_5_out_channels=40,
+                                    module0_1_conv2d_7_in_channels=40,
+                                    module0_1_conv2d_7_out_channels=40)
+        self.module16_12 = Module16(module0_0_conv2d_0_in_channels=80,
+                                    module0_0_conv2d_0_out_channels=80,
+                                    module0_0_conv2d_2_in_channels=80,
+                                    module0_0_conv2d_2_out_channels=80,
+                                    module0_0_conv2d_5_in_channels=80,
+                                    module0_0_conv2d_5_out_channels=80,
+                                    module0_0_conv2d_7_in_channels=80,
+                                    module0_0_conv2d_7_out_channels=80,
+                                    module0_1_conv2d_0_in_channels=80,
+                                    module0_1_conv2d_0_out_channels=80,
+                                    module0_1_conv2d_2_in_channels=80,
+                                    module0_1_conv2d_2_out_channels=80,
+                                    module0_1_conv2d_5_in_channels=80,
+                                    module0_1_conv2d_5_out_channels=80,
+                                    module0_1_conv2d_7_in_channels=80,
+                                    module0_1_conv2d_7_out_channels=80)
+        self.module16_13 = Module16(module0_0_conv2d_0_in_channels=160,
+                                    module0_0_conv2d_0_out_channels=160,
+                                    module0_0_conv2d_2_in_channels=160,
+                                    module0_0_conv2d_2_out_channels=160,
+                                    module0_0_conv2d_5_in_channels=160,
+                                    module0_0_conv2d_5_out_channels=160,
+                                    module0_0_conv2d_7_in_channels=160,
+                                    module0_0_conv2d_7_out_channels=160,
+                                    module0_1_conv2d_0_in_channels=160,
+                                    module0_1_conv2d_0_out_channels=160,
+                                    module0_1_conv2d_2_in_channels=160,
+                                    module0_1_conv2d_2_out_channels=160,
+                                    module0_1_conv2d_5_in_channels=160,
+                                    module0_1_conv2d_5_out_channels=160,
+                                    module0_1_conv2d_7_in_channels=160,
+                                    module0_1_conv2d_7_out_channels=160)
+        self.module7_10 = Module7(conv2d_0_in_channels=80,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_11 = Module7(conv2d_0_in_channels=160,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_402 = nn.ReLU()
+        self.conv2d_388 = nn.Conv2d(in_channels=40,
+                                    out_channels=80,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_12 = Module7(conv2d_0_in_channels=160,
+                                  conv2d_0_out_channels=80,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_403 = nn.ReLU()
+        self.module11_3 = Module11(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=160,
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_386 = nn.Conv2d(in_channels=80,
+                                    out_channels=160,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_407 = nn.ReLU()
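+        # Transition to the four-branch stage: a stride-2 convolution derives a 320-channel
+        # branch from the 160-channel branch.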
+        self.module5_4 = Module5(conv2d_0_in_channels=160,
+                                 conv2d_0_out_channels=320,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_14 = Module16(module0_0_conv2d_0_in_channels=40,
+                                    module0_0_conv2d_0_out_channels=40,
+                                    module0_0_conv2d_2_in_channels=40,
+                                    module0_0_conv2d_2_out_channels=40,
+                                    module0_0_conv2d_5_in_channels=40,
+                                    module0_0_conv2d_5_out_channels=40,
+                                    module0_0_conv2d_7_in_channels=40,
+                                    module0_0_conv2d_7_out_channels=40,
+                                    module0_1_conv2d_0_in_channels=40,
+                                    module0_1_conv2d_0_out_channels=40,
+                                    module0_1_conv2d_2_in_channels=40,
+                                    module0_1_conv2d_2_out_channels=40,
+                                    module0_1_conv2d_5_in_channels=40,
+                                    module0_1_conv2d_5_out_channels=40,
+                                    module0_1_conv2d_7_in_channels=40,
+                                    module0_1_conv2d_7_out_channels=40)
+        self.module16_15 = Module16(module0_0_conv2d_0_in_channels=80,
+                                    module0_0_conv2d_0_out_channels=80,
+                                    module0_0_conv2d_2_in_channels=80,
+                                    module0_0_conv2d_2_out_channels=80,
+                                    module0_0_conv2d_5_in_channels=80,
+                                    module0_0_conv2d_5_out_channels=80,
+                                    module0_0_conv2d_7_in_channels=80,
+                                    module0_0_conv2d_7_out_channels=80,
+                                    module0_1_conv2d_0_in_channels=80,
+                                    module0_1_conv2d_0_out_channels=80,
+                                    module0_1_conv2d_2_in_channels=80,
+                                    module0_1_conv2d_2_out_channels=80,
+                                    module0_1_conv2d_5_in_channels=80,
+                                    module0_1_conv2d_5_out_channels=80,
+                                    module0_1_conv2d_7_in_channels=80,
+                                    module0_1_conv2d_7_out_channels=80)
+        self.module16_16 = Module16(module0_0_conv2d_0_in_channels=160,
+                                    module0_0_conv2d_0_out_channels=160,
+                                    module0_0_conv2d_2_in_channels=160,
+                                    module0_0_conv2d_2_out_channels=160,
+                                    module0_0_conv2d_5_in_channels=160,
+                                    module0_0_conv2d_5_out_channels=160,
+                                    module0_0_conv2d_7_in_channels=160,
+                                    module0_0_conv2d_7_out_channels=160,
+                                    module0_1_conv2d_0_in_channels=160,
+                                    module0_1_conv2d_0_out_channels=160,
+                                    module0_1_conv2d_2_in_channels=160,
+                                    module0_1_conv2d_2_out_channels=160,
+                                    module0_1_conv2d_5_in_channels=160,
+                                    module0_1_conv2d_5_out_channels=160,
+                                    module0_1_conv2d_7_in_channels=160,
+                                    module0_1_conv2d_7_out_channels=160)
+        self.module16_17 = Module16(module0_0_conv2d_0_in_channels=320,
+                                    module0_0_conv2d_0_out_channels=320,
+                                    module0_0_conv2d_2_in_channels=320,
+                                    module0_0_conv2d_2_out_channels=320,
+                                    module0_0_conv2d_5_in_channels=320,
+                                    module0_0_conv2d_5_out_channels=320,
+                                    module0_0_conv2d_7_in_channels=320,
+                                    module0_0_conv2d_7_out_channels=320,
+                                    module0_1_conv2d_0_in_channels=320,
+                                    module0_1_conv2d_0_out_channels=320,
+                                    module0_1_conv2d_2_in_channels=320,
+                                    module0_1_conv2d_2_out_channels=320,
+                                    module0_1_conv2d_5_in_channels=320,
+                                    module0_1_conv2d_5_out_channels=320,
+                                    module0_1_conv2d_7_in_channels=320,
+                                    module0_1_conv2d_7_out_channels=320)
+        self.module7_13 = Module7(conv2d_0_in_channels=80,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_14 = Module7(conv2d_0_in_channels=160,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_15 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_525 = nn.ReLU()
+        self.conv2d_484 = nn.Conv2d(in_channels=40,
+                                    out_channels=80,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_16 = Module7(conv2d_0_in_channels=160,
+                                  conv2d_0_out_channels=80,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_17 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=80,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_526 = nn.ReLU()
+        self.module11_4 = Module11(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=160,
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_488 = nn.Conv2d(in_channels=80,
+                                    out_channels=160,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_18 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=160,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_527 = nn.ReLU()
+        self.module15_4 = Module15(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=320,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=40,
+                                   module5_1_conv2d_0_out_channels=40,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_5 = Module11(conv2d_0_in_channels=80,
+                                   conv2d_0_out_channels=320,
+                                   module5_0_conv2d_0_in_channels=80,
+                                   module5_0_conv2d_0_out_channels=80,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_500 = nn.Conv2d(in_channels=160,
+                                    out_channels=320,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_532 = nn.ReLU()
+        self.module16_18 = Module16(module0_0_conv2d_0_in_channels=40,
+                                    module0_0_conv2d_0_out_channels=40,
+                                    module0_0_conv2d_2_in_channels=40,
+                                    module0_0_conv2d_2_out_channels=40,
+                                    module0_0_conv2d_5_in_channels=40,
+                                    module0_0_conv2d_5_out_channels=40,
+                                    module0_0_conv2d_7_in_channels=40,
+                                    module0_0_conv2d_7_out_channels=40,
+                                    module0_1_conv2d_0_in_channels=40,
+                                    module0_1_conv2d_0_out_channels=40,
+                                    module0_1_conv2d_2_in_channels=40,
+                                    module0_1_conv2d_2_out_channels=40,
+                                    module0_1_conv2d_5_in_channels=40,
+                                    module0_1_conv2d_5_out_channels=40,
+                                    module0_1_conv2d_7_in_channels=40,
+                                    module0_1_conv2d_7_out_channels=40)
+        self.module16_19 = Module16(module0_0_conv2d_0_in_channels=80,
+                                    module0_0_conv2d_0_out_channels=80,
+                                    module0_0_conv2d_2_in_channels=80,
+                                    module0_0_conv2d_2_out_channels=80,
+                                    module0_0_conv2d_5_in_channels=80,
+                                    module0_0_conv2d_5_out_channels=80,
+                                    module0_0_conv2d_7_in_channels=80,
+                                    module0_0_conv2d_7_out_channels=80,
+                                    module0_1_conv2d_0_in_channels=80,
+                                    module0_1_conv2d_0_out_channels=80,
+                                    module0_1_conv2d_2_in_channels=80,
+                                    module0_1_conv2d_2_out_channels=80,
+                                    module0_1_conv2d_5_in_channels=80,
+                                    module0_1_conv2d_5_out_channels=80,
+                                    module0_1_conv2d_7_in_channels=80,
+                                    module0_1_conv2d_7_out_channels=80)
+        self.module16_20 = Module16(module0_0_conv2d_0_in_channels=160,
+                                    module0_0_conv2d_0_out_channels=160,
+                                    module0_0_conv2d_2_in_channels=160,
+                                    module0_0_conv2d_2_out_channels=160,
+                                    module0_0_conv2d_5_in_channels=160,
+                                    module0_0_conv2d_5_out_channels=160,
+                                    module0_0_conv2d_7_in_channels=160,
+                                    module0_0_conv2d_7_out_channels=160,
+                                    module0_1_conv2d_0_in_channels=160,
+                                    module0_1_conv2d_0_out_channels=160,
+                                    module0_1_conv2d_2_in_channels=160,
+                                    module0_1_conv2d_2_out_channels=160,
+                                    module0_1_conv2d_5_in_channels=160,
+                                    module0_1_conv2d_5_out_channels=160,
+                                    module0_1_conv2d_7_in_channels=160,
+                                    module0_1_conv2d_7_out_channels=160)
+        self.module16_21 = Module16(module0_0_conv2d_0_in_channels=320,
+                                    module0_0_conv2d_0_out_channels=320,
+                                    module0_0_conv2d_2_in_channels=320,
+                                    module0_0_conv2d_2_out_channels=320,
+                                    module0_0_conv2d_5_in_channels=320,
+                                    module0_0_conv2d_5_out_channels=320,
+                                    module0_0_conv2d_7_in_channels=320,
+                                    module0_0_conv2d_7_out_channels=320,
+                                    module0_1_conv2d_0_in_channels=320,
+                                    module0_1_conv2d_0_out_channels=320,
+                                    module0_1_conv2d_2_in_channels=320,
+                                    module0_1_conv2d_2_out_channels=320,
+                                    module0_1_conv2d_5_in_channels=320,
+                                    module0_1_conv2d_5_out_channels=320,
+                                    module0_1_conv2d_7_in_channels=320,
+                                    module0_1_conv2d_7_out_channels=320)
+        self.module7_19 = Module7(conv2d_0_in_channels=80,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_20 = Module7(conv2d_0_in_channels=160,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_21 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_647 = nn.ReLU()
+        self.conv2d_609 = nn.Conv2d(in_channels=40,
+                                    out_channels=80,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_22 = Module7(conv2d_0_in_channels=160,
+                                  conv2d_0_out_channels=80,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_23 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=80,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_644 = nn.ReLU()
+        self.module11_6 = Module11(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=160,
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_613 = nn.Conv2d(in_channels=80,
+                                    out_channels=160,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_24 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=160,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_649 = nn.ReLU()
+        self.module15_5 = Module15(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=320,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=40,
+                                   module5_1_conv2d_0_out_channels=40,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_7 = Module11(conv2d_0_in_channels=80,
+                                   conv2d_0_out_channels=320,
+                                   module5_0_conv2d_0_in_channels=80,
+                                   module5_0_conv2d_0_out_channels=80,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_617 = nn.Conv2d(in_channels=160,
+                                    out_channels=320,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_658 = nn.ReLU()
+        self.module16_22 = Module16(module0_0_conv2d_0_in_channels=40,
+                                    module0_0_conv2d_0_out_channels=40,
+                                    module0_0_conv2d_2_in_channels=40,
+                                    module0_0_conv2d_2_out_channels=40,
+                                    module0_0_conv2d_5_in_channels=40,
+                                    module0_0_conv2d_5_out_channels=40,
+                                    module0_0_conv2d_7_in_channels=40,
+                                    module0_0_conv2d_7_out_channels=40,
+                                    module0_1_conv2d_0_in_channels=40,
+                                    module0_1_conv2d_0_out_channels=40,
+                                    module0_1_conv2d_2_in_channels=40,
+                                    module0_1_conv2d_2_out_channels=40,
+                                    module0_1_conv2d_5_in_channels=40,
+                                    module0_1_conv2d_5_out_channels=40,
+                                    module0_1_conv2d_7_in_channels=40,
+                                    module0_1_conv2d_7_out_channels=40)
+        self.module16_23 = Module16(module0_0_conv2d_0_in_channels=80,
+                                    module0_0_conv2d_0_out_channels=80,
+                                    module0_0_conv2d_2_in_channels=80,
+                                    module0_0_conv2d_2_out_channels=80,
+                                    module0_0_conv2d_5_in_channels=80,
+                                    module0_0_conv2d_5_out_channels=80,
+                                    module0_0_conv2d_7_in_channels=80,
+                                    module0_0_conv2d_7_out_channels=80,
+                                    module0_1_conv2d_0_in_channels=80,
+                                    module0_1_conv2d_0_out_channels=80,
+                                    module0_1_conv2d_2_in_channels=80,
+                                    module0_1_conv2d_2_out_channels=80,
+                                    module0_1_conv2d_5_in_channels=80,
+                                    module0_1_conv2d_5_out_channels=80,
+                                    module0_1_conv2d_7_in_channels=80,
+                                    module0_1_conv2d_7_out_channels=80)
+        self.module16_24 = Module16(module0_0_conv2d_0_in_channels=160,
+                                    module0_0_conv2d_0_out_channels=160,
+                                    module0_0_conv2d_2_in_channels=160,
+                                    module0_0_conv2d_2_out_channels=160,
+                                    module0_0_conv2d_5_in_channels=160,
+                                    module0_0_conv2d_5_out_channels=160,
+                                    module0_0_conv2d_7_in_channels=160,
+                                    module0_0_conv2d_7_out_channels=160,
+                                    module0_1_conv2d_0_in_channels=160,
+                                    module0_1_conv2d_0_out_channels=160,
+                                    module0_1_conv2d_2_in_channels=160,
+                                    module0_1_conv2d_2_out_channels=160,
+                                    module0_1_conv2d_5_in_channels=160,
+                                    module0_1_conv2d_5_out_channels=160,
+                                    module0_1_conv2d_7_in_channels=160,
+                                    module0_1_conv2d_7_out_channels=160)
+        self.module16_25 = Module16(module0_0_conv2d_0_in_channels=320,
+                                    module0_0_conv2d_0_out_channels=320,
+                                    module0_0_conv2d_2_in_channels=320,
+                                    module0_0_conv2d_2_out_channels=320,
+                                    module0_0_conv2d_5_in_channels=320,
+                                    module0_0_conv2d_5_out_channels=320,
+                                    module0_0_conv2d_7_in_channels=320,
+                                    module0_0_conv2d_7_out_channels=320,
+                                    module0_1_conv2d_0_in_channels=320,
+                                    module0_1_conv2d_0_out_channels=320,
+                                    module0_1_conv2d_2_in_channels=320,
+                                    module0_1_conv2d_2_out_channels=320,
+                                    module0_1_conv2d_5_in_channels=320,
+                                    module0_1_conv2d_5_out_channels=320,
+                                    module0_1_conv2d_7_in_channels=320,
+                                    module0_1_conv2d_7_out_channels=320)
+        self.module7_25 = Module7(conv2d_0_in_channels=80,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_26 = Module7(conv2d_0_in_channels=160,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_27 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=40,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_768 = nn.ReLU()
+        self.conv2d_733 = nn.Conv2d(in_channels=40,
+                                    out_channels=80,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_28 = Module7(conv2d_0_in_channels=160,
+                                  conv2d_0_out_channels=80,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_29 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=80,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_769 = nn.ReLU()
+        self.module11_8 = Module11(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=160,
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_729 = nn.Conv2d(in_channels=80,
+                                    out_channels=160,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_30 = Module7(conv2d_0_in_channels=320,
+                                  conv2d_0_out_channels=160,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_770 = nn.ReLU()
+        self.module15_6 = Module15(conv2d_0_in_channels=40,
+                                   conv2d_0_out_channels=320,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=40,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=40,
+                                   module5_1_conv2d_0_out_channels=40,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_9 = Module11(conv2d_0_in_channels=80,
+                                   conv2d_0_out_channels=320,
+                                   module5_0_conv2d_0_in_channels=80,
+                                   module5_0_conv2d_0_out_channels=80,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_740 = nn.Conv2d(in_channels=160,
+                                    out_channels=320,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_782 = nn.ReLU()
+        self.module15_7 = Module15(conv2d_0_in_channels=32,
+                                   conv2d_0_out_channels=128,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=40,
+                                   module5_0_conv2d_0_out_channels=32,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=32,
+                                   module5_1_conv2d_0_out_channels=32,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_773 = nn.Conv2d(in_channels=40,
+                                    out_channels=128,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_800 = nn.ReLU()
+        self.module15_8 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=80,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_775 = nn.Conv2d(in_channels=80,
+                                    out_channels=256,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_801 = nn.ReLU()
+        self.module5_5 = Module5(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_9 = Module15(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=160,
+                                   module5_0_conv2d_0_out_channels=128,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=128,
+                                   module5_1_conv2d_0_out_channels=128,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_777 = nn.Conv2d(in_channels=160,
+                                    out_channels=512,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_802 = nn.ReLU()
+        self.module5_6 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_10 = Module15(conv2d_0_in_channels=256,
+                                    conv2d_0_out_channels=1024,
+                                    conv2d_0_kernel_size=(1, 1),
+                                    conv2d_0_stride=(1, 1),
+                                    conv2d_0_padding=0,
+                                    conv2d_0_pad_mode="valid",
+                                    module5_0_conv2d_0_in_channels=320,
+                                    module5_0_conv2d_0_out_channels=256,
+                                    module5_0_conv2d_0_kernel_size=(1, 1),
+                                    module5_0_conv2d_0_stride=(1, 1),
+                                    module5_0_conv2d_0_padding=0,
+                                    module5_0_conv2d_0_pad_mode="valid",
+                                    module5_1_conv2d_0_in_channels=256,
+                                    module5_1_conv2d_0_out_channels=256,
+                                    module5_1_conv2d_0_kernel_size=(3, 3),
+                                    module5_1_conv2d_0_stride=(1, 1),
+                                    module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                    module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_787 = nn.Conv2d(in_channels=320,
+                                    out_channels=1024,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_807 = nn.ReLU()
+        self.module5_7 = Module5(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=1024,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module5_8 = Module5(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=2048,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid")
+        self.avgpool2d_817 = nn.AvgPool2d(kernel_size=(7, 7))
+        self.flatten_818 = nn.Flatten()
+        self.dense_819 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
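+    # Forward pass: repeated multi-resolution fusion (element-wise Add + ReLU across branches),
+    # followed by the classification head (average pooling, flatten, dense).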
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module5_0_opt = self.module5_0(opt_relu_1)
+        module15_0_opt = self.module15_0(module5_0_opt)
+        opt_conv2d_5 = self.conv2d_5(module5_0_opt)
+        opt_add_10 = P.Add()(module15_0_opt, opt_conv2d_5)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module15_1_opt = self.module15_1(opt_relu_11)
+        opt_add_17 = P.Add()(module15_1_opt, opt_relu_11)
+        opt_relu_18 = self.relu_18(opt_add_17)
+        module15_2_opt = self.module15_2(opt_relu_18)
+        opt_add_24 = P.Add()(module15_2_opt, opt_relu_18)
+        opt_relu_25 = self.relu_25(opt_add_24)
+        module15_3_opt = self.module15_3(opt_relu_25)
+        opt_add_31 = P.Add()(module15_3_opt, opt_relu_25)
+        opt_relu_32 = self.relu_32(opt_add_31)
+        module5_1_opt = self.module5_1(opt_relu_32)
+        module5_2_opt = self.module5_2(opt_relu_32)
+        module16_0_opt = self.module16_0(module5_1_opt)
+        module16_1_opt = self.module16_1(module5_2_opt)
+        module7_0_opt = self.module7_0(module16_1_opt)
+        opt_add_82 = P.Add()(module16_0_opt, module7_0_opt)
+        opt_relu_85 = self.relu_85(opt_add_82)
+        opt_conv2d_77 = self.conv2d_77(module16_0_opt)
+        opt_add_79 = P.Add()(opt_conv2d_77, module16_1_opt)
+        opt_relu_81 = self.relu_81(opt_add_79)
+        module5_3_opt = self.module5_3(opt_relu_81)
+        module16_2_opt = self.module16_2(opt_relu_85)
+        module16_3_opt = self.module16_3(opt_relu_81)
+        module16_4_opt = self.module16_4(module5_3_opt)
+        module7_1_opt = self.module7_1(module16_3_opt)
+        opt_add_153 = P.Add()(module16_2_opt, module7_1_opt)
+        module7_2_opt = self.module7_2(module16_4_opt)
+        opt_add_159 = P.Add()(opt_add_153, module7_2_opt)
+        opt_relu_162 = self.relu_162(opt_add_159)
+        opt_conv2d_149 = self.conv2d_149(module16_2_opt)
+        opt_add_154 = P.Add()(opt_conv2d_149, module16_3_opt)
+        module7_3_opt = self.module7_3(module16_4_opt)
+        opt_add_160 = P.Add()(opt_add_154, module7_3_opt)
+        opt_relu_163 = self.relu_163(opt_add_160)
+        module11_0_opt = self.module11_0(module16_2_opt)
+        opt_conv2d_145 = self.conv2d_145(module16_3_opt)
+        opt_add_161 = P.Add()(module11_0_opt, opt_conv2d_145)
+        opt_add_164 = P.Add()(opt_add_161, module16_4_opt)
+        opt_relu_167 = self.relu_167(opt_add_164)
+        module16_5_opt = self.module16_5(opt_relu_162)
+        module16_6_opt = self.module16_6(opt_relu_163)
+        module16_7_opt = self.module16_7(opt_relu_167)
+        module7_4_opt = self.module7_4(module16_6_opt)
+        opt_add_236 = P.Add()(module16_5_opt, module7_4_opt)
+        module7_5_opt = self.module7_5(module16_7_opt)
+        opt_add_240 = P.Add()(opt_add_236, module7_5_opt)
+        opt_relu_243 = self.relu_243(opt_add_240)
+        opt_conv2d_225 = self.conv2d_225(module16_5_opt)
+        opt_add_230 = P.Add()(opt_conv2d_225, module16_6_opt)
+        module7_6_opt = self.module7_6(module16_7_opt)
+        opt_add_241 = P.Add()(opt_add_230, module7_6_opt)
+        opt_relu_244 = self.relu_244(opt_add_241)
+        module11_1_opt = self.module11_1(module16_5_opt)
+        opt_conv2d_228 = self.conv2d_228(module16_6_opt)
+        opt_add_239 = P.Add()(module11_1_opt, opt_conv2d_228)
+        opt_add_242 = P.Add()(opt_add_239, module16_7_opt)
+        opt_relu_245 = self.relu_245(opt_add_242)
+        module16_8_opt = self.module16_8(opt_relu_243)
+        module16_9_opt = self.module16_9(opt_relu_244)
+        module16_10_opt = self.module16_10(opt_relu_245)
+        module7_7_opt = self.module7_7(module16_9_opt)
+        opt_add_318 = P.Add()(module16_8_opt, module7_7_opt)
+        module7_8_opt = self.module7_8(module16_10_opt)
+        opt_add_321 = P.Add()(opt_add_318, module7_8_opt)
+        opt_relu_324 = self.relu_324(opt_add_321)
+        opt_conv2d_306 = self.conv2d_306(module16_8_opt)
+        opt_add_312 = P.Add()(opt_conv2d_306, module16_9_opt)
+        module7_9_opt = self.module7_9(module16_10_opt)
+        opt_add_319 = P.Add()(opt_add_312, module7_9_opt)
+        opt_relu_322 = self.relu_322(opt_add_319)
+        module11_2_opt = self.module11_2(module16_8_opt)
+        opt_conv2d_309 = self.conv2d_309(module16_9_opt)
+        opt_add_320 = P.Add()(module11_2_opt, opt_conv2d_309)
+        opt_add_323 = P.Add()(opt_add_320, module16_10_opt)
+        opt_relu_326 = self.relu_326(opt_add_323)
+        module16_11_opt = self.module16_11(opt_relu_324)
+        module16_12_opt = self.module16_12(opt_relu_322)
+        module16_13_opt = self.module16_13(opt_relu_326)
+        module7_10_opt = self.module7_10(module16_12_opt)
+        opt_add_395 = P.Add()(module16_11_opt, module7_10_opt)
+        module7_11_opt = self.module7_11(module16_13_opt)
+        opt_add_399 = P.Add()(opt_add_395, module7_11_opt)
+        opt_relu_402 = self.relu_402(opt_add_399)
+        opt_conv2d_388 = self.conv2d_388(module16_11_opt)
+        opt_add_393 = P.Add()(opt_conv2d_388, module16_12_opt)
+        module7_12_opt = self.module7_12(module16_13_opt)
+        opt_add_400 = P.Add()(opt_add_393, module7_12_opt)
+        opt_relu_403 = self.relu_403(opt_add_400)
+        module11_3_opt = self.module11_3(module16_11_opt)
+        opt_conv2d_386 = self.conv2d_386(module16_12_opt)
+        opt_add_401 = P.Add()(module11_3_opt, opt_conv2d_386)
+        opt_add_404 = P.Add()(opt_add_401, module16_13_opt)
+        opt_relu_407 = self.relu_407(opt_add_404)
+        module5_4_opt = self.module5_4(opt_relu_407)
+        module16_14_opt = self.module16_14(opt_relu_402)
+        module16_15_opt = self.module16_15(opt_relu_403)
+        module16_16_opt = self.module16_16(opt_relu_407)
+        module16_17_opt = self.module16_17(module5_4_opt)
+        module7_13_opt = self.module7_13(module16_15_opt)
+        opt_add_503 = P.Add()(module16_14_opt, module7_13_opt)
+        module7_14_opt = self.module7_14(module16_16_opt)
+        opt_add_513 = P.Add()(opt_add_503, module7_14_opt)
+        module7_15_opt = self.module7_15(module16_17_opt)
+        opt_add_521 = P.Add()(opt_add_513, module7_15_opt)
+        opt_relu_525 = self.relu_525(opt_add_521)
+        opt_conv2d_484 = self.conv2d_484(module16_14_opt)
+        opt_add_492 = P.Add()(opt_conv2d_484, module16_15_opt)
+        module7_16_opt = self.module7_16(module16_16_opt)
+        opt_add_514 = P.Add()(opt_add_492, module7_16_opt)
+        module7_17_opt = self.module7_17(module16_17_opt)
+        opt_add_522 = P.Add()(opt_add_514, module7_17_opt)
+        opt_relu_526 = self.relu_526(opt_add_522)
+        module11_4_opt = self.module11_4(module16_14_opt)
+        opt_conv2d_488 = self.conv2d_488(module16_15_opt)
+        opt_add_508 = P.Add()(module11_4_opt, opt_conv2d_488)
+        opt_add_515 = P.Add()(opt_add_508, module16_16_opt)
+        module7_18_opt = self.module7_18(module16_17_opt)
+        opt_add_523 = P.Add()(opt_add_515, module7_18_opt)
+        opt_relu_527 = self.relu_527(opt_add_523)
+        module15_4_opt = self.module15_4(module16_14_opt)
+        module11_5_opt = self.module11_5(module16_15_opt)
+        opt_add_520 = P.Add()(module15_4_opt, module11_5_opt)
+        opt_conv2d_500 = self.conv2d_500(module16_16_opt)
+        opt_add_524 = P.Add()(opt_add_520, opt_conv2d_500)
+        opt_add_528 = P.Add()(opt_add_524, module16_17_opt)
+        opt_relu_532 = self.relu_532(opt_add_528)
+        module16_18_opt = self.module16_18(opt_relu_525)
+        module16_19_opt = self.module16_19(opt_relu_526)
+        module16_20_opt = self.module16_20(opt_relu_527)
+        module16_21_opt = self.module16_21(opt_relu_532)
+        module7_19_opt = self.module7_19(module16_19_opt)
+        opt_add_631 = P.Add()(module16_18_opt, module7_19_opt)
+        module7_20_opt = self.module7_20(module16_20_opt)
+        opt_add_639 = P.Add()(opt_add_631, module7_20_opt)
+        module7_21_opt = self.module7_21(module16_21_opt)
+        opt_add_643 = P.Add()(opt_add_639, module7_21_opt)
+        opt_relu_647 = self.relu_647(opt_add_643)
+        opt_conv2d_609 = self.conv2d_609(module16_18_opt)
+        opt_add_619 = P.Add()(opt_conv2d_609, module16_19_opt)
+        module7_22_opt = self.module7_22(module16_20_opt)
+        opt_add_633 = P.Add()(opt_add_619, module7_22_opt)
+        module7_23_opt = self.module7_23(module16_21_opt)
+        opt_add_640 = P.Add()(opt_add_633, module7_23_opt)
+        opt_relu_644 = self.relu_644(opt_add_640)
+        module11_6_opt = self.module11_6(module16_18_opt)
+        opt_conv2d_613 = self.conv2d_613(module16_19_opt)
+        opt_add_637 = P.Add()(module11_6_opt, opt_conv2d_613)
+        opt_add_641 = P.Add()(opt_add_637, module16_20_opt)
+        module7_24_opt = self.module7_24(module16_21_opt)
+        opt_add_645 = P.Add()(opt_add_641, module7_24_opt)
+        opt_relu_649 = self.relu_649(opt_add_645)
+        module15_5_opt = self.module15_5(module16_18_opt)
+        module11_7_opt = self.module11_7(module16_19_opt)
+        opt_add_646 = P.Add()(module15_5_opt, module11_7_opt)
+        opt_conv2d_617 = self.conv2d_617(module16_20_opt)
+        opt_add_650 = P.Add()(opt_add_646, opt_conv2d_617)
+        opt_add_654 = P.Add()(opt_add_650, module16_21_opt)
+        opt_relu_658 = self.relu_658(opt_add_654)
+        module16_22_opt = self.module16_22(opt_relu_647)
+        module16_23_opt = self.module16_23(opt_relu_644)
+        module16_24_opt = self.module16_24(opt_relu_649)
+        module16_25_opt = self.module16_25(opt_relu_658)
+        module7_25_opt = self.module7_25(module16_23_opt)
+        opt_add_745 = P.Add()(module16_22_opt, module7_25_opt)
+        module7_26_opt = self.module7_26(module16_24_opt)
+        opt_add_752 = P.Add()(opt_add_745, module7_26_opt)
+        module7_27_opt = self.module7_27(module16_25_opt)
+        opt_add_764 = P.Add()(opt_add_752, module7_27_opt)
+        opt_relu_768 = self.relu_768(opt_add_764)
+        opt_conv2d_733 = self.conv2d_733(module16_22_opt)
+        opt_add_742 = P.Add()(opt_conv2d_733, module16_23_opt)
+        module7_28_opt = self.module7_28(module16_24_opt)
+        opt_add_753 = P.Add()(opt_add_742, module7_28_opt)
+        module7_29_opt = self.module7_29(module16_25_opt)
+        opt_add_765 = P.Add()(opt_add_753, module7_29_opt)
+        opt_relu_769 = self.relu_769(opt_add_765)
+        module11_8_opt = self.module11_8(module16_22_opt)
+        opt_conv2d_729 = self.conv2d_729(module16_23_opt)
+        opt_add_757 = P.Add()(module11_8_opt, opt_conv2d_729)
+        opt_add_762 = P.Add()(opt_add_757, module16_24_opt)
+        module7_30_opt = self.module7_30(module16_25_opt)
+        opt_add_766 = P.Add()(opt_add_762, module7_30_opt)
+        opt_relu_770 = self.relu_770(opt_add_766)
+        module15_6_opt = self.module15_6(module16_22_opt)
+        module11_9_opt = self.module11_9(module16_23_opt)
+        opt_add_767 = P.Add()(module15_6_opt, module11_9_opt)
+        opt_conv2d_740 = self.conv2d_740(module16_24_opt)
+        opt_add_771 = P.Add()(opt_add_767, opt_conv2d_740)
+        opt_add_778 = P.Add()(opt_add_771, module16_25_opt)
+        opt_relu_782 = self.relu_782(opt_add_778)
+        module15_7_opt = self.module15_7(opt_relu_768)
+        opt_conv2d_773 = self.conv2d_773(opt_relu_768)
+        opt_add_796 = P.Add()(module15_7_opt, opt_conv2d_773)
+        opt_relu_800 = self.relu_800(opt_add_796)
+        module15_8_opt = self.module15_8(opt_relu_769)
+        opt_conv2d_775 = self.conv2d_775(opt_relu_769)
+        opt_add_797 = P.Add()(module15_8_opt, opt_conv2d_775)
+        opt_relu_801 = self.relu_801(opt_add_797)
+        module5_5_opt = self.module5_5(opt_relu_800)
+        opt_add_808 = P.Add()(opt_relu_801, module5_5_opt)
+        module15_9_opt = self.module15_9(opt_relu_770)
+        opt_conv2d_777 = self.conv2d_777(opt_relu_770)
+        opt_add_798 = P.Add()(module15_9_opt, opt_conv2d_777)
+        opt_relu_802 = self.relu_802(opt_add_798)
+        module5_6_opt = self.module5_6(opt_add_808)
+        opt_add_811 = P.Add()(opt_relu_802, module5_6_opt)
+        module15_10_opt = self.module15_10(opt_relu_782)
+        opt_conv2d_787 = self.conv2d_787(opt_relu_782)
+        opt_add_805 = P.Add()(module15_10_opt, opt_conv2d_787)
+        opt_relu_807 = self.relu_807(opt_add_805)
+        module5_7_opt = self.module5_7(opt_add_811)
+        opt_add_814 = P.Add()(opt_relu_807, module5_7_opt)
+        module5_8_opt = self.module5_8(opt_add_814)
+        opt_avgpool2d_817 = self.avgpool2d_817(module5_8_opt)
+        opt_flatten_818 = self.flatten_818(opt_avgpool2d_817)
+        opt_dense_819 = self.dense_819(opt_flatten_818)
+        return opt_dense_819
diff --git a/research/cvtmodel/hrnet/src/hrnet_w48.py b/research/cvtmodel/hrnet/src/hrnet_w48.py
new file mode 100644
index 0000000000000000000000000000000000000000..86a762115aab3125f91a67c64657e1a97dc8bdbe
--- /dev/null
+++ b/research/cvtmodel/hrnet/src/hrnet_w48.py
@@ -0,0 +1,1522 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
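+# Module5: a configurable convolution followed by ReLU; the basic conv-activation unit reused throughout this file.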
+class Module5(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode):
+        super(Module5, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
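+# Module15: bottleneck-style branch of three convolutions (two Module5 conv+ReLU units and a final projection conv);
+# the residual addition is performed by the caller.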
+class Module15(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode, module5_0_conv2d_0_in_channels, module5_0_conv2d_0_out_channels,
+                 module5_0_conv2d_0_kernel_size, module5_0_conv2d_0_stride, module5_0_conv2d_0_padding,
+                 module5_0_conv2d_0_pad_mode, module5_1_conv2d_0_in_channels, module5_1_conv2d_0_out_channels,
+                 module5_1_conv2d_0_kernel_size, module5_1_conv2d_0_stride, module5_1_conv2d_0_padding,
+                 module5_1_conv2d_0_pad_mode):
+        super(Module15, self).__init__()
+        self.module5_0 = Module5(conv2d_0_in_channels=module5_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_0_conv2d_0_stride,
+                                 conv2d_0_padding=module5_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_0_conv2d_0_pad_mode)
+        self.module5_1 = Module5(conv2d_0_in_channels=module5_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_1_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_1_conv2d_0_stride,
+                                 conv2d_0_padding=module5_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_1_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module5_0_opt = self.module5_0(x)
+        module5_1_opt = self.module5_1(module5_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module5_1_opt)
+        return opt_conv2d_0
+
+
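+# Module0: two consecutive basic residual blocks (3x3 conv, ReLU, 3x3 conv, identity add, ReLU).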
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_5_in_channels, conv2d_5_out_channels, conv2d_7_in_channels, conv2d_7_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_4 = nn.ReLU()
+        self.conv2d_5 = nn.Conv2d(in_channels=conv2d_5_in_channels,
+                                  out_channels=conv2d_5_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_6 = nn.ReLU()
+        self.conv2d_7 = nn.Conv2d(in_channels=conv2d_7_in_channels,
+                                  out_channels=conv2d_7_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_9 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_add_3 = P.Add()(opt_conv2d_2, x)
+        opt_relu_4 = self.relu_4(opt_add_3)
+        opt_conv2d_5 = self.conv2d_5(opt_relu_4)
+        opt_relu_6 = self.relu_6(opt_conv2d_5)
+        opt_conv2d_7 = self.conv2d_7(opt_relu_6)
+        opt_add_8 = P.Add()(opt_conv2d_7, opt_relu_4)
+        opt_relu_9 = self.relu_9(opt_add_8)
+        return opt_relu_9
+
+
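+# Module16: two Module0 units chained together, i.e. four basic residual blocks per branch stage.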
+class Module16(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_5_in_channels, module0_0_conv2d_5_out_channels,
+                 module0_0_conv2d_7_in_channels, module0_0_conv2d_7_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_5_in_channels, module0_1_conv2d_5_out_channels, module0_1_conv2d_7_in_channels,
+                 module0_1_conv2d_7_out_channels):
+        super(Module16, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_5_in_channels=module0_0_conv2d_5_in_channels,
+                                 conv2d_5_out_channels=module0_0_conv2d_5_out_channels,
+                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels)
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_5_in_channels=module0_1_conv2d_5_in_channels,
+                                 conv2d_5_out_channels=module0_1_conv2d_5_out_channels,
+                                 conv2d_7_in_channels=module0_1_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_1_conv2d_7_out_channels)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        module0_1_opt = self.module0_1(module0_0_opt)
+        return module0_1_opt
+
+
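+# Module7: 1x1 convolution followed by nearest-neighbor upsampling to a fixed spatial size;
+# used to fuse lower-resolution branches into higher-resolution ones.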
+class Module7(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, resizenearestneighbor_1_size):
+        super(Module7, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.resizenearestneighbor_1 = P.ResizeNearestNeighbor(size=resizenearestneighbor_1_size, align_corners=False)
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_resizenearestneighbor_1 = self.resizenearestneighbor_1(opt_conv2d_0)
+        return opt_resizenearestneighbor_1
+
+
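+# Module11: a Module5 (strided conv + ReLU) followed by another strided 3x3 convolution;
+# used to fuse higher-resolution branches into lower-resolution ones.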
+class Module11(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module5_0_conv2d_0_in_channels,
+                 module5_0_conv2d_0_out_channels, module5_0_conv2d_0_kernel_size, module5_0_conv2d_0_stride,
+                 module5_0_conv2d_0_padding, module5_0_conv2d_0_pad_mode):
+        super(Module11, self).__init__()
+        self.module5_0 = Module5(conv2d_0_in_channels=module5_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_0_conv2d_0_stride,
+                                 conv2d_0_padding=module5_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_0_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module5_0_opt = self.module5_0(x)
+        opt_conv2d_0 = self.conv2d_0(module5_0_opt)
+        return opt_conv2d_0
+
+
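+# MindSporeModel: top-level HRNet-W48 network emitted by MindConverter, built from the stem convolutions,
+# parallel multi-resolution branches, and the fusion modules defined above.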
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=64,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module5_0 = Module5(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_0 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_5 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_11 = nn.ReLU()
+        self.module15_1 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_18 = nn.ReLU()
+        self.module15_2 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_25 = nn.ReLU()
+        self.module15_3 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_32 = nn.ReLU()
+        self.module5_1 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=48,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module5_2 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=96,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_0 = Module16(module0_0_conv2d_0_in_channels=48,
+                                   module0_0_conv2d_0_out_channels=48,
+                                   module0_0_conv2d_2_in_channels=48,
+                                   module0_0_conv2d_2_out_channels=48,
+                                   module0_0_conv2d_5_in_channels=48,
+                                   module0_0_conv2d_5_out_channels=48,
+                                   module0_0_conv2d_7_in_channels=48,
+                                   module0_0_conv2d_7_out_channels=48,
+                                   module0_1_conv2d_0_in_channels=48,
+                                   module0_1_conv2d_0_out_channels=48,
+                                   module0_1_conv2d_2_in_channels=48,
+                                   module0_1_conv2d_2_out_channels=48,
+                                   module0_1_conv2d_5_in_channels=48,
+                                   module0_1_conv2d_5_out_channels=48,
+                                   module0_1_conv2d_7_in_channels=48,
+                                   module0_1_conv2d_7_out_channels=48)
+        self.module16_1 = Module16(module0_0_conv2d_0_in_channels=96,
+                                   module0_0_conv2d_0_out_channels=96,
+                                   module0_0_conv2d_2_in_channels=96,
+                                   module0_0_conv2d_2_out_channels=96,
+                                   module0_0_conv2d_5_in_channels=96,
+                                   module0_0_conv2d_5_out_channels=96,
+                                   module0_0_conv2d_7_in_channels=96,
+                                   module0_0_conv2d_7_out_channels=96,
+                                   module0_1_conv2d_0_in_channels=96,
+                                   module0_1_conv2d_0_out_channels=96,
+                                   module0_1_conv2d_2_in_channels=96,
+                                   module0_1_conv2d_2_out_channels=96,
+                                   module0_1_conv2d_5_in_channels=96,
+                                   module0_1_conv2d_5_out_channels=96,
+                                   module0_1_conv2d_7_in_channels=96,
+                                   module0_1_conv2d_7_out_channels=96)
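+        # Cross-resolution fusion: module7_0 projects the 96-channel branch to 48 channels and resizes it
+        # (nearest neighbour, 56x56), while conv2d_77 downsamples the 48-channel branch to 96 channels (stride 2).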
+        self.module7_0 = Module7(conv2d_0_in_channels=96,
+                                 conv2d_0_out_channels=48,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_85 = nn.ReLU()
+        self.conv2d_77 = nn.Conv2d(in_channels=48,
+                                   out_channels=96,
+                                   kernel_size=(3, 3),
+                                   stride=(2, 2),
+                                   padding=(1, 1, 1, 1),
+                                   pad_mode="pad",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_81 = nn.ReLU()
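+        # Stride-2 conv creating the third, 192-channel branch.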
+        self.module5_3 = Module5(conv2d_0_in_channels=96,
+                                 conv2d_0_out_channels=192,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
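+        # Three-branch exchange units (48/96/192 channels); the pattern repeats four times
+        # (module16_2 ... module16_13), each followed by Module7 upsampling and stride-2 conv
+        # downsampling layers that fuse the branches.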
+        self.module16_2 = Module16(module0_0_conv2d_0_in_channels=48,
+                                   module0_0_conv2d_0_out_channels=48,
+                                   module0_0_conv2d_2_in_channels=48,
+                                   module0_0_conv2d_2_out_channels=48,
+                                   module0_0_conv2d_5_in_channels=48,
+                                   module0_0_conv2d_5_out_channels=48,
+                                   module0_0_conv2d_7_in_channels=48,
+                                   module0_0_conv2d_7_out_channels=48,
+                                   module0_1_conv2d_0_in_channels=48,
+                                   module0_1_conv2d_0_out_channels=48,
+                                   module0_1_conv2d_2_in_channels=48,
+                                   module0_1_conv2d_2_out_channels=48,
+                                   module0_1_conv2d_5_in_channels=48,
+                                   module0_1_conv2d_5_out_channels=48,
+                                   module0_1_conv2d_7_in_channels=48,
+                                   module0_1_conv2d_7_out_channels=48)
+        self.module16_3 = Module16(module0_0_conv2d_0_in_channels=96,
+                                   module0_0_conv2d_0_out_channels=96,
+                                   module0_0_conv2d_2_in_channels=96,
+                                   module0_0_conv2d_2_out_channels=96,
+                                   module0_0_conv2d_5_in_channels=96,
+                                   module0_0_conv2d_5_out_channels=96,
+                                   module0_0_conv2d_7_in_channels=96,
+                                   module0_0_conv2d_7_out_channels=96,
+                                   module0_1_conv2d_0_in_channels=96,
+                                   module0_1_conv2d_0_out_channels=96,
+                                   module0_1_conv2d_2_in_channels=96,
+                                   module0_1_conv2d_2_out_channels=96,
+                                   module0_1_conv2d_5_in_channels=96,
+                                   module0_1_conv2d_5_out_channels=96,
+                                   module0_1_conv2d_7_in_channels=96,
+                                   module0_1_conv2d_7_out_channels=96)
+        self.module16_4 = Module16(module0_0_conv2d_0_in_channels=192,
+                                   module0_0_conv2d_0_out_channels=192,
+                                   module0_0_conv2d_2_in_channels=192,
+                                   module0_0_conv2d_2_out_channels=192,
+                                   module0_0_conv2d_5_in_channels=192,
+                                   module0_0_conv2d_5_out_channels=192,
+                                   module0_0_conv2d_7_in_channels=192,
+                                   module0_0_conv2d_7_out_channels=192,
+                                   module0_1_conv2d_0_in_channels=192,
+                                   module0_1_conv2d_0_out_channels=192,
+                                   module0_1_conv2d_2_in_channels=192,
+                                   module0_1_conv2d_2_out_channels=192,
+                                   module0_1_conv2d_5_in_channels=192,
+                                   module0_1_conv2d_5_out_channels=192,
+                                   module0_1_conv2d_7_in_channels=192,
+                                   module0_1_conv2d_7_out_channels=192)
+        self.module7_1 = Module7(conv2d_0_in_channels=96,
+                                 conv2d_0_out_channels=48,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_2 = Module7(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=48,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_162 = nn.ReLU()
+        self.conv2d_149 = nn.Conv2d(in_channels=48,
+                                    out_channels=96,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_3 = Module7(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=96,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_163 = nn.ReLU()
+        self.module11_0 = Module11(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=192,
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_145 = nn.Conv2d(in_channels=96,
+                                    out_channels=192,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_167 = nn.ReLU()
+        self.module16_5 = Module16(module0_0_conv2d_0_in_channels=48,
+                                   module0_0_conv2d_0_out_channels=48,
+                                   module0_0_conv2d_2_in_channels=48,
+                                   module0_0_conv2d_2_out_channels=48,
+                                   module0_0_conv2d_5_in_channels=48,
+                                   module0_0_conv2d_5_out_channels=48,
+                                   module0_0_conv2d_7_in_channels=48,
+                                   module0_0_conv2d_7_out_channels=48,
+                                   module0_1_conv2d_0_in_channels=48,
+                                   module0_1_conv2d_0_out_channels=48,
+                                   module0_1_conv2d_2_in_channels=48,
+                                   module0_1_conv2d_2_out_channels=48,
+                                   module0_1_conv2d_5_in_channels=48,
+                                   module0_1_conv2d_5_out_channels=48,
+                                   module0_1_conv2d_7_in_channels=48,
+                                   module0_1_conv2d_7_out_channels=48)
+        self.module16_6 = Module16(module0_0_conv2d_0_in_channels=96,
+                                   module0_0_conv2d_0_out_channels=96,
+                                   module0_0_conv2d_2_in_channels=96,
+                                   module0_0_conv2d_2_out_channels=96,
+                                   module0_0_conv2d_5_in_channels=96,
+                                   module0_0_conv2d_5_out_channels=96,
+                                   module0_0_conv2d_7_in_channels=96,
+                                   module0_0_conv2d_7_out_channels=96,
+                                   module0_1_conv2d_0_in_channels=96,
+                                   module0_1_conv2d_0_out_channels=96,
+                                   module0_1_conv2d_2_in_channels=96,
+                                   module0_1_conv2d_2_out_channels=96,
+                                   module0_1_conv2d_5_in_channels=96,
+                                   module0_1_conv2d_5_out_channels=96,
+                                   module0_1_conv2d_7_in_channels=96,
+                                   module0_1_conv2d_7_out_channels=96)
+        self.module16_7 = Module16(module0_0_conv2d_0_in_channels=192,
+                                   module0_0_conv2d_0_out_channels=192,
+                                   module0_0_conv2d_2_in_channels=192,
+                                   module0_0_conv2d_2_out_channels=192,
+                                   module0_0_conv2d_5_in_channels=192,
+                                   module0_0_conv2d_5_out_channels=192,
+                                   module0_0_conv2d_7_in_channels=192,
+                                   module0_0_conv2d_7_out_channels=192,
+                                   module0_1_conv2d_0_in_channels=192,
+                                   module0_1_conv2d_0_out_channels=192,
+                                   module0_1_conv2d_2_in_channels=192,
+                                   module0_1_conv2d_2_out_channels=192,
+                                   module0_1_conv2d_5_in_channels=192,
+                                   module0_1_conv2d_5_out_channels=192,
+                                   module0_1_conv2d_7_in_channels=192,
+                                   module0_1_conv2d_7_out_channels=192)
+        self.module7_4 = Module7(conv2d_0_in_channels=96,
+                                 conv2d_0_out_channels=48,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_5 = Module7(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=48,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_243 = nn.ReLU()
+        self.conv2d_225 = nn.Conv2d(in_channels=48,
+                                    out_channels=96,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_6 = Module7(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=96,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_244 = nn.ReLU()
+        self.module11_1 = Module11(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=192,
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_228 = nn.Conv2d(in_channels=96,
+                                    out_channels=192,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_245 = nn.ReLU()
+        self.module16_8 = Module16(module0_0_conv2d_0_in_channels=48,
+                                   module0_0_conv2d_0_out_channels=48,
+                                   module0_0_conv2d_2_in_channels=48,
+                                   module0_0_conv2d_2_out_channels=48,
+                                   module0_0_conv2d_5_in_channels=48,
+                                   module0_0_conv2d_5_out_channels=48,
+                                   module0_0_conv2d_7_in_channels=48,
+                                   module0_0_conv2d_7_out_channels=48,
+                                   module0_1_conv2d_0_in_channels=48,
+                                   module0_1_conv2d_0_out_channels=48,
+                                   module0_1_conv2d_2_in_channels=48,
+                                   module0_1_conv2d_2_out_channels=48,
+                                   module0_1_conv2d_5_in_channels=48,
+                                   module0_1_conv2d_5_out_channels=48,
+                                   module0_1_conv2d_7_in_channels=48,
+                                   module0_1_conv2d_7_out_channels=48)
+        self.module16_9 = Module16(module0_0_conv2d_0_in_channels=96,
+                                   module0_0_conv2d_0_out_channels=96,
+                                   module0_0_conv2d_2_in_channels=96,
+                                   module0_0_conv2d_2_out_channels=96,
+                                   module0_0_conv2d_5_in_channels=96,
+                                   module0_0_conv2d_5_out_channels=96,
+                                   module0_0_conv2d_7_in_channels=96,
+                                   module0_0_conv2d_7_out_channels=96,
+                                   module0_1_conv2d_0_in_channels=96,
+                                   module0_1_conv2d_0_out_channels=96,
+                                   module0_1_conv2d_2_in_channels=96,
+                                   module0_1_conv2d_2_out_channels=96,
+                                   module0_1_conv2d_5_in_channels=96,
+                                   module0_1_conv2d_5_out_channels=96,
+                                   module0_1_conv2d_7_in_channels=96,
+                                   module0_1_conv2d_7_out_channels=96)
+        self.module16_10 = Module16(module0_0_conv2d_0_in_channels=192,
+                                    module0_0_conv2d_0_out_channels=192,
+                                    module0_0_conv2d_2_in_channels=192,
+                                    module0_0_conv2d_2_out_channels=192,
+                                    module0_0_conv2d_5_in_channels=192,
+                                    module0_0_conv2d_5_out_channels=192,
+                                    module0_0_conv2d_7_in_channels=192,
+                                    module0_0_conv2d_7_out_channels=192,
+                                    module0_1_conv2d_0_in_channels=192,
+                                    module0_1_conv2d_0_out_channels=192,
+                                    module0_1_conv2d_2_in_channels=192,
+                                    module0_1_conv2d_2_out_channels=192,
+                                    module0_1_conv2d_5_in_channels=192,
+                                    module0_1_conv2d_5_out_channels=192,
+                                    module0_1_conv2d_7_in_channels=192,
+                                    module0_1_conv2d_7_out_channels=192)
+        self.module7_7 = Module7(conv2d_0_in_channels=96,
+                                 conv2d_0_out_channels=48,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_8 = Module7(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=48,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_324 = nn.ReLU()
+        self.conv2d_306 = nn.Conv2d(in_channels=48,
+                                    out_channels=96,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_9 = Module7(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=96,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_322 = nn.ReLU()
+        self.module11_2 = Module11(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=192,
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_309 = nn.Conv2d(in_channels=96,
+                                    out_channels=192,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_326 = nn.ReLU()
+        self.module16_11 = Module16(module0_0_conv2d_0_in_channels=48,
+                                    module0_0_conv2d_0_out_channels=48,
+                                    module0_0_conv2d_2_in_channels=48,
+                                    module0_0_conv2d_2_out_channels=48,
+                                    module0_0_conv2d_5_in_channels=48,
+                                    module0_0_conv2d_5_out_channels=48,
+                                    module0_0_conv2d_7_in_channels=48,
+                                    module0_0_conv2d_7_out_channels=48,
+                                    module0_1_conv2d_0_in_channels=48,
+                                    module0_1_conv2d_0_out_channels=48,
+                                    module0_1_conv2d_2_in_channels=48,
+                                    module0_1_conv2d_2_out_channels=48,
+                                    module0_1_conv2d_5_in_channels=48,
+                                    module0_1_conv2d_5_out_channels=48,
+                                    module0_1_conv2d_7_in_channels=48,
+                                    module0_1_conv2d_7_out_channels=48)
+        self.module16_12 = Module16(module0_0_conv2d_0_in_channels=96,
+                                    module0_0_conv2d_0_out_channels=96,
+                                    module0_0_conv2d_2_in_channels=96,
+                                    module0_0_conv2d_2_out_channels=96,
+                                    module0_0_conv2d_5_in_channels=96,
+                                    module0_0_conv2d_5_out_channels=96,
+                                    module0_0_conv2d_7_in_channels=96,
+                                    module0_0_conv2d_7_out_channels=96,
+                                    module0_1_conv2d_0_in_channels=96,
+                                    module0_1_conv2d_0_out_channels=96,
+                                    module0_1_conv2d_2_in_channels=96,
+                                    module0_1_conv2d_2_out_channels=96,
+                                    module0_1_conv2d_5_in_channels=96,
+                                    module0_1_conv2d_5_out_channels=96,
+                                    module0_1_conv2d_7_in_channels=96,
+                                    module0_1_conv2d_7_out_channels=96)
+        self.module16_13 = Module16(module0_0_conv2d_0_in_channels=192,
+                                    module0_0_conv2d_0_out_channels=192,
+                                    module0_0_conv2d_2_in_channels=192,
+                                    module0_0_conv2d_2_out_channels=192,
+                                    module0_0_conv2d_5_in_channels=192,
+                                    module0_0_conv2d_5_out_channels=192,
+                                    module0_0_conv2d_7_in_channels=192,
+                                    module0_0_conv2d_7_out_channels=192,
+                                    module0_1_conv2d_0_in_channels=192,
+                                    module0_1_conv2d_0_out_channels=192,
+                                    module0_1_conv2d_2_in_channels=192,
+                                    module0_1_conv2d_2_out_channels=192,
+                                    module0_1_conv2d_5_in_channels=192,
+                                    module0_1_conv2d_5_out_channels=192,
+                                    module0_1_conv2d_7_in_channels=192,
+                                    module0_1_conv2d_7_out_channels=192)
+        self.module7_10 = Module7(conv2d_0_in_channels=96,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_11 = Module7(conv2d_0_in_channels=192,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_402 = nn.ReLU()
+        self.conv2d_388 = nn.Conv2d(in_channels=48,
+                                    out_channels=96,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_12 = Module7(conv2d_0_in_channels=192,
+                                  conv2d_0_out_channels=96,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_403 = nn.ReLU()
+        self.module11_3 = Module11(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=192,
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_386 = nn.Conv2d(in_channels=96,
+                                    out_channels=192,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_407 = nn.ReLU()
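+        # Stride-2 conv creating the fourth, 384-channel branch.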
+        self.module5_4 = Module5(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=384,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
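+        # Four-branch exchange units (48/96/192/384 channels); the pattern repeats three times
+        # (module16_14 ... module16_25), each followed by Module7 upsampling and strided-conv
+        # downsampling layers that fuse the branches.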
+        self.module16_14 = Module16(module0_0_conv2d_0_in_channels=48,
+                                    module0_0_conv2d_0_out_channels=48,
+                                    module0_0_conv2d_2_in_channels=48,
+                                    module0_0_conv2d_2_out_channels=48,
+                                    module0_0_conv2d_5_in_channels=48,
+                                    module0_0_conv2d_5_out_channels=48,
+                                    module0_0_conv2d_7_in_channels=48,
+                                    module0_0_conv2d_7_out_channels=48,
+                                    module0_1_conv2d_0_in_channels=48,
+                                    module0_1_conv2d_0_out_channels=48,
+                                    module0_1_conv2d_2_in_channels=48,
+                                    module0_1_conv2d_2_out_channels=48,
+                                    module0_1_conv2d_5_in_channels=48,
+                                    module0_1_conv2d_5_out_channels=48,
+                                    module0_1_conv2d_7_in_channels=48,
+                                    module0_1_conv2d_7_out_channels=48)
+        self.module16_15 = Module16(module0_0_conv2d_0_in_channels=96,
+                                    module0_0_conv2d_0_out_channels=96,
+                                    module0_0_conv2d_2_in_channels=96,
+                                    module0_0_conv2d_2_out_channels=96,
+                                    module0_0_conv2d_5_in_channels=96,
+                                    module0_0_conv2d_5_out_channels=96,
+                                    module0_0_conv2d_7_in_channels=96,
+                                    module0_0_conv2d_7_out_channels=96,
+                                    module0_1_conv2d_0_in_channels=96,
+                                    module0_1_conv2d_0_out_channels=96,
+                                    module0_1_conv2d_2_in_channels=96,
+                                    module0_1_conv2d_2_out_channels=96,
+                                    module0_1_conv2d_5_in_channels=96,
+                                    module0_1_conv2d_5_out_channels=96,
+                                    module0_1_conv2d_7_in_channels=96,
+                                    module0_1_conv2d_7_out_channels=96)
+        self.module16_16 = Module16(module0_0_conv2d_0_in_channels=192,
+                                    module0_0_conv2d_0_out_channels=192,
+                                    module0_0_conv2d_2_in_channels=192,
+                                    module0_0_conv2d_2_out_channels=192,
+                                    module0_0_conv2d_5_in_channels=192,
+                                    module0_0_conv2d_5_out_channels=192,
+                                    module0_0_conv2d_7_in_channels=192,
+                                    module0_0_conv2d_7_out_channels=192,
+                                    module0_1_conv2d_0_in_channels=192,
+                                    module0_1_conv2d_0_out_channels=192,
+                                    module0_1_conv2d_2_in_channels=192,
+                                    module0_1_conv2d_2_out_channels=192,
+                                    module0_1_conv2d_5_in_channels=192,
+                                    module0_1_conv2d_5_out_channels=192,
+                                    module0_1_conv2d_7_in_channels=192,
+                                    module0_1_conv2d_7_out_channels=192)
+        self.module16_17 = Module16(module0_0_conv2d_0_in_channels=384,
+                                    module0_0_conv2d_0_out_channels=384,
+                                    module0_0_conv2d_2_in_channels=384,
+                                    module0_0_conv2d_2_out_channels=384,
+                                    module0_0_conv2d_5_in_channels=384,
+                                    module0_0_conv2d_5_out_channels=384,
+                                    module0_0_conv2d_7_in_channels=384,
+                                    module0_0_conv2d_7_out_channels=384,
+                                    module0_1_conv2d_0_in_channels=384,
+                                    module0_1_conv2d_0_out_channels=384,
+                                    module0_1_conv2d_2_in_channels=384,
+                                    module0_1_conv2d_2_out_channels=384,
+                                    module0_1_conv2d_5_in_channels=384,
+                                    module0_1_conv2d_5_out_channels=384,
+                                    module0_1_conv2d_7_in_channels=384,
+                                    module0_1_conv2d_7_out_channels=384)
+        self.module7_13 = Module7(conv2d_0_in_channels=96,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_14 = Module7(conv2d_0_in_channels=192,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_15 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_525 = nn.ReLU()
+        self.conv2d_484 = nn.Conv2d(in_channels=48,
+                                    out_channels=96,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_16 = Module7(conv2d_0_in_channels=192,
+                                  conv2d_0_out_channels=96,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_17 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=96,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_526 = nn.ReLU()
+        self.module11_4 = Module11(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=192,
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_488 = nn.Conv2d(in_channels=96,
+                                    out_channels=192,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_18 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=192,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_527 = nn.ReLU()
+        self.module15_4 = Module15(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=384,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=48,
+                                   module5_1_conv2d_0_out_channels=48,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_5 = Module11(conv2d_0_in_channels=96,
+                                   conv2d_0_out_channels=384,
+                                   module5_0_conv2d_0_in_channels=96,
+                                   module5_0_conv2d_0_out_channels=96,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_500 = nn.Conv2d(in_channels=192,
+                                    out_channels=384,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_532 = nn.ReLU()
+        self.module16_18 = Module16(module0_0_conv2d_0_in_channels=48,
+                                    module0_0_conv2d_0_out_channels=48,
+                                    module0_0_conv2d_2_in_channels=48,
+                                    module0_0_conv2d_2_out_channels=48,
+                                    module0_0_conv2d_5_in_channels=48,
+                                    module0_0_conv2d_5_out_channels=48,
+                                    module0_0_conv2d_7_in_channels=48,
+                                    module0_0_conv2d_7_out_channels=48,
+                                    module0_1_conv2d_0_in_channels=48,
+                                    module0_1_conv2d_0_out_channels=48,
+                                    module0_1_conv2d_2_in_channels=48,
+                                    module0_1_conv2d_2_out_channels=48,
+                                    module0_1_conv2d_5_in_channels=48,
+                                    module0_1_conv2d_5_out_channels=48,
+                                    module0_1_conv2d_7_in_channels=48,
+                                    module0_1_conv2d_7_out_channels=48)
+        self.module16_19 = Module16(module0_0_conv2d_0_in_channels=96,
+                                    module0_0_conv2d_0_out_channels=96,
+                                    module0_0_conv2d_2_in_channels=96,
+                                    module0_0_conv2d_2_out_channels=96,
+                                    module0_0_conv2d_5_in_channels=96,
+                                    module0_0_conv2d_5_out_channels=96,
+                                    module0_0_conv2d_7_in_channels=96,
+                                    module0_0_conv2d_7_out_channels=96,
+                                    module0_1_conv2d_0_in_channels=96,
+                                    module0_1_conv2d_0_out_channels=96,
+                                    module0_1_conv2d_2_in_channels=96,
+                                    module0_1_conv2d_2_out_channels=96,
+                                    module0_1_conv2d_5_in_channels=96,
+                                    module0_1_conv2d_5_out_channels=96,
+                                    module0_1_conv2d_7_in_channels=96,
+                                    module0_1_conv2d_7_out_channels=96)
+        self.module16_20 = Module16(module0_0_conv2d_0_in_channels=192,
+                                    module0_0_conv2d_0_out_channels=192,
+                                    module0_0_conv2d_2_in_channels=192,
+                                    module0_0_conv2d_2_out_channels=192,
+                                    module0_0_conv2d_5_in_channels=192,
+                                    module0_0_conv2d_5_out_channels=192,
+                                    module0_0_conv2d_7_in_channels=192,
+                                    module0_0_conv2d_7_out_channels=192,
+                                    module0_1_conv2d_0_in_channels=192,
+                                    module0_1_conv2d_0_out_channels=192,
+                                    module0_1_conv2d_2_in_channels=192,
+                                    module0_1_conv2d_2_out_channels=192,
+                                    module0_1_conv2d_5_in_channels=192,
+                                    module0_1_conv2d_5_out_channels=192,
+                                    module0_1_conv2d_7_in_channels=192,
+                                    module0_1_conv2d_7_out_channels=192)
+        self.module16_21 = Module16(module0_0_conv2d_0_in_channels=384,
+                                    module0_0_conv2d_0_out_channels=384,
+                                    module0_0_conv2d_2_in_channels=384,
+                                    module0_0_conv2d_2_out_channels=384,
+                                    module0_0_conv2d_5_in_channels=384,
+                                    module0_0_conv2d_5_out_channels=384,
+                                    module0_0_conv2d_7_in_channels=384,
+                                    module0_0_conv2d_7_out_channels=384,
+                                    module0_1_conv2d_0_in_channels=384,
+                                    module0_1_conv2d_0_out_channels=384,
+                                    module0_1_conv2d_2_in_channels=384,
+                                    module0_1_conv2d_2_out_channels=384,
+                                    module0_1_conv2d_5_in_channels=384,
+                                    module0_1_conv2d_5_out_channels=384,
+                                    module0_1_conv2d_7_in_channels=384,
+                                    module0_1_conv2d_7_out_channels=384)
+        self.module7_19 = Module7(conv2d_0_in_channels=96,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_20 = Module7(conv2d_0_in_channels=192,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_21 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_647 = nn.ReLU()
+        self.conv2d_609 = nn.Conv2d(in_channels=48,
+                                    out_channels=96,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_22 = Module7(conv2d_0_in_channels=192,
+                                  conv2d_0_out_channels=96,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_23 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=96,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_644 = nn.ReLU()
+        self.module11_6 = Module11(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=192,
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_613 = nn.Conv2d(in_channels=96,
+                                    out_channels=192,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_24 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=192,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_649 = nn.ReLU()
+        self.module15_5 = Module15(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=384,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=48,
+                                   module5_1_conv2d_0_out_channels=48,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_7 = Module11(conv2d_0_in_channels=96,
+                                   conv2d_0_out_channels=384,
+                                   module5_0_conv2d_0_in_channels=96,
+                                   module5_0_conv2d_0_out_channels=96,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_617 = nn.Conv2d(in_channels=192,
+                                    out_channels=384,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_658 = nn.ReLU()
+        self.module16_22 = Module16(module0_0_conv2d_0_in_channels=48,
+                                    module0_0_conv2d_0_out_channels=48,
+                                    module0_0_conv2d_2_in_channels=48,
+                                    module0_0_conv2d_2_out_channels=48,
+                                    module0_0_conv2d_5_in_channels=48,
+                                    module0_0_conv2d_5_out_channels=48,
+                                    module0_0_conv2d_7_in_channels=48,
+                                    module0_0_conv2d_7_out_channels=48,
+                                    module0_1_conv2d_0_in_channels=48,
+                                    module0_1_conv2d_0_out_channels=48,
+                                    module0_1_conv2d_2_in_channels=48,
+                                    module0_1_conv2d_2_out_channels=48,
+                                    module0_1_conv2d_5_in_channels=48,
+                                    module0_1_conv2d_5_out_channels=48,
+                                    module0_1_conv2d_7_in_channels=48,
+                                    module0_1_conv2d_7_out_channels=48)
+        self.module16_23 = Module16(module0_0_conv2d_0_in_channels=96,
+                                    module0_0_conv2d_0_out_channels=96,
+                                    module0_0_conv2d_2_in_channels=96,
+                                    module0_0_conv2d_2_out_channels=96,
+                                    module0_0_conv2d_5_in_channels=96,
+                                    module0_0_conv2d_5_out_channels=96,
+                                    module0_0_conv2d_7_in_channels=96,
+                                    module0_0_conv2d_7_out_channels=96,
+                                    module0_1_conv2d_0_in_channels=96,
+                                    module0_1_conv2d_0_out_channels=96,
+                                    module0_1_conv2d_2_in_channels=96,
+                                    module0_1_conv2d_2_out_channels=96,
+                                    module0_1_conv2d_5_in_channels=96,
+                                    module0_1_conv2d_5_out_channels=96,
+                                    module0_1_conv2d_7_in_channels=96,
+                                    module0_1_conv2d_7_out_channels=96)
+        self.module16_24 = Module16(module0_0_conv2d_0_in_channels=192,
+                                    module0_0_conv2d_0_out_channels=192,
+                                    module0_0_conv2d_2_in_channels=192,
+                                    module0_0_conv2d_2_out_channels=192,
+                                    module0_0_conv2d_5_in_channels=192,
+                                    module0_0_conv2d_5_out_channels=192,
+                                    module0_0_conv2d_7_in_channels=192,
+                                    module0_0_conv2d_7_out_channels=192,
+                                    module0_1_conv2d_0_in_channels=192,
+                                    module0_1_conv2d_0_out_channels=192,
+                                    module0_1_conv2d_2_in_channels=192,
+                                    module0_1_conv2d_2_out_channels=192,
+                                    module0_1_conv2d_5_in_channels=192,
+                                    module0_1_conv2d_5_out_channels=192,
+                                    module0_1_conv2d_7_in_channels=192,
+                                    module0_1_conv2d_7_out_channels=192)
+        self.module16_25 = Module16(module0_0_conv2d_0_in_channels=384,
+                                    module0_0_conv2d_0_out_channels=384,
+                                    module0_0_conv2d_2_in_channels=384,
+                                    module0_0_conv2d_2_out_channels=384,
+                                    module0_0_conv2d_5_in_channels=384,
+                                    module0_0_conv2d_5_out_channels=384,
+                                    module0_0_conv2d_7_in_channels=384,
+                                    module0_0_conv2d_7_out_channels=384,
+                                    module0_1_conv2d_0_in_channels=384,
+                                    module0_1_conv2d_0_out_channels=384,
+                                    module0_1_conv2d_2_in_channels=384,
+                                    module0_1_conv2d_2_out_channels=384,
+                                    module0_1_conv2d_5_in_channels=384,
+                                    module0_1_conv2d_5_out_channels=384,
+                                    module0_1_conv2d_7_in_channels=384,
+                                    module0_1_conv2d_7_out_channels=384)
+        self.module7_25 = Module7(conv2d_0_in_channels=96,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_26 = Module7(conv2d_0_in_channels=192,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_27 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=48,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_768 = nn.ReLU()
+        self.conv2d_733 = nn.Conv2d(in_channels=48,
+                                    out_channels=96,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_28 = Module7(conv2d_0_in_channels=192,
+                                  conv2d_0_out_channels=96,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_29 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=96,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_769 = nn.ReLU()
+        self.module11_8 = Module11(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=192,
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_729 = nn.Conv2d(in_channels=96,
+                                    out_channels=192,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_30 = Module7(conv2d_0_in_channels=384,
+                                  conv2d_0_out_channels=192,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_770 = nn.ReLU()
+        self.module15_6 = Module15(conv2d_0_in_channels=48,
+                                   conv2d_0_out_channels=384,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=48,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=48,
+                                   module5_1_conv2d_0_out_channels=48,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_9 = Module11(conv2d_0_in_channels=96,
+                                   conv2d_0_out_channels=384,
+                                   module5_0_conv2d_0_in_channels=96,
+                                   module5_0_conv2d_0_out_channels=96,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_740 = nn.Conv2d(in_channels=192,
+                                    out_channels=384,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_782 = nn.ReLU()
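+        # Head: Module15 blocks widen each branch output (48->128, 96->256, 192->512, 384->1024),
+        # 1x1 convs (conv2d_773/775/777) project the branch inputs to the same widths, and the
+        # stride-2 Module5 convs (128->256, 256->512) combine the widened feature maps step by step.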
+        self.module15_7 = Module15(conv2d_0_in_channels=32,
+                                   conv2d_0_out_channels=128,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=48,
+                                   module5_0_conv2d_0_out_channels=32,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=32,
+                                   module5_1_conv2d_0_out_channels=32,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_773 = nn.Conv2d(in_channels=48,
+                                    out_channels=128,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_800 = nn.ReLU()
+        self.module15_8 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=96,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_775 = nn.Conv2d(in_channels=96,
+                                    out_channels=256,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_801 = nn.ReLU()
+        self.module5_5 = Module5(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_9 = Module15(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=192,
+                                   module5_0_conv2d_0_out_channels=128,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=128,
+                                   module5_1_conv2d_0_out_channels=128,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_777 = nn.Conv2d(in_channels=192,
+                                    out_channels=512,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_802 = nn.ReLU()
+        self.module5_6 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_10 = Module15(conv2d_0_in_channels=256,
+                                    conv2d_0_out_channels=1024,
+                                    conv2d_0_kernel_size=(1, 1),
+                                    conv2d_0_stride=(1, 1),
+                                    conv2d_0_padding=0,
+                                    conv2d_0_pad_mode="valid",
+                                    module5_0_conv2d_0_in_channels=384,
+                                    module5_0_conv2d_0_out_channels=256,
+                                    module5_0_conv2d_0_kernel_size=(1, 1),
+                                    module5_0_conv2d_0_stride=(1, 1),
+                                    module5_0_conv2d_0_padding=0,
+                                    module5_0_conv2d_0_pad_mode="valid",
+                                    module5_1_conv2d_0_in_channels=256,
+                                    module5_1_conv2d_0_out_channels=256,
+                                    module5_1_conv2d_0_kernel_size=(3, 3),
+                                    module5_1_conv2d_0_stride=(1, 1),
+                                    module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                    module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_787 = nn.Conv2d(in_channels=384,
+                                    out_channels=1024,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_807 = nn.ReLU()
+        self.module5_7 = Module5(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=1024,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module5_8 = Module5(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=2048,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid")
+        self.avgpool2d_817 = nn.AvgPool2d(kernel_size=(7, 7))
+        self.flatten_818 = nn.Flatten()
+        self.dense_819 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
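+    # Forward pass as emitted by MindConverter: the four resolution branches of
+    # this HRNet variant (48/96/192/384 channels) are refined by the ModuleXX
+    # blocks and fused with P.Add(), after which the classification head
+    # (the Module15/Module5 stages, a 7x7 average pool, Flatten and a
+    # 2048->1000 Dense layer) produces the logits.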
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module5_0_opt = self.module5_0(opt_relu_1)
+        module15_0_opt = self.module15_0(module5_0_opt)
+        opt_conv2d_5 = self.conv2d_5(module5_0_opt)
+        opt_add_10 = P.Add()(module15_0_opt, opt_conv2d_5)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module15_1_opt = self.module15_1(opt_relu_11)
+        opt_add_17 = P.Add()(module15_1_opt, opt_relu_11)
+        opt_relu_18 = self.relu_18(opt_add_17)
+        module15_2_opt = self.module15_2(opt_relu_18)
+        opt_add_24 = P.Add()(module15_2_opt, opt_relu_18)
+        opt_relu_25 = self.relu_25(opt_add_24)
+        module15_3_opt = self.module15_3(opt_relu_25)
+        opt_add_31 = P.Add()(module15_3_opt, opt_relu_25)
+        opt_relu_32 = self.relu_32(opt_add_31)
+        module5_1_opt = self.module5_1(opt_relu_32)
+        module5_2_opt = self.module5_2(opt_relu_32)
+        module16_0_opt = self.module16_0(module5_1_opt)
+        module16_1_opt = self.module16_1(module5_2_opt)
+        module7_0_opt = self.module7_0(module16_1_opt)
+        opt_add_82 = P.Add()(module16_0_opt, module7_0_opt)
+        opt_relu_85 = self.relu_85(opt_add_82)
+        opt_conv2d_77 = self.conv2d_77(module16_0_opt)
+        opt_add_79 = P.Add()(opt_conv2d_77, module16_1_opt)
+        opt_relu_81 = self.relu_81(opt_add_79)
+        module5_3_opt = self.module5_3(opt_relu_81)
+        module16_2_opt = self.module16_2(opt_relu_85)
+        module16_3_opt = self.module16_3(opt_relu_81)
+        module16_4_opt = self.module16_4(module5_3_opt)
+        module7_1_opt = self.module7_1(module16_3_opt)
+        opt_add_153 = P.Add()(module16_2_opt, module7_1_opt)
+        module7_2_opt = self.module7_2(module16_4_opt)
+        opt_add_159 = P.Add()(opt_add_153, module7_2_opt)
+        opt_relu_162 = self.relu_162(opt_add_159)
+        opt_conv2d_149 = self.conv2d_149(module16_2_opt)
+        opt_add_154 = P.Add()(opt_conv2d_149, module16_3_opt)
+        module7_3_opt = self.module7_3(module16_4_opt)
+        opt_add_160 = P.Add()(opt_add_154, module7_3_opt)
+        opt_relu_163 = self.relu_163(opt_add_160)
+        module11_0_opt = self.module11_0(module16_2_opt)
+        opt_conv2d_145 = self.conv2d_145(module16_3_opt)
+        opt_add_161 = P.Add()(module11_0_opt, opt_conv2d_145)
+        opt_add_164 = P.Add()(opt_add_161, module16_4_opt)
+        opt_relu_167 = self.relu_167(opt_add_164)
+        module16_5_opt = self.module16_5(opt_relu_162)
+        module16_6_opt = self.module16_6(opt_relu_163)
+        module16_7_opt = self.module16_7(opt_relu_167)
+        module7_4_opt = self.module7_4(module16_6_opt)
+        opt_add_236 = P.Add()(module16_5_opt, module7_4_opt)
+        module7_5_opt = self.module7_5(module16_7_opt)
+        opt_add_240 = P.Add()(opt_add_236, module7_5_opt)
+        opt_relu_243 = self.relu_243(opt_add_240)
+        opt_conv2d_225 = self.conv2d_225(module16_5_opt)
+        opt_add_230 = P.Add()(opt_conv2d_225, module16_6_opt)
+        module7_6_opt = self.module7_6(module16_7_opt)
+        opt_add_241 = P.Add()(opt_add_230, module7_6_opt)
+        opt_relu_244 = self.relu_244(opt_add_241)
+        module11_1_opt = self.module11_1(module16_5_opt)
+        opt_conv2d_228 = self.conv2d_228(module16_6_opt)
+        opt_add_239 = P.Add()(module11_1_opt, opt_conv2d_228)
+        opt_add_242 = P.Add()(opt_add_239, module16_7_opt)
+        opt_relu_245 = self.relu_245(opt_add_242)
+        module16_8_opt = self.module16_8(opt_relu_243)
+        module16_9_opt = self.module16_9(opt_relu_244)
+        module16_10_opt = self.module16_10(opt_relu_245)
+        module7_7_opt = self.module7_7(module16_9_opt)
+        opt_add_318 = P.Add()(module16_8_opt, module7_7_opt)
+        module7_8_opt = self.module7_8(module16_10_opt)
+        opt_add_321 = P.Add()(opt_add_318, module7_8_opt)
+        opt_relu_324 = self.relu_324(opt_add_321)
+        opt_conv2d_306 = self.conv2d_306(module16_8_opt)
+        opt_add_312 = P.Add()(opt_conv2d_306, module16_9_opt)
+        module7_9_opt = self.module7_9(module16_10_opt)
+        opt_add_319 = P.Add()(opt_add_312, module7_9_opt)
+        opt_relu_322 = self.relu_322(opt_add_319)
+        module11_2_opt = self.module11_2(module16_8_opt)
+        opt_conv2d_309 = self.conv2d_309(module16_9_opt)
+        opt_add_320 = P.Add()(module11_2_opt, opt_conv2d_309)
+        opt_add_323 = P.Add()(opt_add_320, module16_10_opt)
+        opt_relu_326 = self.relu_326(opt_add_323)
+        module16_11_opt = self.module16_11(opt_relu_324)
+        module16_12_opt = self.module16_12(opt_relu_322)
+        module16_13_opt = self.module16_13(opt_relu_326)
+        module7_10_opt = self.module7_10(module16_12_opt)
+        opt_add_395 = P.Add()(module16_11_opt, module7_10_opt)
+        module7_11_opt = self.module7_11(module16_13_opt)
+        opt_add_399 = P.Add()(opt_add_395, module7_11_opt)
+        opt_relu_402 = self.relu_402(opt_add_399)
+        opt_conv2d_388 = self.conv2d_388(module16_11_opt)
+        opt_add_393 = P.Add()(opt_conv2d_388, module16_12_opt)
+        module7_12_opt = self.module7_12(module16_13_opt)
+        opt_add_400 = P.Add()(opt_add_393, module7_12_opt)
+        opt_relu_403 = self.relu_403(opt_add_400)
+        module11_3_opt = self.module11_3(module16_11_opt)
+        opt_conv2d_386 = self.conv2d_386(module16_12_opt)
+        opt_add_401 = P.Add()(module11_3_opt, opt_conv2d_386)
+        opt_add_404 = P.Add()(opt_add_401, module16_13_opt)
+        opt_relu_407 = self.relu_407(opt_add_404)
+        module5_4_opt = self.module5_4(opt_relu_407)
+        module16_14_opt = self.module16_14(opt_relu_402)
+        module16_15_opt = self.module16_15(opt_relu_403)
+        module16_16_opt = self.module16_16(opt_relu_407)
+        module16_17_opt = self.module16_17(module5_4_opt)
+        module7_13_opt = self.module7_13(module16_15_opt)
+        opt_add_503 = P.Add()(module16_14_opt, module7_13_opt)
+        module7_14_opt = self.module7_14(module16_16_opt)
+        opt_add_513 = P.Add()(opt_add_503, module7_14_opt)
+        module7_15_opt = self.module7_15(module16_17_opt)
+        opt_add_521 = P.Add()(opt_add_513, module7_15_opt)
+        opt_relu_525 = self.relu_525(opt_add_521)
+        opt_conv2d_484 = self.conv2d_484(module16_14_opt)
+        opt_add_492 = P.Add()(opt_conv2d_484, module16_15_opt)
+        module7_16_opt = self.module7_16(module16_16_opt)
+        opt_add_514 = P.Add()(opt_add_492, module7_16_opt)
+        module7_17_opt = self.module7_17(module16_17_opt)
+        opt_add_522 = P.Add()(opt_add_514, module7_17_opt)
+        opt_relu_526 = self.relu_526(opt_add_522)
+        module11_4_opt = self.module11_4(module16_14_opt)
+        opt_conv2d_488 = self.conv2d_488(module16_15_opt)
+        opt_add_508 = P.Add()(module11_4_opt, opt_conv2d_488)
+        opt_add_515 = P.Add()(opt_add_508, module16_16_opt)
+        module7_18_opt = self.module7_18(module16_17_opt)
+        opt_add_523 = P.Add()(opt_add_515, module7_18_opt)
+        opt_relu_527 = self.relu_527(opt_add_523)
+        module15_4_opt = self.module15_4(module16_14_opt)
+        module11_5_opt = self.module11_5(module16_15_opt)
+        opt_add_520 = P.Add()(module15_4_opt, module11_5_opt)
+        opt_conv2d_500 = self.conv2d_500(module16_16_opt)
+        opt_add_524 = P.Add()(opt_add_520, opt_conv2d_500)
+        opt_add_528 = P.Add()(opt_add_524, module16_17_opt)
+        opt_relu_532 = self.relu_532(opt_add_528)
+        module16_18_opt = self.module16_18(opt_relu_525)
+        module16_19_opt = self.module16_19(opt_relu_526)
+        module16_20_opt = self.module16_20(opt_relu_527)
+        module16_21_opt = self.module16_21(opt_relu_532)
+        module7_19_opt = self.module7_19(module16_19_opt)
+        opt_add_631 = P.Add()(module16_18_opt, module7_19_opt)
+        module7_20_opt = self.module7_20(module16_20_opt)
+        opt_add_639 = P.Add()(opt_add_631, module7_20_opt)
+        module7_21_opt = self.module7_21(module16_21_opt)
+        opt_add_643 = P.Add()(opt_add_639, module7_21_opt)
+        opt_relu_647 = self.relu_647(opt_add_643)
+        opt_conv2d_609 = self.conv2d_609(module16_18_opt)
+        opt_add_619 = P.Add()(opt_conv2d_609, module16_19_opt)
+        module7_22_opt = self.module7_22(module16_20_opt)
+        opt_add_633 = P.Add()(opt_add_619, module7_22_opt)
+        module7_23_opt = self.module7_23(module16_21_opt)
+        opt_add_640 = P.Add()(opt_add_633, module7_23_opt)
+        opt_relu_644 = self.relu_644(opt_add_640)
+        module11_6_opt = self.module11_6(module16_18_opt)
+        opt_conv2d_613 = self.conv2d_613(module16_19_opt)
+        opt_add_637 = P.Add()(module11_6_opt, opt_conv2d_613)
+        opt_add_641 = P.Add()(opt_add_637, module16_20_opt)
+        module7_24_opt = self.module7_24(module16_21_opt)
+        opt_add_645 = P.Add()(opt_add_641, module7_24_opt)
+        opt_relu_649 = self.relu_649(opt_add_645)
+        module15_5_opt = self.module15_5(module16_18_opt)
+        module11_7_opt = self.module11_7(module16_19_opt)
+        opt_add_646 = P.Add()(module15_5_opt, module11_7_opt)
+        opt_conv2d_617 = self.conv2d_617(module16_20_opt)
+        opt_add_650 = P.Add()(opt_add_646, opt_conv2d_617)
+        opt_add_654 = P.Add()(opt_add_650, module16_21_opt)
+        opt_relu_658 = self.relu_658(opt_add_654)
+        module16_22_opt = self.module16_22(opt_relu_647)
+        module16_23_opt = self.module16_23(opt_relu_644)
+        module16_24_opt = self.module16_24(opt_relu_649)
+        module16_25_opt = self.module16_25(opt_relu_658)
+        module7_25_opt = self.module7_25(module16_23_opt)
+        opt_add_745 = P.Add()(module16_22_opt, module7_25_opt)
+        module7_26_opt = self.module7_26(module16_24_opt)
+        opt_add_752 = P.Add()(opt_add_745, module7_26_opt)
+        module7_27_opt = self.module7_27(module16_25_opt)
+        opt_add_764 = P.Add()(opt_add_752, module7_27_opt)
+        opt_relu_768 = self.relu_768(opt_add_764)
+        opt_conv2d_733 = self.conv2d_733(module16_22_opt)
+        opt_add_742 = P.Add()(opt_conv2d_733, module16_23_opt)
+        module7_28_opt = self.module7_28(module16_24_opt)
+        opt_add_753 = P.Add()(opt_add_742, module7_28_opt)
+        module7_29_opt = self.module7_29(module16_25_opt)
+        opt_add_765 = P.Add()(opt_add_753, module7_29_opt)
+        opt_relu_769 = self.relu_769(opt_add_765)
+        module11_8_opt = self.module11_8(module16_22_opt)
+        opt_conv2d_729 = self.conv2d_729(module16_23_opt)
+        opt_add_757 = P.Add()(module11_8_opt, opt_conv2d_729)
+        opt_add_762 = P.Add()(opt_add_757, module16_24_opt)
+        module7_30_opt = self.module7_30(module16_25_opt)
+        opt_add_766 = P.Add()(opt_add_762, module7_30_opt)
+        opt_relu_770 = self.relu_770(opt_add_766)
+        module15_6_opt = self.module15_6(module16_22_opt)
+        module11_9_opt = self.module11_9(module16_23_opt)
+        opt_add_767 = P.Add()(module15_6_opt, module11_9_opt)
+        opt_conv2d_740 = self.conv2d_740(module16_24_opt)
+        opt_add_771 = P.Add()(opt_add_767, opt_conv2d_740)
+        opt_add_778 = P.Add()(opt_add_771, module16_25_opt)
+        opt_relu_782 = self.relu_782(opt_add_778)
+        module15_7_opt = self.module15_7(opt_relu_768)
+        opt_conv2d_773 = self.conv2d_773(opt_relu_768)
+        opt_add_796 = P.Add()(module15_7_opt, opt_conv2d_773)
+        opt_relu_800 = self.relu_800(opt_add_796)
+        module15_8_opt = self.module15_8(opt_relu_769)
+        opt_conv2d_775 = self.conv2d_775(opt_relu_769)
+        opt_add_797 = P.Add()(module15_8_opt, opt_conv2d_775)
+        opt_relu_801 = self.relu_801(opt_add_797)
+        module5_5_opt = self.module5_5(opt_relu_800)
+        opt_add_808 = P.Add()(opt_relu_801, module5_5_opt)
+        module15_9_opt = self.module15_9(opt_relu_770)
+        opt_conv2d_777 = self.conv2d_777(opt_relu_770)
+        opt_add_798 = P.Add()(module15_9_opt, opt_conv2d_777)
+        opt_relu_802 = self.relu_802(opt_add_798)
+        module5_6_opt = self.module5_6(opt_add_808)
+        opt_add_811 = P.Add()(opt_relu_802, module5_6_opt)
+        module15_10_opt = self.module15_10(opt_relu_782)
+        opt_conv2d_787 = self.conv2d_787(opt_relu_782)
+        opt_add_805 = P.Add()(module15_10_opt, opt_conv2d_787)
+        opt_relu_807 = self.relu_807(opt_add_805)
+        module5_7_opt = self.module5_7(opt_add_811)
+        opt_add_814 = P.Add()(opt_relu_807, module5_7_opt)
+        module5_8_opt = self.module5_8(opt_add_814)
+        opt_avgpool2d_817 = self.avgpool2d_817(module5_8_opt)
+        opt_flatten_818 = self.flatten_818(opt_avgpool2d_817)
+        opt_dense_819 = self.dense_819(opt_flatten_818)
+        return opt_dense_819
diff --git a/research/cvtmodel/hrnet/src/hrnet_w64.py b/research/cvtmodel/hrnet/src/hrnet_w64.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e58049d56418364675f8f6e541e053f7ad6ba66
--- /dev/null
+++ b/research/cvtmodel/hrnet/src/hrnet_w64.py
@@ -0,0 +1,1522 @@
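+# HRNet-W64 image classification network, auto-generated by MindConverter from
+# the corresponding torch model and used here for full-precision inference
+# validation of the converted checkpoint.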
+import mindspore.ops as P
+from mindspore import nn
+
+
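+# Module5: a single Conv2d followed by ReLU. Every convolution in this file
+# carries has_bias=True, presumably because BatchNorm was folded into the
+# convolution weights during conversion.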
+class Module5(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode):
+        super(Module5, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
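+# Module15: bottleneck-style residual branch -- two Conv2d+ReLU stages (Module5)
+# followed by a final expansion Conv2d; the identity/projection shortcut and
+# the closing ReLU are applied by the caller.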
+class Module15(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode, module5_0_conv2d_0_in_channels, module5_0_conv2d_0_out_channels,
+                 module5_0_conv2d_0_kernel_size, module5_0_conv2d_0_stride, module5_0_conv2d_0_padding,
+                 module5_0_conv2d_0_pad_mode, module5_1_conv2d_0_in_channels, module5_1_conv2d_0_out_channels,
+                 module5_1_conv2d_0_kernel_size, module5_1_conv2d_0_stride, module5_1_conv2d_0_padding,
+                 module5_1_conv2d_0_pad_mode):
+        super(Module15, self).__init__()
+        self.module5_0 = Module5(conv2d_0_in_channels=module5_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_0_conv2d_0_stride,
+                                 conv2d_0_padding=module5_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_0_conv2d_0_pad_mode)
+        self.module5_1 = Module5(conv2d_0_in_channels=module5_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_1_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_1_conv2d_0_stride,
+                                 conv2d_0_padding=module5_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_1_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module5_0_opt = self.module5_0(x)
+        module5_1_opt = self.module5_1(module5_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module5_1_opt)
+        return opt_conv2d_0
+
+
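+# Module0: two stacked BasicBlock-style residual units
+# (3x3 Conv -> ReLU -> 3x3 Conv -> add identity -> ReLU, twice).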
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_5_in_channels, conv2d_5_out_channels, conv2d_7_in_channels, conv2d_7_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_4 = nn.ReLU()
+        self.conv2d_5 = nn.Conv2d(in_channels=conv2d_5_in_channels,
+                                  out_channels=conv2d_5_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_6 = nn.ReLU()
+        self.conv2d_7 = nn.Conv2d(in_channels=conv2d_7_in_channels,
+                                  out_channels=conv2d_7_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_9 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_add_3 = P.Add()(opt_conv2d_2, x)
+        opt_relu_4 = self.relu_4(opt_add_3)
+        opt_conv2d_5 = self.conv2d_5(opt_relu_4)
+        opt_relu_6 = self.relu_6(opt_conv2d_5)
+        opt_conv2d_7 = self.conv2d_7(opt_relu_6)
+        opt_add_8 = P.Add()(opt_conv2d_7, opt_relu_4)
+        opt_relu_9 = self.relu_9(opt_add_8)
+        return opt_relu_9
+
+
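+# Module16: chains two Module0 blocks, i.e. four residual units on one
+# resolution branch.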
+class Module16(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_5_in_channels, module0_0_conv2d_5_out_channels,
+                 module0_0_conv2d_7_in_channels, module0_0_conv2d_7_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_5_in_channels, module0_1_conv2d_5_out_channels, module0_1_conv2d_7_in_channels,
+                 module0_1_conv2d_7_out_channels):
+        super(Module16, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_5_in_channels=module0_0_conv2d_5_in_channels,
+                                 conv2d_5_out_channels=module0_0_conv2d_5_out_channels,
+                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels)
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_5_in_channels=module0_1_conv2d_5_in_channels,
+                                 conv2d_5_out_channels=module0_1_conv2d_5_out_channels,
+                                 conv2d_7_in_channels=module0_1_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_1_conv2d_7_out_channels)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        module0_1_opt = self.module0_1(module0_0_opt)
+        return module0_1_opt
+
+
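+# Module7: 1x1 Conv2d followed by nearest-neighbor upsampling to a fixed size;
+# used to lift a lower-resolution branch to a higher resolution before the
+# branches are fused with P.Add().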
+class Module7(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, resizenearestneighbor_1_size):
+        super(Module7, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.resizenearestneighbor_1 = P.ResizeNearestNeighbor(size=resizenearestneighbor_1_size, align_corners=False)
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_resizenearestneighbor_1 = self.resizenearestneighbor_1(opt_conv2d_0)
+        return opt_resizenearestneighbor_1
+
+
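+# Module11: Conv2d+ReLU (Module5) followed by a stride-2 3x3 Conv2d; in the
+# instantiations here the Module5 convolution also uses stride 2, so a branch
+# is downsampled 4x before fusion into a lower-resolution branch.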
+class Module11(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module5_0_conv2d_0_in_channels,
+                 module5_0_conv2d_0_out_channels, module5_0_conv2d_0_kernel_size, module5_0_conv2d_0_stride,
+                 module5_0_conv2d_0_padding, module5_0_conv2d_0_pad_mode):
+        super(Module11, self).__init__()
+        self.module5_0 = Module5(conv2d_0_in_channels=module5_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module5_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module5_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module5_0_conv2d_0_stride,
+                                 conv2d_0_padding=module5_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module5_0_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module5_0_opt = self.module5_0(x)
+        opt_conv2d_0 = self.conv2d_0(module5_0_opt)
+        return opt_conv2d_0
+
+
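+# MindSporeModel: the full HRNet-W64 classifier (stem, multi-resolution stages,
+# classification head). Attribute-name suffixes follow the node ids of the
+# traced source graph. A minimal usage sketch, assuming a converted checkpoint
+# file such as "hrnet_w64.ckpt" and 224x224 inputs:
+#
+#     import numpy as np
+#     import mindspore as ms
+#     net = MindSporeModel()
+#     ms.load_param_into_net(net, ms.load_checkpoint("hrnet_w64.ckpt"))
+#     logits = net(ms.Tensor(np.zeros((1, 3, 224, 224), np.float32)))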
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=64,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module5_0 = Module5(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_0 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_5 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_11 = nn.ReLU()
+        self.module15_1 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_18 = nn.ReLU()
+        self.module15_2 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_25 = nn.ReLU()
+        self.module15_3 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.relu_32 = nn.ReLU()
+        self.module5_1 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module5_2 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_0 = Module16(module0_0_conv2d_0_in_channels=64,
+                                   module0_0_conv2d_0_out_channels=64,
+                                   module0_0_conv2d_2_in_channels=64,
+                                   module0_0_conv2d_2_out_channels=64,
+                                   module0_0_conv2d_5_in_channels=64,
+                                   module0_0_conv2d_5_out_channels=64,
+                                   module0_0_conv2d_7_in_channels=64,
+                                   module0_0_conv2d_7_out_channels=64,
+                                   module0_1_conv2d_0_in_channels=64,
+                                   module0_1_conv2d_0_out_channels=64,
+                                   module0_1_conv2d_2_in_channels=64,
+                                   module0_1_conv2d_2_out_channels=64,
+                                   module0_1_conv2d_5_in_channels=64,
+                                   module0_1_conv2d_5_out_channels=64,
+                                   module0_1_conv2d_7_in_channels=64,
+                                   module0_1_conv2d_7_out_channels=64)
+        self.module16_1 = Module16(module0_0_conv2d_0_in_channels=128,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_5_in_channels=128,
+                                   module0_0_conv2d_5_out_channels=128,
+                                   module0_0_conv2d_7_in_channels=128,
+                                   module0_0_conv2d_7_out_channels=128,
+                                   module0_1_conv2d_0_in_channels=128,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_5_in_channels=128,
+                                   module0_1_conv2d_5_out_channels=128,
+                                   module0_1_conv2d_7_in_channels=128,
+                                   module0_1_conv2d_7_out_channels=128)
+        self.module7_0 = Module7(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=64,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_85 = nn.ReLU()
+        self.conv2d_77 = nn.Conv2d(in_channels=64,
+                                   out_channels=128,
+                                   kernel_size=(3, 3),
+                                   stride=(2, 2),
+                                   padding=(1, 1, 1, 1),
+                                   pad_mode="pad",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_81 = nn.ReLU()
+        self.module5_3 = Module5(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_2 = Module16(module0_0_conv2d_0_in_channels=64,
+                                   module0_0_conv2d_0_out_channels=64,
+                                   module0_0_conv2d_2_in_channels=64,
+                                   module0_0_conv2d_2_out_channels=64,
+                                   module0_0_conv2d_5_in_channels=64,
+                                   module0_0_conv2d_5_out_channels=64,
+                                   module0_0_conv2d_7_in_channels=64,
+                                   module0_0_conv2d_7_out_channels=64,
+                                   module0_1_conv2d_0_in_channels=64,
+                                   module0_1_conv2d_0_out_channels=64,
+                                   module0_1_conv2d_2_in_channels=64,
+                                   module0_1_conv2d_2_out_channels=64,
+                                   module0_1_conv2d_5_in_channels=64,
+                                   module0_1_conv2d_5_out_channels=64,
+                                   module0_1_conv2d_7_in_channels=64,
+                                   module0_1_conv2d_7_out_channels=64)
+        self.module16_3 = Module16(module0_0_conv2d_0_in_channels=128,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_5_in_channels=128,
+                                   module0_0_conv2d_5_out_channels=128,
+                                   module0_0_conv2d_7_in_channels=128,
+                                   module0_0_conv2d_7_out_channels=128,
+                                   module0_1_conv2d_0_in_channels=128,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_5_in_channels=128,
+                                   module0_1_conv2d_5_out_channels=128,
+                                   module0_1_conv2d_7_in_channels=128,
+                                   module0_1_conv2d_7_out_channels=128)
+        self.module16_4 = Module16(module0_0_conv2d_0_in_channels=256,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_5_in_channels=256,
+                                   module0_0_conv2d_5_out_channels=256,
+                                   module0_0_conv2d_7_in_channels=256,
+                                   module0_0_conv2d_7_out_channels=256,
+                                   module0_1_conv2d_0_in_channels=256,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_5_in_channels=256,
+                                   module0_1_conv2d_5_out_channels=256,
+                                   module0_1_conv2d_7_in_channels=256,
+                                   module0_1_conv2d_7_out_channels=256)
+        self.module7_1 = Module7(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=64,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_2 = Module7(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=64,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_162 = nn.ReLU()
+        self.conv2d_149 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_3 = Module7(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=128,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_163 = nn.ReLU()
+        self.module11_0 = Module11(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_145 = nn.Conv2d(in_channels=128,
+                                    out_channels=256,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_167 = nn.ReLU()
+        self.module16_5 = Module16(module0_0_conv2d_0_in_channels=64,
+                                   module0_0_conv2d_0_out_channels=64,
+                                   module0_0_conv2d_2_in_channels=64,
+                                   module0_0_conv2d_2_out_channels=64,
+                                   module0_0_conv2d_5_in_channels=64,
+                                   module0_0_conv2d_5_out_channels=64,
+                                   module0_0_conv2d_7_in_channels=64,
+                                   module0_0_conv2d_7_out_channels=64,
+                                   module0_1_conv2d_0_in_channels=64,
+                                   module0_1_conv2d_0_out_channels=64,
+                                   module0_1_conv2d_2_in_channels=64,
+                                   module0_1_conv2d_2_out_channels=64,
+                                   module0_1_conv2d_5_in_channels=64,
+                                   module0_1_conv2d_5_out_channels=64,
+                                   module0_1_conv2d_7_in_channels=64,
+                                   module0_1_conv2d_7_out_channels=64)
+        self.module16_6 = Module16(module0_0_conv2d_0_in_channels=128,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_5_in_channels=128,
+                                   module0_0_conv2d_5_out_channels=128,
+                                   module0_0_conv2d_7_in_channels=128,
+                                   module0_0_conv2d_7_out_channels=128,
+                                   module0_1_conv2d_0_in_channels=128,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_5_in_channels=128,
+                                   module0_1_conv2d_5_out_channels=128,
+                                   module0_1_conv2d_7_in_channels=128,
+                                   module0_1_conv2d_7_out_channels=128)
+        self.module16_7 = Module16(module0_0_conv2d_0_in_channels=256,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_5_in_channels=256,
+                                   module0_0_conv2d_5_out_channels=256,
+                                   module0_0_conv2d_7_in_channels=256,
+                                   module0_0_conv2d_7_out_channels=256,
+                                   module0_1_conv2d_0_in_channels=256,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_5_in_channels=256,
+                                   module0_1_conv2d_5_out_channels=256,
+                                   module0_1_conv2d_7_in_channels=256,
+                                   module0_1_conv2d_7_out_channels=256)
+        self.module7_4 = Module7(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=64,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_5 = Module7(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=64,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_243 = nn.ReLU()
+        self.conv2d_225 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_6 = Module7(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=128,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_244 = nn.ReLU()
+        self.module11_1 = Module11(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_228 = nn.Conv2d(in_channels=128,
+                                    out_channels=256,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_245 = nn.ReLU()
+        self.module16_8 = Module16(module0_0_conv2d_0_in_channels=64,
+                                   module0_0_conv2d_0_out_channels=64,
+                                   module0_0_conv2d_2_in_channels=64,
+                                   module0_0_conv2d_2_out_channels=64,
+                                   module0_0_conv2d_5_in_channels=64,
+                                   module0_0_conv2d_5_out_channels=64,
+                                   module0_0_conv2d_7_in_channels=64,
+                                   module0_0_conv2d_7_out_channels=64,
+                                   module0_1_conv2d_0_in_channels=64,
+                                   module0_1_conv2d_0_out_channels=64,
+                                   module0_1_conv2d_2_in_channels=64,
+                                   module0_1_conv2d_2_out_channels=64,
+                                   module0_1_conv2d_5_in_channels=64,
+                                   module0_1_conv2d_5_out_channels=64,
+                                   module0_1_conv2d_7_in_channels=64,
+                                   module0_1_conv2d_7_out_channels=64)
+        self.module16_9 = Module16(module0_0_conv2d_0_in_channels=128,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_5_in_channels=128,
+                                   module0_0_conv2d_5_out_channels=128,
+                                   module0_0_conv2d_7_in_channels=128,
+                                   module0_0_conv2d_7_out_channels=128,
+                                   module0_1_conv2d_0_in_channels=128,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_5_in_channels=128,
+                                   module0_1_conv2d_5_out_channels=128,
+                                   module0_1_conv2d_7_in_channels=128,
+                                   module0_1_conv2d_7_out_channels=128)
+        self.module16_10 = Module16(module0_0_conv2d_0_in_channels=256,
+                                    module0_0_conv2d_0_out_channels=256,
+                                    module0_0_conv2d_2_in_channels=256,
+                                    module0_0_conv2d_2_out_channels=256,
+                                    module0_0_conv2d_5_in_channels=256,
+                                    module0_0_conv2d_5_out_channels=256,
+                                    module0_0_conv2d_7_in_channels=256,
+                                    module0_0_conv2d_7_out_channels=256,
+                                    module0_1_conv2d_0_in_channels=256,
+                                    module0_1_conv2d_0_out_channels=256,
+                                    module0_1_conv2d_2_in_channels=256,
+                                    module0_1_conv2d_2_out_channels=256,
+                                    module0_1_conv2d_5_in_channels=256,
+                                    module0_1_conv2d_5_out_channels=256,
+                                    module0_1_conv2d_7_in_channels=256,
+                                    module0_1_conv2d_7_out_channels=256)
+        self.module7_7 = Module7(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=64,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.module7_8 = Module7(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=64,
+                                 resizenearestneighbor_1_size=(56, 56))
+        self.relu_324 = nn.ReLU()
+        self.conv2d_306 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_9 = Module7(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=128,
+                                 resizenearestneighbor_1_size=(28, 28))
+        self.relu_322 = nn.ReLU()
+        self.module11_2 = Module11(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_309 = nn.Conv2d(in_channels=128,
+                                    out_channels=256,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_326 = nn.ReLU()
+        self.module16_11 = Module16(module0_0_conv2d_0_in_channels=64,
+                                    module0_0_conv2d_0_out_channels=64,
+                                    module0_0_conv2d_2_in_channels=64,
+                                    module0_0_conv2d_2_out_channels=64,
+                                    module0_0_conv2d_5_in_channels=64,
+                                    module0_0_conv2d_5_out_channels=64,
+                                    module0_0_conv2d_7_in_channels=64,
+                                    module0_0_conv2d_7_out_channels=64,
+                                    module0_1_conv2d_0_in_channels=64,
+                                    module0_1_conv2d_0_out_channels=64,
+                                    module0_1_conv2d_2_in_channels=64,
+                                    module0_1_conv2d_2_out_channels=64,
+                                    module0_1_conv2d_5_in_channels=64,
+                                    module0_1_conv2d_5_out_channels=64,
+                                    module0_1_conv2d_7_in_channels=64,
+                                    module0_1_conv2d_7_out_channels=64)
+        self.module16_12 = Module16(module0_0_conv2d_0_in_channels=128,
+                                    module0_0_conv2d_0_out_channels=128,
+                                    module0_0_conv2d_2_in_channels=128,
+                                    module0_0_conv2d_2_out_channels=128,
+                                    module0_0_conv2d_5_in_channels=128,
+                                    module0_0_conv2d_5_out_channels=128,
+                                    module0_0_conv2d_7_in_channels=128,
+                                    module0_0_conv2d_7_out_channels=128,
+                                    module0_1_conv2d_0_in_channels=128,
+                                    module0_1_conv2d_0_out_channels=128,
+                                    module0_1_conv2d_2_in_channels=128,
+                                    module0_1_conv2d_2_out_channels=128,
+                                    module0_1_conv2d_5_in_channels=128,
+                                    module0_1_conv2d_5_out_channels=128,
+                                    module0_1_conv2d_7_in_channels=128,
+                                    module0_1_conv2d_7_out_channels=128)
+        self.module16_13 = Module16(module0_0_conv2d_0_in_channels=256,
+                                    module0_0_conv2d_0_out_channels=256,
+                                    module0_0_conv2d_2_in_channels=256,
+                                    module0_0_conv2d_2_out_channels=256,
+                                    module0_0_conv2d_5_in_channels=256,
+                                    module0_0_conv2d_5_out_channels=256,
+                                    module0_0_conv2d_7_in_channels=256,
+                                    module0_0_conv2d_7_out_channels=256,
+                                    module0_1_conv2d_0_in_channels=256,
+                                    module0_1_conv2d_0_out_channels=256,
+                                    module0_1_conv2d_2_in_channels=256,
+                                    module0_1_conv2d_2_out_channels=256,
+                                    module0_1_conv2d_5_in_channels=256,
+                                    module0_1_conv2d_5_out_channels=256,
+                                    module0_1_conv2d_7_in_channels=256,
+                                    module0_1_conv2d_7_out_channels=256)
+        self.module7_10 = Module7(conv2d_0_in_channels=128,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_11 = Module7(conv2d_0_in_channels=256,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_402 = nn.ReLU()
+        self.conv2d_388 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_12 = Module7(conv2d_0_in_channels=256,
+                                  conv2d_0_out_channels=128,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_403 = nn.ReLU()
+        self.module11_3 = Module11(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_386 = nn.Conv2d(in_channels=128,
+                                    out_channels=256,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_407 = nn.ReLU()
+        self.module5_4 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module16_14 = Module16(module0_0_conv2d_0_in_channels=64,
+                                    module0_0_conv2d_0_out_channels=64,
+                                    module0_0_conv2d_2_in_channels=64,
+                                    module0_0_conv2d_2_out_channels=64,
+                                    module0_0_conv2d_5_in_channels=64,
+                                    module0_0_conv2d_5_out_channels=64,
+                                    module0_0_conv2d_7_in_channels=64,
+                                    module0_0_conv2d_7_out_channels=64,
+                                    module0_1_conv2d_0_in_channels=64,
+                                    module0_1_conv2d_0_out_channels=64,
+                                    module0_1_conv2d_2_in_channels=64,
+                                    module0_1_conv2d_2_out_channels=64,
+                                    module0_1_conv2d_5_in_channels=64,
+                                    module0_1_conv2d_5_out_channels=64,
+                                    module0_1_conv2d_7_in_channels=64,
+                                    module0_1_conv2d_7_out_channels=64)
+        self.module16_15 = Module16(module0_0_conv2d_0_in_channels=128,
+                                    module0_0_conv2d_0_out_channels=128,
+                                    module0_0_conv2d_2_in_channels=128,
+                                    module0_0_conv2d_2_out_channels=128,
+                                    module0_0_conv2d_5_in_channels=128,
+                                    module0_0_conv2d_5_out_channels=128,
+                                    module0_0_conv2d_7_in_channels=128,
+                                    module0_0_conv2d_7_out_channels=128,
+                                    module0_1_conv2d_0_in_channels=128,
+                                    module0_1_conv2d_0_out_channels=128,
+                                    module0_1_conv2d_2_in_channels=128,
+                                    module0_1_conv2d_2_out_channels=128,
+                                    module0_1_conv2d_5_in_channels=128,
+                                    module0_1_conv2d_5_out_channels=128,
+                                    module0_1_conv2d_7_in_channels=128,
+                                    module0_1_conv2d_7_out_channels=128)
+        self.module16_16 = Module16(module0_0_conv2d_0_in_channels=256,
+                                    module0_0_conv2d_0_out_channels=256,
+                                    module0_0_conv2d_2_in_channels=256,
+                                    module0_0_conv2d_2_out_channels=256,
+                                    module0_0_conv2d_5_in_channels=256,
+                                    module0_0_conv2d_5_out_channels=256,
+                                    module0_0_conv2d_7_in_channels=256,
+                                    module0_0_conv2d_7_out_channels=256,
+                                    module0_1_conv2d_0_in_channels=256,
+                                    module0_1_conv2d_0_out_channels=256,
+                                    module0_1_conv2d_2_in_channels=256,
+                                    module0_1_conv2d_2_out_channels=256,
+                                    module0_1_conv2d_5_in_channels=256,
+                                    module0_1_conv2d_5_out_channels=256,
+                                    module0_1_conv2d_7_in_channels=256,
+                                    module0_1_conv2d_7_out_channels=256)
+        self.module16_17 = Module16(module0_0_conv2d_0_in_channels=512,
+                                    module0_0_conv2d_0_out_channels=512,
+                                    module0_0_conv2d_2_in_channels=512,
+                                    module0_0_conv2d_2_out_channels=512,
+                                    module0_0_conv2d_5_in_channels=512,
+                                    module0_0_conv2d_5_out_channels=512,
+                                    module0_0_conv2d_7_in_channels=512,
+                                    module0_0_conv2d_7_out_channels=512,
+                                    module0_1_conv2d_0_in_channels=512,
+                                    module0_1_conv2d_0_out_channels=512,
+                                    module0_1_conv2d_2_in_channels=512,
+                                    module0_1_conv2d_2_out_channels=512,
+                                    module0_1_conv2d_5_in_channels=512,
+                                    module0_1_conv2d_5_out_channels=512,
+                                    module0_1_conv2d_7_in_channels=512,
+                                    module0_1_conv2d_7_out_channels=512)
+        self.module7_13 = Module7(conv2d_0_in_channels=128,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_14 = Module7(conv2d_0_in_channels=256,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_15 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_525 = nn.ReLU()
+        self.conv2d_484 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_16 = Module7(conv2d_0_in_channels=256,
+                                  conv2d_0_out_channels=128,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_17 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=128,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_526 = nn.ReLU()
+        self.module11_4 = Module11(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_488 = nn.Conv2d(in_channels=128,
+                                    out_channels=256,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_18 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=256,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_527 = nn.ReLU()
+        self.module15_4 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_5 = Module11(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   module5_0_conv2d_0_in_channels=128,
+                                   module5_0_conv2d_0_out_channels=128,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_500 = nn.Conv2d(in_channels=256,
+                                    out_channels=512,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_532 = nn.ReLU()
+        self.module16_18 = Module16(module0_0_conv2d_0_in_channels=64,
+                                    module0_0_conv2d_0_out_channels=64,
+                                    module0_0_conv2d_2_in_channels=64,
+                                    module0_0_conv2d_2_out_channels=64,
+                                    module0_0_conv2d_5_in_channels=64,
+                                    module0_0_conv2d_5_out_channels=64,
+                                    module0_0_conv2d_7_in_channels=64,
+                                    module0_0_conv2d_7_out_channels=64,
+                                    module0_1_conv2d_0_in_channels=64,
+                                    module0_1_conv2d_0_out_channels=64,
+                                    module0_1_conv2d_2_in_channels=64,
+                                    module0_1_conv2d_2_out_channels=64,
+                                    module0_1_conv2d_5_in_channels=64,
+                                    module0_1_conv2d_5_out_channels=64,
+                                    module0_1_conv2d_7_in_channels=64,
+                                    module0_1_conv2d_7_out_channels=64)
+        self.module16_19 = Module16(module0_0_conv2d_0_in_channels=128,
+                                    module0_0_conv2d_0_out_channels=128,
+                                    module0_0_conv2d_2_in_channels=128,
+                                    module0_0_conv2d_2_out_channels=128,
+                                    module0_0_conv2d_5_in_channels=128,
+                                    module0_0_conv2d_5_out_channels=128,
+                                    module0_0_conv2d_7_in_channels=128,
+                                    module0_0_conv2d_7_out_channels=128,
+                                    module0_1_conv2d_0_in_channels=128,
+                                    module0_1_conv2d_0_out_channels=128,
+                                    module0_1_conv2d_2_in_channels=128,
+                                    module0_1_conv2d_2_out_channels=128,
+                                    module0_1_conv2d_5_in_channels=128,
+                                    module0_1_conv2d_5_out_channels=128,
+                                    module0_1_conv2d_7_in_channels=128,
+                                    module0_1_conv2d_7_out_channels=128)
+        self.module16_20 = Module16(module0_0_conv2d_0_in_channels=256,
+                                    module0_0_conv2d_0_out_channels=256,
+                                    module0_0_conv2d_2_in_channels=256,
+                                    module0_0_conv2d_2_out_channels=256,
+                                    module0_0_conv2d_5_in_channels=256,
+                                    module0_0_conv2d_5_out_channels=256,
+                                    module0_0_conv2d_7_in_channels=256,
+                                    module0_0_conv2d_7_out_channels=256,
+                                    module0_1_conv2d_0_in_channels=256,
+                                    module0_1_conv2d_0_out_channels=256,
+                                    module0_1_conv2d_2_in_channels=256,
+                                    module0_1_conv2d_2_out_channels=256,
+                                    module0_1_conv2d_5_in_channels=256,
+                                    module0_1_conv2d_5_out_channels=256,
+                                    module0_1_conv2d_7_in_channels=256,
+                                    module0_1_conv2d_7_out_channels=256)
+        self.module16_21 = Module16(module0_0_conv2d_0_in_channels=512,
+                                    module0_0_conv2d_0_out_channels=512,
+                                    module0_0_conv2d_2_in_channels=512,
+                                    module0_0_conv2d_2_out_channels=512,
+                                    module0_0_conv2d_5_in_channels=512,
+                                    module0_0_conv2d_5_out_channels=512,
+                                    module0_0_conv2d_7_in_channels=512,
+                                    module0_0_conv2d_7_out_channels=512,
+                                    module0_1_conv2d_0_in_channels=512,
+                                    module0_1_conv2d_0_out_channels=512,
+                                    module0_1_conv2d_2_in_channels=512,
+                                    module0_1_conv2d_2_out_channels=512,
+                                    module0_1_conv2d_5_in_channels=512,
+                                    module0_1_conv2d_5_out_channels=512,
+                                    module0_1_conv2d_7_in_channels=512,
+                                    module0_1_conv2d_7_out_channels=512)
+        self.module7_19 = Module7(conv2d_0_in_channels=128,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_20 = Module7(conv2d_0_in_channels=256,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_21 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_647 = nn.ReLU()
+        self.conv2d_609 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_22 = Module7(conv2d_0_in_channels=256,
+                                  conv2d_0_out_channels=128,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_23 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=128,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_644 = nn.ReLU()
+        self.module11_6 = Module11(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_613 = nn.Conv2d(in_channels=128,
+                                    out_channels=256,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_24 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=256,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_649 = nn.ReLU()
+        self.module15_5 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_7 = Module11(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   module5_0_conv2d_0_in_channels=128,
+                                   module5_0_conv2d_0_out_channels=128,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_617 = nn.Conv2d(in_channels=256,
+                                    out_channels=512,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_658 = nn.ReLU()
+        self.module16_22 = Module16(module0_0_conv2d_0_in_channels=64,
+                                    module0_0_conv2d_0_out_channels=64,
+                                    module0_0_conv2d_2_in_channels=64,
+                                    module0_0_conv2d_2_out_channels=64,
+                                    module0_0_conv2d_5_in_channels=64,
+                                    module0_0_conv2d_5_out_channels=64,
+                                    module0_0_conv2d_7_in_channels=64,
+                                    module0_0_conv2d_7_out_channels=64,
+                                    module0_1_conv2d_0_in_channels=64,
+                                    module0_1_conv2d_0_out_channels=64,
+                                    module0_1_conv2d_2_in_channels=64,
+                                    module0_1_conv2d_2_out_channels=64,
+                                    module0_1_conv2d_5_in_channels=64,
+                                    module0_1_conv2d_5_out_channels=64,
+                                    module0_1_conv2d_7_in_channels=64,
+                                    module0_1_conv2d_7_out_channels=64)
+        self.module16_23 = Module16(module0_0_conv2d_0_in_channels=128,
+                                    module0_0_conv2d_0_out_channels=128,
+                                    module0_0_conv2d_2_in_channels=128,
+                                    module0_0_conv2d_2_out_channels=128,
+                                    module0_0_conv2d_5_in_channels=128,
+                                    module0_0_conv2d_5_out_channels=128,
+                                    module0_0_conv2d_7_in_channels=128,
+                                    module0_0_conv2d_7_out_channels=128,
+                                    module0_1_conv2d_0_in_channels=128,
+                                    module0_1_conv2d_0_out_channels=128,
+                                    module0_1_conv2d_2_in_channels=128,
+                                    module0_1_conv2d_2_out_channels=128,
+                                    module0_1_conv2d_5_in_channels=128,
+                                    module0_1_conv2d_5_out_channels=128,
+                                    module0_1_conv2d_7_in_channels=128,
+                                    module0_1_conv2d_7_out_channels=128)
+        self.module16_24 = Module16(module0_0_conv2d_0_in_channels=256,
+                                    module0_0_conv2d_0_out_channels=256,
+                                    module0_0_conv2d_2_in_channels=256,
+                                    module0_0_conv2d_2_out_channels=256,
+                                    module0_0_conv2d_5_in_channels=256,
+                                    module0_0_conv2d_5_out_channels=256,
+                                    module0_0_conv2d_7_in_channels=256,
+                                    module0_0_conv2d_7_out_channels=256,
+                                    module0_1_conv2d_0_in_channels=256,
+                                    module0_1_conv2d_0_out_channels=256,
+                                    module0_1_conv2d_2_in_channels=256,
+                                    module0_1_conv2d_2_out_channels=256,
+                                    module0_1_conv2d_5_in_channels=256,
+                                    module0_1_conv2d_5_out_channels=256,
+                                    module0_1_conv2d_7_in_channels=256,
+                                    module0_1_conv2d_7_out_channels=256)
+        self.module16_25 = Module16(module0_0_conv2d_0_in_channels=512,
+                                    module0_0_conv2d_0_out_channels=512,
+                                    module0_0_conv2d_2_in_channels=512,
+                                    module0_0_conv2d_2_out_channels=512,
+                                    module0_0_conv2d_5_in_channels=512,
+                                    module0_0_conv2d_5_out_channels=512,
+                                    module0_0_conv2d_7_in_channels=512,
+                                    module0_0_conv2d_7_out_channels=512,
+                                    module0_1_conv2d_0_in_channels=512,
+                                    module0_1_conv2d_0_out_channels=512,
+                                    module0_1_conv2d_2_in_channels=512,
+                                    module0_1_conv2d_2_out_channels=512,
+                                    module0_1_conv2d_5_in_channels=512,
+                                    module0_1_conv2d_5_out_channels=512,
+                                    module0_1_conv2d_7_in_channels=512,
+                                    module0_1_conv2d_7_out_channels=512)
+        self.module7_25 = Module7(conv2d_0_in_channels=128,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_26 = Module7(conv2d_0_in_channels=256,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.module7_27 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=64,
+                                  resizenearestneighbor_1_size=(56, 56))
+        self.relu_768 = nn.ReLU()
+        self.conv2d_733 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_28 = Module7(conv2d_0_in_channels=256,
+                                  conv2d_0_out_channels=128,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.module7_29 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=128,
+                                  resizenearestneighbor_1_size=(28, 28))
+        self.relu_769 = nn.ReLU()
+        self.module11_8 = Module11(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_729 = nn.Conv2d(in_channels=128,
+                                    out_channels=256,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module7_30 = Module7(conv2d_0_in_channels=512,
+                                  conv2d_0_out_channels=256,
+                                  resizenearestneighbor_1_size=(14, 14))
+        self.relu_770 = nn.ReLU()
+        self.module15_6 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_0_kernel_size=(3, 3),
+                                   conv2d_0_stride=(2, 2),
+                                   conv2d_0_padding=(1, 1, 1, 1),
+                                   conv2d_0_pad_mode="pad",
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(2, 2),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.module11_9 = Module11(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   module5_0_conv2d_0_in_channels=128,
+                                   module5_0_conv2d_0_out_channels=128,
+                                   module5_0_conv2d_0_kernel_size=(3, 3),
+                                   module5_0_conv2d_0_stride=(2, 2),
+                                   module5_0_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_0_conv2d_0_pad_mode="pad")
+        self.conv2d_740 = nn.Conv2d(in_channels=256,
+                                    out_channels=512,
+                                    kernel_size=(3, 3),
+                                    stride=(2, 2),
+                                    padding=(1, 1, 1, 1),
+                                    pad_mode="pad",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_782 = nn.ReLU()
+        self.module15_7 = Module15(conv2d_0_in_channels=32,
+                                   conv2d_0_out_channels=128,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=64,
+                                   module5_0_conv2d_0_out_channels=32,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=32,
+                                   module5_1_conv2d_0_out_channels=32,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_773 = nn.Conv2d(in_channels=64,
+                                    out_channels=128,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_800 = nn.ReLU()
+        self.module15_8 = Module15(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=128,
+                                   module5_0_conv2d_0_out_channels=64,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=64,
+                                   module5_1_conv2d_0_out_channels=64,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_775 = nn.Conv2d(in_channels=128,
+                                    out_channels=256,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_801 = nn.ReLU()
+        self.module5_5 = Module5(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_9 = Module15(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_0_kernel_size=(1, 1),
+                                   conv2d_0_stride=(1, 1),
+                                   conv2d_0_padding=0,
+                                   conv2d_0_pad_mode="valid",
+                                   module5_0_conv2d_0_in_channels=256,
+                                   module5_0_conv2d_0_out_channels=128,
+                                   module5_0_conv2d_0_kernel_size=(1, 1),
+                                   module5_0_conv2d_0_stride=(1, 1),
+                                   module5_0_conv2d_0_padding=0,
+                                   module5_0_conv2d_0_pad_mode="valid",
+                                   module5_1_conv2d_0_in_channels=128,
+                                   module5_1_conv2d_0_out_channels=128,
+                                   module5_1_conv2d_0_kernel_size=(3, 3),
+                                   module5_1_conv2d_0_stride=(1, 1),
+                                   module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_777 = nn.Conv2d(in_channels=256,
+                                    out_channels=512,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_802 = nn.ReLU()
+        self.module5_6 = Module5(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module15_10 = Module15(conv2d_0_in_channels=256,
+                                    conv2d_0_out_channels=1024,
+                                    conv2d_0_kernel_size=(1, 1),
+                                    conv2d_0_stride=(1, 1),
+                                    conv2d_0_padding=0,
+                                    conv2d_0_pad_mode="valid",
+                                    module5_0_conv2d_0_in_channels=512,
+                                    module5_0_conv2d_0_out_channels=256,
+                                    module5_0_conv2d_0_kernel_size=(1, 1),
+                                    module5_0_conv2d_0_stride=(1, 1),
+                                    module5_0_conv2d_0_padding=0,
+                                    module5_0_conv2d_0_pad_mode="valid",
+                                    module5_1_conv2d_0_in_channels=256,
+                                    module5_1_conv2d_0_out_channels=256,
+                                    module5_1_conv2d_0_kernel_size=(3, 3),
+                                    module5_1_conv2d_0_stride=(1, 1),
+                                    module5_1_conv2d_0_padding=(1, 1, 1, 1),
+                                    module5_1_conv2d_0_pad_mode="pad")
+        self.conv2d_787 = nn.Conv2d(in_channels=512,
+                                    out_channels=1024,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.relu_807 = nn.ReLU()
+        self.module5_7 = Module5(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=1024,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(2, 2),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module5_8 = Module5(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=2048,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid")
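+        # Classifier head: 7x7 global average pooling, flatten, and a 1000-class dense layer.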
+        self.avgpool2d_817 = nn.AvgPool2d(kernel_size=(7, 7))
+        self.flatten_818 = nn.Flatten()
+        self.dense_819 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
+    def construct(self, input_1):
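+        # MindConverter-generated HRNet forward pass: a stem and bottleneck-style residual blocks
+        # feed parallel branches (64/128/256/512 channels) that are repeatedly fused via
+        # nearest-neighbour upsampling (Module7) and strided 3x3 convolutions, before the
+        # classification head at the end of this method.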
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module5_0_opt = self.module5_0(opt_relu_1)
+        module15_0_opt = self.module15_0(module5_0_opt)
+        opt_conv2d_5 = self.conv2d_5(module5_0_opt)
+        opt_add_10 = P.Add()(module15_0_opt, opt_conv2d_5)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module15_1_opt = self.module15_1(opt_relu_11)
+        opt_add_17 = P.Add()(module15_1_opt, opt_relu_11)
+        opt_relu_18 = self.relu_18(opt_add_17)
+        module15_2_opt = self.module15_2(opt_relu_18)
+        opt_add_24 = P.Add()(module15_2_opt, opt_relu_18)
+        opt_relu_25 = self.relu_25(opt_add_24)
+        module15_3_opt = self.module15_3(opt_relu_25)
+        opt_add_31 = P.Add()(module15_3_opt, opt_relu_25)
+        opt_relu_32 = self.relu_32(opt_add_31)
+        module5_1_opt = self.module5_1(opt_relu_32)
+        module5_2_opt = self.module5_2(opt_relu_32)
+        module16_0_opt = self.module16_0(module5_1_opt)
+        module16_1_opt = self.module16_1(module5_2_opt)
+        module7_0_opt = self.module7_0(module16_1_opt)
+        opt_add_82 = P.Add()(module16_0_opt, module7_0_opt)
+        opt_relu_85 = self.relu_85(opt_add_82)
+        opt_conv2d_77 = self.conv2d_77(module16_0_opt)
+        opt_add_79 = P.Add()(opt_conv2d_77, module16_1_opt)
+        opt_relu_81 = self.relu_81(opt_add_79)
+        module5_3_opt = self.module5_3(opt_relu_81)
+        module16_2_opt = self.module16_2(opt_relu_85)
+        module16_3_opt = self.module16_3(opt_relu_81)
+        module16_4_opt = self.module16_4(module5_3_opt)
+        module7_1_opt = self.module7_1(module16_3_opt)
+        opt_add_153 = P.Add()(module16_2_opt, module7_1_opt)
+        module7_2_opt = self.module7_2(module16_4_opt)
+        opt_add_159 = P.Add()(opt_add_153, module7_2_opt)
+        opt_relu_162 = self.relu_162(opt_add_159)
+        opt_conv2d_149 = self.conv2d_149(module16_2_opt)
+        opt_add_154 = P.Add()(opt_conv2d_149, module16_3_opt)
+        module7_3_opt = self.module7_3(module16_4_opt)
+        opt_add_160 = P.Add()(opt_add_154, module7_3_opt)
+        opt_relu_163 = self.relu_163(opt_add_160)
+        module11_0_opt = self.module11_0(module16_2_opt)
+        opt_conv2d_145 = self.conv2d_145(module16_3_opt)
+        opt_add_161 = P.Add()(module11_0_opt, opt_conv2d_145)
+        opt_add_164 = P.Add()(opt_add_161, module16_4_opt)
+        opt_relu_167 = self.relu_167(opt_add_164)
+        module16_5_opt = self.module16_5(opt_relu_162)
+        module16_6_opt = self.module16_6(opt_relu_163)
+        module16_7_opt = self.module16_7(opt_relu_167)
+        module7_4_opt = self.module7_4(module16_6_opt)
+        opt_add_236 = P.Add()(module16_5_opt, module7_4_opt)
+        module7_5_opt = self.module7_5(module16_7_opt)
+        opt_add_240 = P.Add()(opt_add_236, module7_5_opt)
+        opt_relu_243 = self.relu_243(opt_add_240)
+        opt_conv2d_225 = self.conv2d_225(module16_5_opt)
+        opt_add_230 = P.Add()(opt_conv2d_225, module16_6_opt)
+        module7_6_opt = self.module7_6(module16_7_opt)
+        opt_add_241 = P.Add()(opt_add_230, module7_6_opt)
+        opt_relu_244 = self.relu_244(opt_add_241)
+        module11_1_opt = self.module11_1(module16_5_opt)
+        opt_conv2d_228 = self.conv2d_228(module16_6_opt)
+        opt_add_239 = P.Add()(module11_1_opt, opt_conv2d_228)
+        opt_add_242 = P.Add()(opt_add_239, module16_7_opt)
+        opt_relu_245 = self.relu_245(opt_add_242)
+        module16_8_opt = self.module16_8(opt_relu_243)
+        module16_9_opt = self.module16_9(opt_relu_244)
+        module16_10_opt = self.module16_10(opt_relu_245)
+        module7_7_opt = self.module7_7(module16_9_opt)
+        opt_add_318 = P.Add()(module16_8_opt, module7_7_opt)
+        module7_8_opt = self.module7_8(module16_10_opt)
+        opt_add_321 = P.Add()(opt_add_318, module7_8_opt)
+        opt_relu_324 = self.relu_324(opt_add_321)
+        opt_conv2d_306 = self.conv2d_306(module16_8_opt)
+        opt_add_312 = P.Add()(opt_conv2d_306, module16_9_opt)
+        module7_9_opt = self.module7_9(module16_10_opt)
+        opt_add_319 = P.Add()(opt_add_312, module7_9_opt)
+        opt_relu_322 = self.relu_322(opt_add_319)
+        module11_2_opt = self.module11_2(module16_8_opt)
+        opt_conv2d_309 = self.conv2d_309(module16_9_opt)
+        opt_add_320 = P.Add()(module11_2_opt, opt_conv2d_309)
+        opt_add_323 = P.Add()(opt_add_320, module16_10_opt)
+        opt_relu_326 = self.relu_326(opt_add_323)
+        module16_11_opt = self.module16_11(opt_relu_324)
+        module16_12_opt = self.module16_12(opt_relu_322)
+        module16_13_opt = self.module16_13(opt_relu_326)
+        module7_10_opt = self.module7_10(module16_12_opt)
+        opt_add_395 = P.Add()(module16_11_opt, module7_10_opt)
+        module7_11_opt = self.module7_11(module16_13_opt)
+        opt_add_399 = P.Add()(opt_add_395, module7_11_opt)
+        opt_relu_402 = self.relu_402(opt_add_399)
+        opt_conv2d_388 = self.conv2d_388(module16_11_opt)
+        opt_add_393 = P.Add()(opt_conv2d_388, module16_12_opt)
+        module7_12_opt = self.module7_12(module16_13_opt)
+        opt_add_400 = P.Add()(opt_add_393, module7_12_opt)
+        opt_relu_403 = self.relu_403(opt_add_400)
+        module11_3_opt = self.module11_3(module16_11_opt)
+        opt_conv2d_386 = self.conv2d_386(module16_12_opt)
+        opt_add_401 = P.Add()(module11_3_opt, opt_conv2d_386)
+        opt_add_404 = P.Add()(opt_add_401, module16_13_opt)
+        opt_relu_407 = self.relu_407(opt_add_404)
+        module5_4_opt = self.module5_4(opt_relu_407)
+        module16_14_opt = self.module16_14(opt_relu_402)
+        module16_15_opt = self.module16_15(opt_relu_403)
+        module16_16_opt = self.module16_16(opt_relu_407)
+        module16_17_opt = self.module16_17(module5_4_opt)
+        module7_13_opt = self.module7_13(module16_15_opt)
+        opt_add_503 = P.Add()(module16_14_opt, module7_13_opt)
+        module7_14_opt = self.module7_14(module16_16_opt)
+        opt_add_513 = P.Add()(opt_add_503, module7_14_opt)
+        module7_15_opt = self.module7_15(module16_17_opt)
+        opt_add_521 = P.Add()(opt_add_513, module7_15_opt)
+        opt_relu_525 = self.relu_525(opt_add_521)
+        opt_conv2d_484 = self.conv2d_484(module16_14_opt)
+        opt_add_492 = P.Add()(opt_conv2d_484, module16_15_opt)
+        module7_16_opt = self.module7_16(module16_16_opt)
+        opt_add_514 = P.Add()(opt_add_492, module7_16_opt)
+        module7_17_opt = self.module7_17(module16_17_opt)
+        opt_add_522 = P.Add()(opt_add_514, module7_17_opt)
+        opt_relu_526 = self.relu_526(opt_add_522)
+        module11_4_opt = self.module11_4(module16_14_opt)
+        opt_conv2d_488 = self.conv2d_488(module16_15_opt)
+        opt_add_508 = P.Add()(module11_4_opt, opt_conv2d_488)
+        opt_add_515 = P.Add()(opt_add_508, module16_16_opt)
+        module7_18_opt = self.module7_18(module16_17_opt)
+        opt_add_523 = P.Add()(opt_add_515, module7_18_opt)
+        opt_relu_527 = self.relu_527(opt_add_523)
+        module15_4_opt = self.module15_4(module16_14_opt)
+        module11_5_opt = self.module11_5(module16_15_opt)
+        opt_add_520 = P.Add()(module15_4_opt, module11_5_opt)
+        opt_conv2d_500 = self.conv2d_500(module16_16_opt)
+        opt_add_524 = P.Add()(opt_add_520, opt_conv2d_500)
+        opt_add_528 = P.Add()(opt_add_524, module16_17_opt)
+        opt_relu_532 = self.relu_532(opt_add_528)
+        module16_18_opt = self.module16_18(opt_relu_525)
+        module16_19_opt = self.module16_19(opt_relu_526)
+        module16_20_opt = self.module16_20(opt_relu_527)
+        module16_21_opt = self.module16_21(opt_relu_532)
+        module7_19_opt = self.module7_19(module16_19_opt)
+        opt_add_631 = P.Add()(module16_18_opt, module7_19_opt)
+        module7_20_opt = self.module7_20(module16_20_opt)
+        opt_add_639 = P.Add()(opt_add_631, module7_20_opt)
+        module7_21_opt = self.module7_21(module16_21_opt)
+        opt_add_643 = P.Add()(opt_add_639, module7_21_opt)
+        opt_relu_647 = self.relu_647(opt_add_643)
+        opt_conv2d_609 = self.conv2d_609(module16_18_opt)
+        opt_add_619 = P.Add()(opt_conv2d_609, module16_19_opt)
+        module7_22_opt = self.module7_22(module16_20_opt)
+        opt_add_633 = P.Add()(opt_add_619, module7_22_opt)
+        module7_23_opt = self.module7_23(module16_21_opt)
+        opt_add_640 = P.Add()(opt_add_633, module7_23_opt)
+        opt_relu_644 = self.relu_644(opt_add_640)
+        module11_6_opt = self.module11_6(module16_18_opt)
+        opt_conv2d_613 = self.conv2d_613(module16_19_opt)
+        opt_add_637 = P.Add()(module11_6_opt, opt_conv2d_613)
+        opt_add_641 = P.Add()(opt_add_637, module16_20_opt)
+        module7_24_opt = self.module7_24(module16_21_opt)
+        opt_add_645 = P.Add()(opt_add_641, module7_24_opt)
+        opt_relu_649 = self.relu_649(opt_add_645)
+        module15_5_opt = self.module15_5(module16_18_opt)
+        module11_7_opt = self.module11_7(module16_19_opt)
+        opt_add_646 = P.Add()(module15_5_opt, module11_7_opt)
+        opt_conv2d_617 = self.conv2d_617(module16_20_opt)
+        opt_add_650 = P.Add()(opt_add_646, opt_conv2d_617)
+        opt_add_654 = P.Add()(opt_add_650, module16_21_opt)
+        opt_relu_658 = self.relu_658(opt_add_654)
+        module16_22_opt = self.module16_22(opt_relu_647)
+        module16_23_opt = self.module16_23(opt_relu_644)
+        module16_24_opt = self.module16_24(opt_relu_649)
+        module16_25_opt = self.module16_25(opt_relu_658)
+        module7_25_opt = self.module7_25(module16_23_opt)
+        opt_add_745 = P.Add()(module16_22_opt, module7_25_opt)
+        module7_26_opt = self.module7_26(module16_24_opt)
+        opt_add_752 = P.Add()(opt_add_745, module7_26_opt)
+        module7_27_opt = self.module7_27(module16_25_opt)
+        opt_add_764 = P.Add()(opt_add_752, module7_27_opt)
+        opt_relu_768 = self.relu_768(opt_add_764)
+        opt_conv2d_733 = self.conv2d_733(module16_22_opt)
+        opt_add_742 = P.Add()(opt_conv2d_733, module16_23_opt)
+        module7_28_opt = self.module7_28(module16_24_opt)
+        opt_add_753 = P.Add()(opt_add_742, module7_28_opt)
+        module7_29_opt = self.module7_29(module16_25_opt)
+        opt_add_765 = P.Add()(opt_add_753, module7_29_opt)
+        opt_relu_769 = self.relu_769(opt_add_765)
+        module11_8_opt = self.module11_8(module16_22_opt)
+        opt_conv2d_729 = self.conv2d_729(module16_23_opt)
+        opt_add_757 = P.Add()(module11_8_opt, opt_conv2d_729)
+        opt_add_762 = P.Add()(opt_add_757, module16_24_opt)
+        module7_30_opt = self.module7_30(module16_25_opt)
+        opt_add_766 = P.Add()(opt_add_762, module7_30_opt)
+        opt_relu_770 = self.relu_770(opt_add_766)
+        module15_6_opt = self.module15_6(module16_22_opt)
+        module11_9_opt = self.module11_9(module16_23_opt)
+        opt_add_767 = P.Add()(module15_6_opt, module11_9_opt)
+        opt_conv2d_740 = self.conv2d_740(module16_24_opt)
+        opt_add_771 = P.Add()(opt_add_767, opt_conv2d_740)
+        opt_add_778 = P.Add()(opt_add_771, module16_25_opt)
+        opt_relu_782 = self.relu_782(opt_add_778)
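+        # Classification head: each resolution branch is widened by a residual block
+        # (module15_7..module15_10) with a 1x1 convolution shortcut, then the branches are merged
+        # top-down through strided 3x3 convolutions (module5_5..module5_7) before pooling and the
+        # final dense layer.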
+        module15_7_opt = self.module15_7(opt_relu_768)
+        opt_conv2d_773 = self.conv2d_773(opt_relu_768)
+        opt_add_796 = P.Add()(module15_7_opt, opt_conv2d_773)
+        opt_relu_800 = self.relu_800(opt_add_796)
+        module15_8_opt = self.module15_8(opt_relu_769)
+        opt_conv2d_775 = self.conv2d_775(opt_relu_769)
+        opt_add_797 = P.Add()(module15_8_opt, opt_conv2d_775)
+        opt_relu_801 = self.relu_801(opt_add_797)
+        module5_5_opt = self.module5_5(opt_relu_800)
+        opt_add_808 = P.Add()(opt_relu_801, module5_5_opt)
+        module15_9_opt = self.module15_9(opt_relu_770)
+        opt_conv2d_777 = self.conv2d_777(opt_relu_770)
+        opt_add_798 = P.Add()(module15_9_opt, opt_conv2d_777)
+        opt_relu_802 = self.relu_802(opt_add_798)
+        module5_6_opt = self.module5_6(opt_add_808)
+        opt_add_811 = P.Add()(opt_relu_802, module5_6_opt)
+        module15_10_opt = self.module15_10(opt_relu_782)
+        opt_conv2d_787 = self.conv2d_787(opt_relu_782)
+        opt_add_805 = P.Add()(module15_10_opt, opt_conv2d_787)
+        opt_relu_807 = self.relu_807(opt_add_805)
+        module5_7_opt = self.module5_7(opt_add_811)
+        opt_add_814 = P.Add()(opt_relu_807, module5_7_opt)
+        module5_8_opt = self.module5_8(opt_add_814)
+        opt_avgpool2d_817 = self.avgpool2d_817(module5_8_opt)
+        opt_flatten_818 = self.flatten_818(opt_avgpool2d_817)
+        opt_dense_819 = self.dense_819(opt_flatten_818)
+        return opt_dense_819
diff --git a/research/cvtmodel/resnet_ipl/README_CN.md b/research/cvtmodel/resnet_ipl/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..9d951b1c88cdf8dbe079da8d55f776bb50f5e59b
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/README_CN.md
@@ -0,0 +1,165 @@
+# Contents
+
+<!-- TOC -->
+
+- [Contents](#contents)
+- [Resnet_ipl Description](#resnet_ipl-description)
+- [Model Architecture](#model-architecture)
+- [Dataset](#dataset)
+- [Environment Requirements](#environment-requirements)
+- [Script Description](#script-description)
+    - [Script and Sample Code](#script-and-sample-code)
+    - [Export Process](#export-process)
+        - [Export](#export)
+    - [Inference Process](#inference-process)
+        - [Inference](#inference)
+- [ModelZoo Homepage](#modelzoo-homepage)
+
+<!-- /TOC -->
+
+# Resnet_ipl Description
+
+Resnet_ipl is a family of ResNet-based networks extended for image classification. A description of the models is available at (http://rwightman.github.io/pytorch-image-models/models/).
+This directory starts from the model files provided by PyTorch, converts them into MindSpore checkpoint (ckpt) files with the MindConverter tool, and runs full inference to verify the accuracy of the converted models.
+
+# Model Architecture
+
+The Resnet_ipl collection supports eight variants: Resnet26t, Resnet51q, Resnet101d, Resnetrs50, Resnetrs200, Seresnet152d, Resnest101e and Gernet_l.
+
+# Dataset
+
+Dataset used by the Resnet_ipl models: ImageNet
+
+The default dataset configuration is as follows:
+
+- Test dataset preprocessing:
+    - Input image size (Resnet26t): 272\*272 (resize the image to 272\*272, then crop the central region)
+    - Input image size (Resnet51q): 288\*288 (resize the image to 288\*288, then crop the central region)
+    - Input image size (Resnet101d): 256\*256 (resize the image to 256\*256, then crop the central region)
+    - Input image size (Resnetrs50): 224\*224 (resize the image to 256\*256, then crop the central region)
+    - Input image size (Resnetrs200): 320\*320 (resize the image to 320\*320, then crop the central region)
+    - Input image size (Seresnet152d): 320\*320 (resize the image to 320\*320, then crop the central region)
+    - Input image size (Resnest101e): 320\*320 (resize the image to 320\*320, then crop the central region)
+    - Input image size (Gernet_l): 292\*292 (resize the image to 292\*292, then crop the central region)
+    - Normalize the input image with the channel mean and standard deviation (see the invocation sketch after this list)
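+
+These sizes are what `scripts/run_infer_310.sh` passes to the Ascend 310 inference binary through its `--resize`/`--crop` flags; the binary then applies Decode, Resize, CenterCrop, Normalize and HWC2CHW before prediction. As a minimal sketch (the MINDIR and dataset paths below are placeholders), the manual call for Resnetrs50 would look roughly like this:
+
+```shell
+# Illustrative only; run_infer_310.sh normally fills in these flags.
+# Resnetrs50 resizes to 256 and center-crops to 224, matching the sizes listed above.
+./ascend310_infer/out/main --model_path=./resnetrs50.mindir --backbone=resnetrs50 \
+    --resize=256 --crop=224 --dataset_path=./imagenet/val --device_id=0
+```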
+
+# Environment Requirements
+
+- Hardware (Ascend/GPU)
+    - Prepare an Ascend or GPU processor to set up the hardware environment.
+- Framework
+    - [MindSpore](https://www.mindspore.cn/install)
+- For more details, see the following resources:
+    - [MindSpore Tutorials](https://www.mindspore.cn/tutorials/zh-CN/master/index.html)
+    - [MindSpore Python API](https://www.mindspore.cn/docs/api/zh-CN/master/index.html)
+
+# Script Description
+
+## Script and Sample Code
+
+```shell
+├── model_zoo
+    ├── README.md                          // description of all models
+    ├── Resnet
+        ├── README_CN.md                 // description of the Resnet_ipl models
+        ├── ascend310_infer              // source code for Ascend 310 inference
+        ├── scripts
+        │   ├── run_infer_310.sh                    // shell script for Ascend 310 inference
+        ├── src
+        │   ├── resnet26t.py             // resnet26t model definition
+        │   ├── resnet51q.py             // resnet51q model definition
+        │   ├── resnet101d.py             // resnet101d model definition
+        │   ├── resnetrs50.py             // resnetrs50 model definition
+        │   ├── resnetrs200.py             // resnetrs200 model definition
+        │   ├── seresnet152d.py            // seresnet152d model definition
+        │   ├── resnest101e.py            // resnest101e model definition
+        │   ├── gernet_l.py             // gernet_l model definition
+        ├── export.py                   // export script
+        ├── preprocess.py                   // data preprocessing script
+        ├── postprocess.py                   // postprocessing script for 310 inference
+```
+
+## Export Process
+
+### Export
+
+```shell
+python export.py --backbone [NET_NAME] --ckpt_path [CKPT_PATH] --device_target [DEVICE_TARGET] --device_id 0 --file_format [EXPORT_FORMAT] --file_name [FILE_NAME]
+```
+
+`backbone` must be one of ["resnet26t", "resnet51q", "resnet101d", "resnetrs50", "resnetrs200", "seresnet152d", "resnest101e", "gernet_l"].
+`EXPORT_FORMAT` should be set to "MINDIR".
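+
+For example, a Resnetrs50 checkpoint can be exported to MINDIR like this (the checkpoint path is a placeholder):
+
+```shell
+# Placeholder path; point --ckpt_path at the converted checkpoint.
+python export.py --backbone resnetrs50 --ckpt_path ./resnetrs50.ckpt --device_target Ascend --device_id 0 --file_format MINDIR --file_name resnetrs50
+```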
+
+## Inference Process
+
+### Inference
+
+The model must be exported before running inference; a MINDIR model can be exported in any environment.
+
+```shell
+# Ascend 310 inference
+bash run_infer_310.sh [MINDIR_PATH] [BACKBONE] [DATASET] [DATA_PATH] [DEVICE_ID]
+```
+
+- Note: the Resnet_ipl networks use the ImageNet dataset.
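+
+A concrete call for Resnetrs50 (all paths are placeholders) looks like this:
+
+```shell
+# DEVICE_ID is optional and defaults to 0.
+bash run_infer_310.sh ./resnetrs50.mindir resnetrs50 imagenet ./imagenet/val 0
+```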
+
+The inference results are saved in the current directory; results similar to the following can be found in the acc.log file.
+The Resnet26t network evaluated on ImageNet gives the following results:
+
+  ```log
+  after allreduce eval: top1_correct=40560, tot=50000, acc=81.12%
+  after allreduce eval: top5_correct=47792, tot=50000, acc=95.58%
+  ```
+
+The Resnet51q network evaluated on ImageNet gives the following results:
+
+  ```log
+  after allreduce eval: top1_correct=40816, tot=50000, acc=81.63%
+  after allreduce eval: top5_correct=47901, tot=50000, acc=95.80%
+  ```
+
+The Resnet101d network evaluated on ImageNet gives the following results:
+
+  ```log
+  after allreduce eval: top1_correct=41240, tot=50000, acc=82.48%
+  after allreduce eval: top5_correct=48055, tot=50000, acc=96.11%
+  ```
+
+The Resnetrs50 network evaluated on ImageNet gives the following results:
+
+  ```log
+  after allreduce eval: top1_correct=40106, tot=50000, acc=80.21%
+  after allreduce eval: top5_correct=47648, tot=50000, acc=95.30%
+  ```
+
+The Resnetrs200 network evaluated on ImageNet gives the following results:
+
+  ```log
+  after allreduce eval: top1_correct=41613, tot=50000, acc=83.23%
+  after allreduce eval: top5_correct=48288, tot=50000, acc=96.58%
+  ```
+
+The Seresnet152d network evaluated on ImageNet gives the following results:
+
+  ```log
+  after allreduce eval: top1_correct=41813, tot=50000, acc=83.63%
+  after allreduce eval: top5_correct=48396, tot=50000, acc=96.79%
+  ```
+
+The Resnest101e network evaluated on ImageNet gives the following results:
+
+  ```log
+  after allreduce eval: top1_correct=41334, tot=50000, acc=82.67%
+  after allreduce eval: top5_correct=48153, tot=50000, acc=96.31%
+  ```
+
+The Gernet_l network evaluated on ImageNet gives the following results:
+
+  ```log
+  after allreduce eval: top1_correct=40700, tot=50000, acc=81.40%
+  after allreduce eval: top5_correct=47806, tot=50000, acc=95.61%
+  ```
+
+# ModelZoo Homepage
+
+Please visit the official [homepage](https://gitee.com/mindspore/models).
diff --git a/research/cvtmodel/resnet_ipl/ascend310_infer/CMakeLists.txt b/research/cvtmodel/resnet_ipl/ascend310_infer/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ee3c85447340e0449ff2b70ed24f60a17e07b2b6
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/ascend310_infer/CMakeLists.txt
@@ -0,0 +1,14 @@
+cmake_minimum_required(VERSION 3.14.1)
+project(Ascend310Infer)
+add_compile_definitions(_GLIBCXX_USE_CXX11_ABI=0)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -std=c++17 -Werror -Wall -fPIE -Wl,--allow-shlib-undefined")
+set(PROJECT_SRC_ROOT ${CMAKE_CURRENT_LIST_DIR}/)
+option(MINDSPORE_PATH "mindspore install path" "")
+include_directories(${MINDSPORE_PATH})
+include_directories(${MINDSPORE_PATH}/include)
+include_directories(${PROJECT_SRC_ROOT})
+find_library(MS_LIB libmindspore.so ${MINDSPORE_PATH}/lib)
+file(GLOB_RECURSE MD_LIB ${MINDSPORE_PATH}/_c_dataengine*)
+
+add_executable(main src/main.cc src/utils.cc)
+target_link_libraries(main ${MS_LIB} ${MD_LIB} gflags)
diff --git a/research/cvtmodel/resnet_ipl/ascend310_infer/build.sh b/research/cvtmodel/resnet_ipl/ascend310_infer/build.sh
new file mode 100644
index 0000000000000000000000000000000000000000..770a8851efade7f352039fc8665d307ae1abbb00
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/ascend310_infer/build.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [ ! -d out ]; then
+  mkdir out
+fi
+cd out || exit
+cmake .. \
+    -DMINDSPORE_PATH="`pip show mindspore-ascend | grep Location | awk '{print $2"/mindspore"}' | xargs realpath`"
+make
diff --git a/research/cvtmodel/resnet_ipl/ascend310_infer/inc/utils.h b/research/cvtmodel/resnet_ipl/ascend310_infer/inc/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..efebe03a8c1179f5a1f9d5f7ee07e0352a9937c6
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/ascend310_infer/inc/utils.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_INFERENCE_UTILS_H_
+#define MINDSPORE_INFERENCE_UTILS_H_
+
+#include <sys/stat.h>
+#include <dirent.h>
+#include <vector>
+#include <string>
+#include <memory>
+#include "include/api/types.h"
+
+std::vector<std::string> GetAllFiles(std::string_view dirName);
+DIR *OpenDir(std::string_view dirName);
+std::string RealPath(std::string_view path);
+mindspore::MSTensor ReadFileToTensor(const std::string &file);
+int WriteResult(const std::string& imageFile, const std::vector<mindspore::MSTensor> &outputs);
+#endif
diff --git a/research/cvtmodel/resnet_ipl/ascend310_infer/src/main.cc b/research/cvtmodel/resnet_ipl/ascend310_infer/src/main.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a71d65121dea4955e64bf9338e179708b84bf4a1
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/ascend310_infer/src/main.cc
@@ -0,0 +1,172 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/time.h>
+#include <gflags/gflags.h>
+#include <dirent.h>
+#include <iostream>
+#include <string>
+#include <algorithm>
+#include <iosfwd>
+#include <vector>
+#include <fstream>
+#include <sstream>
+#include <map>
+
+#include "../inc/utils.h"
+#include "include/dataset/execute.h"
+#include "include/dataset/transforms.h"
+#include "include/dataset/vision.h"
+#include "include/dataset/vision_ascend.h"
+#include "include/api/types.h"
+#include "include/api/model.h"
+#include "include/api/serialization.h"
+#include "include/api/context.h"
+
+using mindspore::Serialization;
+using mindspore::Model;
+using mindspore::Context;
+using mindspore::Status;
+using mindspore::ModelType;
+using mindspore::Graph;
+using mindspore::GraphCell;
+using mindspore::kSuccess;
+using mindspore::MSTensor;
+using mindspore::DataType;
+using mindspore::dataset::Execute;
+using mindspore::dataset::TensorTransform;
+using mindspore::dataset::InterpolationMode;
+using mindspore::dataset::vision::Decode;
+using mindspore::dataset::vision::Resize;
+using mindspore::dataset::vision::CenterCrop;
+using mindspore::dataset::vision::Normalize;
+using mindspore::dataset::vision::HWC2CHW;
+
+using mindspore::dataset::transforms::TypeCast;
+
+
+DEFINE_string(model_path, "", "model path");
+DEFINE_int32(resize, 256, "resize size");
+DEFINE_int32(crop, 256, "crop size");
+DEFINE_string(inter, "kCubicPil", "InterpolationMode");
+DEFINE_string(backbone, "resnetrs50", "backbone type");
+DEFINE_string(dataset_path, ".", "dataset path");
+DEFINE_int32(device_id, 0, "device id");
+
+int main(int argc, char **argv) {
+    gflags::ParseCommandLineFlags(&argc, &argv, true);
+    if (RealPath(FLAGS_model_path).empty()) {
+      std::cout << "Invalid model" << std::endl;
+      return 1;
+    }
+
+
+    auto context = std::make_shared<Context>();
+    auto ascend310_info = std::make_shared<mindspore::Ascend310DeviceInfo>();
+    ascend310_info->SetDeviceID(FLAGS_device_id);
+    context->MutableDeviceInfo().push_back(ascend310_info);
+
+    Graph graph;
+    Status ret = Serialization::Load(FLAGS_model_path, ModelType::kMindIR, &graph);
+    if (ret != kSuccess) {
+        std::cout << "Load model failed." << std::endl;
+        return 1;
+    }
+
+    Model model;
+    ret = model.Build(GraphCell(graph), context);
+    if (ret != kSuccess) {
+        std::cout << "ERROR: Build failed." << std::endl;
+        return 1;
+    }
+
+    std::vector<MSTensor> modelInputs = model.GetInputs();
+
+    auto all_files = GetAllFiles(FLAGS_dataset_path);
+    if (all_files.empty()) {
+        std::cout << "ERROR: no input data." << std::endl;
+        return 1;
+    }
+
+    std::shared_ptr<TensorTransform> decode(new Decode());
+    std::shared_ptr<TensorTransform> resizeC(new Resize({FLAGS_resize, FLAGS_resize}, InterpolationMode::kCubicPil));
+    std::shared_ptr<TensorTransform> resizeArea(new Resize({FLAGS_resize, FLAGS_resize}, InterpolationMode::kArea));
+    std::shared_ptr<TensorTransform> resize(new Resize({FLAGS_resize, FLAGS_resize}));
+    std::shared_ptr<TensorTransform> centerCrop(new CenterCrop({FLAGS_crop, FLAGS_crop}));
+    std::shared_ptr<TensorTransform> normImageNet(new Normalize({123.675, 116.28, 103.53}, {58.395, 57.12, 57.375}));
+    std::shared_ptr<TensorTransform> hwc2chw(new HWC2CHW());
+
+    mindspore::dataset::Execute transformCubic({decode, resizeC, centerCrop, normImageNet, hwc2chw});
+    mindspore::dataset::Execute transformArea({decode, resizeArea, centerCrop, normImageNet, hwc2chw});
+    mindspore::dataset::Execute transform({decode, resize, centerCrop, normImageNet, hwc2chw});
+
+    std::map<double, double> costTime_map;
+
+    size_t size = all_files.size();
+    for (size_t i = 0; i < size; ++i) {
+        struct timeval start;
+        struct timeval end;
+        double startTime_ms;
+        double endTime_ms;
+        std::vector<MSTensor> inputs;
+        std::vector<MSTensor> outputs;
+
+        std::cout << "Start predict input files:" << all_files[i] << std::endl;
+        mindspore::MSTensor image =  ReadFileToTensor(all_files[i]);
+
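+        // gernet_l and resnet51q resize with area interpolation, resnest101e with the default Resize
+        // interpolation, and the remaining backbones with bicubic (kCubicPil).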
+        if ((FLAGS_backbone.compare("gernet_l") == 0) || (FLAGS_backbone.compare("resnet51q") == 0)) {
+            transformArea(image, &image);
+        } else if (FLAGS_backbone.compare("resnest101e") == 0) {
+            transform(image, &image);
+        } else {
+            transformCubic(image, &image);
+        }
+
+        inputs.emplace_back(modelInputs[0].Name(), modelInputs[0].DataType(), modelInputs[0].Shape(),
+                            image.Data().get(), image.DataSize());
+
+        gettimeofday(&start, NULL);
+        model.Predict(inputs, &outputs);
+        gettimeofday(&end, NULL);
+
+        startTime_ms = (1.0 * start.tv_sec * 1000000 + start.tv_usec) / 1000;
+        endTime_ms = (1.0 * end.tv_sec * 1000000 + end.tv_usec) / 1000;
+        costTime_map.insert(std::pair<double, double>(startTime_ms, endTime_ms));
+        int rst = WriteResult(all_files[i], outputs);
+        if (rst != 0) {
+            std::cout << "write result failed." << std::endl;
+            return rst;
+        }
+    }
+    double average = 0.0;
+    int inferCount = 0;
+
+    for (auto iter = costTime_map.begin(); iter != costTime_map.end(); iter++) {
+        double diff = 0.0;
+        diff = iter->second - iter->first;
+        average += diff;
+        inferCount++;
+    }
+    average = average / inferCount;
+    std::stringstream timeCost;
+    timeCost << "NN inference cost average time: "<< average << " ms of infer_count " << inferCount << std::endl;
+    std::cout << "NN inference cost average time: "<< average << "ms of infer_count " << inferCount << std::endl;
+    std::string fileName = "./time_Result" + std::string("/test_perform_static.txt");
+    std::ofstream fileStream(fileName.c_str(), std::ios::trunc);
+    fileStream << timeCost.str();
+    fileStream.close();
+    costTime_map.clear();
+  return 0;
+}
diff --git a/research/cvtmodel/resnet_ipl/ascend310_infer/src/utils.cc b/research/cvtmodel/resnet_ipl/ascend310_infer/src/utils.cc
new file mode 100644
index 0000000000000000000000000000000000000000..653b1de44962614ac77d44481f8d1a7bde52caaf
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/ascend310_infer/src/utils.cc
@@ -0,0 +1,160 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "inc/utils.h"
+
+#include <fstream>
+#include <algorithm>
+#include <iostream>
+
+using mindspore::MSTensor;
+using mindspore::DataType;
+
+std::vector<std::string> GetAllFiles(std::string_view dirName) {
+    struct dirent *filename;
+    DIR *dir = OpenDir(dirName);
+    if (dir == nullptr) {
+        return {};
+    }
+    std::vector<std::string> dirs;
+    std::vector<std::string> files;
+    while ((filename = readdir(dir)) != nullptr) {
+        std::string dName = std::string(filename->d_name);
+        if (dName == "." || dName == "..") {
+            continue;
+        } else if (filename->d_type == DT_DIR) {
+            dirs.emplace_back(std::string(dirName) + "/" + filename->d_name);
+        } else if (filename->d_type == DT_REG) {
+            files.emplace_back(std::string(dirName) + "/" + filename->d_name);
+        } else {
+            continue;
+        }
+    }
+
+    for (auto d : dirs) {
+        dir = OpenDir(d);
+        while ((filename = readdir(dir)) != nullptr) {
+            std::string dName = std::string(filename->d_name);
+            if (dName == "." || dName == ".." || filename->d_type != DT_REG) {
+                continue;
+            }
+            files.emplace_back(std::string(d) + "/" + filename->d_name);
+        }
+    }
+    std::sort(files.begin(), files.end());
+    for (auto &f : files) {
+        std::cout << "image file: " << f << std::endl;
+    }
+    return files;
+}
+
+int WriteResult(const std::string& imageFile, const std::vector<MSTensor> &outputs) {
+    std::string homePath = "./result_Files";
+    const int INVALID_POINTER = -1;
+    const int ERROR = -2;
+    for (size_t i = 0; i < outputs.size(); ++i) {
+        size_t outputSize;
+        std::shared_ptr<const void> netOutput;
+        netOutput = outputs[i].Data();
+        outputSize = outputs[i].DataSize();
+        int pos = imageFile.rfind('/');
+        std::string fileName(imageFile, pos + 1);
+        fileName.replace(fileName.find('.'), fileName.size() - fileName.find('.'), '_' + std::to_string(i) + ".bin");
+        std::string outFileName = homePath + "/" + fileName;
+        FILE * outputFile = fopen(outFileName.c_str(), "wb");
+        if (outputFile == nullptr) {
+            std::cout << "open result file " << outFileName << " failed" << std::endl;
+            return INVALID_POINTER;
+        }
+        size_t size = fwrite(netOutput.get(), sizeof(char), outputSize, outputFile);
+        if (size != outputSize) {
+            fclose(outputFile);
+            outputFile = nullptr;
+            std::cout << "write result file " << outFileName << " failed, write size[" << size <<
+                "] is smaller than output size[" << outputSize << "], maybe the disk is full." << std::endl;
+            return ERROR;
+        }
+        fclose(outputFile);
+        outputFile = nullptr;
+    }
+    return 0;
+}
+
+mindspore::MSTensor ReadFileToTensor(const std::string &file) {
+  if (file.empty()) {
+    std::cout << "Pointer file is nullptr" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  std::ifstream ifs(file);
+  if (!ifs.good()) {
+    std::cout << "File: " << file << " is not exist" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  if (!ifs.is_open()) {
+    std::cout << "File: " << file << "open failed" << std::endl;
+    return mindspore::MSTensor();
+  }
+
+  ifs.seekg(0, std::ios::end);
+  size_t size = ifs.tellg();
+  mindspore::MSTensor buffer(file, mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, size);
+
+  ifs.seekg(0, std::ios::beg);
+  ifs.read(reinterpret_cast<char *>(buffer.MutableData()), size);
+  ifs.close();
+
+  return buffer;
+}
+
+
+DIR *OpenDir(std::string_view dirName) {
+    if (dirName.empty()) {
+        std::cout << " dirName is null ! " << std::endl;
+        return nullptr;
+    }
+    std::string realPath = RealPath(dirName);
+    struct stat s;
+    lstat(realPath.c_str(), &s);
+    if (!S_ISDIR(s.st_mode)) {
+        std::cout << "dirName is not a valid directory !" << std::endl;
+        return nullptr;
+    }
+    DIR *dir;
+    dir = opendir(realPath.c_str());
+    if (dir == nullptr) {
+        std::cout << "Can not open dir " << dirName << std::endl;
+        return nullptr;
+    }
+    std::cout << "Successfully opened the dir " << dirName << std::endl;
+    return dir;
+}
+
+std::string RealPath(std::string_view path) {
+    char realPathMem[PATH_MAX] = {0};
+    char *realPathRet = nullptr;
+    realPathRet = realpath(path.data(), realPathMem);
+
+    if (realPathRet == nullptr) {
+        std::cout << "File: " << path << " is not exist.";
+        return "";
+    }
+
+    std::string realPath(realPathMem);
+    std::cout << path << " realpath is: " << realPath << std::endl;
+    return realPath;
+}
diff --git a/research/cvtmodel/resnet_ipl/export.py b/research/cvtmodel/resnet_ipl/export.py
new file mode 100644
index 0000000000000000000000000000000000000000..89ce4400043f60b6e61d88635bf874c62a2da972
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/export.py
@@ -0,0 +1,71 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""export checkpoint file into air, onnx, mindir models
+   Suggest run as python export.py --backbone [backbone] --ckpt_path [ckpt_path]
+                                   --file_name [file_name] --file_format [file format]
+"""
+import argparse
+import numpy as np
+import mindspore as ms
+from mindspore import context, Tensor
+from mindspore.train.serialization import export, load_checkpoint, load_param_into_net
+
+parser = argparse.ArgumentParser(description='export checkpoint file for 310 inference')
+parser.add_argument("--backbone", type=str, required=True, default="resnetrs50", help="model backbone")
+parser.add_argument("--ckpt_path", type=str, required=True, help="checkpoint file path")
+parser.add_argument("--file_name", type=str, default="resnest50", help="file name")
+parser.add_argument("--file_format", type=str, default="MINDIR", choices=["MINDIR", "AIR"], help="file format")
+parser.add_argument("--device_target", type=str, default="Ascend", choices=["Ascend", "GPU"], help="device target")
+parser.add_argument("--device_id", type=int, default=0, help="device target")
+args = parser.parse_args()
+
+context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
+
+def model_export():
+    '''export main function'''
+    if args.device_target == "Ascend":
+        context.set_context(device_id=args.device_id)
+
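+    # Input resolution of the exported graph: 224 for resnetrs50, 256 for all other backbones.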
+    image_size = 256
+
+    if args.backbone == "resnetrs50":
+        from src.resnetrs50 import MindSporeModel
+        image_size = 224
+    elif args.backbone == "resnet26t":
+        from src.resnet26t import MindSporeModel
+    elif args.backbone == "resnet101d":
+        from src.resnet101d import MindSporeModel
+    elif args.backbone == "seresnet152d":
+        from src.seresnet152d import MindSporeModel
+    elif args.backbone == "resnetrs200":
+        from src.resnetrs200 import MindSporeModel
+    elif args.backbone == "gernet_l":
+        from src.gernet_l import MindSporeModel
+    elif args.backbone == "resnet51q":
+        from src.resnet51q import MindSporeModel
+    elif args.backbone == "resnest101e":
+        from src.resnest101e import MindSporeModel
+
+    net = MindSporeModel()
+
+    param_dict = load_checkpoint(args.ckpt_path)
+    load_param_into_net(net, param_dict)
+
+    input_arr = Tensor(np.zeros([1, 3, image_size, image_size]), ms.float32)
+    export(net, input_arr, file_name=args.file_name, file_format=args.file_format)
+
+if __name__ == '__main__':
+    model_export()
diff --git a/research/cvtmodel/resnet_ipl/postprocess.py b/research/cvtmodel/resnet_ipl/postprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..28df6bd4463797b19274aa32d5532e0615c7b2c6
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/postprocess.py
@@ -0,0 +1,82 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''post process for 310 inference'''
+import os
+import argparse
+import numpy as np
+
+parser = argparse.ArgumentParser(description='post process for 310 inference')
+parser.add_argument("--dataset", type=str, default="imagenet", help="result file path")
+parser.add_argument("--result_path", type=str, required=True, help="result file path")
+parser.add_argument("--label_file", type=str, required=True, help="label file")
+args = parser.parse_args()
+
+def get_top5_acc(top5_arg, gt_class):
+    sub_count = 0
+    for top5, gt in zip(top5_arg, gt_class):
+        if gt in top5:
+            sub_count += 1
+    return sub_count
+
+def read_label(label_file):
+    '''read the label file into a dict mapping image id to label'''
+    img_label = {}
+    with open(label_file, "r") as f:
+        for line in f.readlines():
+            img_id = line.split(":")[0]
+            label = line.split(":")[1]
+            img_label[img_id] = label
+
+    return img_label
+
+def cal_acc(dataset, result_path, label_file):
+    '''main acc calculation function'''
+    img_label = read_label(label_file)
+
+    img_tot = 0
+    top1_correct = 0
+    top5_correct = 0
+
+    files = os.listdir(result_path)
+    for file in files:
+        full_file_path = os.path.join(result_path, file)
+        if os.path.isfile(full_file_path):
+            result = np.fromfile(full_file_path, dtype=np.float32).reshape(1, 1000)
+            gt_classes = int(img_label[file[:-6]])
+
+            top1_output = np.argmax(result, (-1))
+            top5_output = np.argsort(result)[:, -5:]
+
+            t1_correct = np.equal(top1_output, gt_classes).sum()
+            top1_correct += t1_correct
+            top5_correct += get_top5_acc(top5_output, [gt_classes])
+            img_tot += 1
+
+    results = [[top1_correct], [top5_correct], [img_tot]]
+
+    results = np.array(results)
+    top1_correct = results[0, 0]
+    top5_correct = results[1, 0]
+    img_tot = results[2, 0]
+    acc1 = 100.0 * top1_correct / img_tot
+    acc5 = 100.0 * top5_correct / img_tot
+    print('eval: top1_correct={}, tot={}, acc={:.2f}%'.format(top1_correct, img_tot, acc1))
+    if dataset == 'imagenet':
+        print('top5_correct={}, tot={}, acc={:.2f}%'.format(top5_correct, img_tot, acc5))
+
+if __name__ == "__main__":
+    cal_acc(args.dataset, args.result_path, args.label_file)
diff --git a/research/cvtmodel/resnet_ipl/preprocess.py b/research/cvtmodel/resnet_ipl/preprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef53c220ef47609d17ab0600ff47ca6a743325be
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/preprocess.py
@@ -0,0 +1,47 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""preprocess"""
+import os
+import argparse
+
+parser = argparse.ArgumentParser('preprocess')
+parser.add_argument('--data_path', type=str, default='', help='eval data dir')
+args = parser.parse_args()
+
+def create_label(result_path, dir_path):
+    print("[WARNING] Create imagenet label. Currently only use for Imagenet2012!")
+    text_path = os.path.join(result_path, "imagenet_label.txt")
+    dirs = os.listdir(dir_path)
+    file_list = []
+    for file in dirs:
+        file_list.append(file)
+    file_list = sorted(file_list)
+    total = 0
+    img_label = {}
+    text_file = open(text_path, 'a')
+    for i, file_dir in enumerate(file_list):
+        files = os.listdir(os.path.join(dir_path, file_dir))
+        for f in files:
+            img_label[f.split('.')[0]] = i
+            line = f.split('.')[0] + ":" + str(i)
+            text_file.write(line)
+            text_file.write('\n')
+        total += len(files)
+    text_file.close()
+    print("[INFO] Completed! Total {} data.".format(total))
+
+if __name__ == "__main__":
+    create_label('./preprocess_Result/', args.data_path)
+        
\ No newline at end of file
diff --git a/research/cvtmodel/resnet_ipl/scripts/run_infer_310.sh b/research/cvtmodel/resnet_ipl/scripts/run_infer_310.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d0178e334c585ab821d38a6f74d1e300efde00eb
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/scripts/run_infer_310.sh
@@ -0,0 +1,133 @@
+#!/bin/bash
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+if [[ $# -lt 4 || $# -gt 5 ]]; then 
+    echo "Usage: bash run_infer_310.sh [MINDIR_PATH] [BACKBONE] [DATASET] [DATA_PATH] [DEVICE_ID]
+    DEVICE_ID is optional, it can be set by environment variable device_id, otherwise the value is zero"
+exit 1
+fi
+
+get_real_path(){
+  if [ "${1:0:1}" == "/" ]; then
+    echo "$1"
+  else
+    echo "$(realpath -m $PWD/$1)"
+  fi
+}
+
+model=$(get_real_path $1)
+backbone=$2
+dataset=$3
+data_path=$(get_real_path $4)
+
+device_id=0
+
+if [ $# == 5 ]; then
+    device_id=$5
+fi
+
+echo $model
+echo $dataset
+echo $data_path
+echo $device_id
+
+export ASCEND_HOME=/usr/local/Ascend/
+if [ -d ${ASCEND_HOME}/ascend-toolkit ]; then
+    export PATH=$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/ccec_compiler/bin:$ASCEND_HOME/ascend-toolkit/latest/atc/bin:$PATH
+    export LD_LIBRARY_PATH=/usr/local/lib:$ASCEND_HOME/ascend-toolkit/latest/atc/lib64:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export TBE_IMPL_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp/op_impl/built-in/ai_core/tbe
+    export PYTHONPATH=${TBE_IMPL_PATH}:$ASCEND_HOME/ascend-toolkit/latest/fwkacllib/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/ascend-toolkit/latest/opp
+else
+    export PATH=$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH
+    export LD_LIBRARY_PATH=/usr/local/lib:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:$LD_LIBRARY_PATH
+    export PYTHONPATH=$ASCEND_HOME/atc/python/site-packages:$PYTHONPATH
+    export ASCEND_OPP_PATH=$ASCEND_HOME/opp
+fi
+
+resize=256
+crop=256
+
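+# Per-backbone preprocessing sizes; backbones not listed below (resnet101d, resnest101e) keep the 256/256 defaults.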
+if [ "$backbone" == "resnet26t" ]; then
+    resize=272
+elif [ "$backbone" == "seresnet152d" ]; then
+    resize=320
+elif [ "$backbone" == "resnetrs200" ]; then
+    resize=320
+elif [ "$backbone" == "gernet_l" ]; then
+    resize=292
+elif [ "$backbone" == "resnet51q" ]; then
+    resize=288
+elif [ "$backbone" == "resnetrs50" ]; then
+    crop=224
+fi
+
+function preprocess_data()
+{
+    if [ -d preprocess_Result ]; then
+        rm -rf ./preprocess_Result
+    fi
+    mkdir preprocess_Result
+    python ../preprocess.py --data_path=$data_path
+}
+
+function compile_app()
+{
+    cd ../ascend310_infer || exit
+    if [ -f "Makefile" ]; then
+        make clean
+    fi
+    sh build.sh &> build.log
+
+    if [ $? -ne 0 ]; then
+        echo "compile app code failed"
+        exit 1
+    fi
+    cd - || exit
+}
+
+function infer()
+{
+    if [ -d result_Files ]; then
+        rm -rf ./result_Files
+    fi
+     if [ -d time_Result ]; then
+        rm -rf ./time_Result
+    fi
+    mkdir result_Files
+    mkdir time_Result
+    ../ascend310_infer/out/main --model_path=$model --resize=$resize --backbone=$backbone --crop=$crop \
+    --dataset_path=$data_path --device_id=$device_id &> infer.log
+
+    if [ $? -ne 0 ]; then
+        echo "execute inference failed"
+        exit 1
+    fi
+}
+
+function cal_acc()
+{
+    python ../postprocess.py --dataset=$dataset --label_file=./preprocess_Result/imagenet_label.txt --result_path=result_Files &> acc.log
+    if [ $? -ne 0 ]; then
+        echo "calculate accuracy failed"
+        exit 1
+    fi
+}
+
+preprocess_data
+compile_app
+infer
+cal_acc
diff --git a/research/cvtmodel/resnet_ipl/src/gernet_l.py b/research/cvtmodel/resnet_ipl/src/gernet_l.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbcba39f35ef57cddbe0fbcbe41dc1c22210c657
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/src/gernet_l.py
@@ -0,0 +1,393 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
+class Module2(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode, conv2d_0_group):
+        super(Module2, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=conv2d_0_group,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
+class Module4(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module2_0_conv2d_0_in_channels,
+                 module2_0_conv2d_0_out_channels, module2_0_conv2d_0_kernel_size, module2_0_conv2d_0_stride,
+                 module2_0_conv2d_0_padding, module2_0_conv2d_0_pad_mode, module2_0_conv2d_0_group):
+        super(Module4, self).__init__()
+        self.module2_0 = Module2(conv2d_0_in_channels=module2_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module2_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module2_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module2_0_conv2d_0_stride,
+                                 conv2d_0_padding=module2_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module2_0_conv2d_0_pad_mode,
+                                 conv2d_0_group=module2_0_conv2d_0_group)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module2_0_opt = self.module2_0(x)
+        opt_conv2d_0 = self.conv2d_0(module2_0_opt)
+        return opt_conv2d_0
+
+
+class Module8(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, module2_0_conv2d_0_in_channels, module2_0_conv2d_0_out_channels,
+                 module2_0_conv2d_0_kernel_size, module2_0_conv2d_0_stride, module2_0_conv2d_0_padding,
+                 module2_0_conv2d_0_pad_mode, module2_0_conv2d_0_group, module2_1_conv2d_0_in_channels,
+                 module2_1_conv2d_0_out_channels, module2_1_conv2d_0_kernel_size, module2_1_conv2d_0_stride,
+                 module2_1_conv2d_0_padding, module2_1_conv2d_0_pad_mode, module2_1_conv2d_0_group):
+        super(Module8, self).__init__()
+        self.module2_0 = Module2(conv2d_0_in_channels=module2_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module2_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module2_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module2_0_conv2d_0_stride,
+                                 conv2d_0_padding=module2_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module2_0_conv2d_0_pad_mode,
+                                 conv2d_0_group=module2_0_conv2d_0_group)
+        self.module2_1 = Module2(conv2d_0_in_channels=module2_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module2_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module2_1_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module2_1_conv2d_0_stride,
+                                 conv2d_0_padding=module2_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module2_1_conv2d_0_pad_mode,
+                                 conv2d_0_group=module2_1_conv2d_0_group)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=640,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module2_0_opt = self.module2_0(x)
+        module2_1_opt = self.module2_1(module2_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module2_1_opt)
+        return opt_conv2d_0
+
+
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels, conv2d_2_group,
+                 conv2d_4_in_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=640,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=conv2d_2_group,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
+                                  out_channels=640,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_6 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
+        opt_add_5 = P.Add()(opt_conv2d_4, x)
+        opt_relu_6 = self.relu_6(opt_add_5)
+        return opt_relu_6
+
+
+class Module17(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels, module0_0_conv2d_2_out_channels,
+                 module0_0_conv2d_2_group, module0_0_conv2d_4_in_channels, module0_1_conv2d_0_out_channels,
+                 module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels, module0_1_conv2d_2_group,
+                 module0_1_conv2d_4_in_channels, module0_2_conv2d_0_out_channels, module0_2_conv2d_2_in_channels,
+                 module0_2_conv2d_2_out_channels, module0_2_conv2d_2_group, module0_2_conv2d_4_in_channels,
+                 module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels, module0_3_conv2d_2_out_channels,
+                 module0_3_conv2d_2_group, module0_3_conv2d_4_in_channels):
+        super(Module17, self).__init__()
+        self.module0_0 = Module0(conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_group=module0_0_conv2d_2_group,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels)
+        self.module0_1 = Module0(conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_group=module0_1_conv2d_2_group,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels)
+        self.module0_2 = Module0(conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
+                                 conv2d_2_group=module0_2_conv2d_2_group,
+                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels)
+        self.module0_3 = Module0(conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
+                                 conv2d_2_group=module0_3_conv2d_2_group,
+                                 conv2d_4_in_channels=module0_3_conv2d_4_in_channels)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        module0_1_opt = self.module0_1(module0_0_opt)
+        module0_2_opt = self.module0_2(module0_1_opt)
+        module0_3_opt = self.module0_3(module0_2_opt)
+        return module0_3_opt
+
+
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=32,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module4_0 = Module4(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=128,
+                                 module2_0_conv2d_0_in_channels=32,
+                                 module2_0_conv2d_0_out_channels=128,
+                                 module2_0_conv2d_0_kernel_size=(3, 3),
+                                 module2_0_conv2d_0_stride=(2, 2),
+                                 module2_0_conv2d_0_padding=(1, 1, 1, 1),
+                                 module2_0_conv2d_0_pad_mode="pad",
+                                 module2_0_conv2d_0_group=1)
+        self.conv2d_3 = nn.Conv2d(in_channels=32,
+                                  out_channels=128,
+                                  kernel_size=(1, 1),
+                                  stride=(2, 2),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_7 = nn.ReLU()
+        self.module4_1 = Module4(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=192,
+                                 module2_0_conv2d_0_in_channels=128,
+                                 module2_0_conv2d_0_out_channels=192,
+                                 module2_0_conv2d_0_kernel_size=(3, 3),
+                                 module2_0_conv2d_0_stride=(2, 2),
+                                 module2_0_conv2d_0_padding=(1, 1, 1, 1),
+                                 module2_0_conv2d_0_pad_mode="pad",
+                                 module2_0_conv2d_0_group=1)
+        self.conv2d_9 = nn.Conv2d(in_channels=128,
+                                  out_channels=192,
+                                  kernel_size=(1, 1),
+                                  stride=(2, 2),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_13 = nn.ReLU()
+        self.module4_2 = Module4(conv2d_0_in_channels=192,
+                                 conv2d_0_out_channels=192,
+                                 module2_0_conv2d_0_in_channels=192,
+                                 module2_0_conv2d_0_out_channels=192,
+                                 module2_0_conv2d_0_kernel_size=(3, 3),
+                                 module2_0_conv2d_0_stride=(1, 1),
+                                 module2_0_conv2d_0_padding=(1, 1, 1, 1),
+                                 module2_0_conv2d_0_pad_mode="pad",
+                                 module2_0_conv2d_0_group=1)
+        self.relu_18 = nn.ReLU()
+        self.module8_0 = Module8(conv2d_0_in_channels=160,
+                                 module2_0_conv2d_0_in_channels=192,
+                                 module2_0_conv2d_0_out_channels=160,
+                                 module2_0_conv2d_0_kernel_size=(1, 1),
+                                 module2_0_conv2d_0_stride=(1, 1),
+                                 module2_0_conv2d_0_padding=0,
+                                 module2_0_conv2d_0_pad_mode="valid",
+                                 module2_0_conv2d_0_group=1,
+                                 module2_1_conv2d_0_in_channels=160,
+                                 module2_1_conv2d_0_out_channels=160,
+                                 module2_1_conv2d_0_kernel_size=(3, 3),
+                                 module2_1_conv2d_0_stride=(2, 2),
+                                 module2_1_conv2d_0_padding=(1, 1, 1, 1),
+                                 module2_1_conv2d_0_pad_mode="pad",
+                                 module2_1_conv2d_0_group=1)
+        self.conv2d_20 = nn.Conv2d(in_channels=192,
+                                   out_channels=640,
+                                   kernel_size=(1, 1),
+                                   stride=(2, 2),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_26 = nn.ReLU()
+        self.module17_0 = Module17(module0_0_conv2d_0_out_channels=160,
+                                   module0_0_conv2d_2_in_channels=160,
+                                   module0_0_conv2d_2_out_channels=160,
+                                   module0_0_conv2d_2_group=1,
+                                   module0_0_conv2d_4_in_channels=160,
+                                   module0_1_conv2d_0_out_channels=160,
+                                   module0_1_conv2d_2_in_channels=160,
+                                   module0_1_conv2d_2_out_channels=160,
+                                   module0_1_conv2d_2_group=1,
+                                   module0_1_conv2d_4_in_channels=160,
+                                   module0_2_conv2d_0_out_channels=160,
+                                   module0_2_conv2d_2_in_channels=160,
+                                   module0_2_conv2d_2_out_channels=160,
+                                   module0_2_conv2d_2_group=1,
+                                   module0_2_conv2d_4_in_channels=160,
+                                   module0_3_conv2d_0_out_channels=160,
+                                   module0_3_conv2d_2_in_channels=160,
+                                   module0_3_conv2d_2_out_channels=160,
+                                   module0_3_conv2d_2_group=1,
+                                   module0_3_conv2d_4_in_channels=160)
+        self.module0_0 = Module0(conv2d_0_out_channels=160,
+                                 conv2d_2_in_channels=160,
+                                 conv2d_2_out_channels=160,
+                                 conv2d_2_group=1,
+                                 conv2d_4_in_channels=160)
+        self.module8_1 = Module8(conv2d_0_in_channels=1920,
+                                 module2_0_conv2d_0_in_channels=640,
+                                 module2_0_conv2d_0_out_channels=1920,
+                                 module2_0_conv2d_0_kernel_size=(1, 1),
+                                 module2_0_conv2d_0_stride=(1, 1),
+                                 module2_0_conv2d_0_padding=0,
+                                 module2_0_conv2d_0_pad_mode="valid",
+                                 module2_0_conv2d_0_group=1,
+                                 module2_1_conv2d_0_in_channels=1920,
+                                 module2_1_conv2d_0_out_channels=1920,
+                                 module2_1_conv2d_0_kernel_size=(3, 3),
+                                 module2_1_conv2d_0_stride=(2, 2),
+                                 module2_1_conv2d_0_padding=(1, 1, 1, 1),
+                                 module2_1_conv2d_0_pad_mode="pad",
+                                 module2_1_conv2d_0_group=1920)
+        self.conv2d_63 = nn.Conv2d(in_channels=640,
+                                   out_channels=640,
+                                   kernel_size=(1, 1),
+                                   stride=(2, 2),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_69 = nn.ReLU()
+        self.module17_1 = Module17(module0_0_conv2d_0_out_channels=1920,
+                                   module0_0_conv2d_2_in_channels=1920,
+                                   module0_0_conv2d_2_out_channels=1920,
+                                   module0_0_conv2d_2_group=1920,
+                                   module0_0_conv2d_4_in_channels=1920,
+                                   module0_1_conv2d_0_out_channels=1920,
+                                   module0_1_conv2d_2_in_channels=1920,
+                                   module0_1_conv2d_2_out_channels=1920,
+                                   module0_1_conv2d_2_group=1920,
+                                   module0_1_conv2d_4_in_channels=1920,
+                                   module0_2_conv2d_0_out_channels=1920,
+                                   module0_2_conv2d_2_in_channels=1920,
+                                   module0_2_conv2d_2_out_channels=1920,
+                                   module0_2_conv2d_2_group=1920,
+                                   module0_2_conv2d_4_in_channels=1920,
+                                   module0_3_conv2d_0_out_channels=1920,
+                                   module0_3_conv2d_2_in_channels=1920,
+                                   module0_3_conv2d_2_out_channels=1920,
+                                   module0_3_conv2d_2_group=1920,
+                                   module0_3_conv2d_4_in_channels=1920)
+        self.module17_2 = Module17(module0_0_conv2d_0_out_channels=1920,
+                                   module0_0_conv2d_2_in_channels=1920,
+                                   module0_0_conv2d_2_out_channels=1920,
+                                   module0_0_conv2d_2_group=1920,
+                                   module0_0_conv2d_4_in_channels=1920,
+                                   module0_1_conv2d_0_out_channels=1920,
+                                   module0_1_conv2d_2_in_channels=1920,
+                                   module0_1_conv2d_2_out_channels=1920,
+                                   module0_1_conv2d_2_group=1920,
+                                   module0_1_conv2d_4_in_channels=1920,
+                                   module0_2_conv2d_0_out_channels=1920,
+                                   module0_2_conv2d_2_in_channels=1920,
+                                   module0_2_conv2d_2_out_channels=1920,
+                                   module0_2_conv2d_2_group=1920,
+                                   module0_2_conv2d_4_in_channels=1920,
+                                   module0_3_conv2d_0_out_channels=1920,
+                                   module0_3_conv2d_2_in_channels=1920,
+                                   module0_3_conv2d_2_out_channels=1920,
+                                   module0_3_conv2d_2_group=1920,
+                                   module0_3_conv2d_4_in_channels=1920)
+        self.module2_0 = Module2(conv2d_0_in_channels=640,
+                                 conv2d_0_out_channels=2560,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid",
+                                 conv2d_0_group=1)
+        self.avgpool2d_128 = nn.AvgPool2d(kernel_size=(8, 8))
+        self.flatten_129 = nn.Flatten()
+        self.dense_130 = nn.Dense(in_channels=2560, out_channels=1000, has_bias=True)
+
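+    # Forward pass wired up by MindConverter: a stem convolution, residual blocks built from the
+    # grouped (depthwise) convolution modules above, a 1x1 expansion to 2560 channels, 8x8 average
+    # pooling, flatten, and a 1000-class dense classifier.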
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module4_0_opt = self.module4_0(opt_relu_1)
+        opt_conv2d_3 = self.conv2d_3(opt_relu_1)
+        opt_add_6 = P.Add()(module4_0_opt, opt_conv2d_3)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        module4_1_opt = self.module4_1(opt_relu_7)
+        opt_conv2d_9 = self.conv2d_9(opt_relu_7)
+        opt_add_12 = P.Add()(module4_1_opt, opt_conv2d_9)
+        opt_relu_13 = self.relu_13(opt_add_12)
+        module4_2_opt = self.module4_2(opt_relu_13)
+        opt_add_17 = P.Add()(module4_2_opt, opt_relu_13)
+        opt_relu_18 = self.relu_18(opt_add_17)
+        module8_0_opt = self.module8_0(opt_relu_18)
+        opt_conv2d_20 = self.conv2d_20(opt_relu_18)
+        opt_add_25 = P.Add()(module8_0_opt, opt_conv2d_20)
+        opt_relu_26 = self.relu_26(opt_add_25)
+        module17_0_opt = self.module17_0(opt_relu_26)
+        module0_0_opt = self.module0_0(module17_0_opt)
+        module8_1_opt = self.module8_1(module0_0_opt)
+        opt_conv2d_63 = self.conv2d_63(module0_0_opt)
+        opt_add_68 = P.Add()(module8_1_opt, opt_conv2d_63)
+        opt_relu_69 = self.relu_69(opt_add_68)
+        module17_1_opt = self.module17_1(opt_relu_69)
+        module17_2_opt = self.module17_2(module17_1_opt)
+        module2_0_opt = self.module2_0(module17_2_opt)
+        opt_avgpool2d_128 = self.avgpool2d_128(module2_0_opt)
+        opt_flatten_129 = self.flatten_129(opt_avgpool2d_128)
+        opt_dense_130 = self.dense_130(opt_flatten_129)
+        return opt_dense_130
diff --git a/research/cvtmodel/resnet_ipl/src/resnest101e.py b/research/cvtmodel/resnet_ipl/src/resnest101e.py
new file mode 100644
index 0000000000000000000000000000000000000000..62245e4e66cc936197bf24a3d4ca83caf4120b1f
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/src/resnest101e.py
@@ -0,0 +1,1178 @@
+import mindspore.ops as P
+from mindspore import nn
+
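+# Split-attention block (radix 2) of ResNeSt: a 1x1 reduction, a grouped 3x3 convolution producing
+# radix*channels feature maps, global average pooling through two 1x1 convolutions to compute
+# per-radix attention weights (softmax over the radix axis), and a multiply that re-weights the
+# radix-split features. The caller sums the result over the radix dimension.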
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 reshape_4_shape, conv2d_7_in_channels, conv2d_7_out_channels, conv2d_9_in_channels,
+                 conv2d_9_out_channels, reshape_10_shape, reshape_14_shape, reshape_15_shape, reshape_16_shape):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=2,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.reshape_4 = P.Reshape()
+        self.reshape_4_shape = tuple(reshape_4_shape)
+        self.reducesum_5 = P.ReduceSum(keep_dims=False)
+        self.reducesum_5_axis = 1
+        self.reducemean_6 = P.ReduceMean(keep_dims=True)
+        self.reducemean_6_axis = (2, 3)
+        self.conv2d_7 = nn.Conv2d(in_channels=conv2d_7_in_channels,
+                                  out_channels=conv2d_7_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_8 = nn.ReLU()
+        self.conv2d_9 = nn.Conv2d(in_channels=conv2d_9_in_channels,
+                                  out_channels=conv2d_9_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.reshape_10 = P.Reshape()
+        self.reshape_10_shape = tuple(reshape_10_shape)
+        self.transpose_11 = P.Transpose()
+        self.softmax_12 = nn.Softmax(axis=3)
+        self.transpose_13 = P.Transpose()
+        self.reshape_14 = P.Reshape()
+        self.reshape_14_shape = tuple(reshape_14_shape)
+        self.reshape_15 = P.Reshape()
+        self.reshape_15_shape = tuple(reshape_15_shape)
+        self.reshape_16 = P.Reshape()
+        self.reshape_16_shape = tuple(reshape_16_shape)
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_reshape_4 = self.reshape_4(opt_relu_3, self.reshape_4_shape)
+        opt_reducesum_5 = self.reducesum_5(opt_reshape_4, self.reducesum_5_axis)
+        opt_reducemean_6 = self.reducemean_6(opt_reducesum_5, self.reducemean_6_axis)
+        opt_conv2d_7 = self.conv2d_7(opt_reducemean_6)
+        opt_relu_8 = self.relu_8(opt_conv2d_7)
+        opt_conv2d_9 = self.conv2d_9(opt_relu_8)
+        opt_reshape_10 = self.reshape_10(opt_conv2d_9, self.reshape_10_shape)
+        opt_transpose_11 = self.transpose_11(opt_reshape_10, (0, 3, 1, 2))
+        opt_softmax_12 = self.softmax_12(opt_transpose_11)
+        opt_transpose_13 = self.transpose_13(opt_softmax_12, (0, 3, 2, 1))
+        opt_reshape_14 = self.reshape_14(opt_transpose_13, self.reshape_14_shape)
+        opt_reshape_15 = self.reshape_15(opt_reshape_14, self.reshape_15_shape)
+        opt_reshape_16 = self.reshape_16(opt_reshape_15, self.reshape_16_shape)
+        opt_mul_17 = P.Mul()(opt_reshape_4, opt_reshape_16)
+        return opt_mul_17
+
+
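+# Split-attention branch of the first 256-channel bottleneck: the radix-weighted features from
+# Module0 are summed over the radix dimension and expanded to 256 channels with a 1x1 convolution.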
+class Module4(nn.Cell):
+    def __init__(self):
+        super(Module4, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_2_in_channels=64,
+                                 conv2d_2_out_channels=128,
+                                 reshape_4_shape=[1, 2, 64, 64, 64],
+                                 conv2d_7_in_channels=64,
+                                 conv2d_7_out_channels=32,
+                                 conv2d_9_in_channels=32,
+                                 conv2d_9_out_channels=128,
+                                 reshape_10_shape=[1, 1, 2, 64],
+                                 reshape_14_shape=[1, 128],
+                                 reshape_15_shape=[1, 128, 1, 1],
+                                 reshape_16_shape=[1, 2, 64, 1, 1])
+        self.reducesum_0 = P.ReduceSum(keep_dims=False)
+        self.reducesum_0_axis = 1
+        self.conv2d_1 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_reducesum_0 = self.reducesum_0(module0_0_opt, self.reducesum_0_axis)
+        opt_conv2d_1 = self.conv2d_1(opt_reducesum_0)
+        return opt_conv2d_1
+
+
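+# Two consecutive residual bottlenecks: each runs Module0, sums over the radix dimension, expands
+# back to the block's output width with a 1x1 convolution, adds the identity shortcut, and applies ReLU.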
+class Module16(nn.Cell):
+    def __init__(self, conv2d_1_in_channels, conv2d_1_out_channels, conv2d_5_in_channels, conv2d_5_out_channels,
+                 module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_reshape_4_shape, module0_0_conv2d_7_in_channels,
+                 module0_0_conv2d_7_out_channels, module0_0_conv2d_9_in_channels, module0_0_conv2d_9_out_channels,
+                 module0_0_reshape_10_shape, module0_0_reshape_14_shape, module0_0_reshape_15_shape,
+                 module0_0_reshape_16_shape, module0_1_conv2d_0_in_channels, module0_1_conv2d_0_out_channels,
+                 module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels, module0_1_reshape_4_shape,
+                 module0_1_conv2d_7_in_channels, module0_1_conv2d_7_out_channels, module0_1_conv2d_9_in_channels,
+                 module0_1_conv2d_9_out_channels, module0_1_reshape_10_shape, module0_1_reshape_14_shape,
+                 module0_1_reshape_15_shape, module0_1_reshape_16_shape):
+        super(Module16, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_0_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_0_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_0_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_0_reshape_10_shape,
+                                 reshape_14_shape=module0_0_reshape_14_shape,
+                                 reshape_15_shape=module0_0_reshape_15_shape,
+                                 reshape_16_shape=module0_0_reshape_16_shape)
+        self.reducesum_0 = P.ReduceSum(keep_dims=False)
+        self.reducesum_0_axis = 1
+        self.conv2d_1 = nn.Conv2d(in_channels=conv2d_1_in_channels,
+                                  out_channels=conv2d_1_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_1_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_1_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_1_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_1_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_1_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_1_reshape_10_shape,
+                                 reshape_14_shape=module0_1_reshape_14_shape,
+                                 reshape_15_shape=module0_1_reshape_15_shape,
+                                 reshape_16_shape=module0_1_reshape_16_shape)
+        self.reducesum_4 = P.ReduceSum(keep_dims=False)
+        self.reducesum_4_axis = 1
+        self.conv2d_5 = nn.Conv2d(in_channels=conv2d_5_in_channels,
+                                  out_channels=conv2d_5_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_7 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_reducesum_0 = self.reducesum_0(module0_0_opt, self.reducesum_0_axis)
+        opt_conv2d_1 = self.conv2d_1(opt_reducesum_0)
+        opt_add_2 = P.Add()(opt_conv2d_1, x)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_1_opt = self.module0_1(opt_relu_3)
+        opt_reducesum_4 = self.reducesum_4(module0_1_opt, self.reducesum_4_axis)
+        opt_conv2d_5 = self.conv2d_5(opt_reducesum_4)
+        opt_add_6 = P.Add()(opt_conv2d_5, opt_relu_3)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        return opt_relu_7
+
+
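+# Downsampling helper: a stride-2 average pool followed by a 1x1 convolution, used on both the main
+# and shortcut paths when the spatial resolution is halved (the preceding Pad is a no-op).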
+class Module10(nn.Cell):
+    def __init__(self, avgpool2d_0_kernel_size, conv2d_1_in_channels, conv2d_1_out_channels):
+        super(Module10, self).__init__()
+        self.pad_avgpool2d_0 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
+        self.avgpool2d_0 = nn.AvgPool2d(kernel_size=avgpool2d_0_kernel_size, stride=(2, 2))
+        self.conv2d_1 = nn.Conv2d(in_channels=conv2d_1_in_channels,
+                                  out_channels=conv2d_1_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        opt_avgpool2d_0 = self.pad_avgpool2d_0(x)
+        opt_avgpool2d_0 = self.avgpool2d_0(opt_avgpool2d_0)
+        opt_conv2d_1 = self.conv2d_1(opt_avgpool2d_0)
+        return opt_conv2d_1
+
+
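+# Main branch of a strided (transition) bottleneck: split-attention features are summed over the
+# radix dimension, zero-padded, average-pooled with stride 2, and projected to the new channel width.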
+class Module15(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_reshape_4_shape, module0_0_conv2d_7_in_channels,
+                 module0_0_conv2d_7_out_channels, module0_0_conv2d_9_in_channels, module0_0_conv2d_9_out_channels,
+                 module0_0_reshape_10_shape, module0_0_reshape_14_shape, module0_0_reshape_15_shape,
+                 module0_0_reshape_16_shape, module10_0_avgpool2d_0_kernel_size, module10_0_conv2d_1_in_channels,
+                 module10_0_conv2d_1_out_channels):
+        super(Module15, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_0_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_0_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_0_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_0_reshape_10_shape,
+                                 reshape_14_shape=module0_0_reshape_14_shape,
+                                 reshape_15_shape=module0_0_reshape_15_shape,
+                                 reshape_16_shape=module0_0_reshape_16_shape)
+        self.reducesum_0 = P.ReduceSum(keep_dims=False)
+        self.reducesum_0_axis = 1
+        self.pad_1 = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1)), mode="CONSTANT")
+        self.module10_0 = Module10(avgpool2d_0_kernel_size=module10_0_avgpool2d_0_kernel_size,
+                                   conv2d_1_in_channels=module10_0_conv2d_1_in_channels,
+                                   conv2d_1_out_channels=module10_0_conv2d_1_out_channels)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_reducesum_0 = self.reducesum_0(module0_0_opt, self.reducesum_0_axis)
+        opt_pad_1 = self.pad_1(opt_reducesum_0)
+        module10_0_opt = self.module10_0(opt_pad_1)
+        return module10_0_opt
+
+
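+# A single residual split-attention bottleneck with the fixed 512-channel configuration of the
+# second stage.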
+class Module6(nn.Cell):
+    def __init__(self):
+        super(Module6, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=256,
+                                 reshape_4_shape=[1, 2, 128, 32, 32],
+                                 conv2d_7_in_channels=128,
+                                 conv2d_7_out_channels=64,
+                                 conv2d_9_in_channels=64,
+                                 conv2d_9_out_channels=256,
+                                 reshape_10_shape=[1, 1, 2, 128],
+                                 reshape_14_shape=[1, 256],
+                                 reshape_15_shape=[1, 256, 1, 1],
+                                 reshape_16_shape=[1, 2, 128, 1, 1])
+        self.reducesum_0 = P.ReduceSum(keep_dims=False)
+        self.reducesum_0_axis = 1
+        self.conv2d_1 = nn.Conv2d(in_channels=128,
+                                  out_channels=512,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_reducesum_0 = self.reducesum_0(module0_0_opt, self.reducesum_0_axis)
+        opt_conv2d_1 = self.conv2d_1(opt_reducesum_0)
+        opt_add_2 = P.Add()(opt_conv2d_1, x)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        return opt_relu_3
+
+
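+# A chain of eight residual split-attention bottlenecks, each expanding to 1024 channels with a
+# 1x1 convolution; the blocks share the same structure and differ only in their Module0 parameters.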
+class Module44(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_reshape_4_shape, module0_0_conv2d_7_in_channels,
+                 module0_0_conv2d_7_out_channels, module0_0_conv2d_9_in_channels, module0_0_conv2d_9_out_channels,
+                 module0_0_reshape_10_shape, module0_0_reshape_14_shape, module0_0_reshape_15_shape,
+                 module0_0_reshape_16_shape, module0_1_conv2d_0_in_channels, module0_1_conv2d_0_out_channels,
+                 module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels, module0_1_reshape_4_shape,
+                 module0_1_conv2d_7_in_channels, module0_1_conv2d_7_out_channels, module0_1_conv2d_9_in_channels,
+                 module0_1_conv2d_9_out_channels, module0_1_reshape_10_shape, module0_1_reshape_14_shape,
+                 module0_1_reshape_15_shape, module0_1_reshape_16_shape, module0_2_conv2d_0_in_channels,
+                 module0_2_conv2d_0_out_channels, module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels,
+                 module0_2_reshape_4_shape, module0_2_conv2d_7_in_channels, module0_2_conv2d_7_out_channels,
+                 module0_2_conv2d_9_in_channels, module0_2_conv2d_9_out_channels, module0_2_reshape_10_shape,
+                 module0_2_reshape_14_shape, module0_2_reshape_15_shape, module0_2_reshape_16_shape,
+                 module0_3_conv2d_0_in_channels, module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels,
+                 module0_3_conv2d_2_out_channels, module0_3_reshape_4_shape, module0_3_conv2d_7_in_channels,
+                 module0_3_conv2d_7_out_channels, module0_3_conv2d_9_in_channels, module0_3_conv2d_9_out_channels,
+                 module0_3_reshape_10_shape, module0_3_reshape_14_shape, module0_3_reshape_15_shape,
+                 module0_3_reshape_16_shape, module0_4_conv2d_0_in_channels, module0_4_conv2d_0_out_channels,
+                 module0_4_conv2d_2_in_channels, module0_4_conv2d_2_out_channels, module0_4_reshape_4_shape,
+                 module0_4_conv2d_7_in_channels, module0_4_conv2d_7_out_channels, module0_4_conv2d_9_in_channels,
+                 module0_4_conv2d_9_out_channels, module0_4_reshape_10_shape, module0_4_reshape_14_shape,
+                 module0_4_reshape_15_shape, module0_4_reshape_16_shape, module0_5_conv2d_0_in_channels,
+                 module0_5_conv2d_0_out_channels, module0_5_conv2d_2_in_channels, module0_5_conv2d_2_out_channels,
+                 module0_5_reshape_4_shape, module0_5_conv2d_7_in_channels, module0_5_conv2d_7_out_channels,
+                 module0_5_conv2d_9_in_channels, module0_5_conv2d_9_out_channels, module0_5_reshape_10_shape,
+                 module0_5_reshape_14_shape, module0_5_reshape_15_shape, module0_5_reshape_16_shape,
+                 module0_6_conv2d_0_in_channels, module0_6_conv2d_0_out_channels, module0_6_conv2d_2_in_channels,
+                 module0_6_conv2d_2_out_channels, module0_6_reshape_4_shape, module0_6_conv2d_7_in_channels,
+                 module0_6_conv2d_7_out_channels, module0_6_conv2d_9_in_channels, module0_6_conv2d_9_out_channels,
+                 module0_6_reshape_10_shape, module0_6_reshape_14_shape, module0_6_reshape_15_shape,
+                 module0_6_reshape_16_shape, module0_7_conv2d_0_in_channels, module0_7_conv2d_0_out_channels,
+                 module0_7_conv2d_2_in_channels, module0_7_conv2d_2_out_channels, module0_7_reshape_4_shape,
+                 module0_7_conv2d_7_in_channels, module0_7_conv2d_7_out_channels, module0_7_conv2d_9_in_channels,
+                 module0_7_conv2d_9_out_channels, module0_7_reshape_10_shape, module0_7_reshape_14_shape,
+                 module0_7_reshape_15_shape, module0_7_reshape_16_shape):
+        super(Module44, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_0_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_0_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_0_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_0_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_0_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_0_reshape_10_shape,
+                                 reshape_14_shape=module0_0_reshape_14_shape,
+                                 reshape_15_shape=module0_0_reshape_15_shape,
+                                 reshape_16_shape=module0_0_reshape_16_shape)
+        self.reducesum_0 = P.ReduceSum(keep_dims=False)
+        self.reducesum_0_axis = 1
+        self.conv2d_1 = nn.Conv2d(in_channels=256,
+                                  out_channels=1024,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_1_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_1_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_1_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_1_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_1_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_1_reshape_10_shape,
+                                 reshape_14_shape=module0_1_reshape_14_shape,
+                                 reshape_15_shape=module0_1_reshape_15_shape,
+                                 reshape_16_shape=module0_1_reshape_16_shape)
+        self.reducesum_4 = P.ReduceSum(keep_dims=False)
+        self.reducesum_4_axis = 1
+        self.conv2d_5 = nn.Conv2d(in_channels=256,
+                                  out_channels=1024,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_7 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_2_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_2_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_2_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_2_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_2_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_2_reshape_10_shape,
+                                 reshape_14_shape=module0_2_reshape_14_shape,
+                                 reshape_15_shape=module0_2_reshape_15_shape,
+                                 reshape_16_shape=module0_2_reshape_16_shape)
+        self.reducesum_8 = P.ReduceSum(keep_dims=False)
+        self.reducesum_8_axis = 1
+        self.conv2d_9 = nn.Conv2d(in_channels=256,
+                                  out_channels=1024,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_11 = nn.ReLU()
+        self.module0_3 = Module0(conv2d_0_in_channels=module0_3_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_3_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_3_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_3_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_3_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_3_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_3_reshape_10_shape,
+                                 reshape_14_shape=module0_3_reshape_14_shape,
+                                 reshape_15_shape=module0_3_reshape_15_shape,
+                                 reshape_16_shape=module0_3_reshape_16_shape)
+        self.reducesum_12 = P.ReduceSum(keep_dims=False)
+        self.reducesum_12_axis = 1
+        self.conv2d_13 = nn.Conv2d(in_channels=256,
+                                   out_channels=1024,
+                                   kernel_size=(1, 1),
+                                   stride=(1, 1),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_15 = nn.ReLU()
+        self.module0_4 = Module0(conv2d_0_in_channels=module0_4_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_4_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_4_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_4_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_4_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_4_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_4_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_4_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_4_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_4_reshape_10_shape,
+                                 reshape_14_shape=module0_4_reshape_14_shape,
+                                 reshape_15_shape=module0_4_reshape_15_shape,
+                                 reshape_16_shape=module0_4_reshape_16_shape)
+        self.reducesum_16 = P.ReduceSum(keep_dims=False)
+        self.reducesum_16_axis = 1
+        self.conv2d_17 = nn.Conv2d(in_channels=256,
+                                   out_channels=1024,
+                                   kernel_size=(1, 1),
+                                   stride=(1, 1),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_19 = nn.ReLU()
+        self.module0_5 = Module0(conv2d_0_in_channels=module0_5_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_5_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_5_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_5_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_5_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_5_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_5_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_5_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_5_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_5_reshape_10_shape,
+                                 reshape_14_shape=module0_5_reshape_14_shape,
+                                 reshape_15_shape=module0_5_reshape_15_shape,
+                                 reshape_16_shape=module0_5_reshape_16_shape)
+        self.reducesum_20 = P.ReduceSum(keep_dims=False)
+        self.reducesum_20_axis = 1
+        self.conv2d_21 = nn.Conv2d(in_channels=256,
+                                   out_channels=1024,
+                                   kernel_size=(1, 1),
+                                   stride=(1, 1),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_23 = nn.ReLU()
+        self.module0_6 = Module0(conv2d_0_in_channels=module0_6_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_6_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_6_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_6_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_6_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_6_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_6_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_6_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_6_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_6_reshape_10_shape,
+                                 reshape_14_shape=module0_6_reshape_14_shape,
+                                 reshape_15_shape=module0_6_reshape_15_shape,
+                                 reshape_16_shape=module0_6_reshape_16_shape)
+        self.reducesum_24 = P.ReduceSum(keep_dims=False)
+        self.reducesum_24_axis = 1
+        self.conv2d_25 = nn.Conv2d(in_channels=256,
+                                   out_channels=1024,
+                                   kernel_size=(1, 1),
+                                   stride=(1, 1),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_27 = nn.ReLU()
+        self.module0_7 = Module0(conv2d_0_in_channels=module0_7_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_7_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_7_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_7_conv2d_2_out_channels,
+                                 reshape_4_shape=module0_7_reshape_4_shape,
+                                 conv2d_7_in_channels=module0_7_conv2d_7_in_channels,
+                                 conv2d_7_out_channels=module0_7_conv2d_7_out_channels,
+                                 conv2d_9_in_channels=module0_7_conv2d_9_in_channels,
+                                 conv2d_9_out_channels=module0_7_conv2d_9_out_channels,
+                                 reshape_10_shape=module0_7_reshape_10_shape,
+                                 reshape_14_shape=module0_7_reshape_14_shape,
+                                 reshape_15_shape=module0_7_reshape_15_shape,
+                                 reshape_16_shape=module0_7_reshape_16_shape)
+        self.reducesum_28 = P.ReduceSum(keep_dims=False)
+        self.reducesum_28_axis = 1
+        self.conv2d_29 = nn.Conv2d(in_channels=256,
+                                   out_channels=1024,
+                                   kernel_size=(1, 1),
+                                   stride=(1, 1),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_31 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_reducesum_0 = self.reducesum_0(module0_0_opt, self.reducesum_0_axis)
+        opt_conv2d_1 = self.conv2d_1(opt_reducesum_0)
+        opt_add_2 = P.Add()(opt_conv2d_1, x)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_1_opt = self.module0_1(opt_relu_3)
+        opt_reducesum_4 = self.reducesum_4(module0_1_opt, self.reducesum_4_axis)
+        opt_conv2d_5 = self.conv2d_5(opt_reducesum_4)
+        opt_add_6 = P.Add()(opt_conv2d_5, opt_relu_3)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        module0_2_opt = self.module0_2(opt_relu_7)
+        opt_reducesum_8 = self.reducesum_8(module0_2_opt, self.reducesum_8_axis)
+        opt_conv2d_9 = self.conv2d_9(opt_reducesum_8)
+        opt_add_10 = P.Add()(opt_conv2d_9, opt_relu_7)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module0_3_opt = self.module0_3(opt_relu_11)
+        opt_reducesum_12 = self.reducesum_12(module0_3_opt, self.reducesum_12_axis)
+        opt_conv2d_13 = self.conv2d_13(opt_reducesum_12)
+        opt_add_14 = P.Add()(opt_conv2d_13, opt_relu_11)
+        opt_relu_15 = self.relu_15(opt_add_14)
+        module0_4_opt = self.module0_4(opt_relu_15)
+        opt_reducesum_16 = self.reducesum_16(module0_4_opt, self.reducesum_16_axis)
+        opt_conv2d_17 = self.conv2d_17(opt_reducesum_16)
+        opt_add_18 = P.Add()(opt_conv2d_17, opt_relu_15)
+        opt_relu_19 = self.relu_19(opt_add_18)
+        module0_5_opt = self.module0_5(opt_relu_19)
+        opt_reducesum_20 = self.reducesum_20(module0_5_opt, self.reducesum_20_axis)
+        opt_conv2d_21 = self.conv2d_21(opt_reducesum_20)
+        opt_add_22 = P.Add()(opt_conv2d_21, opt_relu_19)
+        opt_relu_23 = self.relu_23(opt_add_22)
+        module0_6_opt = self.module0_6(opt_relu_23)
+        opt_reducesum_24 = self.reducesum_24(module0_6_opt, self.reducesum_24_axis)
+        opt_conv2d_25 = self.conv2d_25(opt_reducesum_24)
+        opt_add_26 = P.Add()(opt_conv2d_25, opt_relu_23)
+        opt_relu_27 = self.relu_27(opt_add_26)
+        module0_7_opt = self.module0_7(opt_relu_27)
+        opt_reducesum_28 = self.reducesum_28(module0_7_opt, self.reducesum_28_axis)
+        opt_conv2d_29 = self.conv2d_29(opt_reducesum_28)
+        opt_add_30 = P.Add()(opt_conv2d_29, opt_relu_27)
+        opt_relu_31 = self.relu_31(opt_add_30)
+        return opt_relu_31
+
+
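+# A chain of four residual split-attention bottlenecks with a fixed 1024-channel configuration.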
+class Module43(nn.Cell):
+    def __init__(self):
+        super(Module43, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=512,
+                                 reshape_4_shape=[1, 2, 256, 16, 16],
+                                 conv2d_7_in_channels=256,
+                                 conv2d_7_out_channels=128,
+                                 conv2d_9_in_channels=128,
+                                 conv2d_9_out_channels=512,
+                                 reshape_10_shape=[1, 1, 2, 256],
+                                 reshape_14_shape=[1, 512],
+                                 reshape_15_shape=[1, 512, 1, 1],
+                                 reshape_16_shape=[1, 2, 256, 1, 1])
+        self.reducesum_0 = P.ReduceSum(keep_dims=False)
+        self.reducesum_0_axis = 1
+        self.conv2d_1 = nn.Conv2d(in_channels=256,
+                                  out_channels=1024,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=512,
+                                 reshape_4_shape=[1, 2, 256, 16, 16],
+                                 conv2d_7_in_channels=256,
+                                 conv2d_7_out_channels=128,
+                                 conv2d_9_in_channels=128,
+                                 conv2d_9_out_channels=512,
+                                 reshape_10_shape=[1, 1, 2, 256],
+                                 reshape_14_shape=[1, 512],
+                                 reshape_15_shape=[1, 512, 1, 1],
+                                 reshape_16_shape=[1, 2, 256, 1, 1])
+        self.reducesum_4 = P.ReduceSum(keep_dims=False)
+        self.reducesum_4_axis = 1
+        self.conv2d_5 = nn.Conv2d(in_channels=256,
+                                  out_channels=1024,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_7 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=512,
+                                 reshape_4_shape=[1, 2, 256, 16, 16],
+                                 conv2d_7_in_channels=256,
+                                 conv2d_7_out_channels=128,
+                                 conv2d_9_in_channels=128,
+                                 conv2d_9_out_channels=512,
+                                 reshape_10_shape=[1, 1, 2, 256],
+                                 reshape_14_shape=[1, 512],
+                                 reshape_15_shape=[1, 512, 1, 1],
+                                 reshape_16_shape=[1, 2, 256, 1, 1])
+        self.reducesum_8 = P.ReduceSum(keep_dims=False)
+        self.reducesum_8_axis = 1
+        self.conv2d_9 = nn.Conv2d(in_channels=256,
+                                  out_channels=1024,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_11 = nn.ReLU()
+        self.module0_3 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=512,
+                                 reshape_4_shape=[1, 2, 256, 16, 16],
+                                 conv2d_7_in_channels=256,
+                                 conv2d_7_out_channels=128,
+                                 conv2d_9_in_channels=128,
+                                 conv2d_9_out_channels=512,
+                                 reshape_10_shape=[1, 1, 2, 256],
+                                 reshape_14_shape=[1, 512],
+                                 reshape_15_shape=[1, 512, 1, 1],
+                                 reshape_16_shape=[1, 2, 256, 1, 1])
+        self.reducesum_12 = P.ReduceSum(keep_dims=False)
+        self.reducesum_12_axis = 1
+        self.conv2d_13 = nn.Conv2d(in_channels=256,
+                                   out_channels=1024,
+                                   kernel_size=(1, 1),
+                                   stride=(1, 1),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.relu_15 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_reducesum_0 = self.reducesum_0(module0_0_opt, self.reducesum_0_axis)
+        opt_conv2d_1 = self.conv2d_1(opt_reducesum_0)
+        opt_add_2 = P.Add()(opt_conv2d_1, x)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_1_opt = self.module0_1(opt_relu_3)
+        opt_reducesum_4 = self.reducesum_4(module0_1_opt, self.reducesum_4_axis)
+        opt_conv2d_5 = self.conv2d_5(opt_reducesum_4)
+        opt_add_6 = P.Add()(opt_conv2d_5, opt_relu_3)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        module0_2_opt = self.module0_2(opt_relu_7)
+        opt_reducesum_8 = self.reducesum_8(module0_2_opt, self.reducesum_8_axis)
+        opt_conv2d_9 = self.conv2d_9(opt_reducesum_8)
+        opt_add_10 = P.Add()(opt_conv2d_9, opt_relu_7)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module0_3_opt = self.module0_3(opt_relu_11)
+        opt_reducesum_12 = self.reducesum_12(module0_3_opt, self.reducesum_12_axis)
+        opt_conv2d_13 = self.conv2d_13(opt_reducesum_12)
+        opt_add_14 = P.Add()(opt_conv2d_13, opt_relu_11)
+        opt_relu_15 = self.relu_15(opt_add_14)
+        return opt_relu_15
+
+
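+# Top-level ResNeSt-101e network as exported by MindConverter: a deep stem of three 3x3
+# convolutions, max pooling, and a sequence of split-attention residual stages built from the
+# modules defined above.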
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=64,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=64,
+                                  out_channels=64,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.conv2d_4 = nn.Conv2d(in_channels=64,
+                                  out_channels=128,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_5 = nn.ReLU()
+        self.pad_maxpool2d_6 = nn.Pad(paddings=((0, 0), (0, 0), (1, 0), (1, 0)))
+        self.maxpool2d_6 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
+        self.module4_0 = Module4()
+        self.conv2d_8 = nn.Conv2d(in_channels=128,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_29 = nn.ReLU()
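+        # Remaining blocks of the 256-channel stage: module16_0 adds two more residual
+        # split-attention bottlenecks.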
+        self.module16_0 = Module16(conv2d_1_in_channels=64,
+                                   conv2d_1_out_channels=256,
+                                   conv2d_5_in_channels=64,
+                                   conv2d_5_out_channels=256,
+                                   module0_0_conv2d_0_in_channels=256,
+                                   module0_0_conv2d_0_out_channels=64,
+                                   module0_0_conv2d_2_in_channels=64,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_reshape_4_shape=[1, 2, 64, 64, 64],
+                                   module0_0_conv2d_7_in_channels=64,
+                                   module0_0_conv2d_7_out_channels=32,
+                                   module0_0_conv2d_9_in_channels=32,
+                                   module0_0_conv2d_9_out_channels=128,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 64],
+                                   module0_0_reshape_14_shape=[1, 128],
+                                   module0_0_reshape_15_shape=[1, 128, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 64, 1, 1],
+                                   module0_1_conv2d_0_in_channels=256,
+                                   module0_1_conv2d_0_out_channels=64,
+                                   module0_1_conv2d_2_in_channels=64,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_reshape_4_shape=[1, 2, 64, 64, 64],
+                                   module0_1_conv2d_7_in_channels=64,
+                                   module0_1_conv2d_7_out_channels=32,
+                                   module0_1_conv2d_9_in_channels=32,
+                                   module0_1_conv2d_9_out_channels=128,
+                                   module0_1_reshape_10_shape=[1, 1, 2, 64],
+                                   module0_1_reshape_14_shape=[1, 128],
+                                   module0_1_reshape_15_shape=[1, 128, 1, 1],
+                                   module0_1_reshape_16_shape=[1, 2, 64, 1, 1])
+        self.module15_0 = Module15(module0_0_conv2d_0_in_channels=256,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_reshape_4_shape=[1, 2, 128, 64, 64],
+                                   module0_0_conv2d_7_in_channels=128,
+                                   module0_0_conv2d_7_out_channels=64,
+                                   module0_0_conv2d_9_in_channels=64,
+                                   module0_0_conv2d_9_out_channels=256,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 128],
+                                   module0_0_reshape_14_shape=[1, 256],
+                                   module0_0_reshape_15_shape=[1, 256, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 128, 1, 1],
+                                   module10_0_avgpool2d_0_kernel_size=(3, 3),
+                                   module10_0_conv2d_1_in_channels=128,
+                                   module10_0_conv2d_1_out_channels=512)
+        self.module10_0 = Module10(avgpool2d_0_kernel_size=(2, 2), conv2d_1_in_channels=256, conv2d_1_out_channels=512)
+        self.relu_99 = nn.ReLU()
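+        # 512-channel stage: module15_0 and module10_0 above form the strided transition into this
+        # stage; module16_1 and module6_0 add three more residual bottlenecks.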
+        self.module16_1 = Module16(conv2d_1_in_channels=128,
+                                   conv2d_1_out_channels=512,
+                                   conv2d_5_in_channels=128,
+                                   conv2d_5_out_channels=512,
+                                   module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_reshape_4_shape=[1, 2, 128, 32, 32],
+                                   module0_0_conv2d_7_in_channels=128,
+                                   module0_0_conv2d_7_out_channels=64,
+                                   module0_0_conv2d_9_in_channels=64,
+                                   module0_0_conv2d_9_out_channels=256,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 128],
+                                   module0_0_reshape_14_shape=[1, 256],
+                                   module0_0_reshape_15_shape=[1, 256, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 128, 1, 1],
+                                   module0_1_conv2d_0_in_channels=512,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_reshape_4_shape=[1, 2, 128, 32, 32],
+                                   module0_1_conv2d_7_in_channels=128,
+                                   module0_1_conv2d_7_out_channels=64,
+                                   module0_1_conv2d_9_in_channels=64,
+                                   module0_1_conv2d_9_out_channels=256,
+                                   module0_1_reshape_10_shape=[1, 1, 2, 128],
+                                   module0_1_reshape_14_shape=[1, 256],
+                                   module0_1_reshape_15_shape=[1, 256, 1, 1],
+                                   module0_1_reshape_16_shape=[1, 2, 128, 1, 1])
+        self.module6_0 = Module6()
+        self.module15_1 = Module15(module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=512,
+                                   module0_0_reshape_4_shape=[1, 2, 256, 32, 32],
+                                   module0_0_conv2d_7_in_channels=256,
+                                   module0_0_conv2d_7_out_channels=128,
+                                   module0_0_conv2d_9_in_channels=128,
+                                   module0_0_conv2d_9_out_channels=512,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_0_reshape_14_shape=[1, 512],
+                                   module0_0_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module10_0_avgpool2d_0_kernel_size=(3, 3),
+                                   module10_0_conv2d_1_in_channels=256,
+                                   module10_0_conv2d_1_out_channels=1024)
+        self.module10_1 = Module10(avgpool2d_0_kernel_size=(2, 2), conv2d_1_in_channels=512, conv2d_1_out_channels=1024)
+        self.relu_191 = nn.ReLU()
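+        # 1024-channel stage: module44_0 chains eight more residual split-attention bottlenecks.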
+        self.module44_0 = Module44(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=512,
+                                   module0_0_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_0_conv2d_7_in_channels=256,
+                                   module0_0_conv2d_7_out_channels=128,
+                                   module0_0_conv2d_9_in_channels=128,
+                                   module0_0_conv2d_9_out_channels=512,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_0_reshape_14_shape=[1, 512],
+                                   module0_0_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=512,
+                                   module0_1_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_1_conv2d_7_in_channels=256,
+                                   module0_1_conv2d_7_out_channels=128,
+                                   module0_1_conv2d_9_in_channels=128,
+                                   module0_1_conv2d_9_out_channels=512,
+                                   module0_1_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_1_reshape_14_shape=[1, 512],
+                                   module0_1_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_1_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=512,
+                                   module0_2_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_2_conv2d_7_in_channels=256,
+                                   module0_2_conv2d_7_out_channels=128,
+                                   module0_2_conv2d_9_in_channels=128,
+                                   module0_2_conv2d_9_out_channels=512,
+                                   module0_2_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_2_reshape_14_shape=[1, 512],
+                                   module0_2_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_2_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=512,
+                                   module0_3_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_3_conv2d_7_in_channels=256,
+                                   module0_3_conv2d_7_out_channels=128,
+                                   module0_3_conv2d_9_in_channels=128,
+                                   module0_3_conv2d_9_out_channels=512,
+                                   module0_3_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_3_reshape_14_shape=[1, 512],
+                                   module0_3_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_3_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=512,
+                                   module0_4_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_4_conv2d_7_in_channels=256,
+                                   module0_4_conv2d_7_out_channels=128,
+                                   module0_4_conv2d_9_in_channels=128,
+                                   module0_4_conv2d_9_out_channels=512,
+                                   module0_4_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_4_reshape_14_shape=[1, 512],
+                                   module0_4_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_4_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=512,
+                                   module0_5_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_5_conv2d_7_in_channels=256,
+                                   module0_5_conv2d_7_out_channels=128,
+                                   module0_5_conv2d_9_in_channels=128,
+                                   module0_5_conv2d_9_out_channels=512,
+                                   module0_5_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_5_reshape_14_shape=[1, 512],
+                                   module0_5_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_5_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=512,
+                                   module0_6_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_6_conv2d_7_in_channels=256,
+                                   module0_6_conv2d_7_out_channels=128,
+                                   module0_6_conv2d_9_in_channels=128,
+                                   module0_6_conv2d_9_out_channels=512,
+                                   module0_6_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_6_reshape_14_shape=[1, 512],
+                                   module0_6_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_6_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=512,
+                                   module0_7_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_7_conv2d_7_in_channels=256,
+                                   module0_7_conv2d_7_out_channels=128,
+                                   module0_7_conv2d_9_in_channels=128,
+                                   module0_7_conv2d_9_out_channels=512,
+                                   module0_7_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_7_reshape_14_shape=[1, 512],
+                                   module0_7_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_7_reshape_16_shape=[1, 2, 256, 1, 1])
+        self.module44_1 = Module44(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=512,
+                                   module0_0_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_0_conv2d_7_in_channels=256,
+                                   module0_0_conv2d_7_out_channels=128,
+                                   module0_0_conv2d_9_in_channels=128,
+                                   module0_0_conv2d_9_out_channels=512,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_0_reshape_14_shape=[1, 512],
+                                   module0_0_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=512,
+                                   module0_1_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_1_conv2d_7_in_channels=256,
+                                   module0_1_conv2d_7_out_channels=128,
+                                   module0_1_conv2d_9_in_channels=128,
+                                   module0_1_conv2d_9_out_channels=512,
+                                   module0_1_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_1_reshape_14_shape=[1, 512],
+                                   module0_1_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_1_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=512,
+                                   module0_2_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_2_conv2d_7_in_channels=256,
+                                   module0_2_conv2d_7_out_channels=128,
+                                   module0_2_conv2d_9_in_channels=128,
+                                   module0_2_conv2d_9_out_channels=512,
+                                   module0_2_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_2_reshape_14_shape=[1, 512],
+                                   module0_2_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_2_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=512,
+                                   module0_3_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_3_conv2d_7_in_channels=256,
+                                   module0_3_conv2d_7_out_channels=128,
+                                   module0_3_conv2d_9_in_channels=128,
+                                   module0_3_conv2d_9_out_channels=512,
+                                   module0_3_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_3_reshape_14_shape=[1, 512],
+                                   module0_3_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_3_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=512,
+                                   module0_4_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_4_conv2d_7_in_channels=256,
+                                   module0_4_conv2d_7_out_channels=128,
+                                   module0_4_conv2d_9_in_channels=128,
+                                   module0_4_conv2d_9_out_channels=512,
+                                   module0_4_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_4_reshape_14_shape=[1, 512],
+                                   module0_4_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_4_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=512,
+                                   module0_5_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_5_conv2d_7_in_channels=256,
+                                   module0_5_conv2d_7_out_channels=128,
+                                   module0_5_conv2d_9_in_channels=128,
+                                   module0_5_conv2d_9_out_channels=512,
+                                   module0_5_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_5_reshape_14_shape=[1, 512],
+                                   module0_5_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_5_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=512,
+                                   module0_6_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_6_conv2d_7_in_channels=256,
+                                   module0_6_conv2d_7_out_channels=128,
+                                   module0_6_conv2d_9_in_channels=128,
+                                   module0_6_conv2d_9_out_channels=512,
+                                   module0_6_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_6_reshape_14_shape=[1, 512],
+                                   module0_6_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_6_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=512,
+                                   module0_7_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_7_conv2d_7_in_channels=256,
+                                   module0_7_conv2d_7_out_channels=128,
+                                   module0_7_conv2d_9_in_channels=128,
+                                   module0_7_conv2d_9_out_channels=512,
+                                   module0_7_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_7_reshape_14_shape=[1, 512],
+                                   module0_7_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_7_reshape_16_shape=[1, 2, 256, 1, 1])
+        self.module43_0 = Module43()
+        self.module16_2 = Module16(conv2d_1_in_channels=256,
+                                   conv2d_1_out_channels=1024,
+                                   conv2d_5_in_channels=256,
+                                   conv2d_5_out_channels=1024,
+                                   module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=512,
+                                   module0_0_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_0_conv2d_7_in_channels=256,
+                                   module0_0_conv2d_7_out_channels=128,
+                                   module0_0_conv2d_9_in_channels=128,
+                                   module0_0_conv2d_9_out_channels=512,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_0_reshape_14_shape=[1, 512],
+                                   module0_0_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 256, 1, 1],
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=512,
+                                   module0_1_reshape_4_shape=[1, 2, 256, 16, 16],
+                                   module0_1_conv2d_7_in_channels=256,
+                                   module0_1_conv2d_7_out_channels=128,
+                                   module0_1_conv2d_9_in_channels=128,
+                                   module0_1_conv2d_9_out_channels=512,
+                                   module0_1_reshape_10_shape=[1, 1, 2, 256],
+                                   module0_1_reshape_14_shape=[1, 512],
+                                   module0_1_reshape_15_shape=[1, 512, 1, 1],
+                                   module0_1_reshape_16_shape=[1, 2, 256, 1, 1])
+        self.module15_2 = Module15(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=512,
+                                   module0_0_conv2d_2_in_channels=512,
+                                   module0_0_conv2d_2_out_channels=1024,
+                                   module0_0_reshape_4_shape=[1, 2, 512, 16, 16],
+                                   module0_0_conv2d_7_in_channels=512,
+                                   module0_0_conv2d_7_out_channels=256,
+                                   module0_0_conv2d_9_in_channels=256,
+                                   module0_0_conv2d_9_out_channels=1024,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 512],
+                                   module0_0_reshape_14_shape=[1, 1024],
+                                   module0_0_reshape_15_shape=[1, 1024, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 512, 1, 1],
+                                   module10_0_avgpool2d_0_kernel_size=(3, 3),
+                                   module10_0_conv2d_1_in_channels=512,
+                                   module10_0_conv2d_1_out_channels=2048)
+        self.module10_2 = Module10(avgpool2d_0_kernel_size=(2, 2),
+                                   conv2d_1_in_channels=1024,
+                                   conv2d_1_out_channels=2048)
+        self.relu_701 = nn.ReLU()
+        self.module16_3 = Module16(conv2d_1_in_channels=512,
+                                   conv2d_1_out_channels=2048,
+                                   conv2d_5_in_channels=512,
+                                   conv2d_5_out_channels=2048,
+                                   module0_0_conv2d_0_in_channels=2048,
+                                   module0_0_conv2d_0_out_channels=512,
+                                   module0_0_conv2d_2_in_channels=512,
+                                   module0_0_conv2d_2_out_channels=1024,
+                                   module0_0_reshape_4_shape=[1, 2, 512, 8, 8],
+                                   module0_0_conv2d_7_in_channels=512,
+                                   module0_0_conv2d_7_out_channels=256,
+                                   module0_0_conv2d_9_in_channels=256,
+                                   module0_0_conv2d_9_out_channels=1024,
+                                   module0_0_reshape_10_shape=[1, 1, 2, 512],
+                                   module0_0_reshape_14_shape=[1, 1024],
+                                   module0_0_reshape_15_shape=[1, 1024, 1, 1],
+                                   module0_0_reshape_16_shape=[1, 2, 512, 1, 1],
+                                   module0_1_conv2d_0_in_channels=2048,
+                                   module0_1_conv2d_0_out_channels=512,
+                                   module0_1_conv2d_2_in_channels=512,
+                                   module0_1_conv2d_2_out_channels=1024,
+                                   module0_1_reshape_4_shape=[1, 2, 512, 8, 8],
+                                   module0_1_conv2d_7_in_channels=512,
+                                   module0_1_conv2d_7_out_channels=256,
+                                   module0_1_conv2d_9_in_channels=256,
+                                   module0_1_conv2d_9_out_channels=1024,
+                                   module0_1_reshape_10_shape=[1, 1, 2, 512],
+                                   module0_1_reshape_14_shape=[1, 1024],
+                                   module0_1_reshape_15_shape=[1, 1024, 1, 1],
+                                   module0_1_reshape_16_shape=[1, 2, 512, 1, 1])
+        self.avgpool2d_746 = nn.AvgPool2d(kernel_size=(8, 8))
+        self.flatten_747 = nn.Flatten()
+        self.dense_748 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
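+    # Forward pass: three stem Conv2d+ReLU layers, max pooling, the converted residual
+    # stages, then 8x8 global average pooling, Flatten and a 1000-way Dense classifier.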
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_conv2d_4)
+        opt_maxpool2d_6 = self.pad_maxpool2d_6(opt_relu_5)
+        opt_maxpool2d_6 = self.maxpool2d_6(opt_maxpool2d_6)
+        module4_0_opt = self.module4_0(opt_maxpool2d_6)
+        opt_conv2d_8 = self.conv2d_8(opt_maxpool2d_6)
+        opt_add_28 = P.Add()(module4_0_opt, opt_conv2d_8)
+        opt_relu_29 = self.relu_29(opt_add_28)
+        module16_0_opt = self.module16_0(opt_relu_29)
+        module15_0_opt = self.module15_0(module16_0_opt)
+        module10_0_opt = self.module10_0(module16_0_opt)
+        opt_add_98 = P.Add()(module15_0_opt, module10_0_opt)
+        opt_relu_99 = self.relu_99(opt_add_98)
+        module16_1_opt = self.module16_1(opt_relu_99)
+        module6_0_opt = self.module6_0(module16_1_opt)
+        module15_1_opt = self.module15_1(module6_0_opt)
+        module10_1_opt = self.module10_1(module6_0_opt)
+        opt_add_190 = P.Add()(module15_1_opt, module10_1_opt)
+        opt_relu_191 = self.relu_191(opt_add_190)
+        module44_0_opt = self.module44_0(opt_relu_191)
+        module44_1_opt = self.module44_1(module44_0_opt)
+        module43_0_opt = self.module43_0(module44_1_opt)
+        module16_2_opt = self.module16_2(module43_0_opt)
+        module15_2_opt = self.module15_2(module16_2_opt)
+        module10_2_opt = self.module10_2(module16_2_opt)
+        opt_add_700 = P.Add()(module15_2_opt, module10_2_opt)
+        opt_relu_701 = self.relu_701(opt_add_700)
+        module16_3_opt = self.module16_3(opt_relu_701)
+        opt_avgpool2d_746 = self.avgpool2d_746(module16_3_opt)
+        opt_flatten_747 = self.flatten_747(opt_avgpool2d_746)
+        opt_dense_748 = self.dense_748(opt_flatten_747)
+        return opt_dense_748
diff --git a/research/cvtmodel/resnet_ipl/src/resnet101d.py b/research/cvtmodel/resnet_ipl/src/resnet101d.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf0b3127cbf088a6110c1c43a2f4a978f42813ca
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/src/resnet101d.py
@@ -0,0 +1,568 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
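+# Module3: a parameterized Conv2d followed by ReLU (the basic conv-activation unit of this file).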
+class Module3(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode):
+        super(Module3, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
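+# Module6: two stacked 3x3 Conv2d+ReLU blocks (32 -> 32 -> 64 channels) used in the stem.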
+class Module6(nn.Cell):
+    def __init__(self):
+        super(Module6, self).__init__()
+        self.module3_0 = Module3(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=32,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module3_1 = Module3(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+
+    def construct(self, x):
+        module3_0_opt = self.module3_0(x)
+        module3_1_opt = self.module3_1(module3_0_opt)
+        return module3_1_opt
+
+
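+# Module10: main branch of a bottleneck block -- two parameterized Conv2d+ReLU blocks
+# followed by a 1x1 projection convolution; the residual add is applied by the caller.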
+class Module10(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module3_0_conv2d_0_in_channels,
+                 module3_0_conv2d_0_out_channels, module3_0_conv2d_0_kernel_size, module3_0_conv2d_0_stride,
+                 module3_0_conv2d_0_padding, module3_0_conv2d_0_pad_mode, module3_1_conv2d_0_in_channels,
+                 module3_1_conv2d_0_out_channels, module3_1_conv2d_0_kernel_size, module3_1_conv2d_0_stride,
+                 module3_1_conv2d_0_padding, module3_1_conv2d_0_pad_mode):
+        super(Module10, self).__init__()
+        self.module3_0 = Module3(conv2d_0_in_channels=module3_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module3_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module3_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module3_0_conv2d_0_stride,
+                                 conv2d_0_padding=module3_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module3_0_conv2d_0_pad_mode)
+        self.module3_1 = Module3(conv2d_0_in_channels=module3_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module3_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module3_1_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module3_1_conv2d_0_stride,
+                                 conv2d_0_padding=module3_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module3_1_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module3_0_opt = self.module3_0(x)
+        module3_1_opt = self.module3_1(module3_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module3_1_opt)
+        return opt_conv2d_0
+
+
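+# Module0: a bottleneck residual block with identity shortcut -- 1x1 -> 3x3 -> 1x1
+# convolutions, element-wise add with the input, then ReLU.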
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_4_in_channels, conv2d_4_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
+                                  out_channels=conv2d_4_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_6 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
+        opt_add_5 = P.Add()(opt_conv2d_4, x)
+        opt_relu_6 = self.relu_6(opt_add_5)
+        return opt_relu_6
+
+
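+# Module14: two Module0 bottleneck blocks applied in sequence.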
+class Module14(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_4_in_channels, module0_0_conv2d_4_out_channels,
+                 module0_1_conv2d_0_in_channels, module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels,
+                 module0_1_conv2d_2_out_channels, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels):
+        super(Module14, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels)
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        module0_1_opt = self.module0_1(module0_0_opt)
+        return module0_1_opt
+
+
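+# Module13: downsampling shortcut -- 2x2 average pooling (the all-zero Pad is an identity)
+# followed by a 1x1 projection convolution.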
+class Module13(nn.Cell):
+    def __init__(self, conv2d_1_in_channels, conv2d_1_out_channels):
+        super(Module13, self).__init__()
+        self.pad_avgpool2d_0 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
+        self.avgpool2d_0 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
+        self.conv2d_1 = nn.Conv2d(in_channels=conv2d_1_in_channels,
+                                  out_channels=conv2d_1_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        opt_avgpool2d_0 = self.pad_avgpool2d_0(x)
+        opt_avgpool2d_0 = self.avgpool2d_0(opt_avgpool2d_0)
+        opt_conv2d_1 = self.conv2d_1(opt_avgpool2d_0)
+        return opt_conv2d_1
+
+
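+# Module31: eight Module0 bottleneck blocks chained in sequence.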
+class Module31(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_4_in_channels, module0_0_conv2d_4_out_channels,
+                 module0_1_conv2d_0_in_channels, module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels,
+                 module0_1_conv2d_2_out_channels, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_2_conv2d_0_in_channels, module0_2_conv2d_0_out_channels, module0_2_conv2d_2_in_channels,
+                 module0_2_conv2d_2_out_channels, module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels,
+                 module0_3_conv2d_0_in_channels, module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels,
+                 module0_3_conv2d_2_out_channels, module0_3_conv2d_4_in_channels, module0_3_conv2d_4_out_channels,
+                 module0_4_conv2d_0_in_channels, module0_4_conv2d_0_out_channels, module0_4_conv2d_2_in_channels,
+                 module0_4_conv2d_2_out_channels, module0_4_conv2d_4_in_channels, module0_4_conv2d_4_out_channels,
+                 module0_5_conv2d_0_in_channels, module0_5_conv2d_0_out_channels, module0_5_conv2d_2_in_channels,
+                 module0_5_conv2d_2_out_channels, module0_5_conv2d_4_in_channels, module0_5_conv2d_4_out_channels,
+                 module0_6_conv2d_0_in_channels, module0_6_conv2d_0_out_channels, module0_6_conv2d_2_in_channels,
+                 module0_6_conv2d_2_out_channels, module0_6_conv2d_4_in_channels, module0_6_conv2d_4_out_channels,
+                 module0_7_conv2d_0_in_channels, module0_7_conv2d_0_out_channels, module0_7_conv2d_2_in_channels,
+                 module0_7_conv2d_2_out_channels, module0_7_conv2d_4_in_channels, module0_7_conv2d_4_out_channels):
+        super(Module31, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels)
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels)
+        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_2_conv2d_4_out_channels)
+        self.module0_3 = Module0(conv2d_0_in_channels=module0_3_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_3_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_3_conv2d_4_out_channels)
+        self.module0_4 = Module0(conv2d_0_in_channels=module0_4_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_4_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_4_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_4_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_4_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_4_conv2d_4_out_channels)
+        self.module0_5 = Module0(conv2d_0_in_channels=module0_5_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_5_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_5_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_5_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_5_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_5_conv2d_4_out_channels)
+        self.module0_6 = Module0(conv2d_0_in_channels=module0_6_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_6_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_6_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_6_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_6_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_6_conv2d_4_out_channels)
+        self.module0_7 = Module0(conv2d_0_in_channels=module0_7_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_7_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_7_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_7_conv2d_2_out_channels,
+                                 conv2d_4_in_channels=module0_7_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_7_conv2d_4_out_channels)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        module0_1_opt = self.module0_1(module0_0_opt)
+        module0_2_opt = self.module0_2(module0_1_opt)
+        module0_3_opt = self.module0_3(module0_2_opt)
+        module0_4_opt = self.module0_4(module0_3_opt)
+        module0_5_opt = self.module0_5(module0_4_opt)
+        module0_6_opt = self.module0_6(module0_5_opt)
+        module0_7_opt = self.module0_7(module0_6_opt)
+        return module0_7_opt
+
+
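+# Module30: four identical 1024-channel Module0 bottleneck blocks in sequence.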
+class Module30(nn.Cell):
+    def __init__(self):
+        super(Module30, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024)
+        self.module0_1 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024)
+        self.module0_2 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024)
+        self.module0_3 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        module0_1_opt = self.module0_1(module0_0_opt)
+        module0_2_opt = self.module0_2(module0_1_opt)
+        module0_3_opt = self.module0_3(module0_2_opt)
+        return module0_3_opt
+
+
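+# MindSporeModel: the converted resnet101d network -- deep three-conv stem, four bottleneck
+# stages, then global average pooling, Flatten and a 1000-class Dense head.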
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=32,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module6_0 = Module6()
+        self.pad_maxpool2d_6 = nn.Pad(paddings=((0, 0), (0, 0), (1, 0), (1, 0)))
+        self.maxpool2d_6 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
+        self.module10_0 = Module10(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module3_0_conv2d_0_in_channels=64,
+                                   module3_0_conv2d_0_out_channels=64,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=64,
+                                   module3_1_conv2d_0_out_channels=64,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(1, 1),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.conv2d_8 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_14 = nn.ReLU()
+        self.module14_0 = Module14(module0_0_conv2d_0_in_channels=256,
+                                   module0_0_conv2d_0_out_channels=64,
+                                   module0_0_conv2d_2_in_channels=64,
+                                   module0_0_conv2d_2_out_channels=64,
+                                   module0_0_conv2d_4_in_channels=64,
+                                   module0_0_conv2d_4_out_channels=256,
+                                   module0_1_conv2d_0_in_channels=256,
+                                   module0_1_conv2d_0_out_channels=64,
+                                   module0_1_conv2d_2_in_channels=64,
+                                   module0_1_conv2d_2_out_channels=64,
+                                   module0_1_conv2d_4_in_channels=64,
+                                   module0_1_conv2d_4_out_channels=256)
+        self.module10_1 = Module10(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   module3_0_conv2d_0_in_channels=256,
+                                   module3_0_conv2d_0_out_channels=128,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=128,
+                                   module3_1_conv2d_0_out_channels=128,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(2, 2),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.module13_0 = Module13(conv2d_1_in_channels=256, conv2d_1_out_channels=512)
+        self.relu_37 = nn.ReLU()
+        self.module14_1 = Module14(module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_4_in_channels=128,
+                                   module0_0_conv2d_4_out_channels=512,
+                                   module0_1_conv2d_0_in_channels=512,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_4_in_channels=128,
+                                   module0_1_conv2d_4_out_channels=512)
+        self.module0_0 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512)
+        self.module10_2 = Module10(conv2d_0_in_channels=256,
+                                   conv2d_0_out_channels=1024,
+                                   module3_0_conv2d_0_in_channels=512,
+                                   module3_0_conv2d_0_out_channels=256,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=256,
+                                   module3_1_conv2d_0_out_channels=256,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(2, 2),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.module13_1 = Module13(conv2d_1_in_channels=512, conv2d_1_out_channels=1024)
+        self.relu_67 = nn.ReLU()
+        self.module31_0 = Module31(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=256,
+                                   module0_3_conv2d_4_in_channels=256,
+                                   module0_3_conv2d_4_out_channels=1024,
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=256,
+                                   module0_4_conv2d_4_in_channels=256,
+                                   module0_4_conv2d_4_out_channels=1024,
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=256,
+                                   module0_5_conv2d_4_in_channels=256,
+                                   module0_5_conv2d_4_out_channels=1024,
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=256,
+                                   module0_6_conv2d_4_in_channels=256,
+                                   module0_6_conv2d_4_out_channels=1024,
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=256,
+                                   module0_7_conv2d_4_in_channels=256,
+                                   module0_7_conv2d_4_out_channels=1024)
+        self.module31_1 = Module31(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=256,
+                                   module0_3_conv2d_4_in_channels=256,
+                                   module0_3_conv2d_4_out_channels=1024,
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=256,
+                                   module0_4_conv2d_4_in_channels=256,
+                                   module0_4_conv2d_4_out_channels=1024,
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=256,
+                                   module0_5_conv2d_4_in_channels=256,
+                                   module0_5_conv2d_4_out_channels=1024,
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=256,
+                                   module0_6_conv2d_4_in_channels=256,
+                                   module0_6_conv2d_4_out_channels=1024,
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=256,
+                                   module0_7_conv2d_4_in_channels=256,
+                                   module0_7_conv2d_4_out_channels=1024)
+        self.module30_0 = Module30()
+        self.module14_2 = Module14(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024)
+        self.module10_3 = Module10(conv2d_0_in_channels=512,
+                                   conv2d_0_out_channels=2048,
+                                   module3_0_conv2d_0_in_channels=1024,
+                                   module3_0_conv2d_0_out_channels=512,
+                                   module3_0_conv2d_0_kernel_size=(1, 1),
+                                   module3_0_conv2d_0_stride=(1, 1),
+                                   module3_0_conv2d_0_padding=0,
+                                   module3_0_conv2d_0_pad_mode="valid",
+                                   module3_1_conv2d_0_in_channels=512,
+                                   module3_1_conv2d_0_out_channels=512,
+                                   module3_1_conv2d_0_kernel_size=(3, 3),
+                                   module3_1_conv2d_0_stride=(2, 2),
+                                   module3_1_conv2d_0_padding=(1, 1, 1, 1),
+                                   module3_1_conv2d_0_pad_mode="pad")
+        self.module13_2 = Module13(conv2d_1_in_channels=1024, conv2d_1_out_channels=2048)
+        self.relu_230 = nn.ReLU()
+        self.module14_3 = Module14(module0_0_conv2d_0_in_channels=2048,
+                                   module0_0_conv2d_0_out_channels=512,
+                                   module0_0_conv2d_2_in_channels=512,
+                                   module0_0_conv2d_2_out_channels=512,
+                                   module0_0_conv2d_4_in_channels=512,
+                                   module0_0_conv2d_4_out_channels=2048,
+                                   module0_1_conv2d_0_in_channels=2048,
+                                   module0_1_conv2d_0_out_channels=512,
+                                   module0_1_conv2d_2_in_channels=512,
+                                   module0_1_conv2d_2_out_channels=512,
+                                   module0_1_conv2d_4_in_channels=512,
+                                   module0_1_conv2d_4_out_channels=2048)
+        self.avgpool2d_245 = nn.AvgPool2d(kernel_size=(8, 8))
+        self.flatten_246 = nn.Flatten()
+        self.dense_247 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module6_0_opt = self.module6_0(opt_relu_1)
+        opt_maxpool2d_6 = self.pad_maxpool2d_6(module6_0_opt)
+        opt_maxpool2d_6 = self.maxpool2d_6(opt_maxpool2d_6)
+        module10_0_opt = self.module10_0(opt_maxpool2d_6)
+        opt_conv2d_8 = self.conv2d_8(opt_maxpool2d_6)
+        opt_add_13 = P.Add()(module10_0_opt, opt_conv2d_8)
+        opt_relu_14 = self.relu_14(opt_add_13)
+        module14_0_opt = self.module14_0(opt_relu_14)
+        module10_1_opt = self.module10_1(module14_0_opt)
+        module13_0_opt = self.module13_0(module14_0_opt)
+        opt_add_36 = P.Add()(module10_1_opt, module13_0_opt)
+        opt_relu_37 = self.relu_37(opt_add_36)
+        module14_1_opt = self.module14_1(opt_relu_37)
+        module0_0_opt = self.module0_0(module14_1_opt)
+        module10_2_opt = self.module10_2(module0_0_opt)
+        module13_1_opt = self.module13_1(module0_0_opt)
+        opt_add_66 = P.Add()(module10_2_opt, module13_1_opt)
+        opt_relu_67 = self.relu_67(opt_add_66)
+        module31_0_opt = self.module31_0(opt_relu_67)
+        module31_1_opt = self.module31_1(module31_0_opt)
+        module30_0_opt = self.module30_0(module31_1_opt)
+        module14_2_opt = self.module14_2(module30_0_opt)
+        module10_3_opt = self.module10_3(module14_2_opt)
+        module13_2_opt = self.module13_2(module14_2_opt)
+        opt_add_229 = P.Add()(module10_3_opt, module13_2_opt)
+        opt_relu_230 = self.relu_230(opt_add_229)
+        module14_3_opt = self.module14_3(opt_relu_230)
+        opt_avgpool2d_245 = self.avgpool2d_245(module14_3_opt)
+        opt_flatten_246 = self.flatten_246(opt_avgpool2d_245)
+        opt_dense_247 = self.dense_247(opt_flatten_246)
+        return opt_dense_247
diff --git a/research/cvtmodel/resnet_ipl/src/resnet26t.py b/research/cvtmodel/resnet_ipl/src/resnet26t.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8b19b860da96ed84206090a999f14ceccff48e3
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/src/resnet26t.py
@@ -0,0 +1,297 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
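+# Module1: a single parameterized convolution followed by ReLU, the basic
+# conv-activation block reused throughout this generated network.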
+class Module1(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
+                 conv2d_0_padding, conv2d_0_pad_mode):
+        super(Module1, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=conv2d_0_stride,
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
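+# Module3: the remaining two 3x3 conv+ReLU layers of the stem (24 -> 32 -> 64 channels);
+# the initial stride-2 stem convolution lives in MindSporeModel below.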
+class Module3(nn.Cell):
+    def __init__(self):
+        super(Module3, self).__init__()
+        self.module1_0 = Module1(conv2d_0_in_channels=24,
+                                 conv2d_0_out_channels=32,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+        self.module1_1 = Module1(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_stride=(1, 1),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad")
+
+    def construct(self, x):
+        module1_0_opt = self.module1_0(x)
+        module1_1_opt = self.module1_1(module1_0_opt)
+        return module1_1_opt
+
+
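+# Module8: a bottleneck main branch -- two conv+ReLU blocks followed by a 1x1 projection
+# convolution; the residual add is performed by the caller.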
+class Module8(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module1_0_conv2d_0_in_channels,
+                 module1_0_conv2d_0_out_channels, module1_0_conv2d_0_kernel_size, module1_0_conv2d_0_stride,
+                 module1_0_conv2d_0_padding, module1_0_conv2d_0_pad_mode, module1_1_conv2d_0_in_channels,
+                 module1_1_conv2d_0_out_channels, module1_1_conv2d_0_kernel_size, module1_1_conv2d_0_stride,
+                 module1_1_conv2d_0_padding, module1_1_conv2d_0_pad_mode):
+        super(Module8, self).__init__()
+        self.module1_0 = Module1(conv2d_0_in_channels=module1_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module1_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module1_0_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module1_0_conv2d_0_stride,
+                                 conv2d_0_padding=module1_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module1_0_conv2d_0_pad_mode)
+        self.module1_1 = Module1(conv2d_0_in_channels=module1_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module1_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module1_1_conv2d_0_kernel_size,
+                                 conv2d_0_stride=module1_1_conv2d_0_stride,
+                                 conv2d_0_padding=module1_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module1_1_conv2d_0_pad_mode)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module1_0_opt = self.module1_0(x)
+        module1_1_opt = self.module1_1(module1_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module1_1_opt)
+        return opt_conv2d_0
+
+
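+# Module0: a complete bottleneck residual block with identity shortcut:
+# 1x1 -> 3x3 -> 1x1 convolutions, skip connection, then ReLU.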
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_4_in_channels, conv2d_4_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
+                                  out_channels=conv2d_4_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_6 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
+        opt_add_5 = P.Add()(opt_conv2d_4, x)
+        opt_relu_6 = self.relu_6(opt_add_5)
+        return opt_relu_6
+
+
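+# Module7: the downsampling shortcut branch -- 2x2 average pooling followed by a
+# 1x1 convolution to match the residual branch's channel count.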
+class Module7(nn.Cell):
+    def __init__(self, conv2d_1_in_channels, conv2d_1_out_channels):
+        super(Module7, self).__init__()
+        self.pad_avgpool2d_0 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
+        self.avgpool2d_0 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
+        self.conv2d_1 = nn.Conv2d(in_channels=conv2d_1_in_channels,
+                                  out_channels=conv2d_1_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        opt_avgpool2d_0 = self.pad_avgpool2d_0(x)
+        opt_avgpool2d_0 = self.avgpool2d_0(opt_avgpool2d_0)
+        opt_conv2d_1 = self.conv2d_1(opt_avgpool2d_0)
+        return opt_conv2d_1
+
+
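+# MindSporeModel: the converted resnet26t network -- stem, four residual stages built
+# from the modules above, an 8x8 average pool, flatten, and a 1000-way Dense classifier.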
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=24,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module3_0 = Module3()
+        self.pad_maxpool2d_6 = nn.Pad(paddings=((0, 0), (0, 0), (1, 0), (1, 0)))
+        self.maxpool2d_6 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
+        self.module8_0 = Module8(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=256,
+                                 module1_0_conv2d_0_in_channels=64,
+                                 module1_0_conv2d_0_out_channels=64,
+                                 module1_0_conv2d_0_kernel_size=(1, 1),
+                                 module1_0_conv2d_0_stride=(1, 1),
+                                 module1_0_conv2d_0_padding=0,
+                                 module1_0_conv2d_0_pad_mode="valid",
+                                 module1_1_conv2d_0_in_channels=64,
+                                 module1_1_conv2d_0_out_channels=64,
+                                 module1_1_conv2d_0_kernel_size=(3, 3),
+                                 module1_1_conv2d_0_stride=(1, 1),
+                                 module1_1_conv2d_0_padding=(1, 1, 1, 1),
+                                 module1_1_conv2d_0_pad_mode="pad")
+        self.conv2d_8 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_14 = nn.ReLU()
+        self.module0_0 = Module0(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_2_in_channels=64,
+                                 conv2d_2_out_channels=64,
+                                 conv2d_4_in_channels=64,
+                                 conv2d_4_out_channels=256)
+        self.module8_1 = Module8(conv2d_0_in_channels=128,
+                                 conv2d_0_out_channels=512,
+                                 module1_0_conv2d_0_in_channels=256,
+                                 module1_0_conv2d_0_out_channels=128,
+                                 module1_0_conv2d_0_kernel_size=(1, 1),
+                                 module1_0_conv2d_0_stride=(1, 1),
+                                 module1_0_conv2d_0_padding=0,
+                                 module1_0_conv2d_0_pad_mode="valid",
+                                 module1_1_conv2d_0_in_channels=128,
+                                 module1_1_conv2d_0_out_channels=128,
+                                 module1_1_conv2d_0_kernel_size=(3, 3),
+                                 module1_1_conv2d_0_stride=(2, 2),
+                                 module1_1_conv2d_0_padding=(1, 1, 1, 1),
+                                 module1_1_conv2d_0_pad_mode="pad")
+        self.module7_0 = Module7(conv2d_1_in_channels=256, conv2d_1_out_channels=512)
+        self.relu_30 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512)
+        self.module8_2 = Module8(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=1024,
+                                 module1_0_conv2d_0_in_channels=512,
+                                 module1_0_conv2d_0_out_channels=256,
+                                 module1_0_conv2d_0_kernel_size=(1, 1),
+                                 module1_0_conv2d_0_stride=(1, 1),
+                                 module1_0_conv2d_0_padding=0,
+                                 module1_0_conv2d_0_pad_mode="valid",
+                                 module1_1_conv2d_0_in_channels=256,
+                                 module1_1_conv2d_0_out_channels=256,
+                                 module1_1_conv2d_0_kernel_size=(3, 3),
+                                 module1_1_conv2d_0_stride=(2, 2),
+                                 module1_1_conv2d_0_padding=(1, 1, 1, 1),
+                                 module1_1_conv2d_0_pad_mode="pad")
+        self.module7_1 = Module7(conv2d_1_in_channels=512, conv2d_1_out_channels=1024)
+        self.relu_46 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024)
+        self.module8_3 = Module8(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=2048,
+                                 module1_0_conv2d_0_in_channels=1024,
+                                 module1_0_conv2d_0_out_channels=512,
+                                 module1_0_conv2d_0_kernel_size=(1, 1),
+                                 module1_0_conv2d_0_stride=(1, 1),
+                                 module1_0_conv2d_0_padding=0,
+                                 module1_0_conv2d_0_pad_mode="valid",
+                                 module1_1_conv2d_0_in_channels=512,
+                                 module1_1_conv2d_0_out_channels=512,
+                                 module1_1_conv2d_0_kernel_size=(3, 3),
+                                 module1_1_conv2d_0_stride=(2, 2),
+                                 module1_1_conv2d_0_padding=(1, 1, 1, 1),
+                                 module1_1_conv2d_0_pad_mode="pad")
+        self.module7_2 = Module7(conv2d_1_in_channels=1024, conv2d_1_out_channels=2048)
+        self.relu_62 = nn.ReLU()
+        self.module0_3 = Module0(conv2d_0_in_channels=2048,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_2_in_channels=512,
+                                 conv2d_2_out_channels=512,
+                                 conv2d_4_in_channels=512,
+                                 conv2d_4_out_channels=2048)
+        self.avgpool2d_70 = nn.AvgPool2d(kernel_size=(8, 8))
+        self.flatten_71 = nn.Flatten()
+        self.dense_72 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module3_0_opt = self.module3_0(opt_relu_1)
+        opt_maxpool2d_6 = self.pad_maxpool2d_6(module3_0_opt)
+        opt_maxpool2d_6 = self.maxpool2d_6(opt_maxpool2d_6)
+        module8_0_opt = self.module8_0(opt_maxpool2d_6)
+        opt_conv2d_8 = self.conv2d_8(opt_maxpool2d_6)
+        opt_add_13 = P.Add()(module8_0_opt, opt_conv2d_8)
+        opt_relu_14 = self.relu_14(opt_add_13)
+        module0_0_opt = self.module0_0(opt_relu_14)
+        module8_1_opt = self.module8_1(module0_0_opt)
+        module7_0_opt = self.module7_0(module0_0_opt)
+        opt_add_29 = P.Add()(module8_1_opt, module7_0_opt)
+        opt_relu_30 = self.relu_30(opt_add_29)
+        module0_1_opt = self.module0_1(opt_relu_30)
+        module8_2_opt = self.module8_2(module0_1_opt)
+        module7_1_opt = self.module7_1(module0_1_opt)
+        opt_add_45 = P.Add()(module8_2_opt, module7_1_opt)
+        opt_relu_46 = self.relu_46(opt_add_45)
+        module0_2_opt = self.module0_2(opt_relu_46)
+        module8_3_opt = self.module8_3(module0_2_opt)
+        module7_2_opt = self.module7_2(module0_2_opt)
+        opt_add_61 = P.Add()(module8_3_opt, module7_2_opt)
+        opt_relu_62 = self.relu_62(opt_add_61)
+        module0_3_opt = self.module0_3(opt_relu_62)
+        opt_avgpool2d_70 = self.avgpool2d_70(module0_3_opt)
+        opt_flatten_71 = self.flatten_71(opt_avgpool2d_70)
+        opt_dense_72 = self.dense_72(opt_flatten_71)
+        return opt_dense_72
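+
+
+# A minimal, hypothetical smoke test (not produced by MindConverter): it assumes a
+# 256x256 input, which is consistent with the 8x8 average-pool kernel above, and only
+# checks that a forward pass yields logits of shape (1, 1000).
+if __name__ == "__main__":
+    import numpy as np
+    from mindspore import Tensor
+
+    net = MindSporeModel()
+    dummy_input = Tensor(np.random.randn(1, 3, 256, 256).astype(np.float32))
+    print(net(dummy_input).shape)  # expected: (1, 1000)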
diff --git a/research/cvtmodel/resnet_ipl/src/resnet51q.py b/research/cvtmodel/resnet_ipl/src/resnet51q.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e1b8164c48500c53623520cfe773a430d4ffef5
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/src/resnet51q.py
@@ -0,0 +1,509 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
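+# Module0: two convolutions, each followed by a SiLU-style activation implemented as
+# x * sigmoid(x); the second convolution is a (possibly grouped, possibly strided) 3x3.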
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_padding,
+                 conv2d_0_pad_mode, conv2d_3_in_channels, conv2d_3_out_channels, conv2d_3_stride, conv2d_3_group):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=conv2d_0_kernel_size,
+                                  stride=(1, 1),
+                                  padding=conv2d_0_padding,
+                                  pad_mode=conv2d_0_pad_mode,
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.sigmoid_1 = nn.Sigmoid()
+        self.conv2d_3 = nn.Conv2d(in_channels=conv2d_3_in_channels,
+                                  out_channels=conv2d_3_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=conv2d_3_stride,
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=conv2d_3_group,
+                                  has_bias=True)
+        self.sigmoid_4 = nn.Sigmoid()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_sigmoid_1 = self.sigmoid_1(opt_conv2d_0)
+        opt_mul_2 = P.Mul()(opt_conv2d_0, opt_sigmoid_1)
+        opt_conv2d_3 = self.conv2d_3(opt_mul_2)
+        opt_sigmoid_4 = self.sigmoid_4(opt_conv2d_3)
+        opt_mul_5 = P.Mul()(opt_conv2d_3, opt_sigmoid_4)
+        return opt_mul_5
+
+
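+# Module14: a Module0 conv/SiLU pair followed by a 1x1 projection convolution;
+# the residual add is performed by the caller.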
+class Module14(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module0_0_conv2d_0_in_channels,
+                 module0_0_conv2d_0_out_channels, module0_0_conv2d_0_kernel_size, module0_0_conv2d_0_padding,
+                 module0_0_conv2d_0_pad_mode, module0_0_conv2d_3_in_channels, module0_0_conv2d_3_out_channels,
+                 module0_0_conv2d_3_stride, module0_0_conv2d_3_group):
+        super(Module14, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module0_0_conv2d_0_kernel_size,
+                                 conv2d_0_padding=module0_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module0_0_conv2d_0_pad_mode,
+                                 conv2d_3_in_channels=module0_0_conv2d_3_in_channels,
+                                 conv2d_3_out_channels=module0_0_conv2d_3_out_channels,
+                                 conv2d_3_stride=module0_0_conv2d_3_stride,
+                                 conv2d_3_group=module0_0_conv2d_3_group)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_conv2d_0 = self.conv2d_0(module0_0_opt)
+        return opt_conv2d_0
+
+
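+# Module4: a standalone SiLU activation (x * sigmoid(x)).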
+class Module4(nn.Cell):
+    def __init__(self):
+        super(Module4, self).__init__()
+        self.sigmoid_0 = nn.Sigmoid()
+
+    def construct(self, x):
+        opt_sigmoid_0 = self.sigmoid_0(x)
+        opt_mul_1 = P.Mul()(x, opt_sigmoid_0)
+        return opt_mul_1
+
+
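+# Module12: one residual bottleneck block with SiLU activations and fixed
+# 256 -> 64 -> 64 -> 256 channels (grouped 3x3, group=2).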
+class Module12(nn.Cell):
+    def __init__(self):
+        super(Module12, self).__init__()
+        self.module4_0 = Module4()
+        self.module0_0 = Module0(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid",
+                                 conv2d_3_in_channels=64,
+                                 conv2d_3_out_channels=64,
+                                 conv2d_3_stride=(1, 1),
+                                 conv2d_3_group=2)
+        self.conv2d_0 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.module4_1 = Module4()
+
+    def construct(self, x):
+        module4_0_opt = self.module4_0(x)
+        module0_0_opt = self.module0_0(module4_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module0_0_opt)
+        opt_add_1 = P.Add()(opt_conv2d_0, module4_0_opt)
+        module4_1_opt = self.module4_1(opt_add_1)
+        return module4_1_opt
+
+
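+# Module16: three consecutive SiLU residual bottleneck blocks; channel counts,
+# kernel sizes, strides and group settings are all passed in as parameters.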
+class Module16(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_4_in_channels, conv2d_4_out_channels, module0_0_conv2d_0_in_channels,
+                 module0_0_conv2d_0_out_channels, module0_0_conv2d_0_kernel_size, module0_0_conv2d_0_padding,
+                 module0_0_conv2d_0_pad_mode, module0_0_conv2d_3_in_channels, module0_0_conv2d_3_out_channels,
+                 module0_0_conv2d_3_stride, module0_0_conv2d_3_group, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_0_kernel_size, module0_1_conv2d_0_padding,
+                 module0_1_conv2d_0_pad_mode, module0_1_conv2d_3_in_channels, module0_1_conv2d_3_out_channels,
+                 module0_1_conv2d_3_stride, module0_1_conv2d_3_group, module0_2_conv2d_0_in_channels,
+                 module0_2_conv2d_0_out_channels, module0_2_conv2d_0_kernel_size, module0_2_conv2d_0_padding,
+                 module0_2_conv2d_0_pad_mode, module0_2_conv2d_3_in_channels, module0_2_conv2d_3_out_channels,
+                 module0_2_conv2d_3_stride, module0_2_conv2d_3_group):
+        super(Module16, self).__init__()
+        self.module4_0 = Module4()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module0_0_conv2d_0_kernel_size,
+                                 conv2d_0_padding=module0_0_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module0_0_conv2d_0_pad_mode,
+                                 conv2d_3_in_channels=module0_0_conv2d_3_in_channels,
+                                 conv2d_3_out_channels=module0_0_conv2d_3_out_channels,
+                                 conv2d_3_stride=module0_0_conv2d_3_stride,
+                                 conv2d_3_group=module0_0_conv2d_3_group)
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.module4_1 = Module4()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module0_1_conv2d_0_kernel_size,
+                                 conv2d_0_padding=module0_1_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module0_1_conv2d_0_pad_mode,
+                                 conv2d_3_in_channels=module0_1_conv2d_3_in_channels,
+                                 conv2d_3_out_channels=module0_1_conv2d_3_out_channels,
+                                 conv2d_3_stride=module0_1_conv2d_3_stride,
+                                 conv2d_3_group=module0_1_conv2d_3_group)
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.module4_2 = Module4()
+        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_0_kernel_size=module0_2_conv2d_0_kernel_size,
+                                 conv2d_0_padding=module0_2_conv2d_0_padding,
+                                 conv2d_0_pad_mode=module0_2_conv2d_0_pad_mode,
+                                 conv2d_3_in_channels=module0_2_conv2d_3_in_channels,
+                                 conv2d_3_out_channels=module0_2_conv2d_3_out_channels,
+                                 conv2d_3_stride=module0_2_conv2d_3_stride,
+                                 conv2d_3_group=module0_2_conv2d_3_group)
+        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
+                                  out_channels=conv2d_4_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.module4_3 = Module4()
+
+    def construct(self, x):
+        module4_0_opt = self.module4_0(x)
+        module0_0_opt = self.module0_0(module4_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module0_0_opt)
+        opt_add_1 = P.Add()(opt_conv2d_0, module4_0_opt)
+        module4_1_opt = self.module4_1(opt_add_1)
+        module0_1_opt = self.module0_1(module4_1_opt)
+        opt_conv2d_2 = self.conv2d_2(module0_1_opt)
+        opt_add_3 = P.Add()(opt_conv2d_2, module4_1_opt)
+        module4_2_opt = self.module4_2(opt_add_3)
+        module0_2_opt = self.module0_2(module4_2_opt)
+        opt_conv2d_4 = self.conv2d_4(module0_2_opt)
+        opt_add_5 = P.Add()(opt_conv2d_4, module4_2_opt)
+        module4_3_opt = self.module4_3(opt_add_5)
+        return module4_3_opt
+
+
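+# Module10: two residual bottleneck blocks with fixed 1536/384 channels and group=12;
+# the final activation is applied by the caller.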
+class Module10(nn.Cell):
+    def __init__(self):
+        super(Module10, self).__init__()
+        self.module4_0 = Module4()
+        self.module0_0 = Module0(conv2d_0_in_channels=1536,
+                                 conv2d_0_out_channels=384,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid",
+                                 conv2d_3_in_channels=384,
+                                 conv2d_3_out_channels=384,
+                                 conv2d_3_stride=(1, 1),
+                                 conv2d_3_group=12)
+        self.conv2d_0 = nn.Conv2d(in_channels=384,
+                                  out_channels=1536,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.module4_1 = Module4()
+        self.module0_1 = Module0(conv2d_0_in_channels=1536,
+                                 conv2d_0_out_channels=384,
+                                 conv2d_0_kernel_size=(1, 1),
+                                 conv2d_0_padding=0,
+                                 conv2d_0_pad_mode="valid",
+                                 conv2d_3_in_channels=384,
+                                 conv2d_3_out_channels=384,
+                                 conv2d_3_stride=(1, 1),
+                                 conv2d_3_group=12)
+        self.conv2d_2 = nn.Conv2d(in_channels=384,
+                                  out_channels=1536,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        module4_0_opt = self.module4_0(x)
+        module0_0_opt = self.module0_0(module4_0_opt)
+        opt_conv2d_0 = self.conv2d_0(module0_0_opt)
+        opt_add_1 = P.Add()(opt_conv2d_0, module4_0_opt)
+        module4_1_opt = self.module4_1(opt_add_1)
+        module0_1_opt = self.module0_1(module4_1_opt)
+        opt_conv2d_2 = self.conv2d_2(module0_1_opt)
+        opt_add_3 = P.Add()(opt_conv2d_2, module4_1_opt)
+        return opt_add_3
+
+
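+# MindSporeModel: the converted resnet51q network -- a four-convolution stem, four
+# residual stages built from the modules above, a 1x1 expansion to 2048 channels with
+# SiLU, an 8x8 average pool, flatten, and a 1000-way Dense classifier.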
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=16,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=False)
+        self.conv2d_1 = nn.Conv2d(in_channels=16,
+                                  out_channels=32,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=False)
+        self.module0_0 = Module0(conv2d_0_in_channels=32,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_0_kernel_size=(3, 3),
+                                 conv2d_0_padding=(1, 1, 1, 1),
+                                 conv2d_0_pad_mode="pad",
+                                 conv2d_3_in_channels=64,
+                                 conv2d_3_out_channels=128,
+                                 conv2d_3_stride=(2, 2),
+                                 conv2d_3_group=1)
+        self.module14_0 = Module14(conv2d_0_in_channels=64,
+                                   conv2d_0_out_channels=256,
+                                   module0_0_conv2d_0_in_channels=128,
+                                   module0_0_conv2d_0_out_channels=64,
+                                   module0_0_conv2d_0_kernel_size=(1, 1),
+                                   module0_0_conv2d_0_padding=0,
+                                   module0_0_conv2d_0_pad_mode="valid",
+                                   module0_0_conv2d_3_in_channels=64,
+                                   module0_0_conv2d_3_out_channels=64,
+                                   module0_0_conv2d_3_stride=(1, 1),
+                                   module0_0_conv2d_3_group=2)
+        self.conv2d_9 = nn.Conv2d(in_channels=128,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.module12_0 = Module12()
+        self.module14_1 = Module14(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   module0_0_conv2d_0_in_channels=256,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_0_kernel_size=(1, 1),
+                                   module0_0_conv2d_0_padding=0,
+                                   module0_0_conv2d_0_pad_mode="valid",
+                                   module0_0_conv2d_3_in_channels=128,
+                                   module0_0_conv2d_3_out_channels=128,
+                                   module0_0_conv2d_3_stride=(2, 2),
+                                   module0_0_conv2d_3_group=4)
+        self.conv2d_30 = nn.Conv2d(in_channels=256,
+                                   out_channels=512,
+                                   kernel_size=(1, 1),
+                                   stride=(2, 2),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.module16_0 = Module16(conv2d_0_in_channels=128,
+                                   conv2d_0_out_channels=512,
+                                   conv2d_2_in_channels=128,
+                                   conv2d_2_out_channels=512,
+                                   conv2d_4_in_channels=128,
+                                   conv2d_4_out_channels=512,
+                                   module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_0_kernel_size=(1, 1),
+                                   module0_0_conv2d_0_padding=0,
+                                   module0_0_conv2d_0_pad_mode="valid",
+                                   module0_0_conv2d_3_in_channels=128,
+                                   module0_0_conv2d_3_out_channels=128,
+                                   module0_0_conv2d_3_stride=(1, 1),
+                                   module0_0_conv2d_3_group=4,
+                                   module0_1_conv2d_0_in_channels=512,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_0_kernel_size=(1, 1),
+                                   module0_1_conv2d_0_padding=0,
+                                   module0_1_conv2d_0_pad_mode="valid",
+                                   module0_1_conv2d_3_in_channels=128,
+                                   module0_1_conv2d_3_out_channels=128,
+                                   module0_1_conv2d_3_stride=(1, 1),
+                                   module0_1_conv2d_3_group=4,
+                                   module0_2_conv2d_0_in_channels=512,
+                                   module0_2_conv2d_0_out_channels=128,
+                                   module0_2_conv2d_0_kernel_size=(1, 1),
+                                   module0_2_conv2d_0_padding=0,
+                                   module0_2_conv2d_0_pad_mode="valid",
+                                   module0_2_conv2d_3_in_channels=128,
+                                   module0_2_conv2d_3_out_channels=128,
+                                   module0_2_conv2d_3_stride=(1, 1),
+                                   module0_2_conv2d_3_group=4)
+        self.module14_2 = Module14(conv2d_0_in_channels=384,
+                                   conv2d_0_out_channels=1536,
+                                   module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=384,
+                                   module0_0_conv2d_0_kernel_size=(1, 1),
+                                   module0_0_conv2d_0_padding=0,
+                                   module0_0_conv2d_0_pad_mode="valid",
+                                   module0_0_conv2d_3_in_channels=384,
+                                   module0_0_conv2d_3_out_channels=384,
+                                   module0_0_conv2d_3_stride=(2, 2),
+                                   module0_0_conv2d_3_group=12)
+        self.conv2d_71 = nn.Conv2d(in_channels=512,
+                                   out_channels=1536,
+                                   kernel_size=(1, 1),
+                                   stride=(2, 2),
+                                   padding=0,
+                                   pad_mode="valid",
+                                   dilation=(1, 1),
+                                   group=1,
+                                   has_bias=True)
+        self.module10_0 = Module10()
+        self.module16_1 = Module16(conv2d_0_in_channels=384,
+                                   conv2d_0_out_channels=1536,
+                                   conv2d_2_in_channels=384,
+                                   conv2d_2_out_channels=1536,
+                                   conv2d_4_in_channels=384,
+                                   conv2d_4_out_channels=1536,
+                                   module0_0_conv2d_0_in_channels=1536,
+                                   module0_0_conv2d_0_out_channels=384,
+                                   module0_0_conv2d_0_kernel_size=(1, 1),
+                                   module0_0_conv2d_0_padding=0,
+                                   module0_0_conv2d_0_pad_mode="valid",
+                                   module0_0_conv2d_3_in_channels=384,
+                                   module0_0_conv2d_3_out_channels=384,
+                                   module0_0_conv2d_3_stride=(1, 1),
+                                   module0_0_conv2d_3_group=12,
+                                   module0_1_conv2d_0_in_channels=1536,
+                                   module0_1_conv2d_0_out_channels=384,
+                                   module0_1_conv2d_0_kernel_size=(1, 1),
+                                   module0_1_conv2d_0_padding=0,
+                                   module0_1_conv2d_0_pad_mode="valid",
+                                   module0_1_conv2d_3_in_channels=384,
+                                   module0_1_conv2d_3_out_channels=384,
+                                   module0_1_conv2d_3_stride=(1, 1),
+                                   module0_1_conv2d_3_group=12,
+                                   module0_2_conv2d_0_in_channels=1536,
+                                   module0_2_conv2d_0_out_channels=384,
+                                   module0_2_conv2d_0_kernel_size=(1, 1),
+                                   module0_2_conv2d_0_padding=0,
+                                   module0_2_conv2d_0_pad_mode="valid",
+                                   module0_2_conv2d_3_in_channels=384,
+                                   module0_2_conv2d_3_out_channels=384,
+                                   module0_2_conv2d_3_stride=(1, 1),
+                                   module0_2_conv2d_3_group=12)
+        self.module14_3 = Module14(conv2d_0_in_channels=1536,
+                                   conv2d_0_out_channels=1536,
+                                   module0_0_conv2d_0_in_channels=1536,
+                                   module0_0_conv2d_0_out_channels=1536,
+                                   module0_0_conv2d_0_kernel_size=(1, 1),
+                                   module0_0_conv2d_0_padding=0,
+                                   module0_0_conv2d_0_pad_mode="valid",
+                                   module0_0_conv2d_3_in_channels=1536,
+                                   module0_0_conv2d_3_out_channels=1536,
+                                   module0_0_conv2d_3_stride=(2, 2),
+                                   module0_0_conv2d_3_group=1536)
+        self.conv2d_132 = nn.Conv2d(in_channels=1536,
+                                    out_channels=1536,
+                                    kernel_size=(1, 1),
+                                    stride=(2, 2),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module16_2 = Module16(conv2d_0_in_channels=1536,
+                                   conv2d_0_out_channels=1536,
+                                   conv2d_2_in_channels=1536,
+                                   conv2d_2_out_channels=1536,
+                                   conv2d_4_in_channels=1536,
+                                   conv2d_4_out_channels=1536,
+                                   module0_0_conv2d_0_in_channels=1536,
+                                   module0_0_conv2d_0_out_channels=1536,
+                                   module0_0_conv2d_0_kernel_size=(1, 1),
+                                   module0_0_conv2d_0_padding=0,
+                                   module0_0_conv2d_0_pad_mode="valid",
+                                   module0_0_conv2d_3_in_channels=1536,
+                                   module0_0_conv2d_3_out_channels=1536,
+                                   module0_0_conv2d_3_stride=(1, 1),
+                                   module0_0_conv2d_3_group=1536,
+                                   module0_1_conv2d_0_in_channels=1536,
+                                   module0_1_conv2d_0_out_channels=1536,
+                                   module0_1_conv2d_0_kernel_size=(1, 1),
+                                   module0_1_conv2d_0_padding=0,
+                                   module0_1_conv2d_0_pad_mode="valid",
+                                   module0_1_conv2d_3_in_channels=1536,
+                                   module0_1_conv2d_3_out_channels=1536,
+                                   module0_1_conv2d_3_stride=(1, 1),
+                                   module0_1_conv2d_3_group=1536,
+                                   module0_2_conv2d_0_in_channels=1536,
+                                   module0_2_conv2d_0_out_channels=1536,
+                                   module0_2_conv2d_0_kernel_size=(1, 1),
+                                   module0_2_conv2d_0_padding=0,
+                                   module0_2_conv2d_0_pad_mode="valid",
+                                   module0_2_conv2d_3_in_channels=1536,
+                                   module0_2_conv2d_3_out_channels=1536,
+                                   module0_2_conv2d_3_stride=(1, 1),
+                                   module0_2_conv2d_3_group=1536)
+        self.conv2d_172 = nn.Conv2d(in_channels=1536,
+                                    out_channels=2048,
+                                    kernel_size=(1, 1),
+                                    stride=(1, 1),
+                                    padding=0,
+                                    pad_mode="valid",
+                                    dilation=(1, 1),
+                                    group=1,
+                                    has_bias=True)
+        self.module4_0 = Module4()
+        self.avgpool2d_175 = nn.AvgPool2d(kernel_size=(8, 8))
+        self.flatten_176 = nn.Flatten()
+        self.dense_177 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_conv2d_1 = self.conv2d_1(opt_conv2d_0)
+        module0_0_opt = self.module0_0(opt_conv2d_1)
+        module14_0_opt = self.module14_0(module0_0_opt)
+        opt_conv2d_9 = self.conv2d_9(module0_0_opt)
+        opt_add_16 = P.Add()(module14_0_opt, opt_conv2d_9)
+        module12_0_opt = self.module12_0(opt_add_16)
+        module14_1_opt = self.module14_1(module12_0_opt)
+        opt_conv2d_30 = self.conv2d_30(module12_0_opt)
+        opt_add_37 = P.Add()(module14_1_opt, opt_conv2d_30)
+        module16_0_opt = self.module16_0(opt_add_37)
+        module14_2_opt = self.module14_2(module16_0_opt)
+        opt_conv2d_71 = self.conv2d_71(module16_0_opt)
+        opt_add_78 = P.Add()(module14_2_opt, opt_conv2d_71)
+        module10_0_opt = self.module10_0(opt_add_78)
+        module16_1_opt = self.module16_1(module10_0_opt)
+        module14_3_opt = self.module14_3(module16_1_opt)
+        opt_conv2d_132 = self.conv2d_132(module16_1_opt)
+        opt_add_139 = P.Add()(module14_3_opt, opt_conv2d_132)
+        module16_2_opt = self.module16_2(opt_add_139)
+        opt_conv2d_172 = self.conv2d_172(module16_2_opt)
+        module4_0_opt = self.module4_0(opt_conv2d_172)
+        opt_avgpool2d_175 = self.avgpool2d_175(module4_0_opt)
+        opt_flatten_176 = self.flatten_176(opt_avgpool2d_175)
+        opt_dense_177 = self.dense_177(opt_flatten_176)
+        return opt_dense_177
diff --git a/research/cvtmodel/resnet_ipl/src/resnetrs200.py b/research/cvtmodel/resnet_ipl/src/resnetrs200.py
new file mode 100644
index 0000000000000000000000000000000000000000..82fe185679e8114afb475876f3715e94f5dca36c
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/src/resnetrs200.py
@@ -0,0 +1,1353 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
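+# Module11: a 3x3 convolution followed by ReLU.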
+class Module11(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_stride):
+        super(Module11, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=conv2d_0_stride,
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
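+# Module0: a bottleneck main branch (1x1 -> 3x3 -> 1x1 convolutions) gated by a
+# squeeze-and-excitation-style block (global ReduceMean, two 1x1 convolutions, sigmoid,
+# channel-wise multiply); the residual add is performed by the caller.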
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_2_stride, conv2d_4_in_channels, conv2d_4_out_channels, conv2d_6_in_channels,
+                 conv2d_6_out_channels, conv2d_8_in_channels, conv2d_8_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=conv2d_2_stride,
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
+                                  out_channels=conv2d_4_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.reducemean_5 = P.ReduceMean(keep_dims=True)
+        self.reducemean_5_axis = (2, 3)
+        self.conv2d_6 = nn.Conv2d(in_channels=conv2d_6_in_channels,
+                                  out_channels=conv2d_6_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_7 = nn.ReLU()
+        self.conv2d_8 = nn.Conv2d(in_channels=conv2d_8_in_channels,
+                                  out_channels=conv2d_8_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.sigmoid_9 = nn.Sigmoid()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
+        opt_reducemean_5 = self.reducemean_5(opt_conv2d_4, self.reducemean_5_axis)
+        opt_conv2d_6 = self.conv2d_6(opt_reducemean_5)
+        opt_relu_7 = self.relu_7(opt_conv2d_6)
+        opt_conv2d_8 = self.conv2d_8(opt_relu_7)
+        opt_sigmoid_9 = self.sigmoid_9(opt_conv2d_8)
+        opt_mul_10 = P.Mul()(opt_conv2d_4, opt_sigmoid_9)
+        return opt_mul_10
+
+
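+# Module2: two consecutive Module0 SE-bottleneck blocks, each with an identity
+# residual add followed by ReLU.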
+class Module2(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_4_in_channels,
+                 module0_0_conv2d_4_out_channels, module0_0_conv2d_6_in_channels, module0_0_conv2d_6_out_channels,
+                 module0_0_conv2d_8_in_channels, module0_0_conv2d_8_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_2_stride, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_1_conv2d_6_in_channels, module0_1_conv2d_6_out_channels, module0_1_conv2d_8_in_channels,
+                 module0_1_conv2d_8_out_channels):
+        super(Module2, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_0_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_0_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_0_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_0_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_0_conv2d_8_out_channels)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_1_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_1_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_1_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_1_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_1_conv2d_8_out_channels)
+        self.relu_3 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        return opt_relu_3
+
+
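+# Module6: the downsampling shortcut branch -- 2x2 average pooling followed by a
+# 1x1 convolution.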
+class Module6(nn.Cell):
+    def __init__(self, conv2d_1_in_channels, conv2d_1_out_channels):
+        super(Module6, self).__init__()
+        self.pad_avgpool2d_0 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
+        self.avgpool2d_0 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
+        self.conv2d_1 = nn.Conv2d(in_channels=conv2d_1_in_channels,
+                                  out_channels=conv2d_1_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        opt_avgpool2d_0 = self.pad_avgpool2d_0(x)
+        opt_avgpool2d_0 = self.avgpool2d_0(opt_avgpool2d_0)
+        opt_conv2d_1 = self.conv2d_1(opt_avgpool2d_0)
+        return opt_conv2d_1
+
+
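+# Module40: a chain of sixteen residual units. Each Module0 block (defined earlier
+# in this file) is followed by an element-wise add with its input and a ReLU.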
+class Module40(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_4_in_channels,
+                 module0_0_conv2d_4_out_channels, module0_0_conv2d_6_in_channels, module0_0_conv2d_6_out_channels,
+                 module0_0_conv2d_8_in_channels, module0_0_conv2d_8_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_2_stride, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_1_conv2d_6_in_channels, module0_1_conv2d_6_out_channels, module0_1_conv2d_8_in_channels,
+                 module0_1_conv2d_8_out_channels, module0_2_conv2d_0_in_channels, module0_2_conv2d_0_out_channels,
+                 module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels, module0_2_conv2d_2_stride,
+                 module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels, module0_2_conv2d_6_in_channels,
+                 module0_2_conv2d_6_out_channels, module0_2_conv2d_8_in_channels, module0_2_conv2d_8_out_channels,
+                 module0_3_conv2d_0_in_channels, module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels,
+                 module0_3_conv2d_2_out_channels, module0_3_conv2d_2_stride, module0_3_conv2d_4_in_channels,
+                 module0_3_conv2d_4_out_channels, module0_3_conv2d_6_in_channels, module0_3_conv2d_6_out_channels,
+                 module0_3_conv2d_8_in_channels, module0_3_conv2d_8_out_channels, module0_4_conv2d_0_in_channels,
+                 module0_4_conv2d_0_out_channels, module0_4_conv2d_2_in_channels, module0_4_conv2d_2_out_channels,
+                 module0_4_conv2d_2_stride, module0_4_conv2d_4_in_channels, module0_4_conv2d_4_out_channels,
+                 module0_4_conv2d_6_in_channels, module0_4_conv2d_6_out_channels, module0_4_conv2d_8_in_channels,
+                 module0_4_conv2d_8_out_channels, module0_5_conv2d_0_in_channels, module0_5_conv2d_0_out_channels,
+                 module0_5_conv2d_2_in_channels, module0_5_conv2d_2_out_channels, module0_5_conv2d_2_stride,
+                 module0_5_conv2d_4_in_channels, module0_5_conv2d_4_out_channels, module0_5_conv2d_6_in_channels,
+                 module0_5_conv2d_6_out_channels, module0_5_conv2d_8_in_channels, module0_5_conv2d_8_out_channels,
+                 module0_6_conv2d_0_in_channels, module0_6_conv2d_0_out_channels, module0_6_conv2d_2_in_channels,
+                 module0_6_conv2d_2_out_channels, module0_6_conv2d_2_stride, module0_6_conv2d_4_in_channels,
+                 module0_6_conv2d_4_out_channels, module0_6_conv2d_6_in_channels, module0_6_conv2d_6_out_channels,
+                 module0_6_conv2d_8_in_channels, module0_6_conv2d_8_out_channels, module0_7_conv2d_0_in_channels,
+                 module0_7_conv2d_0_out_channels, module0_7_conv2d_2_in_channels, module0_7_conv2d_2_out_channels,
+                 module0_7_conv2d_2_stride, module0_7_conv2d_4_in_channels, module0_7_conv2d_4_out_channels,
+                 module0_7_conv2d_6_in_channels, module0_7_conv2d_6_out_channels, module0_7_conv2d_8_in_channels,
+                 module0_7_conv2d_8_out_channels, module0_8_conv2d_0_in_channels, module0_8_conv2d_0_out_channels,
+                 module0_8_conv2d_2_in_channels, module0_8_conv2d_2_out_channels, module0_8_conv2d_2_stride,
+                 module0_8_conv2d_4_in_channels, module0_8_conv2d_4_out_channels, module0_8_conv2d_6_in_channels,
+                 module0_8_conv2d_6_out_channels, module0_8_conv2d_8_in_channels, module0_8_conv2d_8_out_channels,
+                 module0_9_conv2d_0_in_channels, module0_9_conv2d_0_out_channels, module0_9_conv2d_2_in_channels,
+                 module0_9_conv2d_2_out_channels, module0_9_conv2d_2_stride, module0_9_conv2d_4_in_channels,
+                 module0_9_conv2d_4_out_channels, module0_9_conv2d_6_in_channels, module0_9_conv2d_6_out_channels,
+                 module0_9_conv2d_8_in_channels, module0_9_conv2d_8_out_channels, module0_10_conv2d_0_in_channels,
+                 module0_10_conv2d_0_out_channels, module0_10_conv2d_2_in_channels, module0_10_conv2d_2_out_channels,
+                 module0_10_conv2d_2_stride, module0_10_conv2d_4_in_channels, module0_10_conv2d_4_out_channels,
+                 module0_10_conv2d_6_in_channels, module0_10_conv2d_6_out_channels, module0_10_conv2d_8_in_channels,
+                 module0_10_conv2d_8_out_channels, module0_11_conv2d_0_in_channels, module0_11_conv2d_0_out_channels,
+                 module0_11_conv2d_2_in_channels, module0_11_conv2d_2_out_channels, module0_11_conv2d_2_stride,
+                 module0_11_conv2d_4_in_channels, module0_11_conv2d_4_out_channels, module0_11_conv2d_6_in_channels,
+                 module0_11_conv2d_6_out_channels, module0_11_conv2d_8_in_channels, module0_11_conv2d_8_out_channels,
+                 module0_12_conv2d_0_in_channels, module0_12_conv2d_0_out_channels, module0_12_conv2d_2_in_channels,
+                 module0_12_conv2d_2_out_channels, module0_12_conv2d_2_stride, module0_12_conv2d_4_in_channels,
+                 module0_12_conv2d_4_out_channels, module0_12_conv2d_6_in_channels, module0_12_conv2d_6_out_channels,
+                 module0_12_conv2d_8_in_channels, module0_12_conv2d_8_out_channels, module0_13_conv2d_0_in_channels,
+                 module0_13_conv2d_0_out_channels, module0_13_conv2d_2_in_channels, module0_13_conv2d_2_out_channels,
+                 module0_13_conv2d_2_stride, module0_13_conv2d_4_in_channels, module0_13_conv2d_4_out_channels,
+                 module0_13_conv2d_6_in_channels, module0_13_conv2d_6_out_channels, module0_13_conv2d_8_in_channels,
+                 module0_13_conv2d_8_out_channels, module0_14_conv2d_0_in_channels, module0_14_conv2d_0_out_channels,
+                 module0_14_conv2d_2_in_channels, module0_14_conv2d_2_out_channels, module0_14_conv2d_2_stride,
+                 module0_14_conv2d_4_in_channels, module0_14_conv2d_4_out_channels, module0_14_conv2d_6_in_channels,
+                 module0_14_conv2d_6_out_channels, module0_14_conv2d_8_in_channels, module0_14_conv2d_8_out_channels,
+                 module0_15_conv2d_0_in_channels, module0_15_conv2d_0_out_channels, module0_15_conv2d_2_in_channels,
+                 module0_15_conv2d_2_out_channels, module0_15_conv2d_2_stride, module0_15_conv2d_4_in_channels,
+                 module0_15_conv2d_4_out_channels, module0_15_conv2d_6_in_channels, module0_15_conv2d_6_out_channels,
+                 module0_15_conv2d_8_in_channels, module0_15_conv2d_8_out_channels):
+        super(Module40, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_0_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_0_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_0_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_0_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_0_conv2d_8_out_channels)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_1_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_1_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_1_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_1_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_1_conv2d_8_out_channels)
+        self.relu_3 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_2_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_2_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_2_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_2_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_2_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_2_conv2d_8_out_channels)
+        self.relu_5 = nn.ReLU()
+        self.module0_3 = Module0(conv2d_0_in_channels=module0_3_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_3_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_3_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_3_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_3_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_3_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_3_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_3_conv2d_8_out_channels)
+        self.relu_7 = nn.ReLU()
+        self.module0_4 = Module0(conv2d_0_in_channels=module0_4_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_4_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_4_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_4_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_4_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_4_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_4_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_4_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_4_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_4_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_4_conv2d_8_out_channels)
+        self.relu_9 = nn.ReLU()
+        self.module0_5 = Module0(conv2d_0_in_channels=module0_5_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_5_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_5_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_5_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_5_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_5_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_5_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_5_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_5_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_5_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_5_conv2d_8_out_channels)
+        self.relu_11 = nn.ReLU()
+        self.module0_6 = Module0(conv2d_0_in_channels=module0_6_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_6_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_6_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_6_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_6_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_6_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_6_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_6_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_6_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_6_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_6_conv2d_8_out_channels)
+        self.relu_13 = nn.ReLU()
+        self.module0_7 = Module0(conv2d_0_in_channels=module0_7_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_7_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_7_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_7_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_7_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_7_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_7_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_7_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_7_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_7_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_7_conv2d_8_out_channels)
+        self.relu_15 = nn.ReLU()
+        self.module0_8 = Module0(conv2d_0_in_channels=module0_8_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_8_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_8_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_8_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_8_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_8_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_8_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_8_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_8_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_8_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_8_conv2d_8_out_channels)
+        self.relu_17 = nn.ReLU()
+        self.module0_9 = Module0(conv2d_0_in_channels=module0_9_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_9_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_9_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_9_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_9_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_9_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_9_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_9_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_9_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_9_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_9_conv2d_8_out_channels)
+        self.relu_19 = nn.ReLU()
+        self.module0_10 = Module0(conv2d_0_in_channels=module0_10_conv2d_0_in_channels,
+                                  conv2d_0_out_channels=module0_10_conv2d_0_out_channels,
+                                  conv2d_2_in_channels=module0_10_conv2d_2_in_channels,
+                                  conv2d_2_out_channels=module0_10_conv2d_2_out_channels,
+                                  conv2d_2_stride=module0_10_conv2d_2_stride,
+                                  conv2d_4_in_channels=module0_10_conv2d_4_in_channels,
+                                  conv2d_4_out_channels=module0_10_conv2d_4_out_channels,
+                                  conv2d_6_in_channels=module0_10_conv2d_6_in_channels,
+                                  conv2d_6_out_channels=module0_10_conv2d_6_out_channels,
+                                  conv2d_8_in_channels=module0_10_conv2d_8_in_channels,
+                                  conv2d_8_out_channels=module0_10_conv2d_8_out_channels)
+        self.relu_21 = nn.ReLU()
+        self.module0_11 = Module0(conv2d_0_in_channels=module0_11_conv2d_0_in_channels,
+                                  conv2d_0_out_channels=module0_11_conv2d_0_out_channels,
+                                  conv2d_2_in_channels=module0_11_conv2d_2_in_channels,
+                                  conv2d_2_out_channels=module0_11_conv2d_2_out_channels,
+                                  conv2d_2_stride=module0_11_conv2d_2_stride,
+                                  conv2d_4_in_channels=module0_11_conv2d_4_in_channels,
+                                  conv2d_4_out_channels=module0_11_conv2d_4_out_channels,
+                                  conv2d_6_in_channels=module0_11_conv2d_6_in_channels,
+                                  conv2d_6_out_channels=module0_11_conv2d_6_out_channels,
+                                  conv2d_8_in_channels=module0_11_conv2d_8_in_channels,
+                                  conv2d_8_out_channels=module0_11_conv2d_8_out_channels)
+        self.relu_23 = nn.ReLU()
+        self.module0_12 = Module0(conv2d_0_in_channels=module0_12_conv2d_0_in_channels,
+                                  conv2d_0_out_channels=module0_12_conv2d_0_out_channels,
+                                  conv2d_2_in_channels=module0_12_conv2d_2_in_channels,
+                                  conv2d_2_out_channels=module0_12_conv2d_2_out_channels,
+                                  conv2d_2_stride=module0_12_conv2d_2_stride,
+                                  conv2d_4_in_channels=module0_12_conv2d_4_in_channels,
+                                  conv2d_4_out_channels=module0_12_conv2d_4_out_channels,
+                                  conv2d_6_in_channels=module0_12_conv2d_6_in_channels,
+                                  conv2d_6_out_channels=module0_12_conv2d_6_out_channels,
+                                  conv2d_8_in_channels=module0_12_conv2d_8_in_channels,
+                                  conv2d_8_out_channels=module0_12_conv2d_8_out_channels)
+        self.relu_25 = nn.ReLU()
+        self.module0_13 = Module0(conv2d_0_in_channels=module0_13_conv2d_0_in_channels,
+                                  conv2d_0_out_channels=module0_13_conv2d_0_out_channels,
+                                  conv2d_2_in_channels=module0_13_conv2d_2_in_channels,
+                                  conv2d_2_out_channels=module0_13_conv2d_2_out_channels,
+                                  conv2d_2_stride=module0_13_conv2d_2_stride,
+                                  conv2d_4_in_channels=module0_13_conv2d_4_in_channels,
+                                  conv2d_4_out_channels=module0_13_conv2d_4_out_channels,
+                                  conv2d_6_in_channels=module0_13_conv2d_6_in_channels,
+                                  conv2d_6_out_channels=module0_13_conv2d_6_out_channels,
+                                  conv2d_8_in_channels=module0_13_conv2d_8_in_channels,
+                                  conv2d_8_out_channels=module0_13_conv2d_8_out_channels)
+        self.relu_27 = nn.ReLU()
+        self.module0_14 = Module0(conv2d_0_in_channels=module0_14_conv2d_0_in_channels,
+                                  conv2d_0_out_channels=module0_14_conv2d_0_out_channels,
+                                  conv2d_2_in_channels=module0_14_conv2d_2_in_channels,
+                                  conv2d_2_out_channels=module0_14_conv2d_2_out_channels,
+                                  conv2d_2_stride=module0_14_conv2d_2_stride,
+                                  conv2d_4_in_channels=module0_14_conv2d_4_in_channels,
+                                  conv2d_4_out_channels=module0_14_conv2d_4_out_channels,
+                                  conv2d_6_in_channels=module0_14_conv2d_6_in_channels,
+                                  conv2d_6_out_channels=module0_14_conv2d_6_out_channels,
+                                  conv2d_8_in_channels=module0_14_conv2d_8_in_channels,
+                                  conv2d_8_out_channels=module0_14_conv2d_8_out_channels)
+        self.relu_29 = nn.ReLU()
+        self.module0_15 = Module0(conv2d_0_in_channels=module0_15_conv2d_0_in_channels,
+                                  conv2d_0_out_channels=module0_15_conv2d_0_out_channels,
+                                  conv2d_2_in_channels=module0_15_conv2d_2_in_channels,
+                                  conv2d_2_out_channels=module0_15_conv2d_2_out_channels,
+                                  conv2d_2_stride=module0_15_conv2d_2_stride,
+                                  conv2d_4_in_channels=module0_15_conv2d_4_in_channels,
+                                  conv2d_4_out_channels=module0_15_conv2d_4_out_channels,
+                                  conv2d_6_in_channels=module0_15_conv2d_6_in_channels,
+                                  conv2d_6_out_channels=module0_15_conv2d_6_out_channels,
+                                  conv2d_8_in_channels=module0_15_conv2d_8_in_channels,
+                                  conv2d_8_out_channels=module0_15_conv2d_8_out_channels)
+        self.relu_31 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_2_opt = self.module0_2(opt_relu_3)
+        opt_add_4 = P.Add()(module0_2_opt, opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_add_4)
+        module0_3_opt = self.module0_3(opt_relu_5)
+        opt_add_6 = P.Add()(module0_3_opt, opt_relu_5)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        module0_4_opt = self.module0_4(opt_relu_7)
+        opt_add_8 = P.Add()(module0_4_opt, opt_relu_7)
+        opt_relu_9 = self.relu_9(opt_add_8)
+        module0_5_opt = self.module0_5(opt_relu_9)
+        opt_add_10 = P.Add()(module0_5_opt, opt_relu_9)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module0_6_opt = self.module0_6(opt_relu_11)
+        opt_add_12 = P.Add()(module0_6_opt, opt_relu_11)
+        opt_relu_13 = self.relu_13(opt_add_12)
+        module0_7_opt = self.module0_7(opt_relu_13)
+        opt_add_14 = P.Add()(module0_7_opt, opt_relu_13)
+        opt_relu_15 = self.relu_15(opt_add_14)
+        module0_8_opt = self.module0_8(opt_relu_15)
+        opt_add_16 = P.Add()(module0_8_opt, opt_relu_15)
+        opt_relu_17 = self.relu_17(opt_add_16)
+        module0_9_opt = self.module0_9(opt_relu_17)
+        opt_add_18 = P.Add()(module0_9_opt, opt_relu_17)
+        opt_relu_19 = self.relu_19(opt_add_18)
+        module0_10_opt = self.module0_10(opt_relu_19)
+        opt_add_20 = P.Add()(module0_10_opt, opt_relu_19)
+        opt_relu_21 = self.relu_21(opt_add_20)
+        module0_11_opt = self.module0_11(opt_relu_21)
+        opt_add_22 = P.Add()(module0_11_opt, opt_relu_21)
+        opt_relu_23 = self.relu_23(opt_add_22)
+        module0_12_opt = self.module0_12(opt_relu_23)
+        opt_add_24 = P.Add()(module0_12_opt, opt_relu_23)
+        opt_relu_25 = self.relu_25(opt_add_24)
+        module0_13_opt = self.module0_13(opt_relu_25)
+        opt_add_26 = P.Add()(module0_13_opt, opt_relu_25)
+        opt_relu_27 = self.relu_27(opt_add_26)
+        module0_14_opt = self.module0_14(opt_relu_27)
+        opt_add_28 = P.Add()(module0_14_opt, opt_relu_27)
+        opt_relu_29 = self.relu_29(opt_add_28)
+        module0_15_opt = self.module0_15(opt_relu_29)
+        opt_add_30 = P.Add()(module0_15_opt, opt_relu_29)
+        opt_relu_31 = self.relu_31(opt_add_30)
+        return opt_relu_31
+
+
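+# Module3: four residual units with a fixed channel configuration (512 in/out,
+# 128 internal channels per Module0 block), each followed by an identity add and ReLU.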
+class Module3(nn.Cell):
+    def __init__(self):
+        super(Module3, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512,
+                                 conv2d_6_in_channels=512,
+                                 conv2d_6_out_channels=128,
+                                 conv2d_8_in_channels=128,
+                                 conv2d_8_out_channels=512)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512,
+                                 conv2d_6_in_channels=512,
+                                 conv2d_6_out_channels=128,
+                                 conv2d_8_in_channels=128,
+                                 conv2d_8_out_channels=512)
+        self.relu_3 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512,
+                                 conv2d_6_in_channels=512,
+                                 conv2d_6_out_channels=128,
+                                 conv2d_8_in_channels=128,
+                                 conv2d_8_out_channels=512)
+        self.relu_5 = nn.ReLU()
+        self.module0_3 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512,
+                                 conv2d_6_in_channels=512,
+                                 conv2d_6_out_channels=128,
+                                 conv2d_8_in_channels=128,
+                                 conv2d_8_out_channels=512)
+        self.relu_7 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_2_opt = self.module0_2(opt_relu_3)
+        opt_add_4 = P.Add()(module0_2_opt, opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_add_4)
+        module0_3_opt = self.module0_3(opt_relu_5)
+        opt_add_6 = P.Add()(module0_3_opt, opt_relu_5)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        return opt_relu_7
+
+
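+# Module16: three residual units with per-block channel parameters; each Module0
+# output is added to its input and passed through ReLU.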
+class Module16(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_4_in_channels,
+                 module0_0_conv2d_4_out_channels, module0_0_conv2d_6_in_channels, module0_0_conv2d_6_out_channels,
+                 module0_0_conv2d_8_in_channels, module0_0_conv2d_8_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_2_stride, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_1_conv2d_6_in_channels, module0_1_conv2d_6_out_channels, module0_1_conv2d_8_in_channels,
+                 module0_1_conv2d_8_out_channels, module0_2_conv2d_0_in_channels, module0_2_conv2d_0_out_channels,
+                 module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels, module0_2_conv2d_2_stride,
+                 module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels, module0_2_conv2d_6_in_channels,
+                 module0_2_conv2d_6_out_channels, module0_2_conv2d_8_in_channels, module0_2_conv2d_8_out_channels):
+        super(Module16, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_0_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_0_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_0_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_0_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_0_conv2d_8_out_channels)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_1_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_1_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_1_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_1_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_1_conv2d_8_out_channels)
+        self.relu_3 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_2_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_2_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_2_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_2_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_2_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_2_conv2d_8_out_channels)
+        self.relu_5 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_2_opt = self.module0_2(opt_relu_3)
+        opt_add_4 = P.Add()(module0_2_opt, opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_add_4)
+        return opt_relu_5
+
+
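+# MindSporeModel: the top-level converted network. A strided 3x3 stem convolution
+# and three Module11 blocks (defined earlier in this file) are followed by stacked
+# residual stages built from Module0/Module2/Module40/Module3/Module16, with
+# Module6 providing the average-pool + 1x1-conv downsampling branch where the
+# channel count increases.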
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=32,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module11_0 = Module11(conv2d_0_in_channels=32, conv2d_0_out_channels=32, conv2d_0_stride=(1, 1))
+        self.module11_1 = Module11(conv2d_0_in_channels=32, conv2d_0_out_channels=64, conv2d_0_stride=(1, 1))
+        self.module11_2 = Module11(conv2d_0_in_channels=64, conv2d_0_out_channels=64, conv2d_0_stride=(2, 2))
+        self.module0_0 = Module0(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_2_in_channels=64,
+                                 conv2d_2_out_channels=64,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=64,
+                                 conv2d_4_out_channels=256,
+                                 conv2d_6_in_channels=256,
+                                 conv2d_6_out_channels=64,
+                                 conv2d_8_in_channels=64,
+                                 conv2d_8_out_channels=256)
+        self.conv2d_9 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_21 = nn.ReLU()
+        self.module2_0 = Module2(module0_0_conv2d_0_in_channels=256,
+                                 module0_0_conv2d_0_out_channels=64,
+                                 module0_0_conv2d_2_in_channels=64,
+                                 module0_0_conv2d_2_out_channels=64,
+                                 module0_0_conv2d_2_stride=(1, 1),
+                                 module0_0_conv2d_4_in_channels=64,
+                                 module0_0_conv2d_4_out_channels=256,
+                                 module0_0_conv2d_6_in_channels=256,
+                                 module0_0_conv2d_6_out_channels=64,
+                                 module0_0_conv2d_8_in_channels=64,
+                                 module0_0_conv2d_8_out_channels=256,
+                                 module0_1_conv2d_0_in_channels=256,
+                                 module0_1_conv2d_0_out_channels=64,
+                                 module0_1_conv2d_2_in_channels=64,
+                                 module0_1_conv2d_2_out_channels=64,
+                                 module0_1_conv2d_2_stride=(1, 1),
+                                 module0_1_conv2d_4_in_channels=64,
+                                 module0_1_conv2d_4_out_channels=256,
+                                 module0_1_conv2d_6_in_channels=256,
+                                 module0_1_conv2d_6_out_channels=64,
+                                 module0_1_conv2d_8_in_channels=64,
+                                 module0_1_conv2d_8_out_channels=256)
+        self.module0_1 = Module0(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512,
+                                 conv2d_6_in_channels=512,
+                                 conv2d_6_out_channels=128,
+                                 conv2d_8_in_channels=128,
+                                 conv2d_8_out_channels=512)
+        self.module6_0 = Module6(conv2d_1_in_channels=256, conv2d_1_out_channels=512)
+        self.relu_62 = nn.ReLU()
+        self.module40_0 = Module40(module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=128,
+                                   module0_0_conv2d_4_out_channels=512,
+                                   module0_0_conv2d_6_in_channels=512,
+                                   module0_0_conv2d_6_out_channels=128,
+                                   module0_0_conv2d_8_in_channels=128,
+                                   module0_0_conv2d_8_out_channels=512,
+                                   module0_1_conv2d_0_in_channels=512,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=128,
+                                   module0_1_conv2d_4_out_channels=512,
+                                   module0_1_conv2d_6_in_channels=512,
+                                   module0_1_conv2d_6_out_channels=128,
+                                   module0_1_conv2d_8_in_channels=128,
+                                   module0_1_conv2d_8_out_channels=512,
+                                   module0_2_conv2d_0_in_channels=512,
+                                   module0_2_conv2d_0_out_channels=128,
+                                   module0_2_conv2d_2_in_channels=128,
+                                   module0_2_conv2d_2_out_channels=128,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=128,
+                                   module0_2_conv2d_4_out_channels=512,
+                                   module0_2_conv2d_6_in_channels=512,
+                                   module0_2_conv2d_6_out_channels=128,
+                                   module0_2_conv2d_8_in_channels=128,
+                                   module0_2_conv2d_8_out_channels=512,
+                                   module0_3_conv2d_0_in_channels=512,
+                                   module0_3_conv2d_0_out_channels=128,
+                                   module0_3_conv2d_2_in_channels=128,
+                                   module0_3_conv2d_2_out_channels=128,
+                                   module0_3_conv2d_2_stride=(1, 1),
+                                   module0_3_conv2d_4_in_channels=128,
+                                   module0_3_conv2d_4_out_channels=512,
+                                   module0_3_conv2d_6_in_channels=512,
+                                   module0_3_conv2d_6_out_channels=128,
+                                   module0_3_conv2d_8_in_channels=128,
+                                   module0_3_conv2d_8_out_channels=512,
+                                   module0_4_conv2d_0_in_channels=512,
+                                   module0_4_conv2d_0_out_channels=128,
+                                   module0_4_conv2d_2_in_channels=128,
+                                   module0_4_conv2d_2_out_channels=128,
+                                   module0_4_conv2d_2_stride=(1, 1),
+                                   module0_4_conv2d_4_in_channels=128,
+                                   module0_4_conv2d_4_out_channels=512,
+                                   module0_4_conv2d_6_in_channels=512,
+                                   module0_4_conv2d_6_out_channels=128,
+                                   module0_4_conv2d_8_in_channels=128,
+                                   module0_4_conv2d_8_out_channels=512,
+                                   module0_5_conv2d_0_in_channels=512,
+                                   module0_5_conv2d_0_out_channels=128,
+                                   module0_5_conv2d_2_in_channels=128,
+                                   module0_5_conv2d_2_out_channels=128,
+                                   module0_5_conv2d_2_stride=(1, 1),
+                                   module0_5_conv2d_4_in_channels=128,
+                                   module0_5_conv2d_4_out_channels=512,
+                                   module0_5_conv2d_6_in_channels=512,
+                                   module0_5_conv2d_6_out_channels=128,
+                                   module0_5_conv2d_8_in_channels=128,
+                                   module0_5_conv2d_8_out_channels=512,
+                                   module0_6_conv2d_0_in_channels=512,
+                                   module0_6_conv2d_0_out_channels=128,
+                                   module0_6_conv2d_2_in_channels=128,
+                                   module0_6_conv2d_2_out_channels=128,
+                                   module0_6_conv2d_2_stride=(1, 1),
+                                   module0_6_conv2d_4_in_channels=128,
+                                   module0_6_conv2d_4_out_channels=512,
+                                   module0_6_conv2d_6_in_channels=512,
+                                   module0_6_conv2d_6_out_channels=128,
+                                   module0_6_conv2d_8_in_channels=128,
+                                   module0_6_conv2d_8_out_channels=512,
+                                   module0_7_conv2d_0_in_channels=512,
+                                   module0_7_conv2d_0_out_channels=128,
+                                   module0_7_conv2d_2_in_channels=128,
+                                   module0_7_conv2d_2_out_channels=128,
+                                   module0_7_conv2d_2_stride=(1, 1),
+                                   module0_7_conv2d_4_in_channels=128,
+                                   module0_7_conv2d_4_out_channels=512,
+                                   module0_7_conv2d_6_in_channels=512,
+                                   module0_7_conv2d_6_out_channels=128,
+                                   module0_7_conv2d_8_in_channels=128,
+                                   module0_7_conv2d_8_out_channels=512,
+                                   module0_8_conv2d_0_in_channels=512,
+                                   module0_8_conv2d_0_out_channels=128,
+                                   module0_8_conv2d_2_in_channels=128,
+                                   module0_8_conv2d_2_out_channels=128,
+                                   module0_8_conv2d_2_stride=(1, 1),
+                                   module0_8_conv2d_4_in_channels=128,
+                                   module0_8_conv2d_4_out_channels=512,
+                                   module0_8_conv2d_6_in_channels=512,
+                                   module0_8_conv2d_6_out_channels=128,
+                                   module0_8_conv2d_8_in_channels=128,
+                                   module0_8_conv2d_8_out_channels=512,
+                                   module0_9_conv2d_0_in_channels=512,
+                                   module0_9_conv2d_0_out_channels=128,
+                                   module0_9_conv2d_2_in_channels=128,
+                                   module0_9_conv2d_2_out_channels=128,
+                                   module0_9_conv2d_2_stride=(1, 1),
+                                   module0_9_conv2d_4_in_channels=128,
+                                   module0_9_conv2d_4_out_channels=512,
+                                   module0_9_conv2d_6_in_channels=512,
+                                   module0_9_conv2d_6_out_channels=128,
+                                   module0_9_conv2d_8_in_channels=128,
+                                   module0_9_conv2d_8_out_channels=512,
+                                   module0_10_conv2d_0_in_channels=512,
+                                   module0_10_conv2d_0_out_channels=128,
+                                   module0_10_conv2d_2_in_channels=128,
+                                   module0_10_conv2d_2_out_channels=128,
+                                   module0_10_conv2d_2_stride=(1, 1),
+                                   module0_10_conv2d_4_in_channels=128,
+                                   module0_10_conv2d_4_out_channels=512,
+                                   module0_10_conv2d_6_in_channels=512,
+                                   module0_10_conv2d_6_out_channels=128,
+                                   module0_10_conv2d_8_in_channels=128,
+                                   module0_10_conv2d_8_out_channels=512,
+                                   module0_11_conv2d_0_in_channels=512,
+                                   module0_11_conv2d_0_out_channels=128,
+                                   module0_11_conv2d_2_in_channels=128,
+                                   module0_11_conv2d_2_out_channels=128,
+                                   module0_11_conv2d_2_stride=(1, 1),
+                                   module0_11_conv2d_4_in_channels=128,
+                                   module0_11_conv2d_4_out_channels=512,
+                                   module0_11_conv2d_6_in_channels=512,
+                                   module0_11_conv2d_6_out_channels=128,
+                                   module0_11_conv2d_8_in_channels=128,
+                                   module0_11_conv2d_8_out_channels=512,
+                                   module0_12_conv2d_0_in_channels=512,
+                                   module0_12_conv2d_0_out_channels=128,
+                                   module0_12_conv2d_2_in_channels=128,
+                                   module0_12_conv2d_2_out_channels=128,
+                                   module0_12_conv2d_2_stride=(1, 1),
+                                   module0_12_conv2d_4_in_channels=128,
+                                   module0_12_conv2d_4_out_channels=512,
+                                   module0_12_conv2d_6_in_channels=512,
+                                   module0_12_conv2d_6_out_channels=128,
+                                   module0_12_conv2d_8_in_channels=128,
+                                   module0_12_conv2d_8_out_channels=512,
+                                   module0_13_conv2d_0_in_channels=512,
+                                   module0_13_conv2d_0_out_channels=128,
+                                   module0_13_conv2d_2_in_channels=128,
+                                   module0_13_conv2d_2_out_channels=128,
+                                   module0_13_conv2d_2_stride=(1, 1),
+                                   module0_13_conv2d_4_in_channels=128,
+                                   module0_13_conv2d_4_out_channels=512,
+                                   module0_13_conv2d_6_in_channels=512,
+                                   module0_13_conv2d_6_out_channels=128,
+                                   module0_13_conv2d_8_in_channels=128,
+                                   module0_13_conv2d_8_out_channels=512,
+                                   module0_14_conv2d_0_in_channels=512,
+                                   module0_14_conv2d_0_out_channels=128,
+                                   module0_14_conv2d_2_in_channels=128,
+                                   module0_14_conv2d_2_out_channels=128,
+                                   module0_14_conv2d_2_stride=(1, 1),
+                                   module0_14_conv2d_4_in_channels=128,
+                                   module0_14_conv2d_4_out_channels=512,
+                                   module0_14_conv2d_6_in_channels=512,
+                                   module0_14_conv2d_6_out_channels=128,
+                                   module0_14_conv2d_8_in_channels=128,
+                                   module0_14_conv2d_8_out_channels=512,
+                                   module0_15_conv2d_0_in_channels=512,
+                                   module0_15_conv2d_0_out_channels=128,
+                                   module0_15_conv2d_2_in_channels=128,
+                                   module0_15_conv2d_2_out_channels=128,
+                                   module0_15_conv2d_2_stride=(1, 1),
+                                   module0_15_conv2d_4_in_channels=128,
+                                   module0_15_conv2d_4_out_channels=512,
+                                   module0_15_conv2d_6_in_channels=512,
+                                   module0_15_conv2d_6_out_channels=128,
+                                   module0_15_conv2d_8_in_channels=128,
+                                   module0_15_conv2d_8_out_channels=512)
+        self.module3_0 = Module3()
+        self.module16_0 = Module16(module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=128,
+                                   module0_0_conv2d_4_out_channels=512,
+                                   module0_0_conv2d_6_in_channels=512,
+                                   module0_0_conv2d_6_out_channels=128,
+                                   module0_0_conv2d_8_in_channels=128,
+                                   module0_0_conv2d_8_out_channels=512,
+                                   module0_1_conv2d_0_in_channels=512,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=128,
+                                   module0_1_conv2d_4_out_channels=512,
+                                   module0_1_conv2d_6_in_channels=512,
+                                   module0_1_conv2d_6_out_channels=128,
+                                   module0_1_conv2d_8_in_channels=128,
+                                   module0_1_conv2d_8_out_channels=512,
+                                   module0_2_conv2d_0_in_channels=512,
+                                   module0_2_conv2d_0_out_channels=128,
+                                   module0_2_conv2d_2_in_channels=128,
+                                   module0_2_conv2d_2_out_channels=128,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=128,
+                                   module0_2_conv2d_4_out_channels=512,
+                                   module0_2_conv2d_6_in_channels=512,
+                                   module0_2_conv2d_6_out_channels=128,
+                                   module0_2_conv2d_8_in_channels=128,
+                                   module0_2_conv2d_8_out_channels=512)
+        self.module0_2 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024,
+                                 conv2d_6_in_channels=1024,
+                                 conv2d_6_out_channels=256,
+                                 conv2d_8_in_channels=256,
+                                 conv2d_8_out_channels=1024)
+        self.module6_1 = Module6(conv2d_1_in_channels=512, conv2d_1_out_channels=1024)
+        self.relu_376 = nn.ReLU()
+        self.module40_1 = Module40(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_0_conv2d_6_in_channels=1024,
+                                   module0_0_conv2d_6_out_channels=256,
+                                   module0_0_conv2d_8_in_channels=256,
+                                   module0_0_conv2d_8_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_6_in_channels=1024,
+                                   module0_1_conv2d_6_out_channels=256,
+                                   module0_1_conv2d_8_in_channels=256,
+                                   module0_1_conv2d_8_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_6_in_channels=1024,
+                                   module0_2_conv2d_6_out_channels=256,
+                                   module0_2_conv2d_8_in_channels=256,
+                                   module0_2_conv2d_8_out_channels=1024,
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=256,
+                                   module0_3_conv2d_2_stride=(1, 1),
+                                   module0_3_conv2d_4_in_channels=256,
+                                   module0_3_conv2d_4_out_channels=1024,
+                                   module0_3_conv2d_6_in_channels=1024,
+                                   module0_3_conv2d_6_out_channels=256,
+                                   module0_3_conv2d_8_in_channels=256,
+                                   module0_3_conv2d_8_out_channels=1024,
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=256,
+                                   module0_4_conv2d_2_stride=(1, 1),
+                                   module0_4_conv2d_4_in_channels=256,
+                                   module0_4_conv2d_4_out_channels=1024,
+                                   module0_4_conv2d_6_in_channels=1024,
+                                   module0_4_conv2d_6_out_channels=256,
+                                   module0_4_conv2d_8_in_channels=256,
+                                   module0_4_conv2d_8_out_channels=1024,
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=256,
+                                   module0_5_conv2d_2_stride=(1, 1),
+                                   module0_5_conv2d_4_in_channels=256,
+                                   module0_5_conv2d_4_out_channels=1024,
+                                   module0_5_conv2d_6_in_channels=1024,
+                                   module0_5_conv2d_6_out_channels=256,
+                                   module0_5_conv2d_8_in_channels=256,
+                                   module0_5_conv2d_8_out_channels=1024,
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=256,
+                                   module0_6_conv2d_2_stride=(1, 1),
+                                   module0_6_conv2d_4_in_channels=256,
+                                   module0_6_conv2d_4_out_channels=1024,
+                                   module0_6_conv2d_6_in_channels=1024,
+                                   module0_6_conv2d_6_out_channels=256,
+                                   module0_6_conv2d_8_in_channels=256,
+                                   module0_6_conv2d_8_out_channels=1024,
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=256,
+                                   module0_7_conv2d_2_stride=(1, 1),
+                                   module0_7_conv2d_4_in_channels=256,
+                                   module0_7_conv2d_4_out_channels=1024,
+                                   module0_7_conv2d_6_in_channels=1024,
+                                   module0_7_conv2d_6_out_channels=256,
+                                   module0_7_conv2d_8_in_channels=256,
+                                   module0_7_conv2d_8_out_channels=1024,
+                                   module0_8_conv2d_0_in_channels=1024,
+                                   module0_8_conv2d_0_out_channels=256,
+                                   module0_8_conv2d_2_in_channels=256,
+                                   module0_8_conv2d_2_out_channels=256,
+                                   module0_8_conv2d_2_stride=(1, 1),
+                                   module0_8_conv2d_4_in_channels=256,
+                                   module0_8_conv2d_4_out_channels=1024,
+                                   module0_8_conv2d_6_in_channels=1024,
+                                   module0_8_conv2d_6_out_channels=256,
+                                   module0_8_conv2d_8_in_channels=256,
+                                   module0_8_conv2d_8_out_channels=1024,
+                                   module0_9_conv2d_0_in_channels=1024,
+                                   module0_9_conv2d_0_out_channels=256,
+                                   module0_9_conv2d_2_in_channels=256,
+                                   module0_9_conv2d_2_out_channels=256,
+                                   module0_9_conv2d_2_stride=(1, 1),
+                                   module0_9_conv2d_4_in_channels=256,
+                                   module0_9_conv2d_4_out_channels=1024,
+                                   module0_9_conv2d_6_in_channels=1024,
+                                   module0_9_conv2d_6_out_channels=256,
+                                   module0_9_conv2d_8_in_channels=256,
+                                   module0_9_conv2d_8_out_channels=1024,
+                                   module0_10_conv2d_0_in_channels=1024,
+                                   module0_10_conv2d_0_out_channels=256,
+                                   module0_10_conv2d_2_in_channels=256,
+                                   module0_10_conv2d_2_out_channels=256,
+                                   module0_10_conv2d_2_stride=(1, 1),
+                                   module0_10_conv2d_4_in_channels=256,
+                                   module0_10_conv2d_4_out_channels=1024,
+                                   module0_10_conv2d_6_in_channels=1024,
+                                   module0_10_conv2d_6_out_channels=256,
+                                   module0_10_conv2d_8_in_channels=256,
+                                   module0_10_conv2d_8_out_channels=1024,
+                                   module0_11_conv2d_0_in_channels=1024,
+                                   module0_11_conv2d_0_out_channels=256,
+                                   module0_11_conv2d_2_in_channels=256,
+                                   module0_11_conv2d_2_out_channels=256,
+                                   module0_11_conv2d_2_stride=(1, 1),
+                                   module0_11_conv2d_4_in_channels=256,
+                                   module0_11_conv2d_4_out_channels=1024,
+                                   module0_11_conv2d_6_in_channels=1024,
+                                   module0_11_conv2d_6_out_channels=256,
+                                   module0_11_conv2d_8_in_channels=256,
+                                   module0_11_conv2d_8_out_channels=1024,
+                                   module0_12_conv2d_0_in_channels=1024,
+                                   module0_12_conv2d_0_out_channels=256,
+                                   module0_12_conv2d_2_in_channels=256,
+                                   module0_12_conv2d_2_out_channels=256,
+                                   module0_12_conv2d_2_stride=(1, 1),
+                                   module0_12_conv2d_4_in_channels=256,
+                                   module0_12_conv2d_4_out_channels=1024,
+                                   module0_12_conv2d_6_in_channels=1024,
+                                   module0_12_conv2d_6_out_channels=256,
+                                   module0_12_conv2d_8_in_channels=256,
+                                   module0_12_conv2d_8_out_channels=1024,
+                                   module0_13_conv2d_0_in_channels=1024,
+                                   module0_13_conv2d_0_out_channels=256,
+                                   module0_13_conv2d_2_in_channels=256,
+                                   module0_13_conv2d_2_out_channels=256,
+                                   module0_13_conv2d_2_stride=(1, 1),
+                                   module0_13_conv2d_4_in_channels=256,
+                                   module0_13_conv2d_4_out_channels=1024,
+                                   module0_13_conv2d_6_in_channels=1024,
+                                   module0_13_conv2d_6_out_channels=256,
+                                   module0_13_conv2d_8_in_channels=256,
+                                   module0_13_conv2d_8_out_channels=1024,
+                                   module0_14_conv2d_0_in_channels=1024,
+                                   module0_14_conv2d_0_out_channels=256,
+                                   module0_14_conv2d_2_in_channels=256,
+                                   module0_14_conv2d_2_out_channels=256,
+                                   module0_14_conv2d_2_stride=(1, 1),
+                                   module0_14_conv2d_4_in_channels=256,
+                                   module0_14_conv2d_4_out_channels=1024,
+                                   module0_14_conv2d_6_in_channels=1024,
+                                   module0_14_conv2d_6_out_channels=256,
+                                   module0_14_conv2d_8_in_channels=256,
+                                   module0_14_conv2d_8_out_channels=1024,
+                                   module0_15_conv2d_0_in_channels=1024,
+                                   module0_15_conv2d_0_out_channels=256,
+                                   module0_15_conv2d_2_in_channels=256,
+                                   module0_15_conv2d_2_out_channels=256,
+                                   module0_15_conv2d_2_stride=(1, 1),
+                                   module0_15_conv2d_4_in_channels=256,
+                                   module0_15_conv2d_4_out_channels=1024,
+                                   module0_15_conv2d_6_in_channels=1024,
+                                   module0_15_conv2d_6_out_channels=256,
+                                   module0_15_conv2d_8_in_channels=256,
+                                   module0_15_conv2d_8_out_channels=1024)
+        self.module40_2 = Module40(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_0_conv2d_6_in_channels=1024,
+                                   module0_0_conv2d_6_out_channels=256,
+                                   module0_0_conv2d_8_in_channels=256,
+                                   module0_0_conv2d_8_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_6_in_channels=1024,
+                                   module0_1_conv2d_6_out_channels=256,
+                                   module0_1_conv2d_8_in_channels=256,
+                                   module0_1_conv2d_8_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_6_in_channels=1024,
+                                   module0_2_conv2d_6_out_channels=256,
+                                   module0_2_conv2d_8_in_channels=256,
+                                   module0_2_conv2d_8_out_channels=1024,
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=256,
+                                   module0_3_conv2d_2_stride=(1, 1),
+                                   module0_3_conv2d_4_in_channels=256,
+                                   module0_3_conv2d_4_out_channels=1024,
+                                   module0_3_conv2d_6_in_channels=1024,
+                                   module0_3_conv2d_6_out_channels=256,
+                                   module0_3_conv2d_8_in_channels=256,
+                                   module0_3_conv2d_8_out_channels=1024,
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=256,
+                                   module0_4_conv2d_2_stride=(1, 1),
+                                   module0_4_conv2d_4_in_channels=256,
+                                   module0_4_conv2d_4_out_channels=1024,
+                                   module0_4_conv2d_6_in_channels=1024,
+                                   module0_4_conv2d_6_out_channels=256,
+                                   module0_4_conv2d_8_in_channels=256,
+                                   module0_4_conv2d_8_out_channels=1024,
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=256,
+                                   module0_5_conv2d_2_stride=(1, 1),
+                                   module0_5_conv2d_4_in_channels=256,
+                                   module0_5_conv2d_4_out_channels=1024,
+                                   module0_5_conv2d_6_in_channels=1024,
+                                   module0_5_conv2d_6_out_channels=256,
+                                   module0_5_conv2d_8_in_channels=256,
+                                   module0_5_conv2d_8_out_channels=1024,
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=256,
+                                   module0_6_conv2d_2_stride=(1, 1),
+                                   module0_6_conv2d_4_in_channels=256,
+                                   module0_6_conv2d_4_out_channels=1024,
+                                   module0_6_conv2d_6_in_channels=1024,
+                                   module0_6_conv2d_6_out_channels=256,
+                                   module0_6_conv2d_8_in_channels=256,
+                                   module0_6_conv2d_8_out_channels=1024,
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=256,
+                                   module0_7_conv2d_2_stride=(1, 1),
+                                   module0_7_conv2d_4_in_channels=256,
+                                   module0_7_conv2d_4_out_channels=1024,
+                                   module0_7_conv2d_6_in_channels=1024,
+                                   module0_7_conv2d_6_out_channels=256,
+                                   module0_7_conv2d_8_in_channels=256,
+                                   module0_7_conv2d_8_out_channels=1024,
+                                   module0_8_conv2d_0_in_channels=1024,
+                                   module0_8_conv2d_0_out_channels=256,
+                                   module0_8_conv2d_2_in_channels=256,
+                                   module0_8_conv2d_2_out_channels=256,
+                                   module0_8_conv2d_2_stride=(1, 1),
+                                   module0_8_conv2d_4_in_channels=256,
+                                   module0_8_conv2d_4_out_channels=1024,
+                                   module0_8_conv2d_6_in_channels=1024,
+                                   module0_8_conv2d_6_out_channels=256,
+                                   module0_8_conv2d_8_in_channels=256,
+                                   module0_8_conv2d_8_out_channels=1024,
+                                   module0_9_conv2d_0_in_channels=1024,
+                                   module0_9_conv2d_0_out_channels=256,
+                                   module0_9_conv2d_2_in_channels=256,
+                                   module0_9_conv2d_2_out_channels=256,
+                                   module0_9_conv2d_2_stride=(1, 1),
+                                   module0_9_conv2d_4_in_channels=256,
+                                   module0_9_conv2d_4_out_channels=1024,
+                                   module0_9_conv2d_6_in_channels=1024,
+                                   module0_9_conv2d_6_out_channels=256,
+                                   module0_9_conv2d_8_in_channels=256,
+                                   module0_9_conv2d_8_out_channels=1024,
+                                   module0_10_conv2d_0_in_channels=1024,
+                                   module0_10_conv2d_0_out_channels=256,
+                                   module0_10_conv2d_2_in_channels=256,
+                                   module0_10_conv2d_2_out_channels=256,
+                                   module0_10_conv2d_2_stride=(1, 1),
+                                   module0_10_conv2d_4_in_channels=256,
+                                   module0_10_conv2d_4_out_channels=1024,
+                                   module0_10_conv2d_6_in_channels=1024,
+                                   module0_10_conv2d_6_out_channels=256,
+                                   module0_10_conv2d_8_in_channels=256,
+                                   module0_10_conv2d_8_out_channels=1024,
+                                   module0_11_conv2d_0_in_channels=1024,
+                                   module0_11_conv2d_0_out_channels=256,
+                                   module0_11_conv2d_2_in_channels=256,
+                                   module0_11_conv2d_2_out_channels=256,
+                                   module0_11_conv2d_2_stride=(1, 1),
+                                   module0_11_conv2d_4_in_channels=256,
+                                   module0_11_conv2d_4_out_channels=1024,
+                                   module0_11_conv2d_6_in_channels=1024,
+                                   module0_11_conv2d_6_out_channels=256,
+                                   module0_11_conv2d_8_in_channels=256,
+                                   module0_11_conv2d_8_out_channels=1024,
+                                   module0_12_conv2d_0_in_channels=1024,
+                                   module0_12_conv2d_0_out_channels=256,
+                                   module0_12_conv2d_2_in_channels=256,
+                                   module0_12_conv2d_2_out_channels=256,
+                                   module0_12_conv2d_2_stride=(1, 1),
+                                   module0_12_conv2d_4_in_channels=256,
+                                   module0_12_conv2d_4_out_channels=1024,
+                                   module0_12_conv2d_6_in_channels=1024,
+                                   module0_12_conv2d_6_out_channels=256,
+                                   module0_12_conv2d_8_in_channels=256,
+                                   module0_12_conv2d_8_out_channels=1024,
+                                   module0_13_conv2d_0_in_channels=1024,
+                                   module0_13_conv2d_0_out_channels=256,
+                                   module0_13_conv2d_2_in_channels=256,
+                                   module0_13_conv2d_2_out_channels=256,
+                                   module0_13_conv2d_2_stride=(1, 1),
+                                   module0_13_conv2d_4_in_channels=256,
+                                   module0_13_conv2d_4_out_channels=1024,
+                                   module0_13_conv2d_6_in_channels=1024,
+                                   module0_13_conv2d_6_out_channels=256,
+                                   module0_13_conv2d_8_in_channels=256,
+                                   module0_13_conv2d_8_out_channels=1024,
+                                   module0_14_conv2d_0_in_channels=1024,
+                                   module0_14_conv2d_0_out_channels=256,
+                                   module0_14_conv2d_2_in_channels=256,
+                                   module0_14_conv2d_2_out_channels=256,
+                                   module0_14_conv2d_2_stride=(1, 1),
+                                   module0_14_conv2d_4_in_channels=256,
+                                   module0_14_conv2d_4_out_channels=1024,
+                                   module0_14_conv2d_6_in_channels=1024,
+                                   module0_14_conv2d_6_out_channels=256,
+                                   module0_14_conv2d_8_in_channels=256,
+                                   module0_14_conv2d_8_out_channels=1024,
+                                   module0_15_conv2d_0_in_channels=1024,
+                                   module0_15_conv2d_0_out_channels=256,
+                                   module0_15_conv2d_2_in_channels=256,
+                                   module0_15_conv2d_2_out_channels=256,
+                                   module0_15_conv2d_2_stride=(1, 1),
+                                   module0_15_conv2d_4_in_channels=256,
+                                   module0_15_conv2d_4_out_channels=1024,
+                                   module0_15_conv2d_6_in_channels=1024,
+                                   module0_15_conv2d_6_out_channels=256,
+                                   module0_15_conv2d_8_in_channels=256,
+                                   module0_15_conv2d_8_out_channels=1024)
+        self.module16_1 = Module16(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_0_conv2d_6_in_channels=1024,
+                                   module0_0_conv2d_6_out_channels=256,
+                                   module0_0_conv2d_8_in_channels=256,
+                                   module0_0_conv2d_8_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_6_in_channels=1024,
+                                   module0_1_conv2d_6_out_channels=256,
+                                   module0_1_conv2d_8_in_channels=256,
+                                   module0_1_conv2d_8_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_6_in_channels=1024,
+                                   module0_2_conv2d_6_out_channels=256,
+                                   module0_2_conv2d_8_in_channels=256,
+                                   module0_2_conv2d_8_out_channels=1024)
+        self.module0_3 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_2_in_channels=512,
+                                 conv2d_2_out_channels=512,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=512,
+                                 conv2d_4_out_channels=2048,
+                                 conv2d_6_in_channels=2048,
+                                 conv2d_6_out_channels=512,
+                                 conv2d_8_in_channels=512,
+                                 conv2d_8_out_channels=2048)
+        self.module6_2 = Module6(conv2d_1_in_channels=1024, conv2d_1_out_channels=2048)
+        self.relu_846 = nn.ReLU()
+        self.module2_1 = Module2(module0_0_conv2d_0_in_channels=2048,
+                                 module0_0_conv2d_0_out_channels=512,
+                                 module0_0_conv2d_2_in_channels=512,
+                                 module0_0_conv2d_2_out_channels=512,
+                                 module0_0_conv2d_2_stride=(1, 1),
+                                 module0_0_conv2d_4_in_channels=512,
+                                 module0_0_conv2d_4_out_channels=2048,
+                                 module0_0_conv2d_6_in_channels=2048,
+                                 module0_0_conv2d_6_out_channels=512,
+                                 module0_0_conv2d_8_in_channels=512,
+                                 module0_0_conv2d_8_out_channels=2048,
+                                 module0_1_conv2d_0_in_channels=2048,
+                                 module0_1_conv2d_0_out_channels=512,
+                                 module0_1_conv2d_2_in_channels=512,
+                                 module0_1_conv2d_2_out_channels=512,
+                                 module0_1_conv2d_2_stride=(1, 1),
+                                 module0_1_conv2d_4_in_channels=512,
+                                 module0_1_conv2d_4_out_channels=2048,
+                                 module0_1_conv2d_6_in_channels=2048,
+                                 module0_1_conv2d_6_out_channels=512,
+                                 module0_1_conv2d_8_in_channels=512,
+                                 module0_1_conv2d_8_out_channels=2048)
+        self.avgpool2d_873 = nn.AvgPool2d(kernel_size=(8, 8))
+        self.flatten_874 = nn.Flatten()
+        self.dense_875 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module11_0_opt = self.module11_0(opt_relu_1)
+        module11_1_opt = self.module11_1(module11_0_opt)
+        module11_2_opt = self.module11_2(module11_1_opt)
+        module0_0_opt = self.module0_0(module11_2_opt)
+        opt_conv2d_9 = self.conv2d_9(module11_2_opt)
+        opt_add_20 = P.Add()(module0_0_opt, opt_conv2d_9)
+        opt_relu_21 = self.relu_21(opt_add_20)
+        module2_0_opt = self.module2_0(opt_relu_21)
+        module0_1_opt = self.module0_1(module2_0_opt)
+        module6_0_opt = self.module6_0(module2_0_opt)
+        opt_add_61 = P.Add()(module0_1_opt, module6_0_opt)
+        opt_relu_62 = self.relu_62(opt_add_61)
+        module40_0_opt = self.module40_0(opt_relu_62)
+        module3_0_opt = self.module3_0(module40_0_opt)
+        module16_0_opt = self.module16_0(module3_0_opt)
+        module0_2_opt = self.module0_2(module16_0_opt)
+        module6_1_opt = self.module6_1(module16_0_opt)
+        opt_add_375 = P.Add()(module0_2_opt, module6_1_opt)
+        opt_relu_376 = self.relu_376(opt_add_375)
+        module40_1_opt = self.module40_1(opt_relu_376)
+        module40_2_opt = self.module40_2(module40_1_opt)
+        module16_1_opt = self.module16_1(module40_2_opt)
+        module0_3_opt = self.module0_3(module16_1_opt)
+        module6_2_opt = self.module6_2(module16_1_opt)
+        opt_add_845 = P.Add()(module0_3_opt, module6_2_opt)
+        opt_relu_846 = self.relu_846(opt_add_845)
+        module2_1_opt = self.module2_1(opt_relu_846)
+        opt_avgpool2d_873 = self.avgpool2d_873(module2_1_opt)
+        opt_flatten_874 = self.flatten_874(opt_avgpool2d_873)
+        opt_dense_875 = self.dense_875(opt_flatten_874)
+        return opt_dense_875
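The model definitions in this patch all follow the same template: a handful of small `nn.Cell` building blocks whose channel widths and strides are passed in as keyword arguments, plus a top-level `MindSporeModel` that chains them and ends in a 1000-way classifier. As a minimal smoke-test sketch (not part of the patch; the checkpoint file name and import path are placeholders), the `MindSporeModel` from the `resnetrs50.py` file added next could be loaded and run on a dummy 224x224 batch:

```python
# Illustrative sketch only: the checkpoint path and import path are placeholders.
import numpy as np
from mindspore import Tensor, context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from src.resnetrs50 import MindSporeModel

context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")

net = MindSporeModel()
load_param_into_net(net, load_checkpoint("resnetrs50_ascend.ckpt"))  # placeholder name
net.set_train(False)

dummy = Tensor(np.random.randn(1, 3, 224, 224).astype(np.float32))
logits = net(dummy)
print(logits.shape)  # expected (1, 1000)
```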
diff --git a/research/cvtmodel/resnet_ipl/src/resnetrs50.py b/research/cvtmodel/resnet_ipl/src/resnetrs50.py
new file mode 100644
index 0000000000000000000000000000000000000000..90bb6894fb8e143cf8c72ff007a74eacdceae950
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/src/resnetrs50.py
@@ -0,0 +1,469 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
+class Module7(nn.Cell):
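+    """3x3 convolution with configurable stride, followed by ReLU."""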
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_stride):
+        super(Module7, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=conv2d_0_stride,
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        return opt_relu_1
+
+
+class Module0(nn.Cell):
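+    """Bottleneck branch (1x1 -> 3x3 -> 1x1 conv) with a squeeze-and-excitation style gate.
+
+    Global average pooling of the final 1x1 output feeds two 1x1 convolutions and a
+    sigmoid, whose result rescales that output channel-wise; the residual add is
+    left to the caller.
+    """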
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_2_stride, conv2d_4_in_channels, conv2d_4_out_channels, conv2d_6_in_channels,
+                 conv2d_6_out_channels, conv2d_8_in_channels, conv2d_8_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=conv2d_2_stride,
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
+                                  out_channels=conv2d_4_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.reducemean_5 = P.ReduceMean(keep_dims=True)
+        self.reducemean_5_axis = (2, 3)
+        self.conv2d_6 = nn.Conv2d(in_channels=conv2d_6_in_channels,
+                                  out_channels=conv2d_6_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_7 = nn.ReLU()
+        self.conv2d_8 = nn.Conv2d(in_channels=conv2d_8_in_channels,
+                                  out_channels=conv2d_8_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.sigmoid_9 = nn.Sigmoid()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
+        opt_reducemean_5 = self.reducemean_5(opt_conv2d_4, self.reducemean_5_axis)
+        opt_conv2d_6 = self.conv2d_6(opt_reducemean_5)
+        opt_relu_7 = self.relu_7(opt_conv2d_6)
+        opt_conv2d_8 = self.conv2d_8(opt_relu_7)
+        opt_sigmoid_9 = self.sigmoid_9(opt_conv2d_8)
+        opt_mul_10 = P.Mul()(opt_conv2d_4, opt_sigmoid_9)
+        return opt_mul_10
+
+
+class Module8(nn.Cell):
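+    """Two Module0 bottleneck units, each wrapped with an identity residual add and ReLU."""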
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_4_in_channels,
+                 module0_0_conv2d_4_out_channels, module0_0_conv2d_6_in_channels, module0_0_conv2d_6_out_channels,
+                 module0_0_conv2d_8_in_channels, module0_0_conv2d_8_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_2_stride, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_1_conv2d_6_in_channels, module0_1_conv2d_6_out_channels, module0_1_conv2d_8_in_channels,
+                 module0_1_conv2d_8_out_channels):
+        super(Module8, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_0_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_0_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_0_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_0_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_0_conv2d_8_out_channels)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_1_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_1_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_1_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_1_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_1_conv2d_8_out_channels)
+        self.relu_3 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        return opt_relu_3
+
+
+class Module4(nn.Cell):
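+    """Downsampling shortcut branch: 2x2 average pooling (stride 2) followed by a 1x1 convolution."""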
+    def __init__(self, conv2d_1_in_channels, conv2d_1_out_channels):
+        super(Module4, self).__init__()
+        self.pad_avgpool2d_0 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
+        self.avgpool2d_0 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
+        self.conv2d_1 = nn.Conv2d(in_channels=conv2d_1_in_channels,
+                                  out_channels=conv2d_1_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        opt_avgpool2d_0 = self.pad_avgpool2d_0(x)
+        opt_avgpool2d_0 = self.avgpool2d_0(opt_avgpool2d_0)
+        opt_conv2d_1 = self.conv2d_1(opt_avgpool2d_0)
+        return opt_conv2d_1
+
+
+class Module11(nn.Cell):
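+    """Three Module0 bottleneck units, each wrapped with an identity residual add and ReLU."""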
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_4_in_channels,
+                 module0_0_conv2d_4_out_channels, module0_0_conv2d_6_in_channels, module0_0_conv2d_6_out_channels,
+                 module0_0_conv2d_8_in_channels, module0_0_conv2d_8_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_2_stride, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_1_conv2d_6_in_channels, module0_1_conv2d_6_out_channels, module0_1_conv2d_8_in_channels,
+                 module0_1_conv2d_8_out_channels, module0_2_conv2d_0_in_channels, module0_2_conv2d_0_out_channels,
+                 module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels, module0_2_conv2d_2_stride,
+                 module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels, module0_2_conv2d_6_in_channels,
+                 module0_2_conv2d_6_out_channels, module0_2_conv2d_8_in_channels, module0_2_conv2d_8_out_channels):
+        super(Module11, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_0_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_0_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_0_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_0_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_0_conv2d_8_out_channels)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_1_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_1_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_1_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_1_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_1_conv2d_8_out_channels)
+        self.relu_3 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_2_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_2_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_2_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_2_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_2_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_2_conv2d_8_out_channels)
+        self.relu_5 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_2_opt = self.module0_2(opt_relu_3)
+        opt_add_4 = P.Add()(module0_2_opt, opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_add_4)
+        return opt_relu_5
+
+
+class MindSporeModel(nn.Cell):
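+    """Full classification network: a stride-32 convolutional backbone built from the
+    modules above, followed by 7x7 average pooling, flatten and a 1000-way Dense head.
+    """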
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=32,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.module7_0 = Module7(conv2d_0_in_channels=32, conv2d_0_out_channels=32, conv2d_0_stride=(1, 1))
+        self.module7_1 = Module7(conv2d_0_in_channels=32, conv2d_0_out_channels=64, conv2d_0_stride=(1, 1))
+        self.module7_2 = Module7(conv2d_0_in_channels=64, conv2d_0_out_channels=64, conv2d_0_stride=(2, 2))
+        self.module0_0 = Module0(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_2_in_channels=64,
+                                 conv2d_2_out_channels=64,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=64,
+                                 conv2d_4_out_channels=256,
+                                 conv2d_6_in_channels=256,
+                                 conv2d_6_out_channels=64,
+                                 conv2d_8_in_channels=64,
+                                 conv2d_8_out_channels=256)
+        self.conv2d_9 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_21 = nn.ReLU()
+        self.module8_0 = Module8(module0_0_conv2d_0_in_channels=256,
+                                 module0_0_conv2d_0_out_channels=64,
+                                 module0_0_conv2d_2_in_channels=64,
+                                 module0_0_conv2d_2_out_channels=64,
+                                 module0_0_conv2d_2_stride=(1, 1),
+                                 module0_0_conv2d_4_in_channels=64,
+                                 module0_0_conv2d_4_out_channels=256,
+                                 module0_0_conv2d_6_in_channels=256,
+                                 module0_0_conv2d_6_out_channels=64,
+                                 module0_0_conv2d_8_in_channels=64,
+                                 module0_0_conv2d_8_out_channels=256,
+                                 module0_1_conv2d_0_in_channels=256,
+                                 module0_1_conv2d_0_out_channels=64,
+                                 module0_1_conv2d_2_in_channels=64,
+                                 module0_1_conv2d_2_out_channels=64,
+                                 module0_1_conv2d_2_stride=(1, 1),
+                                 module0_1_conv2d_4_in_channels=64,
+                                 module0_1_conv2d_4_out_channels=256,
+                                 module0_1_conv2d_6_in_channels=256,
+                                 module0_1_conv2d_6_out_channels=64,
+                                 module0_1_conv2d_8_in_channels=64,
+                                 module0_1_conv2d_8_out_channels=256)
+        self.module0_1 = Module0(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512,
+                                 conv2d_6_in_channels=512,
+                                 conv2d_6_out_channels=128,
+                                 conv2d_8_in_channels=128,
+                                 conv2d_8_out_channels=512)
+        self.module4_0 = Module4(conv2d_1_in_channels=256, conv2d_1_out_channels=512)
+        self.relu_62 = nn.ReLU()
+        self.module11_0 = Module11(module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=128,
+                                   module0_0_conv2d_4_out_channels=512,
+                                   module0_0_conv2d_6_in_channels=512,
+                                   module0_0_conv2d_6_out_channels=128,
+                                   module0_0_conv2d_8_in_channels=128,
+                                   module0_0_conv2d_8_out_channels=512,
+                                   module0_1_conv2d_0_in_channels=512,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=128,
+                                   module0_1_conv2d_4_out_channels=512,
+                                   module0_1_conv2d_6_in_channels=512,
+                                   module0_1_conv2d_6_out_channels=128,
+                                   module0_1_conv2d_8_in_channels=128,
+                                   module0_1_conv2d_8_out_channels=512,
+                                   module0_2_conv2d_0_in_channels=512,
+                                   module0_2_conv2d_0_out_channels=128,
+                                   module0_2_conv2d_2_in_channels=128,
+                                   module0_2_conv2d_2_out_channels=128,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=128,
+                                   module0_2_conv2d_4_out_channels=512,
+                                   module0_2_conv2d_6_in_channels=512,
+                                   module0_2_conv2d_6_out_channels=128,
+                                   module0_2_conv2d_8_in_channels=128,
+                                   module0_2_conv2d_8_out_channels=512)
+        self.module0_2 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024,
+                                 conv2d_6_in_channels=1024,
+                                 conv2d_6_out_channels=256,
+                                 conv2d_8_in_channels=256,
+                                 conv2d_8_out_channels=1024)
+        self.module4_1 = Module4(conv2d_1_in_channels=512, conv2d_1_out_channels=1024)
+        self.relu_116 = nn.ReLU()
+        self.module8_1 = Module8(module0_0_conv2d_0_in_channels=1024,
+                                 module0_0_conv2d_0_out_channels=256,
+                                 module0_0_conv2d_2_in_channels=256,
+                                 module0_0_conv2d_2_out_channels=256,
+                                 module0_0_conv2d_2_stride=(1, 1),
+                                 module0_0_conv2d_4_in_channels=256,
+                                 module0_0_conv2d_4_out_channels=1024,
+                                 module0_0_conv2d_6_in_channels=1024,
+                                 module0_0_conv2d_6_out_channels=256,
+                                 module0_0_conv2d_8_in_channels=256,
+                                 module0_0_conv2d_8_out_channels=1024,
+                                 module0_1_conv2d_0_in_channels=1024,
+                                 module0_1_conv2d_0_out_channels=256,
+                                 module0_1_conv2d_2_in_channels=256,
+                                 module0_1_conv2d_2_out_channels=256,
+                                 module0_1_conv2d_2_stride=(1, 1),
+                                 module0_1_conv2d_4_in_channels=256,
+                                 module0_1_conv2d_4_out_channels=1024,
+                                 module0_1_conv2d_6_in_channels=1024,
+                                 module0_1_conv2d_6_out_channels=256,
+                                 module0_1_conv2d_8_in_channels=256,
+                                 module0_1_conv2d_8_out_channels=1024)
+        self.module11_1 = Module11(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_0_conv2d_6_in_channels=1024,
+                                   module0_0_conv2d_6_out_channels=256,
+                                   module0_0_conv2d_8_in_channels=256,
+                                   module0_0_conv2d_8_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_6_in_channels=1024,
+                                   module0_1_conv2d_6_out_channels=256,
+                                   module0_1_conv2d_8_in_channels=256,
+                                   module0_1_conv2d_8_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_6_in_channels=1024,
+                                   module0_2_conv2d_6_out_channels=256,
+                                   module0_2_conv2d_8_in_channels=256,
+                                   module0_2_conv2d_8_out_channels=1024)
+        self.module0_3 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_2_in_channels=512,
+                                 conv2d_2_out_channels=512,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=512,
+                                 conv2d_4_out_channels=2048,
+                                 conv2d_6_in_channels=2048,
+                                 conv2d_6_out_channels=512,
+                                 conv2d_8_in_channels=512,
+                                 conv2d_8_out_channels=2048)
+        self.module4_2 = Module4(conv2d_1_in_channels=1024, conv2d_1_out_channels=2048)
+        self.relu_196 = nn.ReLU()
+        self.module8_2 = Module8(module0_0_conv2d_0_in_channels=2048,
+                                 module0_0_conv2d_0_out_channels=512,
+                                 module0_0_conv2d_2_in_channels=512,
+                                 module0_0_conv2d_2_out_channels=512,
+                                 module0_0_conv2d_2_stride=(1, 1),
+                                 module0_0_conv2d_4_in_channels=512,
+                                 module0_0_conv2d_4_out_channels=2048,
+                                 module0_0_conv2d_6_in_channels=2048,
+                                 module0_0_conv2d_6_out_channels=512,
+                                 module0_0_conv2d_8_in_channels=512,
+                                 module0_0_conv2d_8_out_channels=2048,
+                                 module0_1_conv2d_0_in_channels=2048,
+                                 module0_1_conv2d_0_out_channels=512,
+                                 module0_1_conv2d_2_in_channels=512,
+                                 module0_1_conv2d_2_out_channels=512,
+                                 module0_1_conv2d_2_stride=(1, 1),
+                                 module0_1_conv2d_4_in_channels=512,
+                                 module0_1_conv2d_4_out_channels=2048,
+                                 module0_1_conv2d_6_in_channels=2048,
+                                 module0_1_conv2d_6_out_channels=512,
+                                 module0_1_conv2d_8_in_channels=512,
+                                 module0_1_conv2d_8_out_channels=2048)
+        self.avgpool2d_223 = nn.AvgPool2d(kernel_size=(7, 7))
+        self.flatten_224 = nn.Flatten()
+        self.dense_225 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
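+    # Forward pass: initial convolution, converted module blocks arranged as residual
+    # stages with projection shortcuts, then 7x7 average pooling, flatten, and a
+    # 1000-way Dense classifier.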
+    def construct(self, input_1):
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        module7_0_opt = self.module7_0(opt_relu_1)
+        module7_1_opt = self.module7_1(module7_0_opt)
+        module7_2_opt = self.module7_2(module7_1_opt)
+        module0_0_opt = self.module0_0(module7_2_opt)
+        opt_conv2d_9 = self.conv2d_9(module7_2_opt)
+        opt_add_20 = P.Add()(module0_0_opt, opt_conv2d_9)
+        opt_relu_21 = self.relu_21(opt_add_20)
+        module8_0_opt = self.module8_0(opt_relu_21)
+        module0_1_opt = self.module0_1(module8_0_opt)
+        module4_0_opt = self.module4_0(module8_0_opt)
+        opt_add_61 = P.Add()(module0_1_opt, module4_0_opt)
+        opt_relu_62 = self.relu_62(opt_add_61)
+        module11_0_opt = self.module11_0(opt_relu_62)
+        module0_2_opt = self.module0_2(module11_0_opt)
+        module4_1_opt = self.module4_1(module11_0_opt)
+        opt_add_115 = P.Add()(module0_2_opt, module4_1_opt)
+        opt_relu_116 = self.relu_116(opt_add_115)
+        module8_1_opt = self.module8_1(opt_relu_116)
+        module11_1_opt = self.module11_1(module8_1_opt)
+        module0_3_opt = self.module0_3(module11_1_opt)
+        module4_2_opt = self.module4_2(module11_1_opt)
+        opt_add_195 = P.Add()(module0_3_opt, module4_2_opt)
+        opt_relu_196 = self.relu_196(opt_add_195)
+        module8_2_opt = self.module8_2(opt_relu_196)
+        opt_avgpool2d_223 = self.avgpool2d_223(module8_2_opt)
+        opt_flatten_224 = self.flatten_224(opt_avgpool2d_223)
+        opt_dense_225 = self.dense_225(opt_flatten_224)
+        return opt_dense_225
diff --git a/research/cvtmodel/resnet_ipl/src/seresnet152d.py b/research/cvtmodel/resnet_ipl/src/seresnet152d.py
new file mode 100644
index 0000000000000000000000000000000000000000..184f34592bbc074fe5724ebe7ec268aca2af0d75
--- /dev/null
+++ b/research/cvtmodel/resnet_ipl/src/seresnet152d.py
@@ -0,0 +1,1105 @@
+import mindspore.ops as P
+from mindspore import nn
+
+
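+# Module0: bottleneck branch (1x1 -> 3x3 -> 1x1 convolutions, ReLU after the first two)
+# followed by a squeeze-and-excitation gate: global average pooling over H and W, two
+# 1x1 convolutions with ReLU/Sigmoid, and a channel-wise rescale of the bottleneck
+# output. The residual addition is performed by the caller.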
+class Module0(nn.Cell):
+    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
+                 conv2d_2_stride, conv2d_4_in_channels, conv2d_4_out_channels, conv2d_6_in_channels,
+                 conv2d_6_out_channels, conv2d_8_in_channels, conv2d_8_out_channels):
+        super(Module0, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
+                                  out_channels=conv2d_0_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
+                                  out_channels=conv2d_2_out_channels,
+                                  kernel_size=(3, 3),
+                                  stride=conv2d_2_stride,
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
+                                  out_channels=conv2d_4_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.reducemean_5 = P.ReduceMean(keep_dims=True)
+        self.reducemean_5_axis = (2, 3)
+        self.conv2d_6 = nn.Conv2d(in_channels=conv2d_6_in_channels,
+                                  out_channels=conv2d_6_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_7 = nn.ReLU()
+        self.conv2d_8 = nn.Conv2d(in_channels=conv2d_8_in_channels,
+                                  out_channels=conv2d_8_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.sigmoid_9 = nn.Sigmoid()
+
+    def construct(self, x):
+        opt_conv2d_0 = self.conv2d_0(x)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
+        opt_reducemean_5 = self.reducemean_5(opt_conv2d_4, self.reducemean_5_axis)
+        opt_conv2d_6 = self.conv2d_6(opt_reducemean_5)
+        opt_relu_7 = self.relu_7(opt_conv2d_6)
+        opt_conv2d_8 = self.conv2d_8(opt_relu_7)
+        opt_sigmoid_9 = self.sigmoid_9(opt_conv2d_8)
+        opt_mul_10 = P.Mul()(opt_conv2d_4, opt_sigmoid_9)
+        return opt_mul_10
+
+
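+# Module4: two Module0 units chained as residual blocks; each unit's output is added
+# to its input and passed through ReLU.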
+class Module4(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_4_in_channels,
+                 module0_0_conv2d_4_out_channels, module0_0_conv2d_6_in_channels, module0_0_conv2d_6_out_channels,
+                 module0_0_conv2d_8_in_channels, module0_0_conv2d_8_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_2_stride, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_1_conv2d_6_in_channels, module0_1_conv2d_6_out_channels, module0_1_conv2d_8_in_channels,
+                 module0_1_conv2d_8_out_channels):
+        super(Module4, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_0_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_0_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_0_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_0_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_0_conv2d_8_out_channels)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_1_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_1_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_1_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_1_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_1_conv2d_8_out_channels)
+        self.relu_3 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        return opt_relu_3
+
+
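+# Module3: projection shortcut for downsampling stages, i.e. 2x2 average pooling
+# (stride 2) followed by a 1x1 convolution; the leading nn.Pad has all-zero paddings
+# and is a no-op kept from the conversion.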
+class Module3(nn.Cell):
+    def __init__(self, conv2d_1_in_channels, conv2d_1_out_channels):
+        super(Module3, self).__init__()
+        self.pad_avgpool2d_0 = nn.Pad(paddings=((0, 0), (0, 0), (0, 0), (0, 0)))
+        self.avgpool2d_0 = nn.AvgPool2d(kernel_size=(2, 2), stride=(2, 2))
+        self.conv2d_1 = nn.Conv2d(in_channels=conv2d_1_in_channels,
+                                  out_channels=conv2d_1_out_channels,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+
+    def construct(self, x):
+        opt_avgpool2d_0 = self.pad_avgpool2d_0(x)
+        opt_avgpool2d_0 = self.avgpool2d_0(opt_avgpool2d_0)
+        opt_conv2d_1 = self.conv2d_1(opt_avgpool2d_0)
+        return opt_conv2d_1
+
+
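+# Module16: seven Module0 residual units chained with identity shortcuts
+# (add + ReLU after each unit).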
+class Module16(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_4_in_channels,
+                 module0_0_conv2d_4_out_channels, module0_0_conv2d_6_in_channels, module0_0_conv2d_6_out_channels,
+                 module0_0_conv2d_8_in_channels, module0_0_conv2d_8_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_2_stride, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_1_conv2d_6_in_channels, module0_1_conv2d_6_out_channels, module0_1_conv2d_8_in_channels,
+                 module0_1_conv2d_8_out_channels, module0_2_conv2d_0_in_channels, module0_2_conv2d_0_out_channels,
+                 module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels, module0_2_conv2d_2_stride,
+                 module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels, module0_2_conv2d_6_in_channels,
+                 module0_2_conv2d_6_out_channels, module0_2_conv2d_8_in_channels, module0_2_conv2d_8_out_channels,
+                 module0_3_conv2d_0_in_channels, module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels,
+                 module0_3_conv2d_2_out_channels, module0_3_conv2d_2_stride, module0_3_conv2d_4_in_channels,
+                 module0_3_conv2d_4_out_channels, module0_3_conv2d_6_in_channels, module0_3_conv2d_6_out_channels,
+                 module0_3_conv2d_8_in_channels, module0_3_conv2d_8_out_channels, module0_4_conv2d_0_in_channels,
+                 module0_4_conv2d_0_out_channels, module0_4_conv2d_2_in_channels, module0_4_conv2d_2_out_channels,
+                 module0_4_conv2d_2_stride, module0_4_conv2d_4_in_channels, module0_4_conv2d_4_out_channels,
+                 module0_4_conv2d_6_in_channels, module0_4_conv2d_6_out_channels, module0_4_conv2d_8_in_channels,
+                 module0_4_conv2d_8_out_channels, module0_5_conv2d_0_in_channels, module0_5_conv2d_0_out_channels,
+                 module0_5_conv2d_2_in_channels, module0_5_conv2d_2_out_channels, module0_5_conv2d_2_stride,
+                 module0_5_conv2d_4_in_channels, module0_5_conv2d_4_out_channels, module0_5_conv2d_6_in_channels,
+                 module0_5_conv2d_6_out_channels, module0_5_conv2d_8_in_channels, module0_5_conv2d_8_out_channels,
+                 module0_6_conv2d_0_in_channels, module0_6_conv2d_0_out_channels, module0_6_conv2d_2_in_channels,
+                 module0_6_conv2d_2_out_channels, module0_6_conv2d_2_stride, module0_6_conv2d_4_in_channels,
+                 module0_6_conv2d_4_out_channels, module0_6_conv2d_6_in_channels, module0_6_conv2d_6_out_channels,
+                 module0_6_conv2d_8_in_channels, module0_6_conv2d_8_out_channels):
+        super(Module16, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_0_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_0_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_0_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_0_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_0_conv2d_8_out_channels)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_1_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_1_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_1_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_1_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_1_conv2d_8_out_channels)
+        self.relu_3 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_2_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_2_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_2_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_2_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_2_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_2_conv2d_8_out_channels)
+        self.relu_5 = nn.ReLU()
+        self.module0_3 = Module0(conv2d_0_in_channels=module0_3_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_3_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_3_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_3_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_3_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_3_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_3_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_3_conv2d_8_out_channels)
+        self.relu_7 = nn.ReLU()
+        self.module0_4 = Module0(conv2d_0_in_channels=module0_4_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_4_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_4_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_4_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_4_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_4_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_4_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_4_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_4_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_4_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_4_conv2d_8_out_channels)
+        self.relu_9 = nn.ReLU()
+        self.module0_5 = Module0(conv2d_0_in_channels=module0_5_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_5_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_5_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_5_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_5_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_5_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_5_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_5_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_5_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_5_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_5_conv2d_8_out_channels)
+        self.relu_11 = nn.ReLU()
+        self.module0_6 = Module0(conv2d_0_in_channels=module0_6_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_6_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_6_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_6_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_6_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_6_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_6_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_6_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_6_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_6_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_6_conv2d_8_out_channels)
+        self.relu_13 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_2_opt = self.module0_2(opt_relu_3)
+        opt_add_4 = P.Add()(module0_2_opt, opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_add_4)
+        module0_3_opt = self.module0_3(opt_relu_5)
+        opt_add_6 = P.Add()(module0_3_opt, opt_relu_5)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        module0_4_opt = self.module0_4(opt_relu_7)
+        opt_add_8 = P.Add()(module0_4_opt, opt_relu_7)
+        opt_relu_9 = self.relu_9(opt_add_8)
+        module0_5_opt = self.module0_5(opt_relu_9)
+        opt_add_10 = P.Add()(module0_5_opt, opt_relu_9)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module0_6_opt = self.module0_6(opt_relu_11)
+        opt_add_12 = P.Add()(module0_6_opt, opt_relu_11)
+        opt_relu_13 = self.relu_13(opt_add_12)
+        return opt_relu_13
+
+
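+# Module60: eight Module0 residual units chained with identity shortcuts.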
+class Module60(nn.Cell):
+    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
+                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_stride, module0_0_conv2d_4_in_channels,
+                 module0_0_conv2d_4_out_channels, module0_0_conv2d_6_in_channels, module0_0_conv2d_6_out_channels,
+                 module0_0_conv2d_8_in_channels, module0_0_conv2d_8_out_channels, module0_1_conv2d_0_in_channels,
+                 module0_1_conv2d_0_out_channels, module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels,
+                 module0_1_conv2d_2_stride, module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels,
+                 module0_1_conv2d_6_in_channels, module0_1_conv2d_6_out_channels, module0_1_conv2d_8_in_channels,
+                 module0_1_conv2d_8_out_channels, module0_2_conv2d_0_in_channels, module0_2_conv2d_0_out_channels,
+                 module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels, module0_2_conv2d_2_stride,
+                 module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels, module0_2_conv2d_6_in_channels,
+                 module0_2_conv2d_6_out_channels, module0_2_conv2d_8_in_channels, module0_2_conv2d_8_out_channels,
+                 module0_3_conv2d_0_in_channels, module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels,
+                 module0_3_conv2d_2_out_channels, module0_3_conv2d_2_stride, module0_3_conv2d_4_in_channels,
+                 module0_3_conv2d_4_out_channels, module0_3_conv2d_6_in_channels, module0_3_conv2d_6_out_channels,
+                 module0_3_conv2d_8_in_channels, module0_3_conv2d_8_out_channels, module0_4_conv2d_0_in_channels,
+                 module0_4_conv2d_0_out_channels, module0_4_conv2d_2_in_channels, module0_4_conv2d_2_out_channels,
+                 module0_4_conv2d_2_stride, module0_4_conv2d_4_in_channels, module0_4_conv2d_4_out_channels,
+                 module0_4_conv2d_6_in_channels, module0_4_conv2d_6_out_channels, module0_4_conv2d_8_in_channels,
+                 module0_4_conv2d_8_out_channels, module0_5_conv2d_0_in_channels, module0_5_conv2d_0_out_channels,
+                 module0_5_conv2d_2_in_channels, module0_5_conv2d_2_out_channels, module0_5_conv2d_2_stride,
+                 module0_5_conv2d_4_in_channels, module0_5_conv2d_4_out_channels, module0_5_conv2d_6_in_channels,
+                 module0_5_conv2d_6_out_channels, module0_5_conv2d_8_in_channels, module0_5_conv2d_8_out_channels,
+                 module0_6_conv2d_0_in_channels, module0_6_conv2d_0_out_channels, module0_6_conv2d_2_in_channels,
+                 module0_6_conv2d_2_out_channels, module0_6_conv2d_2_stride, module0_6_conv2d_4_in_channels,
+                 module0_6_conv2d_4_out_channels, module0_6_conv2d_6_in_channels, module0_6_conv2d_6_out_channels,
+                 module0_6_conv2d_8_in_channels, module0_6_conv2d_8_out_channels, module0_7_conv2d_0_in_channels,
+                 module0_7_conv2d_0_out_channels, module0_7_conv2d_2_in_channels, module0_7_conv2d_2_out_channels,
+                 module0_7_conv2d_2_stride, module0_7_conv2d_4_in_channels, module0_7_conv2d_4_out_channels,
+                 module0_7_conv2d_6_in_channels, module0_7_conv2d_6_out_channels, module0_7_conv2d_8_in_channels,
+                 module0_7_conv2d_8_out_channels):
+        super(Module60, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_0_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_0_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_0_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_0_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_0_conv2d_8_out_channels)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_1_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_1_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_1_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_1_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_1_conv2d_8_out_channels)
+        self.relu_3 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_2_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_2_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_2_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_2_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_2_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_2_conv2d_8_out_channels)
+        self.relu_5 = nn.ReLU()
+        self.module0_3 = Module0(conv2d_0_in_channels=module0_3_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_3_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_3_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_3_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_3_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_3_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_3_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_3_conv2d_8_out_channels)
+        self.relu_7 = nn.ReLU()
+        self.module0_4 = Module0(conv2d_0_in_channels=module0_4_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_4_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_4_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_4_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_4_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_4_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_4_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_4_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_4_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_4_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_4_conv2d_8_out_channels)
+        self.relu_9 = nn.ReLU()
+        self.module0_5 = Module0(conv2d_0_in_channels=module0_5_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_5_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_5_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_5_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_5_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_5_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_5_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_5_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_5_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_5_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_5_conv2d_8_out_channels)
+        self.relu_11 = nn.ReLU()
+        self.module0_6 = Module0(conv2d_0_in_channels=module0_6_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_6_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_6_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_6_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_6_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_6_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_6_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_6_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_6_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_6_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_6_conv2d_8_out_channels)
+        self.relu_13 = nn.ReLU()
+        self.module0_7 = Module0(conv2d_0_in_channels=module0_7_conv2d_0_in_channels,
+                                 conv2d_0_out_channels=module0_7_conv2d_0_out_channels,
+                                 conv2d_2_in_channels=module0_7_conv2d_2_in_channels,
+                                 conv2d_2_out_channels=module0_7_conv2d_2_out_channels,
+                                 conv2d_2_stride=module0_7_conv2d_2_stride,
+                                 conv2d_4_in_channels=module0_7_conv2d_4_in_channels,
+                                 conv2d_4_out_channels=module0_7_conv2d_4_out_channels,
+                                 conv2d_6_in_channels=module0_7_conv2d_6_in_channels,
+                                 conv2d_6_out_channels=module0_7_conv2d_6_out_channels,
+                                 conv2d_8_in_channels=module0_7_conv2d_8_in_channels,
+                                 conv2d_8_out_channels=module0_7_conv2d_8_out_channels)
+        self.relu_15 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_2_opt = self.module0_2(opt_relu_3)
+        opt_add_4 = P.Add()(module0_2_opt, opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_add_4)
+        module0_3_opt = self.module0_3(opt_relu_5)
+        opt_add_6 = P.Add()(module0_3_opt, opt_relu_5)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        module0_4_opt = self.module0_4(opt_relu_7)
+        opt_add_8 = P.Add()(module0_4_opt, opt_relu_7)
+        opt_relu_9 = self.relu_9(opt_add_8)
+        module0_5_opt = self.module0_5(opt_relu_9)
+        opt_add_10 = P.Add()(module0_5_opt, opt_relu_9)
+        opt_relu_11 = self.relu_11(opt_add_10)
+        module0_6_opt = self.module0_6(opt_relu_11)
+        opt_add_12 = P.Add()(module0_6_opt, opt_relu_11)
+        opt_relu_13 = self.relu_13(opt_add_12)
+        module0_7_opt = self.module0_7(opt_relu_13)
+        opt_add_14 = P.Add()(module0_7_opt, opt_relu_13)
+        opt_relu_15 = self.relu_15(opt_add_14)
+        return opt_relu_15
+
+
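+# Module5: four Module0 residual units with a fixed 1024-channel configuration
+# (SE reduction to 64 channels), chained with identity shortcuts.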
+class Module5(nn.Cell):
+    def __init__(self):
+        super(Module5, self).__init__()
+        self.module0_0 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024,
+                                 conv2d_6_in_channels=1024,
+                                 conv2d_6_out_channels=64,
+                                 conv2d_8_in_channels=64,
+                                 conv2d_8_out_channels=1024)
+        self.relu_1 = nn.ReLU()
+        self.module0_1 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024,
+                                 conv2d_6_in_channels=1024,
+                                 conv2d_6_out_channels=64,
+                                 conv2d_8_in_channels=64,
+                                 conv2d_8_out_channels=1024)
+        self.relu_3 = nn.ReLU()
+        self.module0_2 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024,
+                                 conv2d_6_in_channels=1024,
+                                 conv2d_6_out_channels=64,
+                                 conv2d_8_in_channels=64,
+                                 conv2d_8_out_channels=1024)
+        self.relu_5 = nn.ReLU()
+        self.module0_3 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024,
+                                 conv2d_6_in_channels=1024,
+                                 conv2d_6_out_channels=64,
+                                 conv2d_8_in_channels=64,
+                                 conv2d_8_out_channels=1024)
+        self.relu_7 = nn.ReLU()
+
+    def construct(self, x):
+        module0_0_opt = self.module0_0(x)
+        opt_add_0 = P.Add()(module0_0_opt, x)
+        opt_relu_1 = self.relu_1(opt_add_0)
+        module0_1_opt = self.module0_1(opt_relu_1)
+        opt_add_2 = P.Add()(module0_1_opt, opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_add_2)
+        module0_2_opt = self.module0_2(opt_relu_3)
+        opt_add_4 = P.Add()(module0_2_opt, opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_add_4)
+        module0_3_opt = self.module0_3(opt_relu_5)
+        opt_add_6 = P.Add()(module0_3_opt, opt_relu_5)
+        opt_relu_7 = self.relu_7(opt_add_6)
+        return opt_relu_7
+
+
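+# MindSporeModel: top-level seresnet152d network; a three-convolution deep stem,
+# max pooling, and residual stages assembled from the modules above.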
+class MindSporeModel(nn.Cell):
+    def __init__(self):
+        super(MindSporeModel, self).__init__()
+        self.conv2d_0 = nn.Conv2d(in_channels=3,
+                                  out_channels=32,
+                                  kernel_size=(3, 3),
+                                  stride=(2, 2),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_1 = nn.ReLU()
+        self.conv2d_2 = nn.Conv2d(in_channels=32,
+                                  out_channels=32,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_3 = nn.ReLU()
+        self.conv2d_4 = nn.Conv2d(in_channels=32,
+                                  out_channels=64,
+                                  kernel_size=(3, 3),
+                                  stride=(1, 1),
+                                  padding=(1, 1, 1, 1),
+                                  pad_mode="pad",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_5 = nn.ReLU()
+        self.pad_maxpool2d_6 = nn.Pad(paddings=((0, 0), (0, 0), (1, 0), (1, 0)))
+        self.maxpool2d_6 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
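+        # Stage 1: 64 -> 256 channels; the first unit pairs module0_0 with a 1x1
+        # convolution shortcut (conv2d_8), followed by two more units in module4_0.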
+        self.module0_0 = Module0(conv2d_0_in_channels=64,
+                                 conv2d_0_out_channels=64,
+                                 conv2d_2_in_channels=64,
+                                 conv2d_2_out_channels=64,
+                                 conv2d_2_stride=(1, 1),
+                                 conv2d_4_in_channels=64,
+                                 conv2d_4_out_channels=256,
+                                 conv2d_6_in_channels=256,
+                                 conv2d_6_out_channels=16,
+                                 conv2d_8_in_channels=16,
+                                 conv2d_8_out_channels=256)
+        self.conv2d_8 = nn.Conv2d(in_channels=64,
+                                  out_channels=256,
+                                  kernel_size=(1, 1),
+                                  stride=(1, 1),
+                                  padding=0,
+                                  pad_mode="valid",
+                                  dilation=(1, 1),
+                                  group=1,
+                                  has_bias=True)
+        self.relu_20 = nn.ReLU()
+        self.module4_0 = Module4(module0_0_conv2d_0_in_channels=256,
+                                 module0_0_conv2d_0_out_channels=64,
+                                 module0_0_conv2d_2_in_channels=64,
+                                 module0_0_conv2d_2_out_channels=64,
+                                 module0_0_conv2d_2_stride=(1, 1),
+                                 module0_0_conv2d_4_in_channels=64,
+                                 module0_0_conv2d_4_out_channels=256,
+                                 module0_0_conv2d_6_in_channels=256,
+                                 module0_0_conv2d_6_out_channels=16,
+                                 module0_0_conv2d_8_in_channels=16,
+                                 module0_0_conv2d_8_out_channels=256,
+                                 module0_1_conv2d_0_in_channels=256,
+                                 module0_1_conv2d_0_out_channels=64,
+                                 module0_1_conv2d_2_in_channels=64,
+                                 module0_1_conv2d_2_out_channels=64,
+                                 module0_1_conv2d_2_stride=(1, 1),
+                                 module0_1_conv2d_4_in_channels=64,
+                                 module0_1_conv2d_4_out_channels=256,
+                                 module0_1_conv2d_6_in_channels=256,
+                                 module0_1_conv2d_6_out_channels=16,
+                                 module0_1_conv2d_8_in_channels=16,
+                                 module0_1_conv2d_8_out_channels=256)
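+        # Stage 2: 256 -> 512 channels; module0_1 downsamples with stride 2, module3_0
+        # provides the average-pool projection shortcut, and module16_0 adds seven units.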
+        self.module0_1 = Module0(conv2d_0_in_channels=256,
+                                 conv2d_0_out_channels=128,
+                                 conv2d_2_in_channels=128,
+                                 conv2d_2_out_channels=128,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=128,
+                                 conv2d_4_out_channels=512,
+                                 conv2d_6_in_channels=512,
+                                 conv2d_6_out_channels=32,
+                                 conv2d_8_in_channels=32,
+                                 conv2d_8_out_channels=512)
+        self.module3_0 = Module3(conv2d_1_in_channels=256, conv2d_1_out_channels=512)
+        self.relu_61 = nn.ReLU()
+        self.module16_0 = Module16(module0_0_conv2d_0_in_channels=512,
+                                   module0_0_conv2d_0_out_channels=128,
+                                   module0_0_conv2d_2_in_channels=128,
+                                   module0_0_conv2d_2_out_channels=128,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=128,
+                                   module0_0_conv2d_4_out_channels=512,
+                                   module0_0_conv2d_6_in_channels=512,
+                                   module0_0_conv2d_6_out_channels=32,
+                                   module0_0_conv2d_8_in_channels=32,
+                                   module0_0_conv2d_8_out_channels=512,
+                                   module0_1_conv2d_0_in_channels=512,
+                                   module0_1_conv2d_0_out_channels=128,
+                                   module0_1_conv2d_2_in_channels=128,
+                                   module0_1_conv2d_2_out_channels=128,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=128,
+                                   module0_1_conv2d_4_out_channels=512,
+                                   module0_1_conv2d_6_in_channels=512,
+                                   module0_1_conv2d_6_out_channels=32,
+                                   module0_1_conv2d_8_in_channels=32,
+                                   module0_1_conv2d_8_out_channels=512,
+                                   module0_2_conv2d_0_in_channels=512,
+                                   module0_2_conv2d_0_out_channels=128,
+                                   module0_2_conv2d_2_in_channels=128,
+                                   module0_2_conv2d_2_out_channels=128,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=128,
+                                   module0_2_conv2d_4_out_channels=512,
+                                   module0_2_conv2d_6_in_channels=512,
+                                   module0_2_conv2d_6_out_channels=32,
+                                   module0_2_conv2d_8_in_channels=32,
+                                   module0_2_conv2d_8_out_channels=512,
+                                   module0_3_conv2d_0_in_channels=512,
+                                   module0_3_conv2d_0_out_channels=128,
+                                   module0_3_conv2d_2_in_channels=128,
+                                   module0_3_conv2d_2_out_channels=128,
+                                   module0_3_conv2d_2_stride=(1, 1),
+                                   module0_3_conv2d_4_in_channels=128,
+                                   module0_3_conv2d_4_out_channels=512,
+                                   module0_3_conv2d_6_in_channels=512,
+                                   module0_3_conv2d_6_out_channels=32,
+                                   module0_3_conv2d_8_in_channels=32,
+                                   module0_3_conv2d_8_out_channels=512,
+                                   module0_4_conv2d_0_in_channels=512,
+                                   module0_4_conv2d_0_out_channels=128,
+                                   module0_4_conv2d_2_in_channels=128,
+                                   module0_4_conv2d_2_out_channels=128,
+                                   module0_4_conv2d_2_stride=(1, 1),
+                                   module0_4_conv2d_4_in_channels=128,
+                                   module0_4_conv2d_4_out_channels=512,
+                                   module0_4_conv2d_6_in_channels=512,
+                                   module0_4_conv2d_6_out_channels=32,
+                                   module0_4_conv2d_8_in_channels=32,
+                                   module0_4_conv2d_8_out_channels=512,
+                                   module0_5_conv2d_0_in_channels=512,
+                                   module0_5_conv2d_0_out_channels=128,
+                                   module0_5_conv2d_2_in_channels=128,
+                                   module0_5_conv2d_2_out_channels=128,
+                                   module0_5_conv2d_2_stride=(1, 1),
+                                   module0_5_conv2d_4_in_channels=128,
+                                   module0_5_conv2d_4_out_channels=512,
+                                   module0_5_conv2d_6_in_channels=512,
+                                   module0_5_conv2d_6_out_channels=32,
+                                   module0_5_conv2d_8_in_channels=32,
+                                   module0_5_conv2d_8_out_channels=512,
+                                   module0_6_conv2d_0_in_channels=512,
+                                   module0_6_conv2d_0_out_channels=128,
+                                   module0_6_conv2d_2_in_channels=128,
+                                   module0_6_conv2d_2_out_channels=128,
+                                   module0_6_conv2d_2_stride=(1, 1),
+                                   module0_6_conv2d_4_in_channels=128,
+                                   module0_6_conv2d_4_out_channels=512,
+                                   module0_6_conv2d_6_in_channels=512,
+                                   module0_6_conv2d_6_out_channels=32,
+                                   module0_6_conv2d_8_in_channels=32,
+                                   module0_6_conv2d_8_out_channels=512)
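+        # Stage 3: 512 -> 1024 channels; module0_2 downsamples with stride 2, module3_1
+        # provides the projection shortcut, followed by stacks of eight units (module60_*).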
+        self.module0_2 = Module0(conv2d_0_in_channels=512,
+                                 conv2d_0_out_channels=256,
+                                 conv2d_2_in_channels=256,
+                                 conv2d_2_out_channels=256,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=256,
+                                 conv2d_4_out_channels=1024,
+                                 conv2d_6_in_channels=1024,
+                                 conv2d_6_out_channels=64,
+                                 conv2d_8_in_channels=64,
+                                 conv2d_8_out_channels=1024)
+        self.module3_1 = Module3(conv2d_1_in_channels=512, conv2d_1_out_channels=1024)
+        self.relu_167 = nn.ReLU()
+        self.module60_0 = Module60(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_0_conv2d_6_in_channels=1024,
+                                   module0_0_conv2d_6_out_channels=64,
+                                   module0_0_conv2d_8_in_channels=64,
+                                   module0_0_conv2d_8_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_6_in_channels=1024,
+                                   module0_1_conv2d_6_out_channels=64,
+                                   module0_1_conv2d_8_in_channels=64,
+                                   module0_1_conv2d_8_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_6_in_channels=1024,
+                                   module0_2_conv2d_6_out_channels=64,
+                                   module0_2_conv2d_8_in_channels=64,
+                                   module0_2_conv2d_8_out_channels=1024,
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=256,
+                                   module0_3_conv2d_2_stride=(1, 1),
+                                   module0_3_conv2d_4_in_channels=256,
+                                   module0_3_conv2d_4_out_channels=1024,
+                                   module0_3_conv2d_6_in_channels=1024,
+                                   module0_3_conv2d_6_out_channels=64,
+                                   module0_3_conv2d_8_in_channels=64,
+                                   module0_3_conv2d_8_out_channels=1024,
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=256,
+                                   module0_4_conv2d_2_stride=(1, 1),
+                                   module0_4_conv2d_4_in_channels=256,
+                                   module0_4_conv2d_4_out_channels=1024,
+                                   module0_4_conv2d_6_in_channels=1024,
+                                   module0_4_conv2d_6_out_channels=64,
+                                   module0_4_conv2d_8_in_channels=64,
+                                   module0_4_conv2d_8_out_channels=1024,
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=256,
+                                   module0_5_conv2d_2_stride=(1, 1),
+                                   module0_5_conv2d_4_in_channels=256,
+                                   module0_5_conv2d_4_out_channels=1024,
+                                   module0_5_conv2d_6_in_channels=1024,
+                                   module0_5_conv2d_6_out_channels=64,
+                                   module0_5_conv2d_8_in_channels=64,
+                                   module0_5_conv2d_8_out_channels=1024,
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=256,
+                                   module0_6_conv2d_2_stride=(1, 1),
+                                   module0_6_conv2d_4_in_channels=256,
+                                   module0_6_conv2d_4_out_channels=1024,
+                                   module0_6_conv2d_6_in_channels=1024,
+                                   module0_6_conv2d_6_out_channels=64,
+                                   module0_6_conv2d_8_in_channels=64,
+                                   module0_6_conv2d_8_out_channels=1024,
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=256,
+                                   module0_7_conv2d_2_stride=(1, 1),
+                                   module0_7_conv2d_4_in_channels=256,
+                                   module0_7_conv2d_4_out_channels=1024,
+                                   module0_7_conv2d_6_in_channels=1024,
+                                   module0_7_conv2d_6_out_channels=64,
+                                   module0_7_conv2d_8_in_channels=64,
+                                   module0_7_conv2d_8_out_channels=1024)
+        self.module60_1 = Module60(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_0_conv2d_6_in_channels=1024,
+                                   module0_0_conv2d_6_out_channels=64,
+                                   module0_0_conv2d_8_in_channels=64,
+                                   module0_0_conv2d_8_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_6_in_channels=1024,
+                                   module0_1_conv2d_6_out_channels=64,
+                                   module0_1_conv2d_8_in_channels=64,
+                                   module0_1_conv2d_8_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_6_in_channels=1024,
+                                   module0_2_conv2d_6_out_channels=64,
+                                   module0_2_conv2d_8_in_channels=64,
+                                   module0_2_conv2d_8_out_channels=1024,
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=256,
+                                   module0_3_conv2d_2_stride=(1, 1),
+                                   module0_3_conv2d_4_in_channels=256,
+                                   module0_3_conv2d_4_out_channels=1024,
+                                   module0_3_conv2d_6_in_channels=1024,
+                                   module0_3_conv2d_6_out_channels=64,
+                                   module0_3_conv2d_8_in_channels=64,
+                                   module0_3_conv2d_8_out_channels=1024,
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=256,
+                                   module0_4_conv2d_2_stride=(1, 1),
+                                   module0_4_conv2d_4_in_channels=256,
+                                   module0_4_conv2d_4_out_channels=1024,
+                                   module0_4_conv2d_6_in_channels=1024,
+                                   module0_4_conv2d_6_out_channels=64,
+                                   module0_4_conv2d_8_in_channels=64,
+                                   module0_4_conv2d_8_out_channels=1024,
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=256,
+                                   module0_5_conv2d_2_stride=(1, 1),
+                                   module0_5_conv2d_4_in_channels=256,
+                                   module0_5_conv2d_4_out_channels=1024,
+                                   module0_5_conv2d_6_in_channels=1024,
+                                   module0_5_conv2d_6_out_channels=64,
+                                   module0_5_conv2d_8_in_channels=64,
+                                   module0_5_conv2d_8_out_channels=1024,
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=256,
+                                   module0_6_conv2d_2_stride=(1, 1),
+                                   module0_6_conv2d_4_in_channels=256,
+                                   module0_6_conv2d_4_out_channels=1024,
+                                   module0_6_conv2d_6_in_channels=1024,
+                                   module0_6_conv2d_6_out_channels=64,
+                                   module0_6_conv2d_8_in_channels=64,
+                                   module0_6_conv2d_8_out_channels=1024,
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=256,
+                                   module0_7_conv2d_2_stride=(1, 1),
+                                   module0_7_conv2d_4_in_channels=256,
+                                   module0_7_conv2d_4_out_channels=1024,
+                                   module0_7_conv2d_6_in_channels=1024,
+                                   module0_7_conv2d_6_out_channels=64,
+                                   module0_7_conv2d_8_in_channels=64,
+                                   module0_7_conv2d_8_out_channels=1024)
+        self.module60_2 = Module60(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_0_conv2d_6_in_channels=1024,
+                                   module0_0_conv2d_6_out_channels=64,
+                                   module0_0_conv2d_8_in_channels=64,
+                                   module0_0_conv2d_8_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_6_in_channels=1024,
+                                   module0_1_conv2d_6_out_channels=64,
+                                   module0_1_conv2d_8_in_channels=64,
+                                   module0_1_conv2d_8_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_6_in_channels=1024,
+                                   module0_2_conv2d_6_out_channels=64,
+                                   module0_2_conv2d_8_in_channels=64,
+                                   module0_2_conv2d_8_out_channels=1024,
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=256,
+                                   module0_3_conv2d_2_stride=(1, 1),
+                                   module0_3_conv2d_4_in_channels=256,
+                                   module0_3_conv2d_4_out_channels=1024,
+                                   module0_3_conv2d_6_in_channels=1024,
+                                   module0_3_conv2d_6_out_channels=64,
+                                   module0_3_conv2d_8_in_channels=64,
+                                   module0_3_conv2d_8_out_channels=1024,
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=256,
+                                   module0_4_conv2d_2_stride=(1, 1),
+                                   module0_4_conv2d_4_in_channels=256,
+                                   module0_4_conv2d_4_out_channels=1024,
+                                   module0_4_conv2d_6_in_channels=1024,
+                                   module0_4_conv2d_6_out_channels=64,
+                                   module0_4_conv2d_8_in_channels=64,
+                                   module0_4_conv2d_8_out_channels=1024,
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=256,
+                                   module0_5_conv2d_2_stride=(1, 1),
+                                   module0_5_conv2d_4_in_channels=256,
+                                   module0_5_conv2d_4_out_channels=1024,
+                                   module0_5_conv2d_6_in_channels=1024,
+                                   module0_5_conv2d_6_out_channels=64,
+                                   module0_5_conv2d_8_in_channels=64,
+                                   module0_5_conv2d_8_out_channels=1024,
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=256,
+                                   module0_6_conv2d_2_stride=(1, 1),
+                                   module0_6_conv2d_4_in_channels=256,
+                                   module0_6_conv2d_4_out_channels=1024,
+                                   module0_6_conv2d_6_in_channels=1024,
+                                   module0_6_conv2d_6_out_channels=64,
+                                   module0_6_conv2d_8_in_channels=64,
+                                   module0_6_conv2d_8_out_channels=1024,
+                                   module0_7_conv2d_0_in_channels=1024,
+                                   module0_7_conv2d_0_out_channels=256,
+                                   module0_7_conv2d_2_in_channels=256,
+                                   module0_7_conv2d_2_out_channels=256,
+                                   module0_7_conv2d_2_stride=(1, 1),
+                                   module0_7_conv2d_4_in_channels=256,
+                                   module0_7_conv2d_4_out_channels=1024,
+                                   module0_7_conv2d_6_in_channels=1024,
+                                   module0_7_conv2d_6_out_channels=64,
+                                   module0_7_conv2d_8_in_channels=64,
+                                   module0_7_conv2d_8_out_channels=1024)
+        self.module5_0 = Module5()
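+        # module16_1 stacks seven more 1024-channel blocks of the same shape before the
+        # transition to the 2048-channel stage below.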
+        self.module16_1 = Module16(module0_0_conv2d_0_in_channels=1024,
+                                   module0_0_conv2d_0_out_channels=256,
+                                   module0_0_conv2d_2_in_channels=256,
+                                   module0_0_conv2d_2_out_channels=256,
+                                   module0_0_conv2d_2_stride=(1, 1),
+                                   module0_0_conv2d_4_in_channels=256,
+                                   module0_0_conv2d_4_out_channels=1024,
+                                   module0_0_conv2d_6_in_channels=1024,
+                                   module0_0_conv2d_6_out_channels=64,
+                                   module0_0_conv2d_8_in_channels=64,
+                                   module0_0_conv2d_8_out_channels=1024,
+                                   module0_1_conv2d_0_in_channels=1024,
+                                   module0_1_conv2d_0_out_channels=256,
+                                   module0_1_conv2d_2_in_channels=256,
+                                   module0_1_conv2d_2_out_channels=256,
+                                   module0_1_conv2d_2_stride=(1, 1),
+                                   module0_1_conv2d_4_in_channels=256,
+                                   module0_1_conv2d_4_out_channels=1024,
+                                   module0_1_conv2d_6_in_channels=1024,
+                                   module0_1_conv2d_6_out_channels=64,
+                                   module0_1_conv2d_8_in_channels=64,
+                                   module0_1_conv2d_8_out_channels=1024,
+                                   module0_2_conv2d_0_in_channels=1024,
+                                   module0_2_conv2d_0_out_channels=256,
+                                   module0_2_conv2d_2_in_channels=256,
+                                   module0_2_conv2d_2_out_channels=256,
+                                   module0_2_conv2d_2_stride=(1, 1),
+                                   module0_2_conv2d_4_in_channels=256,
+                                   module0_2_conv2d_4_out_channels=1024,
+                                   module0_2_conv2d_6_in_channels=1024,
+                                   module0_2_conv2d_6_out_channels=64,
+                                   module0_2_conv2d_8_in_channels=64,
+                                   module0_2_conv2d_8_out_channels=1024,
+                                   module0_3_conv2d_0_in_channels=1024,
+                                   module0_3_conv2d_0_out_channels=256,
+                                   module0_3_conv2d_2_in_channels=256,
+                                   module0_3_conv2d_2_out_channels=256,
+                                   module0_3_conv2d_2_stride=(1, 1),
+                                   module0_3_conv2d_4_in_channels=256,
+                                   module0_3_conv2d_4_out_channels=1024,
+                                   module0_3_conv2d_6_in_channels=1024,
+                                   module0_3_conv2d_6_out_channels=64,
+                                   module0_3_conv2d_8_in_channels=64,
+                                   module0_3_conv2d_8_out_channels=1024,
+                                   module0_4_conv2d_0_in_channels=1024,
+                                   module0_4_conv2d_0_out_channels=256,
+                                   module0_4_conv2d_2_in_channels=256,
+                                   module0_4_conv2d_2_out_channels=256,
+                                   module0_4_conv2d_2_stride=(1, 1),
+                                   module0_4_conv2d_4_in_channels=256,
+                                   module0_4_conv2d_4_out_channels=1024,
+                                   module0_4_conv2d_6_in_channels=1024,
+                                   module0_4_conv2d_6_out_channels=64,
+                                   module0_4_conv2d_8_in_channels=64,
+                                   module0_4_conv2d_8_out_channels=1024,
+                                   module0_5_conv2d_0_in_channels=1024,
+                                   module0_5_conv2d_0_out_channels=256,
+                                   module0_5_conv2d_2_in_channels=256,
+                                   module0_5_conv2d_2_out_channels=256,
+                                   module0_5_conv2d_2_stride=(1, 1),
+                                   module0_5_conv2d_4_in_channels=256,
+                                   module0_5_conv2d_4_out_channels=1024,
+                                   module0_5_conv2d_6_in_channels=1024,
+                                   module0_5_conv2d_6_out_channels=64,
+                                   module0_5_conv2d_8_in_channels=64,
+                                   module0_5_conv2d_8_out_channels=1024,
+                                   module0_6_conv2d_0_in_channels=1024,
+                                   module0_6_conv2d_0_out_channels=256,
+                                   module0_6_conv2d_2_in_channels=256,
+                                   module0_6_conv2d_2_out_channels=256,
+                                   module0_6_conv2d_2_stride=(1, 1),
+                                   module0_6_conv2d_4_in_channels=256,
+                                   module0_6_conv2d_4_out_channels=1024,
+                                   module0_6_conv2d_6_in_channels=1024,
+                                   module0_6_conv2d_6_out_channels=64,
+                                   module0_6_conv2d_8_in_channels=64,
+                                   module0_6_conv2d_8_out_channels=1024)
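+        # Transition to the 2048-channel stage: module0_3 is the stride-2 bottleneck path
+        # (1024 -> 512 -> 2048 with a 2048 -> 128 -> 2048 branch), and module3_2 appears to be
+        # the matching downsample/projection shortcut (its conv2d_1 maps 1024 -> 2048 channels);
+        # the two paths are summed and passed through relu_637 in construct.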
+        self.module0_3 = Module0(conv2d_0_in_channels=1024,
+                                 conv2d_0_out_channels=512,
+                                 conv2d_2_in_channels=512,
+                                 conv2d_2_out_channels=512,
+                                 conv2d_2_stride=(2, 2),
+                                 conv2d_4_in_channels=512,
+                                 conv2d_4_out_channels=2048,
+                                 conv2d_6_in_channels=2048,
+                                 conv2d_6_out_channels=128,
+                                 conv2d_8_in_channels=128,
+                                 conv2d_8_out_channels=2048)
+        self.module3_2 = Module3(conv2d_1_in_channels=1024, conv2d_1_out_channels=2048)
+        self.relu_637 = nn.ReLU()
+        self.module4_1 = Module4(module0_0_conv2d_0_in_channels=2048,
+                                 module0_0_conv2d_0_out_channels=512,
+                                 module0_0_conv2d_2_in_channels=512,
+                                 module0_0_conv2d_2_out_channels=512,
+                                 module0_0_conv2d_2_stride=(1, 1),
+                                 module0_0_conv2d_4_in_channels=512,
+                                 module0_0_conv2d_4_out_channels=2048,
+                                 module0_0_conv2d_6_in_channels=2048,
+                                 module0_0_conv2d_6_out_channels=128,
+                                 module0_0_conv2d_8_in_channels=128,
+                                 module0_0_conv2d_8_out_channels=2048,
+                                 module0_1_conv2d_0_in_channels=2048,
+                                 module0_1_conv2d_0_out_channels=512,
+                                 module0_1_conv2d_2_in_channels=512,
+                                 module0_1_conv2d_2_out_channels=512,
+                                 module0_1_conv2d_2_stride=(1, 1),
+                                 module0_1_conv2d_4_in_channels=512,
+                                 module0_1_conv2d_4_out_channels=2048,
+                                 module0_1_conv2d_6_in_channels=2048,
+                                 module0_1_conv2d_6_out_channels=128,
+                                 module0_1_conv2d_8_in_channels=128,
+                                 module0_1_conv2d_8_out_channels=2048)
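+        # Classification head: 8x8 average pooling (consistent with a 256x256 input at the
+        # usual 32x overall stride), flatten, and a 2048 -> 1000 dense layer for ImageNet-1k.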
+        self.avgpool2d_664 = nn.AvgPool2d(kernel_size=(8, 8))
+        self.flatten_665 = nn.Flatten()
+        self.dense_666 = nn.Dense(in_channels=2048, out_channels=1000, has_bias=True)
+
+    def construct(self, input_1):
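+        # Stem: three conv + ReLU pairs followed by (padded) max pooling.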
+        opt_conv2d_0 = self.conv2d_0(input_1)
+        opt_relu_1 = self.relu_1(opt_conv2d_0)
+        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
+        opt_relu_3 = self.relu_3(opt_conv2d_2)
+        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
+        opt_relu_5 = self.relu_5(opt_conv2d_4)
+        opt_maxpool2d_6 = self.pad_maxpool2d_6(opt_relu_5)
+        opt_maxpool2d_6 = self.maxpool2d_6(opt_maxpool2d_6)
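+        # First bottleneck block: module0_0 is the residual path and conv2d_8 the projection
+        # shortcut; their sum forms the block output.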
+        module0_0_opt = self.module0_0(opt_maxpool2d_6)
+        opt_conv2d_8 = self.conv2d_8(opt_maxpool2d_6)
+        opt_add_19 = P.Add()(module0_0_opt, opt_conv2d_8)
+        opt_relu_20 = self.relu_20(opt_add_19)
+        module4_0_opt = self.module4_0(opt_relu_20)
+        module0_1_opt = self.module0_1(module4_0_opt)
+        module3_0_opt = self.module3_0(module4_0_opt)
+        opt_add_60 = P.Add()(module0_1_opt, module3_0_opt)
+        opt_relu_61 = self.relu_61(opt_add_60)
+        module16_0_opt = self.module16_0(opt_relu_61)
+        module0_2_opt = self.module0_2(module16_0_opt)
+        module3_1_opt = self.module3_1(module16_0_opt)
+        opt_add_166 = P.Add()(module0_2_opt, module3_1_opt)
+        opt_relu_167 = self.relu_167(opt_add_166)
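+        # Deep 1024-channel stage: three Module60 stacks of eight blocks each, Module5, and
+        # the seven-block module16_1.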
+        module60_0_opt = self.module60_0(opt_relu_167)
+        module60_1_opt = self.module60_1(module60_0_opt)
+        module60_2_opt = self.module60_2(module60_1_opt)
+        module5_0_opt = self.module5_0(module60_2_opt)
+        module16_1_opt = self.module16_1(module5_0_opt)
+        module0_3_opt = self.module0_3(module16_1_opt)
+        module3_2_opt = self.module3_2(module16_1_opt)
+        opt_add_636 = P.Add()(module0_3_opt, module3_2_opt)
+        opt_relu_637 = self.relu_637(opt_add_636)
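+        # Final 2048-channel stage (module4_1 adds two more blocks), then the pooling,
+        # flatten, and dense classification head.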
+        module4_1_opt = self.module4_1(opt_relu_637)
+        opt_avgpool2d_664 = self.avgpool2d_664(module4_1_opt)
+        opt_flatten_665 = self.flatten_665(opt_avgpool2d_664)
+        opt_dense_666 = self.dense_666(opt_flatten_665)
+        return opt_dense_666
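+
+# Smoke-test sketch (comment only, illustrative; the top-level nn.Cell class name is defined
+# earlier in this file -- substitute it for `Net`):
+#     import numpy as np
+#     import mindspore as ms
+#     net = Net()
+#     x = ms.Tensor(np.zeros((1, 3, 256, 256), np.float32))  # 256x256 assumed from the (8, 8) avg-pool
+#     print(net(x).shape)  # expected: (1, 1000)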