diff --git a/oneflow/python/ops/layers.py b/oneflow/python/ops/layers.py
deleted file mode 100644
index 17b677d0f2987935ffa487d32edf11703354bf4e..0000000000000000000000000000000000000000
--- a/oneflow/python/ops/layers.py
+++ /dev/null
@@ -1,1577 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import absolute_import
-from typing import Callable, Optional, Union, Tuple, Sequence
-from oneflow.python.oneflow_export import oneflow_export
-
-import oneflow as flow
-import oneflow.core.operator.op_conf_pb2 as op_conf_util
-import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util
-import oneflow.core.job.regularizer_conf_pb2 as regularizer_conf_util
-import oneflow.python.framework.distribute as distribute_util
-import oneflow.python.framework.remote_blob as remote_blob_util
-import oneflow._oneflow_internal
-
-IntPair = Tuple[int, int]
-
-
-@oneflow_export("layers.dense")
-def dense(
-    inputs: oneflow._oneflow_internal.BlobDesc,
-    units: int,
-    activation: Optional[
-        Callable[
-            [oneflow._oneflow_internal.BlobDesc, str],
-            oneflow._oneflow_internal.BlobDesc,
-        ]
-    ] = None,
-    use_bias: bool = True,
-    kernel_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    bias_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    kernel_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    bias_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    trainable: bool = True,
-    name: str = "Dense",
-    model_distribute: oneflow._oneflow_internal.distribute.Distribute = oneflow._oneflow_internal.distribute.broadcast(),
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""Fully-connected layer.
-
-    The fully-connected layer multiplies the input Blob by a weight matrix and produces an output Blob.
-
-    Args:
-        inputs (oneflow._oneflow_internal.BlobDesc): An input `Blob` with at least 2 dimensions.
-        units (int): A positive integer for the dimensionality of the output space.
-        activation (Optional[Callable[[oneflow._oneflow_internal.BlobDesc, str], oneflow._oneflow_internal.BlobDesc]], optional): Activation function. Defaults to None.
-        use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
-        kernel_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
-        bias_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the bias vector. Defaults to None.
-        kernel_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer function applied to the kernel weights matrix. Defaults to None.
-        bias_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
-        trainable (bool, optional): A boolean specifies whether to train the variables. Defaults to True.
-        name (str, optional): This layer's name. Defaults to "Dense".
-        model_distribute (oneflow._oneflow_internal.distribute.Distribute, optional): Define the way to distribute the model. Defaults to oneflow._oneflow_internal.distribute.broadcast().
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: An N-D `Blob` with the same leading dimensions as the input and a last dimension of `units`.
-
-    Raises:
-        ValueError: The dimension of the input `Blob` must be at least 2.
-        ValueError: Model distribute must be one of auto, broadcast, or split.
-        ValueError: The input must be a 2D `Blob` when the model distribute is split.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def dense_Job(x: tp.Numpy.Placeholder((1, 256))
-        ) -> tp.Numpy:
-            initializer = flow.truncated_normal(0.1)
-            hidden = flow.layers.dense(
-                x,
-                512,
-                activation=flow.nn.relu,
-                kernel_initializer=initializer,
-                name="dense1",
-            )
-            return hidden
-
-
-        x = np.random.randn(1, 256).astype(np.float32)
-        out = dense_Job(x)
-
-        # out.shape (1, 512)
-
-    """
-    in_shape = inputs.shape
-    in_num_axes = len(in_shape)
-    assert in_num_axes >= 2
-
-    assert (
-        model_distribute is oneflow._oneflow_internal.distribute.auto()
-        or model_distribute is oneflow._oneflow_internal.distribute.broadcast()
-        or model_distribute is oneflow._oneflow_internal.distribute.split(0)
-    )
-
-    if model_distribute is oneflow._oneflow_internal.distribute.split(0):
-        assert in_num_axes == 2  # model parallel (split) does not support reshaping inputs with more than 2 axes
-
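-    # inputs with more than 2 axes are flattened to (-1, last_dim) for the matmul and reshaped back afterwards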
-    if in_num_axes > 2:
-        inputs = flow.reshape(inputs, (-1, in_shape[-1]))
-
-    with flow.scope.namespace(name):
-        if kernel_initializer is None:
-            kernel_initializer = flow.constant_initializer(0)
-
-        weight = flow.get_variable(
-            name="weight",
-            shape=(units, inputs.shape[1]),
-            dtype=inputs.dtype,
-            initializer=kernel_initializer,
-            regularizer=kernel_regularizer,
-            trainable=trainable,
-            model_name="weight",
-            distribute=model_distribute,
-            reuse=False,
-        )
-        weight = weight.with_distribute(model_distribute)
-
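-        # weight is stored as (units, in_features), so the matmul uses transpose_b=True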
-        out = flow.matmul(a=inputs, b=weight, transpose_b=True, name="matmul")
-
-        if use_bias:
-            if bias_initializer is None:
-                bias_initializer = flow.constant_initializer(0)
-
-            bias = flow.get_variable(
-                name="bias",
-                shape=(units,),
-                dtype=inputs.dtype,
-                initializer=bias_initializer,
-                regularizer=bias_regularizer,
-                trainable=trainable,
-                model_name="bias",
-                distribute=model_distribute,
-                reuse=False,
-            )
-            bias = bias.with_distribute(model_distribute)
-            out = flow.nn.bias_add(out, bias, name="bias_add")
-
-        if callable(activation):
-            out = activation(out, name="activation")
-
-    if in_num_axes > 2:
-        out = flow.reshape(out, in_shape[:-1] + (units,))
-
-    return out
-
-
-@oneflow_export("layers.conv1d")
-def conv1d(
-    inputs: oneflow._oneflow_internal.BlobDesc,
-    filters: int,
-    kernel_size: Union[int, Tuple[int]] = 1,
-    strides: Union[int, Tuple[int]] = 1,
-    padding: Union[str, Tuple[IntPair, IntPair, IntPair]] = "VALID",
-    data_format: str = "NCW",
-    dilation_rate: Optional[Union[int, Tuple[int]]] = None,
-    groups: int = 1,
-    activation: Optional[
-        Callable[
-            [oneflow._oneflow_internal.BlobDesc, str],
-            oneflow._oneflow_internal.BlobDesc,
-        ]
-    ] = None,
-    use_bias: bool = True,
-    kernel_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    bias_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    kernel_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    bias_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    trainable: bool = True,
-    name: str = "Conv1d",
-    weight_name: Optional[str] = None,
-    bias_name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""1D convolution layer.
-
-    This layer computes a 1-D convolution with 3D input Blob and filters.
-
-    Args:
-        inputs (oneflow._oneflow_internal.BlobDesc): A 3D input `Blob`.
-        filters (int): An integer specifies the dimensionality of the output space.
-        kernel_size (Union[int, List[int], Tuple[int]], optional): An integer or tuple/list of a single integer specifying the length of the 1D convolution window. Defaults to 1.
-        strides (Union[int, List[int], Tuple[int]], optional): An integer or tuple/list of a single integer specifying the stride of the convolution window along the width. Defaults to 1.
-        padding (str, Tuple[IntPair, IntPair, IntPair], optional): `"SAME"`, `"SAME_LOWER"`, `"SAME_UPPER"` or `"VALID"` indicating the type of padding algorithm to use, or a tuple indicating the explicit paddings at the start and end of each dimension. Defaults to "VALID".
-        data_format (str, optional): A string specifies the format of the input `Blob`, one of "NCW" or "NWC". "NCW" corresponds to channels_first, i.e. the input `Blob` with shape (batch_size, channels, width).
-                        "NWC" corresponds to channels_last, i.e. the input `Blob` with shape (batch_size, width, channels). Defaults to "NCW".
-        dilation_rate (Optional[Union[int, Tuple[int]]], optional): An integer or tuple/list of a single integer specifying the dilation rate for the dilated convolution. Defaults to None.
-        groups (int, optional): A positive integer specifies number of groups for the Group conv. Defaults to 1.
-        activation (Optional[ Callable[[oneflow._oneflow_internal.BlobDesc, str], oneflow._oneflow_internal.BlobDesc] ], optional): Activation function. Defaults to None.
-        use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
-        kernel_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
-        bias_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the bias vector. Defaults to None.
-        kernel_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the kernel weights matrix. Defaults to None.
-        bias_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
-        trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
-        name (str, optional): This layer's name. Defaults to "Conv1d".
-
-    Raises:
-        ValueError: If the type of kernel_size is not one of integer, list, tuple.
-        ValueError: The number of groups must be positive and the number of filters must be divisible by it.
-        ValueError: If data_format is not one of 'NCW', 'NWC'.
-        ValueError: If the number of input channels is not divisible by the number of groups or is less than the number of groups.
-        ValueError: The number of groups must be 1 when data_format is 'NWC'.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A 3D `Blob` with the shape of (batch_size, filters, new_width).
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def conv1d_Job(x: tp.Numpy.Placeholder((1, 64, 32))
-        ) -> tp.Numpy:
-            initializer = flow.truncated_normal(0.1)
-            conv1d = flow.layers.conv1d(
-                x,
-                filters=128,
-                kernel_size=3,
-                strides=1,
-                padding='SAME',
-                kernel_initializer=initializer,
-                name="Conv1d"
-            )
-            return conv1d
-
-
-        x = np.random.randn(1, 64, 32).astype(np.float32)
-        out = conv1d_Job(x)
-
-        # out.shape (1, 128, 32)
-
-    """
-
-    if isinstance(kernel_size, int):
-        kernel_size = (kernel_size,)
-    else:
-        assert isinstance(kernel_size, (list, tuple))
-        assert len(kernel_size) == 1
-        kernel_size = tuple(kernel_size)
-
-    assert isinstance(groups, int)
-    assert groups > 0
-    assert groups <= filters
-    assert filters % groups == 0
-
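-    # filter layout is (filters, in_channels // groups, kernel) for NCW and (filters, kernel, in_channels // groups) for NWC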
-    if data_format.upper() == "NCW":
-        assert groups <= inputs.shape[1]
-        assert inputs.shape[1] % groups == 0
-        weight_shape = (filters, inputs.shape[1] // groups) + kernel_size
-    elif data_format.upper() == "NWC":
-        assert groups == 1
-        assert groups <= inputs.shape[2]
-        assert inputs.shape[2] % groups == 0
-        weight_shape = (
-            filters,
-            kernel_size[0],
-            inputs.shape[2] // groups,
-        )
-    else:
-        raise ValueError("data_format must be in NCW or NWC")
-
-    if kernel_initializer is None:
-        kernel_initializer = flow.xavier_uniform_initializer(data_format=data_format)
-
-    if weight_name is None:
-        with flow.scope.namespace(name):
-            weight = flow.get_variable(
-                name="weight",
-                shape=weight_shape,
-                dtype=inputs.dtype,
-                initializer=kernel_initializer,
-                regularizer=kernel_regularizer,
-                trainable=trainable,
-                model_name="weight",
-                reuse=False,
-            )
-    else:
-        weight = flow.get_variable(
-            name=weight_name,
-            shape=weight_shape,
-            dtype=inputs.dtype,
-            initializer=kernel_initializer,
-            regularizer=kernel_regularizer,
-            trainable=trainable,
-            model_name="weight",
-            reuse=False,
-        )
-
-    output = flow.nn.conv1d(
-        inputs,
-        weight,
-        strides,
-        padding,
-        data_format,
-        dilation_rate,
-        groups=groups,
-        name=name,
-    )
-
-    if use_bias:
-        if bias_initializer is None:
-            bias_initializer = flow.constant_initializer(0)
-
-        if bias_name is None:
-            with flow.scope.namespace(name):
-                bias = flow.get_variable(
-                    name="bias",
-                    shape=(filters,),
-                    dtype=inputs.dtype,
-                    initializer=bias_initializer,
-                    regularizer=bias_regularizer,
-                    trainable=trainable,
-                    model_name="bias",
-                    reuse=False,
-                )
-        else:
-            bias = flow.get_variable(
-                name=bias_name,
-                shape=(filters,),
-                dtype=inputs.dtype,
-                initializer=bias_initializer,
-                regularizer=bias_regularizer,
-                trainable=trainable,
-                model_name="bias",
-                reuse=False,
-            )
-
-        with flow.scope.namespace(name):
-            output = flow.nn.bias_add(output, bias, data_format, name="bias_add")
-
-    if callable(activation):
-        with flow.scope.namespace(name):
-            output = activation(output, name="activation")
-
-    return output
-
-
-@oneflow_export("layers.conv2d")
-def conv2d(
-    inputs: oneflow._oneflow_internal.BlobDesc,
-    filters: int,
-    kernel_size: Union[int, IntPair] = 1,
-    strides: Union[int, IntPair] = 1,
-    padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair]] = "VALID",
-    data_format: str = "NCHW",
-    dilation_rate: Optional[Union[int, IntPair]] = None,
-    groups: int = 1,
-    activation: Optional[
-        Callable[
-            [oneflow._oneflow_internal.BlobDesc, str],
-            oneflow._oneflow_internal.BlobDesc,
-        ]
-    ] = None,
-    use_bias: bool = True,
-    kernel_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    bias_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    kernel_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    bias_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    trainable: bool = True,
-    name: str = "Conv2d",
-    weight_name: Optional[str] = None,
-    bias_name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""2D convolution layer.
-
-    This layer computes a 2D convolution with 4D input Blob and filters.
-
-    Args:
-        inputs (oneflow._oneflow_internal.BlobDesc): A 4D input `Blob`.
-        filters (int): An integer specifies the dimensionality of the output space.
-        kernel_size (Union[int, List[int], Tuple[int]], optional): An integer or tuple/list specifies the height and width of the convolution window.
-                        When it is an integer, a square window is applied to the input. Defaults to 1.
-        strides (Union[int, List[int], Tuple[int]], optional): An integer or tuple/list specifies the strides of the convolution window along the height and width.
-                        When it is an integer, the same value is applied to all spatial dimensions. Defaults to 1.
-        padding (str, Tuple[IntPair, IntPair, IntPair, IntPair], optional): `"SAME"`, `"SAME_LOWER"`, `"SAME_UPPER"` or `"VALID"` indicating the type of padding algorithm to use, or a tuple indicating the explicit paddings at the start and end of each dimension. Defaults to "VALID".
-        data_format (str, optional): A string specifies the format of the input `Blob`, one of "NCHW" or "NHWC". "NCHW" corresponds to channels_first, i.e. the input `Blob` with shape (batch_size, channels, height, width).
-                        "NHWC" corresponds to channels_last, i.e. the input `Blob` with shape (batch_size, height, width, channels). Defaults to "NCHW".
-        dilation_rate (Optional[Union[int, IntPair]], optional): An integer or tuple/list specifies the dilation rate for the dilated convolution. When it is an integer, the same dilation rate is applied to all dimensions. Defaults to None.
-        groups (int, optional): A positive integer specifies number of groups for the Group conv. Defaults to 1.
-        activation (Optional[ Callable[[oneflow._oneflow_internal.BlobDesc, str], oneflow._oneflow_internal.BlobDesc] ], optional): Activation function. Defaults to None.
-        use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
-        kernel_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
-        bias_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the bias vector. Defaults to None.
-        kernel_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the kernel weights matrix. Defaults to None.
-        bias_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
-        trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
-        name (str, optional): This layer's name. Defaults to "Conv2d".
-        weight_name (Optional[str], optional): This weight's name. Defaults to None.
-        bias_name (Optional[str], optional):  This bias's name. Defaults to None.
-
-    Raises:
-        ValueError: If the type of kernel_size is not one of integer, list, tuple.
-        ValueError: The number of groups must be positive and the number of filters must be divisible by it.
-        ValueError: If data_format is not one of 'NCHW', 'NHWC'.
-        ValueError: If the number of input channels is not divisible by the number of groups or is less than the number of groups.
-        ValueError: The number of groups must be 1 when data_format is 'NHWC'.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A 4D `Blob` with the shape of (batch_size, filters, new_height, new_width).
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def conv2d_Job(x: tp.Numpy.Placeholder((1, 256, 32, 32))
-        ) -> tp.Numpy:
-            initializer = flow.truncated_normal(0.1)
-            conv2d = flow.layers.conv2d(
-                x,
-                filters=128,
-                kernel_size=3,
-                strides=1,
-                padding='SAME',
-                kernel_initializer=initializer,
-                name="Conv2d"
-            )
-            return conv2d
-
-
-        x = np.random.randn(1, 256, 32, 32).astype(np.float32)
-        out = conv2d_Job(x)
-
-        # out.shape (1, 128, 32, 32)
-
-    """
-
-    if isinstance(kernel_size, int):
-        kernel_size = (kernel_size, kernel_size)
-    else:
-        assert isinstance(kernel_size, (list, tuple))
-        assert len(kernel_size) == 2
-        kernel_size = tuple(kernel_size)
-
-    assert isinstance(groups, int)
-    assert groups > 0
-    assert groups <= filters
-    assert filters % groups == 0
-
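-    # filter layout is (filters, in_channels // groups, kH, kW) for NCHW and (filters, kH, kW, in_channels // groups) for NHWC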
-    if data_format.upper() == "NCHW":
-        assert groups <= inputs.shape[1]
-        assert inputs.shape[1] % groups == 0
-        weight_shape = (filters, inputs.shape[1] // groups) + kernel_size
-    elif data_format.upper() == "NHWC":
-        assert groups == 1
-        assert groups <= inputs.shape[3]
-        assert inputs.shape[3] % groups == 0
-        weight_shape = (
-            filters,
-            kernel_size[0],
-            kernel_size[1],
-            inputs.shape[3] // groups,
-        )
-    else:
-        raise ValueError("data_format must be in NCHW or NHWC")
-
-    if kernel_initializer is None:
-        kernel_initializer = flow.xavier_uniform_initializer(data_format=data_format)
-
-    if weight_name is None:
-        with flow.scope.namespace(name):
-            weight = flow.get_variable(
-                name="weight",
-                shape=weight_shape,
-                dtype=inputs.dtype,
-                initializer=kernel_initializer,
-                regularizer=kernel_regularizer,
-                trainable=trainable,
-                model_name="weight",
-                reuse=False,
-            )
-    else:
-        weight = flow.get_variable(
-            name=weight_name,
-            shape=weight_shape,
-            dtype=inputs.dtype,
-            initializer=kernel_initializer,
-            regularizer=kernel_regularizer,
-            trainable=trainable,
-            model_name="weight",
-            reuse=False,
-        )
-    output = flow.nn.conv2d(
-        inputs,
-        weight,
-        strides=strides,
-        padding=padding,
-        bias=None,
-        data_format=data_format,
-        dilations=dilation_rate,
-        groups=groups,
-        name=name,
-    )
-
-    if use_bias:
-        if bias_initializer is None:
-            bias_initializer = flow.constant_initializer(0)
-
-        if bias_name is None:
-            with flow.scope.namespace(name):
-                bias = flow.get_variable(
-                    name="bias",
-                    shape=(filters,),
-                    dtype=inputs.dtype,
-                    initializer=bias_initializer,
-                    regularizer=bias_regularizer,
-                    trainable=trainable,
-                    model_name="bias",
-                    reuse=False,
-                )
-        else:
-            bias = flow.get_variable(
-                name=bias_name,
-                shape=(filters,),
-                dtype=inputs.dtype,
-                initializer=bias_initializer,
-                regularizer=bias_regularizer,
-                trainable=trainable,
-                model_name="bias",
-                reuse=False,
-            )
-
-        with flow.scope.namespace(name):
-            output = flow.nn.bias_add(output, bias, data_format, name="bias_add")
-
-    if callable(activation):
-        with flow.scope.namespace(name):
-            output = activation(output, name="activation")
-
-    return output
-
-
-@oneflow_export("layers.conv3d")
-def conv3d(
-    inputs: oneflow._oneflow_internal.BlobDesc,
-    filters: int,
-    kernel_size: Union[int, Sequence[int]] = 1,
-    strides: Union[int, Sequence[int]] = 1,
-    padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair, IntPair]] = "VALID",
-    data_format: str = "NCDHW",
-    dilation_rate: Optional[Union[int, IntPair]] = None,
-    groups: int = 1,
-    activation: Optional[
-        Callable[
-            [oneflow._oneflow_internal.BlobDesc, str],
-            oneflow._oneflow_internal.BlobDesc,
-        ]
-    ] = None,
-    use_bias: bool = True,
-    kernel_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    bias_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    kernel_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    bias_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    trainable: bool = True,
-    name: str = "Conv3d",
-    weight_name: Optional[str] = None,
-    bias_name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""3D convolution layer.
-
-    This layer computes 3D convolution with 5D input Blob and filters
-
-    Args:
-        inputs (oneflow._oneflow_internal.BlobDesc): A 5D input `Blob`.
-        filters (int): An integer specifies the dimensionality of the output space.
-        kernel_size (Union[int, List[int], Sequence[int]], optional): An integer or tuple/list of three integers specifying the depth, height and width of the convolution window.
-                        When it is an integer, the same window size is applied to all spatial dimensions. Defaults to 1.
-        strides (Union[int, List[int], Sequence[int]], optional): An integer or tuple/list of three integers specifying the strides of the convolution window along the depth, height and width.
-                        When it is an integer, the same value is applied to all spatial dimensions. Defaults to 1.
-        padding (str, Tuple[IntPair, IntPair, IntPair, IntPair, IntPair], optional): `"SAME"`, `"SAME_LOWER"`, `"SAME_UPPER"` or `"VALID"` indicating the type of padding algorithm to use, or a tuple indicating the explicit paddings at the start and end of each dimension. Defaults to "VALID".
-        data_format (str, optional): A string specifies the format of the input `Blob`, one of "NCDHW" or "NDHWC". "NCDHW" corresponds to channels_first, i.e. the input `Blob` with shape (batch_size, channels, depth, height, width).
-                        "NDHWC" corresponds to channels_last, i.e. the input `Blob` with shape (batch_size, depth, height, width, channels). Defaults to "NCDHW".
-        dilation_rate (Optional[Union[int, IntPair]], optional): An integer or tuple/list specifies the dilation rate for the dilated convolution. When it is an integer, the same dilation rate is applied to all dimensions. Defaults to None.
-        groups (int, optional): A positive integer specifies number of groups for the Group conv. Defaults to 1.
-        activation (Optional[ Callable[[oneflow._oneflow_internal.BlobDesc, str], oneflow._oneflow_internal.BlobDesc] ], optional): Activation function. Defaults to None.
-        use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
-        kernel_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
-        bias_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for the bias vector. Defaults to None.
-        kernel_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the kernel weights matrix. Defaults to None.
-        bias_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
-        trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
-        name (str, optional): This layer's name. Defaults to "Conv3d".
-        weight_name (Optional[str], optional): This weight's name. Defaults to None.
-        bias_name (Optional[str], optional):  This bias's name. Defaults to None.
-
-    Raises:
-        ValueError: If the type of kernel_size is not one of integer, list, tuple.
-        ValueError: The number of groups must be positive and the number of filters must be divisible by it.
-        ValueError: If data_format is not one of 'NCDHW', 'NDHWC'.
-        ValueError: If the number of input channels is not divisible by the number of groups or is less than the number of groups.
-        ValueError: The number of groups must be 1 when data_format is 'NDHWC'.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A 5D `Blob` with the shape of (batch_size, filters, new_depth, new_height, new_width).
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def conv3d_Job(x: tp.Numpy.Placeholder((1, 64, 16, 16, 16))
-        ) -> tp.Numpy:
-            initializer = flow.truncated_normal(0.1)
-            conv3d = flow.layers.conv3d(
-                x,
-                filters=128,
-                kernel_size=3,
-                strides=1,
-                padding='SAME',
-                kernel_initializer=initializer,
-                name="Conv3d"
-            )
-            return conv3d
-
-
-        x = np.random.randn(1, 64, 16, 16, 16).astype(np.float32)
-        out = conv3d_Job(x)
-
-        # out.shape (1, 128, 16, 16, 16)
-
-    """
-    need_transpose = 0
-    if data_format.upper() == "NDHWC":  # NDHWC is not supported before cudnn 8.0
-        need_transpose = 1
-        data_format = "NCDHW"
-
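-    # when the caller asked for NDHWC, transpose the input to NCDHW here and transpose the result back at the end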
-    if need_transpose:
-        inputs = flow.transpose(inputs, perm=[0, 4, 1, 2, 3])
-        # convert padding from NDHWC order to NCDHW by swapping the depth and channel entries, e.g. [0, 0, 1, 1, 1] -> [0, 1, 1, 1, 0]
-        if isinstance(padding, (list, tuple)):
-            padding = list(padding)
-            padding[1], padding[4] = padding[4], padding[1]
-
-    if isinstance(kernel_size, int):
-        kernel_size = (kernel_size, kernel_size, kernel_size)
-    else:
-        assert isinstance(kernel_size, (list, tuple))
-        assert len(kernel_size) == 3
-        kernel_size = tuple(kernel_size)
-
-    assert isinstance(groups, int)
-    assert groups > 0
-    assert groups <= filters
-    assert filters % groups == 0
-
-    if data_format.upper() == "NCDHW":
-        assert groups <= inputs.shape[1]
-        assert inputs.shape[1] % groups == 0
-        weight_shape = (filters, inputs.shape[1] // groups) + kernel_size
-    elif data_format.upper() == "NDHWC":
-        assert groups == 1
-        assert groups <= inputs.shape[4]
-        assert inputs.shape[4] % groups == 0
-        weight_shape = (
-            filters,
-            kernel_size[0],
-            kernel_size[1],
-            kernel_size[2],
-            inputs.shape[4] // groups,
-        )
-    else:
-        raise ValueError("data_format must be in NCHW or NHWC")
-
-    if kernel_initializer is None:
-        kernel_initializer = flow.xavier_uniform_initializer(data_format=data_format)
-
-    if weight_name is None:
-        with flow.scope.namespace(name):
-            weight = flow.get_variable(
-                name="weight",
-                shape=weight_shape,
-                dtype=inputs.dtype,
-                initializer=kernel_initializer,
-                regularizer=kernel_regularizer,
-                trainable=trainable,
-                model_name="weight",
-                reuse=False,
-            )
-    else:
-        weight = flow.get_variable(
-            name=weight_name,
-            shape=weight_shape,
-            dtype=inputs.dtype,
-            initializer=kernel_initializer,
-            regularizer=kernel_regularizer,
-            trainable=trainable,
-            model_name="weight",
-            reuse=False,
-        )
-
-    output = flow.nn.conv3d(
-        inputs,
-        weight,
-        strides,
-        padding,
-        data_format,
-        dilation_rate,
-        groups=groups,
-        name=name,
-    )
-
-    if use_bias:
-        if bias_initializer is None:
-            bias_initializer = flow.constant_initializer(0)
-
-        if bias_name is None:
-            with flow.scope.namespace(name):
-                bias = flow.get_variable(
-                    name="bias",
-                    shape=(filters,),
-                    dtype=inputs.dtype,
-                    initializer=bias_initializer,
-                    regularizer=bias_regularizer,
-                    trainable=trainable,
-                    model_name="bias",
-                    reuse=False,
-                )
-        else:
-            bias = flow.get_variable(
-                name=bias_name,
-                shape=(filters,),
-                dtype=inputs.dtype,
-                initializer=bias_initializer,
-                regularizer=bias_regularizer,
-                trainable=trainable,
-                model_name="bias",
-                reuse=False,
-            )
-
-        with flow.scope.namespace(name):
-            output = flow.nn.bias_add(output, bias, data_format, name="bias_add")
-
-    if callable(activation):
-        with flow.scope.namespace(name):
-            output = activation(output, name="activation")
-
-    if need_transpose:
-        output = flow.transpose(output, perm=[0, 2, 3, 4, 1])
-
-    return output
-
-
-@oneflow_export("layers.layer_norm")
-def layer_norm(
-    inputs: oneflow._oneflow_internal.BlobDesc,
-    center: bool = True,
-    scale: bool = True,
-    trainable: bool = True,
-    begin_norm_axis: int = 1,
-    begin_params_axis: int = -1,
-    epsilon: float = 1e-5,
-    name: str = "LayerNorm",
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""Layer Normalization.
-
-    Args:
-        inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
-        center (bool, optional): A boolean specifies whether to shift input `Blob`. Defaults to True.
-        scale (bool, optional): A boolean specifies whether to scale input `Blob`. Defaults to True.
-        trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
-        begin_norm_axis (int, optional): An integer specifies which axis to normalize at first. Defaults to 1.
-        begin_params_axis (int, optional): An integer specifies the axis at which the scale and shift parameters begin. Defaults to -1.
-        epsilon (float, optional): A small float is added to avoid division by zero. Defaults to 1e-5.
-        name (str, optional): This layer's name. Defaults to "LayerNorm".
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A normalized `Blob` with the same shape as the input.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def layer_norm_Job(x: tp.Numpy.Placeholder((1, 64, 128, 128))
-        ) -> tp.Numpy:
-            layer_norm = flow.layers.layer_norm(
-                x,
-                name="LayerNorm1"
-            )
-            return layer_norm
-
-
-        x = np.random.randn(1, 64, 128, 128).astype(np.float32)
-        out = layer_norm_Job(x)
-
-        # out.shape (1, 64, 128, 128)
-
-    """
-    if center is False and scale is False:
-        trainable = False
-
-    beta = None
-    gamma = None
-
-    param_shape = inputs.shape[begin_params_axis:]
-    if center:
-        with flow.scope.namespace(name):
-            beta = flow.get_variable(
-                name="beta",
-                shape=param_shape,
-                dtype=inputs.dtype,
-                initializer=flow.constant_initializer(0.0),
-                trainable=trainable,
-                model_name="beta",
-                distribute=oneflow._oneflow_internal.distribute.broadcast(),
-                reuse=False,
-            )
-
-    if scale:
-        with flow.scope.namespace(name):
-            gamma = flow.get_variable(
-                name="gamma",
-                shape=param_shape,
-                dtype=inputs.dtype,
-                initializer=flow.constant_initializer(1.0),
-                trainable=trainable,
-                model_name="gamma",
-                distribute=oneflow._oneflow_internal.distribute.broadcast(),
-                reuse=False,
-            )
-
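-    # CPU path: compose layer norm from moments + batch_normalization, then apply gamma/beta manually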
-    if flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu":
-        if begin_norm_axis < 0:
-            begin_norm_axis = begin_norm_axis + len(inputs.shape)
-
-        reduce_axis = []
-        for dim in range(len(inputs.shape)):
-            if dim >= begin_norm_axis:
-                reduce_axis.append(dim)
-        mean, variance = flow.nn.moments(inputs, reduce_axis, keepdims=True)
-
-        axis = begin_norm_axis
-        normalized = flow.nn.batch_normalization(
-            x=inputs,
-            mean=mean,
-            variance=variance,
-            variance_epsilon=epsilon,
-            axis=axis,
-            name=name,
-        )
-        nd_params_shape = [1] * (len(inputs.shape) - len(param_shape)) + list(
-            param_shape
-        )
-        affined = normalized
-        if gamma is not None:
-            gamma = flow.reshape(gamma, nd_params_shape)
-            affined *= gamma
-        if beta is not None:
-            beta = flow.reshape(beta, nd_params_shape)
-            affined += beta
-        return affined
-    elif flow.current_scope().device_parallel_desc_symbol.device_tag == "gpu":
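-        # GPU path: use the fused "layer_norm" user op, which also produces mean and inv_variance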
-        op_builder = (
-            flow.user_op_builder(name)
-            .Op("layer_norm")
-            .Input("x", [inputs])
-            .Output("y")
-            .Output("mean")
-            .Output("inv_variance")
-        )
-
-        if beta is not None:
-            op_builder.Input("beta", [beta])
-        if gamma is not None:
-            op_builder.Input("gamma", [gamma])
-            op_builder.Output("normalized")
-        op_builder.Attr("center", center)
-        op_builder.Attr("scale", scale)
-        op_builder.Attr("begin_norm_axis", begin_norm_axis)
-        op_builder.Attr("begin_params_axis", begin_params_axis)
-        op_builder.Attr("epsilon", epsilon)
-
-        return op_builder.Build().InferAndTryRun().RemoteBlobList()[0]
-    else:
-        raise NotImplementedError
-
-
-@oneflow_export("layers.layer_norm_grad")
-def layer_norm_grad(
-    dy: oneflow._oneflow_internal.BlobDesc,
-    x: oneflow._oneflow_internal.BlobDesc,
-    mean: oneflow._oneflow_internal.BlobDesc,
-    inv_variance: oneflow._oneflow_internal.BlobDesc,
-    begin_norm_axis: int = 1,
-    name: str = "LayerNormGrad",
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""Layer normalization
-
-    Args:
-        dy (oneflow._oneflow_internal.BlobDesc): Upstream derivatives.
-        x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
-        mean (oneflow._oneflow_internal.BlobDesc): Mean over neurons.
-        inv_variance (oneflow._oneflow_internal.BlobDesc): Inverse variance over neurons.
-        begin_norm_axis (int, optional): An integer specifies which axis to normalize at first. Defaults to 1.
-        name (str, optional): This layer's name. Defaults to "LayerNormGrad".
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: Gradient with respect to input `Blob`.
-    """
-    op = (
-        flow.user_op_builder(name)
-        .Op("layer_norm_grad")
-        .Input("dy", [dy])
-        .Input("x", [x])
-        .Input("mean", [mean])
-        .Input("inv_variance", [inv_variance])
-        .Output("dx")
-        .Attr("begin_norm_axis", begin_norm_axis)
-        .Attr("epsilon", 1e-5)
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("layers.layer_norm_param_grad")
-def layer_norm_param_grad(
-    dy: oneflow._oneflow_internal.BlobDesc,
-    norm: oneflow._oneflow_internal.BlobDesc,
-    gamma: oneflow._oneflow_internal.BlobDesc,
-    begin_params_axis: int = -1,
-    name: str = "LayerNormParamGrad",
-) -> Tuple[
-    oneflow._oneflow_internal.BlobDesc,
-    oneflow._oneflow_internal.BlobDesc,
-    oneflow._oneflow_internal.BlobDesc,
-]:
-    r"""Backward pass for layer normalization
-
-    Args:
-        dy (oneflow._oneflow_internal.BlobDesc): Upstream derivatives.
-        norm (oneflow._oneflow_internal.BlobDesc): Normalized output.
-        gamma (oneflow._oneflow_internal.BlobDesc): Scale parameter.
-        begin_params_axis (int, optional): From which parameters to begin with. Defaults to -1.
-        name (Optional[str], optional): This layer's name. Defaults to 'LayerNormParamGrad'.
-
-    Returns:
-        Tuple[oneflow._oneflow_internal.BlobDesc]:
-                normalized_diff: Gradient with respect to input `Blob`.
-                beta_diff: Gradient with respect to shift parameter beta.
-                gamma_diff: Gradient with respect to scale parameter gamma.
-    """
-    op = (
-        flow.user_op_builder(name)
-        .Op("layer_norm_param_grad")
-        .Input("dy", [dy])
-        .Input("normalized", [norm])
-        .Input("gamma", [gamma])
-        .Output("normalized_diff")
-        .Output("beta_diff")
-        .Output("gamma_diff")
-        .Output("reduce_buf")
-        .Attr("begin_params_axis", begin_params_axis)
-        .Build()
-    )
-
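-    # reduce_buf is only used internally by the op and is discarded here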
-    (
-        normalized_diff,
-        beta_diff,
-        gamma_diff,
-        reduce_buf,
-    ) = op.InferAndTryRun().RemoteBlobList()
-
-    return normalized_diff, beta_diff, gamma_diff
-
-
-def _get_batch_normalization_variables(
-    name,
-    gamma_name,
-    beta_name,
-    moving_mean_name,
-    moving_variance_name,
-    center,
-    scale,
-    params_shape,
-    params_dtype,
-    trainable,
-    beta_initializer,
-    beta_regularizer,
-    gamma_initializer,
-    gamma_regularizer,
-    moving_mean_initializer,
-    moving_variance_initializer,
-):
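-    # beta/gamma fall back to constant 0/1 blobs when center/scale are disabled; moving statistics are never trainable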
-    def get_beta_var(name):
-        if center:
-            beta = flow.get_variable(
-                name=name,
-                shape=params_shape,
-                dtype=params_dtype,
-                initializer=beta_initializer or flow.zeros_initializer(),
-                regularizer=beta_regularizer,
-                trainable=trainable,
-                distribute=oneflow._oneflow_internal.distribute.broadcast(),
-                reuse=False,
-            )
-        else:
-            beta = flow.constant(0, dtype=params_dtype, shape=params_shape, name=name)
-        return beta
-
-    if beta_name is None:
-        with flow.scope.namespace(name):
-            beta = get_beta_var("beta")
-    else:
-        beta = get_beta_var(beta_name)
-
-    def get_gamma_var(name):
-        if scale:
-            gamma = flow.get_variable(
-                name=name,
-                shape=params_shape,
-                dtype=params_dtype,
-                initializer=gamma_initializer or flow.ones_initializer(),
-                regularizer=gamma_regularizer,
-                trainable=trainable,
-                distribute=oneflow._oneflow_internal.distribute.broadcast(),
-                reuse=False,
-            )
-        else:
-            gamma = flow.constant(1, dtype=params_dtype, shape=params_shape, name=name)
-        return gamma
-
-    if gamma_name is None:
-        with flow.scope.namespace(name):
-            gamma = get_gamma_var("gamma")
-    else:
-        gamma = get_gamma_var(gamma_name)
-
-    def get_moving_mean_var(name):
-        moving_mean = flow.get_variable(
-            name=name,
-            shape=params_shape,
-            dtype=params_dtype,
-            initializer=moving_mean_initializer or flow.zeros_initializer(),
-            trainable=False,
-            distribute=oneflow._oneflow_internal.distribute.broadcast(),
-            reuse=False,
-        )
-        return moving_mean
-
-    if moving_mean_name is None:
-        with flow.scope.namespace(name):
-            moving_mean = get_moving_mean_var("moving_mean")
-    else:
-        moving_mean = get_moving_mean_var(moving_mean_name)
-
-    def get_moving_variance_var(name):
-        moving_variance = flow.get_variable(
-            name=name,
-            shape=params_shape,
-            dtype=params_dtype,
-            initializer=moving_variance_initializer or flow.ones_initializer(),
-            trainable=False,
-            distribute=oneflow._oneflow_internal.distribute.broadcast(),
-            reuse=False,
-        )
-        return moving_variance
-
-    if moving_variance_name is None:
-        with flow.scope.namespace(name):
-            moving_variance = get_moving_variance_var("moving_variance")
-    else:
-        moving_variance = get_moving_variance_var(moving_variance_name)
-
-    return beta, gamma, moving_mean, moving_variance
-
-
-@oneflow_export("layers.batch_normalization")
-def batch_normalization(
-    inputs: oneflow._oneflow_internal.BlobDesc,
-    axis: int = -1,
-    momentum: float = 0.99,
-    epsilon: float = 0.001,
-    center: bool = True,
-    scale: bool = True,
-    beta_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    gamma_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    beta_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    gamma_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    moving_mean_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    moving_variance_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    trainable: bool = True,
-    training: bool = True,
-    name: str = "BatchNorm",
-    gamma_name: Optional[str] = None,
-    beta_name: Optional[str] = None,
-    moving_mean_name: Optional[str] = None,
-    moving_variance_name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""The BatchNormalization Layer.
-
-    This layer can be used in conv or dense layer.
-
-    The input data will be normalized by the mean and variance of the current batch data
-
-    Args:
-        inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
-        axis (int, optional): An int specifies the axis that should be normalized. Defaults to -1, which normalizes the last axis.
-        momentum (float, optional):  A float specifies the momentum for the moving average. Defaults to 0.99.
-        epsilon (float, optional): A small float added to avoid division by zero. Defaults to 0.001.
-        center (bool, optional): A boolean specifies whether to add offset to normalized `Blob`. Defaults to True.
-        scale (bool, optional): A boolean specifies whether to multiply normalized `Blob` by gamma. Defaults to True.
-        beta_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for beta. Defaults to None.
-        gamma_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for gamma. Defaults to None.
-        beta_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for beta. Defaults to None.
-        gamma_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for gamma. Defaults to None.
-        moving_mean_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving mean. Defaults to None.
-        moving_variance_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving variance. Defaults to None.
-        trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
-        training (bool, optional): A boolean specifies whether now is training the model. Defaults to True.
-        name (str, optional): This layer's name. Defaults to "BatchNorm".
-        gamma_name (Optional[str], optional): This gamma's name. Defaults to None.
-        beta_name (Optional[str], optional): This beta's name. Defaults to None.
-        moving_mean_name (Optional[str], optional): This moving_mean's name. Defaults to None.
-        moving_variance_name (Optional[str], optional): This moving_var's name. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A `Blob` with the same shape as the input.
-
-    Raises:
-        ValueError: If axis is out of dimension of input.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def batch_norm_Job(x: tp.Numpy.Placeholder((1, 64, 128, 128))
-        ) -> tp.Numpy:
-            initializer = flow.truncated_normal(0.1)
-            conv2d = flow.layers.conv2d(
-                x,
-                filters=128,
-                kernel_size=3,
-                strides=2,
-                padding='SAME',
-                kernel_initializer=initializer,
-                name="Conv2d"
-            )
-            batch_norm = flow.layers.batch_normalization(
-                conv2d,
-                axis=1
-            )
-            return batch_norm
-
-
-        x = np.random.randn(1, 64, 128, 128).astype(np.float32)
-        out = batch_norm_Job(x)
-
-        # out.shape (1, 128, 64, 64)
-
-    """
-    if axis < 0:
-        axis += len(inputs.shape)
-    assert axis >= 0 and axis < len(inputs.shape)
-
-    params_shape = [inputs.shape[axis]]
-    # Float32 required to avoid precision-loss when using fp16 input/output
-    params_dtype = flow.float32 if inputs.dtype == flow.float16 else inputs.dtype
-
-    if not flow.current_global_function_desc().IsTrainable() or not trainable:
-        training = False
-
-    beta, gamma, moving_mean, moving_variance = _get_batch_normalization_variables(
-        name,
-        gamma_name,
-        beta_name,
-        moving_mean_name,
-        moving_variance_name,
-        center,
-        scale,
-        params_shape,
-        params_dtype,
-        trainable,
-        beta_initializer,
-        beta_regularizer,
-        gamma_initializer,
-        gamma_regularizer,
-        moving_mean_initializer,
-        moving_variance_initializer,
-    )
-
-    if flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu":
-        if training:
-            reduce_axis = []
-            for dim in range(len(inputs.shape)):
-                if dim != axis:
-                    reduce_axis.append(dim)
-            mean, variance = flow.nn.moments(inputs, reduce_axis, keepdims=False)
-
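-            # exponential moving average update: moving = momentum * moving + (1 - momentum) * batch statistic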
-            def update_moving(moving, this_batch):
-                moving_identity = flow.identity(moving)
-                flow.assign(
-                    moving, momentum * moving_identity + (1 - momentum) * this_batch
-                )
-
-            update_moving(moving_mean, mean)
-            update_moving(moving_variance, variance)
-
-            return flow.nn.batch_normalization(
-                x=inputs,
-                mean=mean,
-                variance=variance,
-                offset=beta,
-                scale=gamma,
-                variance_epsilon=epsilon,
-                axis=axis,
-                name=name,
-            )
-        else:
-            mean = moving_mean
-            variance = moving_variance
-            return flow.nn.batch_normalization(
-                x=inputs,
-                mean=mean,
-                variance=variance,
-                offset=beta,
-                scale=gamma,
-                variance_epsilon=epsilon,
-                axis=axis,
-                name=name,
-            )
-    else:
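-        # GPU path: fused "normalization" user op; mean/inv_variance outputs are only added when trainable and training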
-        builder = (
-            flow.user_op_builder(name)
-            .Op("normalization")
-            .Input("x", [inputs])
-            .Input("moving_mean", [moving_mean])
-            .Input("moving_variance", [moving_variance])
-            .Input("gamma", [gamma])
-            .Input("beta", [beta])
-            .Output("y")
-            .Attr("axis", axis)
-            .Attr("epsilon", epsilon)
-            .Attr("training", training)
-            .Attr("momentum", momentum)
-        )
-        if trainable and training:
-            builder = builder.Output("mean").Output("inv_variance")
-
-        return builder.Build().InferAndTryRun().RemoteBlobList()[0]
-
-
-@oneflow_export("layers.batch_normalization_add_relu")
-def batch_normalization_add_relu(
-    inputs: oneflow._oneflow_internal.BlobDesc,
-    addend: Optional[oneflow._oneflow_internal.BlobDesc] = None,
-    axis: int = -1,
-    momentum: float = 0.99,
-    epsilon: float = 0.001,
-    center: bool = True,
-    scale: bool = True,
-    beta_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    gamma_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    beta_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    gamma_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    moving_mean_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    moving_variance_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    trainable: bool = True,
-    training: bool = True,
-    name: str = "BatchNorm",
-    gamma_name: Optional[str] = None,
-    beta_name: Optional[str] = None,
-    moving_mean_name: Optional[str] = None,
-    moving_variance_name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""Fused flow.layers.batch_normalization + flow.math.add + flow.math.relu
-
-    Args:
-        inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
-        addend (Optional[oneflow._oneflow_internal.BlobDesc], optional): A `Blob` added to the batch_normalization output before the ReLU. Defaults to None.
-        axis (int, optional): An int specifies the axis that should be normalized. Defaults to -1, which normalizes the last axis.
-        momentum (float, optional):  A float specifies the momentum for the moving average. Defaults to 0.99.
-        epsilon (float, optional): A small float added to avoid division by zero. Defaults to 0.001.
-        center (bool, optional): A boolean specifies whether to add offset to normalized `Blob`. Defaults to True.
-        scale (bool, optional): A boolean specifies whether to multiply normalized `Blob` by gamma. Defaults to True.
-        beta_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for beta. Defaults to None.
-        gamma_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for gamma. Defaults to None.
-        beta_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for beta. Defaults to None.
-        gamma_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for gamma. Defaults to None.
-        moving_mean_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving mean. Defaults to None.
-        moving_variance_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving variance. Defaults to None.
-        trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
-        training (bool, optional): A boolean specifies whether now is training the model. Defaults to True.
-        name (str, optional): This layer's name. Defaults to "BatchNorm".
-        gamma_name (Optional[str], optional): This gamma's name. Defaults to None.
-        beta_name (Optional[str], optional): This beta's name. Defaults to None.
-        moving_mean_name (Optional[str], optional): This moving_mean's name. Defaults to None.
-        moving_variance_name (Optional[str], optional): This moving_var's name. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A `Blob` with the same shape as the input.
-
-    Raises:
-        ValueError: If axis is out of dimension of input.
-
-    """
-    if not flow.current_global_function_desc().IsTrainable() or not trainable:
-        training = False
-
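-    # fall back to the unfused batch_normalization + add + relu when not training or when running on CPU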
-    if (
-        not training
-        or flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu"
-    ):
-        out = flow.layers.batch_normalization(
-            inputs,
-            axis=axis,
-            momentum=momentum,
-            epsilon=epsilon,
-            center=center,
-            scale=scale,
-            beta_initializer=beta_initializer,
-            gamma_initializer=gamma_initializer,
-            beta_regularizer=beta_regularizer,
-            gamma_regularizer=gamma_regularizer,
-            moving_mean_initializer=moving_mean_initializer,
-            moving_variance_initializer=moving_variance_initializer,
-            trainable=trainable,
-            training=training,
-            name=name,
-        )
-        with flow.scope.namespace("BatchNormAddRelu"):
-            if addend is not None:
-                out = out + addend
-            return flow.math.relu(out)
-
-    if axis < 0:
-        axis += len(inputs.shape)
-    assert 0 <= axis < len(inputs.shape)
-
-    params_shape = [inputs.shape[axis]]
-    # Float32 required to avoid precision-loss when using fp16 input/output
-    params_dtype = flow.float32 if inputs.dtype == flow.float16 else inputs.dtype
-
-    beta, gamma, moving_mean, moving_variance = _get_batch_normalization_variables(
-        name,
-        gamma_name,
-        beta_name,
-        moving_mean_name,
-        moving_variance_name,
-        center,
-        scale,
-        params_shape,
-        params_dtype,
-        trainable,
-        beta_initializer,
-        beta_regularizer,
-        gamma_initializer,
-        gamma_regularizer,
-        moving_mean_initializer,
-        moving_variance_initializer,
-    )
-
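-    # Build the fused "normalization_add_relu" user op: it normalizes `inputs`,
-    # optionally adds `addend`, and applies ReLU in a single kernel.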
-    builder = (
-        flow.user_op_builder(name)
-        .Op("normalization_add_relu")
-        .Input("x", [inputs])
-        .Input("moving_mean", [moving_mean])
-        .Input("moving_variance", [moving_variance])
-        .Input("gamma", [gamma])
-        .Input("beta", [beta])
-        .Output("y")
-        .Output("mean")
-        .Output("inv_variance")
-        .Output("reserve_space")
-        .Attr("axis", axis)
-        .Attr("epsilon", epsilon)
-        .Attr("momentum", momentum)
-    )
-    if addend is not None:
-        builder = builder.Input("addend", [addend])
-    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
-
-
-@oneflow_export("layers.batch_normalization_relu")
-def batch_normalization_relu(
-    inputs: oneflow._oneflow_internal.BlobDesc,
-    axis: int = -1,
-    momentum: float = 0.99,
-    epsilon: float = 0.001,
-    center: bool = True,
-    scale: bool = True,
-    beta_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    gamma_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    beta_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    gamma_regularizer: Optional[regularizer_conf_util.RegularizerConf] = None,
-    moving_mean_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    moving_variance_initializer: Optional[initializer_conf_util.InitializerConf] = None,
-    trainable: bool = True,
-    training: bool = True,
-    name: str = "BatchNorm",
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""Fused flow.layers.batch_normalization + flow.math.relu
-
-    Args:
-        inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
-        axis (int, optional): An int specifies the axis that should be normalized. Defaults to -1, which normalizes the last axis.
-        momentum (float, optional): A float specifies the momentum for the moving average. Defaults to 0.99.
-        epsilon (float, optional): A small float added to avoid division by zero. Defaults to 0.001.
-        center (bool, optional): A boolean specifies whether to add offset to normalized `Blob`. Defaults to True.
-        scale (bool, optional): A boolean specifies whether to multiply normalized `Blob` by gamma. Defaults to True.
-        beta_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for beta. Defaults to None.
-        gamma_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for gamma. Defaults to None.
-        beta_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for beta. Defaults to None.
-        gamma_regularizer (Optional[regularizer_conf_util.RegularizerConf], optional): Regularizer for gamma. Defaults to None.
-        moving_mean_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving mean. Defaults to None.
-        moving_variance_initializer (Optional[initializer_conf_util.InitializerConf], optional): Initializer for moving variance. Defaults to None.
-        trainable (bool, optional): A boolean specifies whether to train variables. Defaults to True.
-        training (bool, optional): A boolean specifies whether the model is currently being trained. Defaults to True.
-        name (Optional[str], optional): This layer's name. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A `Blob` with the same shape as the input.
-
-    Raises:
-        ValueError: If `axis` is out of range of the input's dimensions.
-
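-    For example (a minimal sketch; this wrapper simply forwards to
-    batch_normalization_add_relu without an addend):
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def bn_relu_Job(x: tp.Numpy.Placeholder((4, 8, 16, 16))
-        ) -> tp.Numpy:
-            return flow.layers.batch_normalization_relu(x, axis=1, name="BnRelu1")
-
-
-        x = np.random.randn(4, 8, 16, 16).astype(np.float32)
-        out = bn_relu_Job(x)
-
-        # out.shape (4, 8, 16, 16)
-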
-"""
-    return flow.layers.batch_normalization_add_relu(
-        inputs,
-        axis=axis,
-        momentum=momentum,
-        epsilon=epsilon,
-        center=center,
-        scale=scale,
-        beta_initializer=beta_initializer,
-        gamma_initializer=gamma_initializer,
-        beta_regularizer=beta_regularizer,
-        gamma_regularizer=gamma_regularizer,
-        moving_mean_initializer=moving_mean_initializer,
-        moving_variance_initializer=moving_variance_initializer,
-        trainable=trainable,
-        training=training,
-        name=name,
-    )
-
-
-@oneflow_export("layers.upsample_2d")
-def upsample(
-    x: oneflow._oneflow_internal.BlobDesc,
-    size: Sequence[int] = (2, 2),
-    align_corners: bool = False,
-    data_format: str = "NCHW",
-    interpolation: str = "nearest",
-    name: str = "Upsample2D",
-):
-    r"""The Upsample Layer, this layer can upsample the feature map to a specified scale.
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
-        size (Union[int, tuple], optional): The scale factor; either a single int or a (height_scale, width_scale) pair. Defaults to (2, 2).
-        align_corners (bool, optional): Defaults to False.
-        data_format (str, optional): A string specifies the format of the input `Blob`, one of "NCHW" or "NHWC". "NCHW" corresponds to channels_first, i.e. the input `Blob` with shape (batch_size, channels, height, width);
-                        "NHWC" corresponds to channels_last, i.e. the input `Blob` with shape (batch_size, height, width, channels). Defaults to "NCHW".
-        interpolation (str, optional): Image interpolation algorithm to enlarge the image size. Currently "nearest" and "bilinear" are supported. Defaults to "nearest".
-        name (Optional[str], optional): This layer's name. Defaults to None.
-
-    Raises:
-        ValueError: interpolation must be "nearest" or "bilinear".
-        ValueError: data_format must be "NHWC" or "NCHW"
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A `Blob` which is the upsampled `x`. If `size` is (2, 2), the shape of the return value is [N, C, 2H, 2W].
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def upsample_Job(x: tp.Numpy.Placeholder((1, 32, 32, 32))
-        ) -> tp.Numpy:
-            upsample = flow.layers.upsample_2d(
-                x,
-                size=(2, 2),
-                name="Upsample1"
-            )
-            return upsample
-
-
-        x = np.random.randn(1, 32, 32, 32).astype(np.float32)
-        out = upsample_Job(x)
-
-        # out.shape (1, 32, 64, 64)
-
-    """
-    if isinstance(size, int):
-        height_scale = size
-        width_scale = size
-    else:
-        assert isinstance(size, (list, tuple))
-        assert len(size) == 2
-        height_scale = size[0]
-        width_scale = size[1]
-
-    if interpolation != "nearest" and interpolation != "bilinear":
-        raise ValueError('interpolation must be "nearest" or "bilinear".')
-
-    if interpolation == "nearest" and align_corners:
-        raise ValueError('interpolation "nearest" does not support align_corners.')
-
-    if data_format.upper() != "NCHW" and data_format.upper() != "NHWC":
-        raise ValueError('data_format must be "NHWC" or "NCHW".')
-
-    need_transpose = 0
-    if data_format.upper() == "NHWC":
-        need_transpose = 1
-
-    if need_transpose:
-        x = flow.transpose(x, perm=[0, 3, 1, 2])
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("upsample")
-        .Input("x", [x])
-        .Output("y")
-        .Attr("height_scale", float(height_scale))
-        .Attr("width_scale", float(width_scale))
-        .Attr("align_corners", align_corners)
-        .Attr("data_format", "channels_first")
-        .Attr("interpolation", interpolation)
-        .Build()
-    )
-    output = op.InferAndTryRun().SoleOutputBlob()
-
-    if need_transpose:
-        output = flow.transpose(output, perm=[0, 2, 3, 1])
-
-    return output
diff --git a/oneflow/python/ops/losses/add_loss.py b/oneflow/python/ops/losses/add_loss.py
deleted file mode 100644
index 93e9f15c39452f53b0ba9883b521503e7b5cbd97..0000000000000000000000000000000000000000
--- a/oneflow/python/ops/losses/add_loss.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import absolute_import
-
-import oneflow.python.framework.c_api_util as c_api_util
-import oneflow.python.framework.hob as hob
-import oneflow.python.eager.gradient_util as gradient_util
-import oneflow.python.lib.core.enable_if as enable_if
-from oneflow.python.oneflow_export import oneflow_export
-import oneflow.python.framework.remote_blob as remote_blob_util
-import oneflow._oneflow_internal
-
-
-@oneflow_export("losses.add_loss")
-def api_add_loss(loss: oneflow._oneflow_internal.BlobDesc) -> None:
-    r"""Mark a `Blob` as a loss. Auto grad starts at every loss blob. It doesn't has to be a product of typical "loss" operator like softmax loss but can also be a `Blob` produced by any operator.
-
-    Args:
-        loss: A `Blob`.
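-
-    For example (a minimal sketch; it assumes the training configuration,
-    e.g. learning rate / model update settings, is provided elsewhere):
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        @flow.global_function(type="train")
-        def train_Job(x: tp.Numpy.Placeholder((8, 10))
-        ) -> tp.Numpy:
-            # any Blob may be marked as a loss, not only the output of a
-            # dedicated loss operator
-            y = flow.math.reduce_mean(x)
-            flow.losses.add_loss(y)
-            return y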
-    """
-    return enable_if.unique([lazy_add_loss, eager_add_loss])(loss)
-
-
-@enable_if.condition(
-    hob.in_global_mode & hob.is_trainable & ~hob.eager_execution_enabled
-)
-def lazy_add_loss(loss):
-    c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName(loss.unique_name)
-
-
-@enable_if.condition(
-    hob.in_global_mode & hob.is_trainable & hob.eager_execution_enabled
-)
-def eager_add_loss(loss):
-    c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName(loss.unique_name)
-    gradient_util.GetDefaultBackwardBlobRegister().TrySetObject4BlobName(
-        loss.logical_blob_name, loss.blob_object
-    )
diff --git a/oneflow/python/ops/pad.py b/oneflow/python/ops/pad.py
deleted file mode 100644
index c4f722cda992b25e73aeb82ffa638ce89dc52bc7..0000000000000000000000000000000000000000
--- a/oneflow/python/ops/pad.py
+++ /dev/null
@@ -1,535 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import absolute_import
-
-from typing import Optional, Sequence, Union
-
-import oneflow
-import oneflow.python.framework.id_util as id_util
-import oneflow.python.framework.remote_blob as remote_blob_util
-from oneflow.python.oneflow_export import oneflow_export, stable_api
-import oneflow._oneflow_internal
-
-
-@oneflow_export("pad")
-def pad(
-    x: oneflow._oneflow_internal.BlobDesc,
-    paddings: Sequence[int],
-    constant_value: Union[int, float] = 0,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator pads the input blob with constant value that user specifies. User can set the amount of padding by setting the parameter `paddings`.
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): The input Blob
-        paddings (Sequence[int]): A list of (before, after) pairs specifying the padding width for each dimension; its length must equal the length of `x.shape`.
-        constant_value (Union[int, float], optional): The constant value to pad. Defaults to 0.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Raises:
-        ValueError: The parameter `paddings` must be a tuple or a list.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The Blob after padding.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-
-        @flow.global_function()
-        def pad_Job(x: tp.Numpy.Placeholder((3, 3))
-        ) -> tp.Numpy:
-            return flow.pad(x,
-                            paddings=((2, 2), (1, 1)),
-                            constant_value=5)
-
-
-        x = np.array([[1, 1, 1],
-                    [1, 1, 1],
-                    [1, 1, 1]]).astype(np.float32)
-        out = pad_Job(x)
-
-        # out [[5. 5. 5. 5. 5.]
-        #      [5. 5. 5. 5. 5.]
-        #      [5. 1. 1. 1. 5.]
-        #      [5. 1. 1. 1. 5.]
-        #      [5. 1. 1. 1. 5.]
-        #      [5. 5. 5. 5. 5.]
-        #      [5. 5. 5. 5. 5.]]
-
-    """
-    padding_before = []
-    padding_after = []
-    if isinstance(paddings, (list, tuple)):
-        assert len(paddings) == len(x.shape), ValueError(
-            "paddings must be the same size of input dims"
-        )
-        for p in paddings:
-            assert isinstance(p, (list, tuple)) and len(p) == 2, ValueError(
-                "the elem of paddings must be a tuple or a list with length of 2"
-            )
-            padding_before.append(p[0])
-            padding_after.append(p[1])
-    else:
-        raise ValueError("paddings must be a tuple or a list.")
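-    # The "pad" op exposes both a floating-point and an integral constant attr;
-    # fill the one matching the input dtype and leave the other as 0.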
-    if x.dtype in [
-        oneflow.float32,
-        oneflow.float16,
-        oneflow.float64,
-    ]:
-        floating_constant_value = float(constant_value)
-        integral_constant_value = int(0)
-    else:
-        floating_constant_value = float(0)
-        integral_constant_value = int(constant_value)
-    return (
-        oneflow.user_op_builder(name if name is not None else id_util.UniqueStr("Pad_"))
-        .Op("pad")
-        .Input("x", [x])
-        .Output("y")
-        .Attr("padding_before", padding_before)
-        .Attr("padding_after", padding_after)
-        .Attr("floating_constant_value", floating_constant_value)
-        .Attr("integral_constant_value", integral_constant_value)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("pad_grad")
-def pad_grad(
-    x: oneflow._oneflow_internal.BlobDesc,
-    paddings: Sequence[int],
-    constant_value: Union[int, float] = 0,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
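-    # Gradient counterpart of `pad`: the "pad_grad" user op consumes the output
-    # gradient `dy` together with the same `paddings` and produces `dx`, i.e.
-    # the gradient with the padded borders stripped off.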
-    padding_before = []
-    padding_after = []
-    if isinstance(paddings, (list, tuple)):
-        assert len(paddings) == len(x.shape), ValueError(
-            "paddings must be the same size of input dims"
-        )
-        for p in paddings:
-            assert isinstance(p, (list, tuple)) and len(p) == 2, ValueError(
-                "the elem of paddings must be a tuple or a list with length of 2"
-            )
-            padding_before.append(p[0])
-            padding_after.append(p[1])
-    else:
-        raise ValueError("paddings must be a tuple or a list.")
-    return (
-        oneflow.user_op_builder(
-            name if name is not None else id_util.UniqueStr("PadGrad_")
-        )
-        .Op("pad_grad")
-        .Input("dy", [x])
-        .Output("dx")
-        .Attr("padding_before", padding_before)
-        .Attr("padding_after", padding_after)
-        .Attr("floating_constant_value", float(constant_value))
-        .Attr("integral_constant_value", int(constant_value))
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("same_padding")
-def same_padding(
-    x: oneflow._oneflow_internal.BlobDesc,
-    padding: str,
-    data_format: str,
-    kernel_size: Sequence[int],
-    strides: Sequence[int],
-    dilation_rate: Sequence[int],
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator do the padding in "SAME" mode, It can computes the pad width according to the `kernel_size` and `strides` to keep the size of feature map unchanged after convolution or other operations.
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): The input blob.
-        padding (str): The padding mode. It should be "SAME_UPPER" or "SAME_LOWER".
-        data_format (str): The data format of the input Blob. If the string starts with "NC", the data format is `channels first`; otherwise it is `channels last`.
-        kernel_size (Sequence[int]): The kernel size of operations. Its type should be tuple or list.
-        strides (Sequence[int]): The strides of operations. Its type should be tuple or list.
-        dilation_rate (Sequence[int]): The dilation rate of operations. Its type should be tuple or list.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The Blob after padding.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-
-        @flow.global_function()
-        def same_pad_Job(x: tp.Numpy.Placeholder((1, 1, 3, 3))
-        ) -> tp.Numpy:
-            return flow.same_padding(x,
-                                    padding="SAME_UPPER",
-                                    data_format="NCHW",
-                                    kernel_size=(3, 3),
-                                    strides=(1, 1),
-                                    dilation_rate=(1, 1))
-
-
-        x = np.ones(shape=(1, 1, 3, 3)).astype(np.float32)
-        out = same_pad_Job(x)
-
-        # out [[[[0. 0. 0. 0. 0.]
-        #        [0. 1. 1. 1. 0.]
-        #        [0. 1. 1. 1. 0.]
-        #        [0. 1. 1. 1. 0.]
-        #        [0. 0. 0. 0. 0.]]]]
-
-    """
-    assert isinstance(padding, str) and (
-        padding.upper() == "SAME_LOWER" or padding.upper() == "SAME_UPPER"
-    ), 'padding must be "SAME_LOWER" or "SAME_UPPER".'
-    channel_pos = "channels_first" if data_format.startswith("NC") else "channels_last"
-    assert isinstance(kernel_size, (list, tuple))
-    assert isinstance(strides, (list, tuple))
-    assert isinstance(dilation_rate, (list, tuple))
-    num_spatial_dims = len(x.shape) - 2
-    assert len(kernel_size) == num_spatial_dims
-    assert len(strides) == num_spatial_dims
-    assert len(dilation_rate) == num_spatial_dims
-
-    return (
-        oneflow.user_op_builder(
-            name if name is not None else id_util.UniqueStr("SamePadding_")
-        )
-        .Op("same_padding")
-        .Input("x", [x])
-        .Output("y")
-        .Attr("padding", padding.lower())
-        .Attr("data_format", channel_pos)
-        .Attr("kernel_size", kernel_size)
-        .Attr("strides", strides)
-        .Attr("dilation_rate", dilation_rate)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("reflection_pad2d")
-@stable_api
-def reflection_pad2d(
-    x: oneflow._oneflow_internal.BlobDesc,
-    padding: Union[int, tuple, list],
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """Pads the input tensor using the reflection of the input boundary.
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): input blob, only supports the "NCHW" format.
-        padding (Union[int, tuple, list]): The size or boundary of padding. If it is an int, the same padding is used in all dimensions;
-            if it is a 4-element tuple or list, it is interpreted as (padding_left, padding_right, padding_top, padding_bottom).
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The padded Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-
-        @flow.global_function()
-        def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3))
-        ) -> tp.Numpy:
-            return flow.reflection_pad2d(x, padding=[2, 2, 1, 1])
-
-
-        x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
-        out = pad_Job(x)
-
-        # out [[[[ 5.  4.  3.  4.  5.  4.  3.]
-        #    [ 2.  1.  0.  1.  2.  1.  0.]
-        #    [ 5.  4.  3.  4.  5.  4.  3.]
-        #    [ 8.  7.  6.  7.  8.  7.  6.]
-        #    [ 5.  4.  3.  4.  5.  4.  3.]]
-
-        #   [[ 14. 13. 12. 13. 14. 13. 12.]
-        #    [ 11. 10.  9. 10. 11. 10.  9.]
-        #    [ 14. 13. 12. 13. 14. 13. 12.]
-        #    [ 17. 16. 15. 16. 17. 16. 15.]
-        #    [ 14. 13. 12. 13. 14. 13. 12.]]]]
-
-    """
-    H, W = x.shape[2], x.shape[3]
-    if isinstance(padding, (tuple, list)):
-        assert len(padding) == len(x.shape), ValueError(
-            "padding boundry must be the same size of input dims"
-        )
-        assert (
-            padding[2] < H and padding[3] < H and padding[0] < W and padding[1] < W
-        ), ValueError(
-            "Padding size should be less than the corresponding input dimension!"
-        )
-        boundry = [padding[0], padding[1], padding[2], padding[3]]
-    elif isinstance(padding, int):
-        assert padding < H and padding < W, ValueError(
-            "Padding size should be less than the corresponding input dimension!"
-        )
-        boundry = [padding, padding, padding, padding]
-    else:
-        raise ValueError("padding must be in or list or tuple!")
-
-    return (
-        oneflow.user_op_builder(
-            name if name is not None else id_util.UniqueStr("Reflection_Pad2d_")
-        )
-        .Op("reflection_pad2d")
-        .Input("x", [x])
-        .Output("y")
-        .Attr("padding", list(boundry))
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("replication_pad2d")
-def replication_pad2d(
-    x: oneflow._oneflow_internal.BlobDesc,
-    padding: Union[int, tuple, list],
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """Pads the input tensor using the replication of the input boundary.
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): input blob, only supports the "NCHW" format.
-        padding (Union[int, tuple, list]): The size or boundary of padding. If it is an int, the same padding is used in all dimensions;
-            if it is a 4-element tuple or list, it is interpreted as (padding_left, padding_right, padding_top, padding_bottom).
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The padded Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-
-        @flow.global_function()
-        def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3))
-        ) -> tp.Numpy:
-            return flow.replication_pad2d(x, padding=[2, 2, 1, 1])
-
-
-        x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
-        out = pad_Job(x)
-
-        # out [[[[ 0.  0.  0.  1.  2.  2.  2.]
-        #    [ 0.  0.  0.  1.  2.  2.  2.]
-        #    [ 3.  3.  3.  4.  5.  5.  5.]
-        #    [ 6.  6.  6.  7.  8.  8.  8.]
-        #    [ 6.  6.  6.  7.  8.  8.  8.]]
-
-        #   [[ 9.  9.  9.  10.  11.  11.  11.]
-        #    [ 9.  9.  9.  10.  11.  11.  11.]
-        #    [ 12.  12.  12.  13.  14.  14.  14.]
-        #    [ 15.  15.  15.  16.  17.  17.  17.]
-        #    [ 15.  15.  15.  16.  17.  17.  17.]]]]
-
-    """
-    H, W = x.shape[2], x.shape[3]
-    if isinstance(padding, (tuple, list)):
-        assert len(padding) == len(x.shape), ValueError(
-            "padding boundry must be the same size of input dims"
-        )
-        boundry = [padding[0], padding[1], padding[2], padding[3]]
-    elif isinstance(padding, int):
-        boundry = [padding, padding, padding, padding]
-    else:
-        raise ValueError("padding must be in or list or tuple!")
-
-    return (
-        oneflow.user_op_builder(
-            name if name is not None else id_util.UniqueStr("Replication_Pad2d_")
-        )
-        .Op("replication_pad2d")
-        .Input("x", [x])
-        .Output("y")
-        .Attr("padding", list(boundry))
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("constant_pad2d")
-def constant_pad2d(
-    x: oneflow._oneflow_internal.BlobDesc,
-    padding: Union[int, tuple, list],
-    constant_value: Union[int, float] = 0,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """Pads the input tensor using an input constant value.
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): input blob, only supports the "NCHW" format.
-        padding (Union[int, tuple, list]): The size or boundary of padding. If it is an int, the same padding is used in all dimensions;
-            if it is a 4-element tuple or list, it is interpreted as (padding_left, padding_right, padding_top, padding_bottom).
-        constant_value (Union[int, float]): The constant value used for padding. Defaults to 0.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The padded Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-
-        @flow.global_function()
-        def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3))
-        ) -> tp.Numpy:
-            return flow.constant_pad2d(x, padding=[2, 2, 1, 1], constant_value=1.5)
-
-
-        x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
-        out = pad_Job(x)
-
-        # out [[[[ 1.5  1.5  1.5  1.5  1.5  1.5  1.5]
-        #    [ 1.5  1.5  0.  1.  2.  1.5  1.5]
-        #    [ 1.5  1.5  3.  4.  5.  1.5  1.5]
-        #    [ 1.5  1.5  6.  7.  8.  1.5  1.5]
-        #    [ 1.5  1.5  1.5  1.5  1.5  1.5  1.5]]
-
-        #   [[ 1.5  1.5  1.5  1.5  1.5  1.5  1.5]
-        #    [ 1.5  1.5  9.  10.  11.  1.5  1.5]
-        #    [ 1.5  1.5  12.  13.  14.  1.5  1.5]
-        #    [ 1.5  1.5  15.  16.  17.  1.5  1.5]
-        #    [ 1.5  1.5  1.5  1.5  1.5  1.5  1.5]]]]
-
-    """
-    H, W = x.shape[2], x.shape[3]
-    if isinstance(padding, (tuple, list)):
-        assert len(padding) == len(x.shape), ValueError(
-            "padding boundry must be the same size of input dims"
-        )
-        boundry = [padding[0], padding[1], padding[2], padding[3]]
-    elif isinstance(padding, int):
-        boundry = [padding, padding, padding, padding]
-    else:
-        raise ValueError("padding must be in or list or tuple!")
-
-    if x.dtype in [
-        oneflow.float32,
-        oneflow.float16,
-        oneflow.float64,
-    ]:
-        floating_value = float(constant_value)
-        integral_value = int(0)
-    else:
-        floating_value = float(0)
-        integral_value = int(constant_value)
-
-    return (
-        oneflow.user_op_builder(
-            name if name is not None else id_util.UniqueStr("Constant_Pad2d_")
-        )
-        .Op("constant_pad2d")
-        .Input("x", [x])
-        .Output("y")
-        .Attr("padding", list(boundry))
-        .Attr("floating_value", floating_value)
-        .Attr("integral_value", integral_value)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("zero_pad2d")
-def zero_pad2d(
-    x: oneflow._oneflow_internal.BlobDesc,
-    padding: Union[int, tuple, list],
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """Pads the input tensor using zeros.
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): input blob, only supports the "NCHW" format.
-        padding (Union[int, tuple, list]): The size or boundary of padding. If it is an int, the same padding is used in all dimensions;
-            if it is a 4-element tuple or list, it is interpreted as (padding_left, padding_right, padding_top, padding_bottom).
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The padded Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-
-        @flow.global_function()
-        def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3))
-        ) -> tp.Numpy:
-            return flow.zero_pad2d(x, padding=[2, 2, 1, 1])
-
-
-        x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
-        out = pad_Job(x)
-
-        # out [[[[ 0.  0.  0.  0.  0.  0.  0.]
-        #    [ 0.  0.  0.  1.  2.  0.  0.]
-        #    [ 0.  0.  3.  4.  5.  0.  0.]
-        #    [ 0.  0.  6.  7.  8.  0.  0.]
-        #    [ 0.  0.  0.  0.  0.  0.  0.]]
-
-        #   [[ 0.  0.  0.  0.  0.  0.  0.]
-        #    [ 0.  0.  9.  10.  11.  0.  0.]
-        #    [ 0.  0.  12.  13.  14.  0.  0.]
-        #    [ 0.  0.  15.  16.  17.  0.  0.]
-        #    [ 0.  0.  0.  0.  0.  0.  0.]]]]
-
-    """
-    if name is None:
-        name = id_util.UniqueStr("Zero_Pad2d_")
-    return constant_pad2d(x, padding, 0.0, name)
diff --git a/oneflow/python/ops/reduce_ops.py b/oneflow/python/ops/reduce_ops.py
deleted file mode 100644
index ac9b37e79a8790139ee4d50df39d7b43cb0a01aa..0000000000000000000000000000000000000000
--- a/oneflow/python/ops/reduce_ops.py
+++ /dev/null
@@ -1,623 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-import os
-from typing import Optional, Sequence, Sized, Union
-
-import oneflow as flow
-import oneflow.core.operator.op_conf_pb2 as op_conf_util
-import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
-import oneflow.python.framework.id_util as id_util
-import oneflow.python.framework.interpret_util as interpret_util
-import oneflow.python.framework.remote_blob as remote_blob_util
-from oneflow.python.oneflow_export import oneflow_export
-import oneflow._oneflow_internal
-
-
-def _gen_unique_name_if_need(name, default_name):
-    if name is None:
-        return id_util.UniqueStr(default_name)
-
-    assert isinstance(name, str), name
-    return name
-
-
-def _check_axis(axis, shape):
-    if axis is None:
-        axis = list(range(len(shape)))
-
-    if isinstance(axis, int):
-        axis = [axis]
-
-    assert isinstance(axis, (list, tuple)), "Invalid axis {}".format(axis)
-    for x in axis:
-        if x < 0:
-            x += len(shape)
-        assert x >= 0 and x < len(shape), "Invalid axis {}, len(shape): {}".format(
-            axis, len(shape)
-        )
-
-    return axis
-
-
-def _do_reduce(x, name, op_type_name, keepdims, axis):
-    op = (
-        flow.user_op_builder(name)
-        .Op(op_type_name)
-        .Input("input_tensor", [x])
-        .Output("output_tensor")
-        .Attr("axis", axis)
-        .Attr("keepdims", keepdims)
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("math.reduce_sum")
-def reduce_sum(
-    input_tensor: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator computes the sum of elements across dimensions of a tensor
-
-    Args:
-        input_tensor (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the sum value is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of sum on the specified axis of input Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_sum_Job(x: tp.Numpy.Placeholder((3, 3))
-        ) -> tp.Numpy:
-            return flow.math.reduce_sum(x, axis=1, keepdims=True)
-
-
-        x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
-        out = reduce_sum_Job(x)
-
-        # out [[ 6.]
-        #      [15.]
-        #      [24.]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceSum_")
-
-    axis = _check_axis(axis, input_tensor.shape)
-    if len(axis) == 0:
-        return input_tensor
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("reduce_sum")
-        .Input("input_tensor", [input_tensor])
-        .Output("output_tensor")
-        .Attr("axis", axis)
-        .Attr("keepdims", keepdims)
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("math.reduce_any")
-def reduce_any(
-    x: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator computes the `logical or` of input Blob along the specified axis
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the logical or value is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of logical or on the specified axis of input Blob
-
-    Note:
-
-        The dtype of the input Blob must be int8.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_any_Job(x: tp.Numpy.Placeholder((3, 3), dtype=flow.int8)
-        ) -> tp.Numpy:
-            return flow.math.reduce_any(x, axis=1, keepdims=True)
-
-
-        x = np.array([[1, 0, 0], [0, 0, 0], [1, 0, 1]]).astype(np.int8)
-        out = reduce_any_Job(x)
-
-        # out [[1]
-        #      [0]
-        #      [1]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceAny_")
-    axis = _check_axis(axis, x.shape)
-    if len(axis) == 0:
-        return flow.math.not_equal(x, flow.constant_scalar(value=0.0, dtype=x.dtype))
-    return _do_reduce(x, name, "reduce_any", keepdims, axis)
-
-
-@oneflow_export("math.reduce_min")
-def reduce_min(
-    x: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator computes the minimum value of input Blob along the specified axis
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the minimum value is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of minimum value on the specified axis of input Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_min_Job(x: tp.Numpy.Placeholder((3, 3))
-        ) -> tp.Numpy:
-            return flow.math.reduce_min(x, axis=1, keepdims=True)
-
-
-        x = np.array([[2, 1, 3], [5, 3, 6], [7, 4, 9]]).astype(np.float32)
-        out = reduce_min_Job(x)
-
-        # out [[1.]
-        #      [3.]
-        #      [4.]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceMin_")
-    axis = _check_axis(axis, x.shape)
-    if len(axis) == 0:
-        return x
-    return _do_reduce(x, name, "reduce_min", keepdims, axis)
-
-
-@oneflow_export("math.reduce_max")
-def reduce_max(
-    x: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator computes the maximum value of input Blob along the specified axis
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the maximum value is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of maximum value on the specified axis of input Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_max_Job(x: tp.Numpy.Placeholder((3, 3))
-        ) -> tp.Numpy:
-            return flow.math.reduce_max(x, axis=1, keepdims=True)
-
-
-        x = np.array([[2, 1, 4], [5, 3, 7], [7, 4, 9]]).astype(np.float32)
-        out = reduce_max_Job(x)
-
-        # out [[4.]
-        #      [7.]
-        #      [9.]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceMax_")
-    axis = _check_axis(axis, x.shape)
-    if len(axis) == 0:
-        return x
-    return _do_reduce(x, name, "reduce_max", keepdims, axis)
-
-
-@oneflow_export("math.reduce_prod")
-def reduce_prod(
-    x: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator computes the product of input Blob along the specified axis
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the product is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of product value on the specified axis of input Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_product_Job(x: tp.Numpy.Placeholder((3, 3))
-        ) -> tp.Numpy:
-            return flow.math.reduce_prod(x, axis=1, keepdims=True)
-
-
-        x = np.array([[1, 2, 3], [3, 4, 5], [6, 3, 2]]).astype(np.float32)
-        out = reduce_product_Job(x)
-
-        # out [[ 6.]
-        #      [60.]
-        #      [36.]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceProd_")
-    axis = _check_axis(axis, x.shape)
-    if len(axis) == 0:
-        return x
-    return _do_reduce(x, name, "reduce_prod", keepdims, axis)
-
-
-@oneflow_export("math.reduce_all")
-def reduce_all(
-    x: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator computes the `logical and` of input Blob along the specified axis
-
-    Args:
-        x (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the logical and value is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of logical and value on the specified axis of input Blob
-
-    Note:
-
-        The dtype of the input Blob must be int8.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_all_Job(x: tp.Numpy.Placeholder((3, 3), dtype=flow.int8)
-        ) -> tp.Numpy:
-            return flow.math.reduce_all(x, axis=1, keepdims=True)
-
-
-        x = np.array([[1, 0, 0], [0, 0, 0], [1, 1, 1]]).astype(np.int8)
-        out = reduce_all_Job(x)
-
-        # out [[0]
-        #      [0]
-        #      [1]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceAll_")
-    axis = _check_axis(axis, x.shape)
-    if len(axis) == 0:
-        return flow.math.not_equal(x, flow.constant_scalar(value=0.0, dtype=x.dtype))
-    return _do_reduce(x, name, "reduce_all", keepdims, axis)
-
-
-@oneflow_export("math.reduce_euclidean_norm")
-def reduce_euclidean_norm(
-    input_tensor: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""This operator computes the Euclidean norm of input Blob along the specified axis
-
-    The equation is:
-
-    .. math::
-
-        out=\sqrt{\sum_{t=0}^{n} x_{t}^2}
-
-    Args:
-        input_tensor (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the Euclidean norm is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of Euclidean norm on the specified axis of input Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_euclidean_norm_Job(x: tp.Numpy.Placeholder((3, 2))
-        ) -> tp.Numpy:
-            return flow.math.reduce_euclidean_norm(x, axis=1, keepdims=True)
-
-
-        x = np.array([[3, 4], [5, 12], [8, 15]]).astype(np.float32)
-        out = reduce_euclidean_norm_Job(x)
-
-        # out [[ 5.]
-        #      [13.]
-        #      [17.]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceEuclideanNorm_")
-    return flow.math.sqrt(
-        flow.math.reduce_sum(
-            flow.math.square(input_tensor, name + "_square"),
-            axis,
-            keepdims,
-            name + "_reduce_sum",
-        ),
-        name + "_sqrt",
-    )
-
-
-@oneflow_export("math.reduce_logsumexp")
-def reduce_logsumexp(
-    input_tensor: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""This operator computes the log of exponential sum of input Blob along the specified axis
-
-
-    The equation is:
-
-    .. math::
-
-        out = \log(\sum_{t=0}^{n} e^{x_{t}})
-
-    Args:
-        input_tensor (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the log of exponential sum is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of log of exponential sum on the specified axis of input Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_logsumexp_Job(x: tp.Numpy.Placeholder((3, 2))
-        ) -> tp.Numpy:
-            return flow.math.reduce_logsumexp(x, axis=1, keepdims=True)
-
-
-        x = np.array([[0, 0], [1, 1], [2, 2]]).astype(np.float32)
-        out = reduce_logsumexp_Job(x)
-
-        # out [[0.6931472]
-        #      [1.6931472]
-        #      [2.6931472]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceLogSumExp_")
-    axis = _check_axis(axis, input_tensor.shape)
-    return flow.math.log(
-        flow.math.reduce_sum(
-            flow.math.exp(input_tensor, name + "_exp"),
-            axis,
-            keepdims,
-            name + "_reduce_sum",
-        ),
-        name + "_log",
-    )
-
-
-@oneflow_export("math.reduce_std")
-def reduce_std(
-    input_tensor: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""This operator computes the standard deviation of input Blob along the specified axis
-
-    The equation is:
-
-    .. math::
-
-        out=\sqrt{\frac{1}{n}*\sum_{i=1}^{n}(x_i-mean)^2}
-
-    Args:
-        input_tensor (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the standard deviation is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of standard deviation on the specified axis of input Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_std_Job(x: tp.Numpy.Placeholder((3, 3))
-        ) -> tp.Numpy:
-            return flow.math.reduce_std(x, axis=1, keepdims=True)
-
-
-        x = np.array([[0, 5, 10], [5, 5, 5], [12, 3, 0]]).astype(np.float32)
-        out = reduce_std_Job(x)
-
-        # out [[4.0824833]
-        #      [0.       ]
-        #      [5.0990195]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceStd_")
-    axis = _check_axis(axis, input_tensor.shape)
-    if isinstance(axis, list) and len(axis) == 0:
-        return flow.zeros_like(
-            input_tensor, dtype=input_tensor.dtype, name=name + "_zeros_like"
-        )
-    return flow.math.sqrt(
-        flow.math.reduce_variance(
-            input_tensor, axis, keepdims, name + "_reduce_variance"
-        ),
-        name + "_sqrt",
-    )
-
-
-@oneflow_export("math.reduce_variance")
-def reduce_variance(
-    input_tensor: oneflow._oneflow_internal.BlobDesc,
-    axis: Optional[Union[int, Sequence[int]]] = None,
-    keepdims: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""This operator computes the variance of input Blob along the specified axis
-
-    The equation is:
-
-    .. math::
-
-        out=\frac{1}{n}*\sum_{i=1}^{n}(x_i-mean)^2
-
-    Args:
-        input_tensor (oneflow._oneflow_internal.BlobDesc): A Blob
-        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the variance is computed. Defaults to None.
-        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result of variance on the specified axis of input Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import numpy as np
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def reduce_variance_Job(x: tp.Numpy.Placeholder((3, 3))
-        ) -> tp.Numpy:
-            return flow.math.reduce_variance(x, axis=1, keepdims=True)
-
-
-        x = np.array([[0, 5, 10], [5, 5, 5], [12, 3, 0]]).astype(np.float32)
-        out = reduce_variance_Job(x)
-
-        # out [[16.666668]
-        #      [ 0.      ]
-        #      [26.      ]]
-
-    """
-    name = _gen_unique_name_if_need(name, "ReduceVariance_")
-    axis = _check_axis(axis, input_tensor.shape)
-    if isinstance(axis, list) and len(axis) == 0:
-        return flow.zeros_like(
-            input_tensor, dtype=input_tensor.dtype, name=name + "_zeros_like"
-        )
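-    # Uses the identity Var(x) = E[x^2] - (E[x])^2: the mean of the squares
-    # minus the square of the mean, each computed with reduce_mean.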
-    return flow.math.subtract(
-        flow.math.reduce_mean(
-            flow.math.square(input_tensor, name + "_square_minuend"),
-            axis,
-            keepdims,
-            name + "_reduce_mean_minuend",
-        ),
-        flow.math.square(
-            flow.math.reduce_mean(
-                input_tensor, axis, keepdims, name + "_reduce_mean_subtrahend"
-            ),
-            name + "_square_subtrahend",
-        ),
-        name + "_subtract",
-    )
diff --git a/oneflow/python/ops/sort_ops.py b/oneflow/python/ops/sort_ops.py
deleted file mode 100644
index afd9344615a5564eeaf99ce970bd46ef0088bff7..0000000000000000000000000000000000000000
--- a/oneflow/python/ops/sort_ops.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import absolute_import
-
-from typing import Optional
-
-import oneflow as flow
-import oneflow.python.framework.id_util as id_util
-import oneflow.python.framework.remote_blob as remote_blob_util
-from oneflow.python.oneflow_export import oneflow_export, stable_api
-from oneflow.python.ops.transpose_util import get_perm_when_transpose_axis_to_last_dim
-from oneflow.python.ops.transpose_util import get_inversed_perm
-import oneflow._oneflow_internal
-
-
-def _sort_at_last_dim(
-    input: oneflow._oneflow_internal.BlobDesc,
-    direction: str = "ASCENDING",
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
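-    # Sorts `input` along its last dimension using the "sort" user op;
-    # `direction` must be "ASCENDING" or "DESCENDING".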
-    assert direction in ["ASCENDING", "DESCENDING"]
-    return (
-        flow.user_op_builder(name if name is not None else id_util.UniqueStr("Sort_"))
-        .Op("sort")
-        .Input("in", [input])
-        .Output("out")
-        .Attr("direction", direction)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-def _argsort_at_last_dim(
-    input: oneflow._oneflow_internal.BlobDesc,
-    direction: str = "ASCENDING",
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
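-    # Returns the indices that would sort `input` along its last dimension,
-    # using the "arg_sort" user op.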
-    assert direction in ["ASCENDING", "DESCENDING"]
-    return (
-        flow.user_op_builder(
-            name if name is not None else id_util.UniqueStr("ArgSort_")
-        )
-        .Op("arg_sort")
-        .Input("in", [input])
-        .Output("out")
-        .Attr("direction", direction)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
diff --git a/oneflow/python/ops/summary_ops.py b/oneflow/python/ops/summary_ops.py
deleted file mode 100644
index 0240915998afcb6dfde71de7675198292a528d52..0000000000000000000000000000000000000000
--- a/oneflow/python/ops/summary_ops.py
+++ /dev/null
@@ -1,145 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import absolute_import
-
-import oneflow.python.framework.id_util as id_util
-from oneflow.python.oneflow_export import oneflow_export
-import oneflow.python.ops.user_op_builder as user_op_builder
-
-import oneflow as flow
-
-
-@oneflow_export("summary.scalar")
-def write_scalar(value, step, tag, name=None):
-    r"""Write scalar to log file
-
-    Args:
-        value: A 'Blob' with 1 value and dtype in (flow.float, flow.double, flow.int64, flow.int32)
-        step: A 'Blob' with 1 value and dtype is 'flow.int64'
-        tag: A 'Blob' with 1 value and dtype is 'flow.int8'
-        name: This operator's name 
-    """
-    if name is None:
-        name = id_util.UniqueStr("WriteScalar_")
-    (
-        flow.user_op_builder(name)
-        .Op("summary_write_scalar")
-        .Input("in", [value])
-        .Input("step", [step])
-        .Input("tag", [tag])
-        .Build()
-        .InferAndTryRun()
-    )
-
-
-@oneflow_export("summary.create_summary_writer")
-def create_summary_writer(logdir, name=None):
-    r"""Create a summary writer object
-
-    Args:
-        logdir: log dir
-        name: This operator's name
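-
-    For example (a minimal sketch; the log directory is an arbitrary path):
-
-    .. code-block:: python
-
-        import oneflow as flow
-
-
-        @flow.global_function()
-        def create_writer_Job():
-            flow.summary.create_summary_writer("/tmp/oneflow_summary")
-
-
-        create_writer_Job()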
-    """
-    if name is None:
-        name = id_util.UniqueStr("CreateWriter_")
-    (
-        flow.user_op_builder(name)
-        .Op("create_summary_writer")
-        .Attr("logdir", logdir)
-        .Build()
-        .InferAndTryRun()
-    )
-
-
-@oneflow_export("summary.flush_summary_writer")
-def flush_summary_writer(name=None):
-    r"""Flush the summary writer
-
-    Args:
-        name: This operator's name
-    """
-    if name is None:
-        name = id_util.UniqueStr("FlushWriter_")
-    (flow.user_op_builder(name).Op("flush_summary_writer").Build().InferAndTryRun())
-
-
-@oneflow_export("summary.histogram")
-def write_histogram(value, step, tag, name=None):
-    r"""Write histogram to log file
-
-    Args:
-        value: A 'Blob' with dtype in (flow.float, flow.double, flow.int64, flow.int32, flow.int8, flow.uint8)
-        step: A 'Blob' with 1 value and dtype is 'flow.int64'
-        tag: A 'Blob' with 1 value and dtype is 'flow.int8'
-        name: This operator's name 
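-
-    For example (a minimal sketch; the placeholder shapes and the int8 encoding of the
-    tag string are illustrative assumptions):
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-
-        func_config = flow.FunctionConfig()
-        func_config.default_logical_view(flow.scope.mirrored_view())
-
-        @flow.global_function(function_config=func_config)
-        def histogram_summary_job(
-            value: tp.ListNumpy.Placeholder((100,), dtype=flow.float),
-            step: tp.ListNumpy.Placeholder((1,), dtype=flow.int64),
-            tag: tp.ListNumpy.Placeholder((10,), dtype=flow.int8),
-        ):
-            flow.summary.histogram(value, step, tag)
-
-        if __name__ == "__main__":
-            value = np.random.rand(100).astype(np.float32)
-            step = np.array([1], dtype=np.int64)
-            tag = np.frombuffer(b"weights", dtype=np.int8)
-            histogram_summary_job([value], [step], [tag])
-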
-    """
-    if name is None:
-        name = id_util.UniqueStr("WriteHistogram_")
-    (
-        flow.user_op_builder(name)
-        .Op("summary_write_histogram")
-        .Input("in", [value])
-        .Input("step", [step])
-        .Input("tag", [tag])
-        .Build()
-        .InferAndTryRun()
-    )
-
-
-@oneflow_export("summary.pb")
-def write_pb(value, step=None, name=None):
-    r"""Write raw protobuf data to log file
-
-    Args:
-        value: A 'Blob' with dtype in 'flow.int8'
-        step: A 'Blob' with 1 value and dtype is 'flow.int64'
-        name: This operator's name 
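-
-    For example (a minimal sketch; how the protobuf message is built and serialized is
-    outside the scope of this op, so the `serialized` bytes below are a hypothetical
-    placeholder):
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-
-        func_config = flow.FunctionConfig()
-        func_config.default_logical_view(flow.scope.mirrored_view())
-
-        @flow.global_function(function_config=func_config)
-        def pb_summary_job(
-            value: tp.ListNumpy.Placeholder((1024,), dtype=flow.int8),
-            step: tp.ListNumpy.Placeholder((1,), dtype=flow.int64),
-        ):
-            flow.summary.pb(value, step=step)
-
-        if __name__ == "__main__":
-            serialized = b"..."  # hypothetical serialized protobuf bytes
-            value = np.frombuffer(serialized, dtype=np.int8)
-            step = np.array([1], dtype=np.int64)
-            pb_summary_job([value], [step])
-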
-    """
-    if name is None:
-        name = id_util.UniqueStr("WritePb_")
-    (
-        flow.user_op_builder(name)
-        .Op("summary_write_pb")
-        .Input("in", [value])
-        .Input("step", [step])
-        .Build()
-        .InferAndTryRun()
-    )
-
-
-@oneflow_export("summary.image")
-def write_image(value, step=None, tag=None, name=None):
-    r"""Write image to log file
-
-    Args:
-        value: A 'Blob' with dtype in 'flow.uint8'
-        step: A 'Blob' with 1 value and dtype is 'flow.int64'
-        tag: A 'Blob' with 1 value and dtype is 'flow.int8'
-        name: This operator's name 
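-
-    For example (a minimal sketch; the image shape and the int8 encoding of the tag
-    string are illustrative assumptions):
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-
-        func_config = flow.FunctionConfig()
-        func_config.default_logical_view(flow.scope.mirrored_view())
-
-        @flow.global_function(function_config=func_config)
-        def image_summary_job(
-            value: tp.ListNumpy.Placeholder((1, 32, 32, 3), dtype=flow.uint8),
-            step: tp.ListNumpy.Placeholder((1,), dtype=flow.int64),
-            tag: tp.ListNumpy.Placeholder((10,), dtype=flow.int8),
-        ):
-            flow.summary.image(value, step=step, tag=tag)
-
-        if __name__ == "__main__":
-            value = np.random.randint(0, 255, size=(1, 32, 32, 3), dtype=np.uint8)
-            step = np.array([1], dtype=np.int64)
-            tag = np.frombuffer(b"image", dtype=np.int8)
-            image_summary_job([value], [step], [tag])
-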
-    """
-    if name is None:
-        name = id_util.UniqueStr("WriteImage_")
-    if tag is None:
-        tag = "image"
-    (
-        flow.user_op_builder(name)
-        .Op("summary_write_image")
-        .Input("in", [value])
-        .Input("step", [step])
-        .Input("tag", [tag])
-        .Build()
-        .InferAndTryRun()
-    )
diff --git a/oneflow/python/ops/user_data_ops.py b/oneflow/python/ops/user_data_ops.py
deleted file mode 100644
index 37549b64646822f71aeea5fb5a445fa1ad9b26b7..0000000000000000000000000000000000000000
--- a/oneflow/python/ops/user_data_ops.py
+++ /dev/null
@@ -1,2475 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import absolute_import
-
-import oneflow as flow
-import oneflow.python.framework.id_util as id_util
-import oneflow.python.framework.remote_blob as remote_blob_util
-import oneflow.python.framework.module as module_util
-import oneflow._oneflow_internal
-from oneflow.python.oneflow_export import oneflow_export
-from typing import Optional, Sequence, Union
-import random
-import sys
-import traceback
-
-
-@oneflow_export("data.OFRecordRawDecoder", "data.ofrecord_raw_decoder")
-def OFRecordRawDecoder(
-    input_blob: oneflow._oneflow_internal.BlobDesc,
-    blob_name: str,
-    shape: Sequence[int],
-    dtype: flow.dtype,
-    dim1_varying_length: bool = False,
-    truncate: bool = False,
-    auto_zero_padding: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    if auto_zero_padding:
-        print(
-            "WARNING: auto_zero_padding has been deprecated, please use truncate instead."
-        )
-    if name is None:
-        name = id_util.UniqueStr("OFRecordRawDecoder_")
-    return (
-        flow.user_op_builder(name)
-        .Op("ofrecord_raw_decoder")
-        .Input("in", [input_blob])
-        .Output("out")
-        .Attr("name", blob_name)
-        .Attr("shape", shape)
-        .Attr("data_type", dtype)
-        .Attr("dim1_varying_length", dim1_varying_length)
-        .Attr("truncate", truncate or auto_zero_padding)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("data.OFRecordBytesDecoder", "data.ofrecord_bytes_decoder")
-def OFRecordBytesDecoder(
-    input_blob: oneflow._oneflow_internal.BlobDesc,
-    blob_name: str,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    if name is None:
-        name = id_util.UniqueStr("OFRecordBytesDecoder_")
-    return (
-        flow.user_op_builder(name)
-        .Op("ofrecord_bytes_decoder")
-        .Input("in", [input_blob])
-        .Output("out")
-        .Attr("name", blob_name)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export(
-    "data.OFRecordImageDecoderRandomCrop", "data.ofrecord_image_decoder_random_crop"
-)
-def api_ofrecord_image_decoder_random_crop(
-    input_blob: oneflow._oneflow_internal.BlobDesc,
-    blob_name: str,
-    color_space: str = "BGR",
-    num_attempts: int = 10,
-    seed: Optional[int] = None,
-    random_area: Sequence[float] = [0.08, 1.0],
-    random_aspect_ratio: Sequence[float] = [0.75, 1.333333],
-    name: str = "OFRecordImageDecoderRandomCrop",
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator is an image decoder with random crop.
-
-    Args:
-        input_blob (oneflow._oneflow_internal.BlobDesc): The input Blob
-        blob_name (str): The name of the Blob
-        color_space (str, optional): The color space, such as "RGB", "BGR". Defaults to "BGR".
-        num_attempts (int, optional): The maximum number of random cropping attempts. Defaults to 10.
-        seed (Optional[int], optional): The random seed. Defaults to None.
-        random_area (Sequence[float], optional): The range of the random cropping area (as a fraction of the original area). Defaults to [0.08, 1.0].
-        random_aspect_ratio (Sequence[float], optional): The range of the random aspect ratio. Defaults to [0.75, 1.333333].
-        name (str, optional): The name for the operation. Defaults to "OFRecordImageDecoderRandomCrop".
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The random cropped Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        from typing import Tuple
-
-
-        @flow.global_function(type="predict")
-        def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]:
-            batch_size = 16
-            color_space = "RGB"
-            # our ofrecord file path is "./dataset/part-0"
-            ofrecord = flow.data.ofrecord_reader(
-                "./imgdataset",
-                batch_size=batch_size,
-                data_part_num=1,
-                part_name_suffix_length=-1,
-                part_name_prefix='part-',
-                random_shuffle=True,
-                shuffle_after_epoch=True,
-            )
-            image = flow.data.OFRecordImageDecoderRandomCrop(
-                    ofrecord, "encoded", color_space=color_space
-                )
-            res_image, scale, new_size = flow.image.Resize(
-                    image, target_size=(224, 224)
-                )
-            label = flow.data.OFRecordRawDecoder(
-                ofrecord, "class/label", shape=(1, ), dtype=flow.int32
-            )
-
-            return res_image, label
-
-        if __name__ == "__main__":
-            images, labels = ofrecord_reader_job()
-            # images.shape (16, 224, 224, 3)
-
-    """
-    assert isinstance(name, str)
-    if seed is not None:
-        assert name is not None
-    module = flow.find_or_create_module(
-        name,
-        lambda: OFRecordImageDecoderRandomCropModule(
-            blob_name=blob_name,
-            color_space=color_space,
-            num_attempts=num_attempts,
-            random_seed=seed,
-            random_area=random_area,
-            random_aspect_ratio=random_aspect_ratio,
-            name=name,
-        ),
-    )
-    return module(input_blob)
-
-
-class OFRecordImageDecoderRandomCropModule(module_util.Module):
-    def __init__(
-        self,
-        blob_name: str,
-        color_space: str,
-        num_attempts: int,
-        random_seed: Optional[int],
-        random_area: Sequence[float],
-        random_aspect_ratio: Sequence[float],
-        name: str,
-    ):
-        module_util.Module.__init__(self, name)
-        seed, has_seed = flow.random.gen_seed(random_seed)
-        self.op_module_builder = (
-            flow.user_op_module_builder("ofrecord_image_decoder_random_crop")
-            .InputSize("in", 1)
-            .Output("out")
-            .Attr("name", blob_name)
-            .Attr("color_space", color_space)
-            .Attr("num_attempts", num_attempts)
-            .Attr("random_area", random_area)
-            .Attr("random_aspect_ratio", random_aspect_ratio)
-            .Attr("has_seed", has_seed)
-            .Attr("seed", seed)
-            .CheckAndComplete()
-        )
-        self.op_module_builder.user_op_module.InitOpKernel()
-
-    def forward(self, input: oneflow._oneflow_internal.BlobDesc):
-        if self.call_seq_no == 0:
-            name = self.module_name
-        else:
-            name = id_util.UniqueStr("OFRecordImageDecoderRandomCrop_")
-
-        return (
-            self.op_module_builder.OpName(name)
-            .Input("in", [input])
-            .Build()
-            .InferAndTryRun()
-            .SoleOutputBlob()
-        )
-
-
-@oneflow_export("data.OFRecordImageDecoder", "data.ofrecord_image_decoder")
-def OFRecordImageDecoder(
-    input_blob: oneflow._oneflow_internal.BlobDesc,
-    blob_name: str,
-    color_space: str = "BGR",
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator is an image decoder.
-
-    Args:
-        input_blob (oneflow._oneflow_internal.BlobDesc): The input Blob
-        blob_name (str): The name of the input Blob
-        color_space (str, optional): The color space, such as "RGB", "BGR". Defaults to "BGR".
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        from typing import Tuple
-
-
-        @flow.global_function(type="predict")
-        def image_decoder_job() -> Tuple[tp.Numpy, tp.Numpy]:
-            batch_size = 16
-            color_space = "RGB"
-            # our ofrecord file path is "./dataset/part-0"
-            ofrecord = flow.data.ofrecord_reader(
-                "./imgdataset",
-                batch_size=batch_size,
-                data_part_num=1,
-                part_name_suffix_length=-1,
-                part_name_prefix='part-',
-                random_shuffle=True,
-                shuffle_after_epoch=True,
-            )
-            image = flow.data.OFRecordImageDecoder(
-                    ofrecord, "encoded", color_space=color_space
-                )
-            res_image, scale, new_size = flow.image.Resize(
-                    image, target_size=(224, 224)
-                )
-            label = flow.data.OFRecordRawDecoder(
-                ofrecord, "class/label", shape=(1, ), dtype=flow.int32
-            )
-
-            return res_image, label
-
-        if __name__ == "__main__":
-            images, labels = image_decoder_job()
-            # image.shape (16, 224, 224, 3)
-
-    """
-    if name is None:
-        name = id_util.UniqueStr("OFRecordImageDecoder_")
-    return (
-        flow.user_op_builder(name)
-        .Op("ofrecord_image_decoder")
-        .Input("in", [input_blob])
-        .Output("out")
-        .Attr("name", blob_name)
-        .Attr("color_space", color_space)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("image.Resize", "image.resize", "image_resize")
-def api_image_resize(
-    image: oneflow._oneflow_internal.BlobDesc,
-    target_size: Union[int, Sequence[int]] = None,
-    min_size: Optional[int] = None,
-    max_size: Optional[int] = None,
-    keep_aspect_ratio: bool = False,
-    resize_side: str = "shorter",
-    channels: int = 3,
-    dtype: Optional[flow.dtype] = None,
-    interpolation_type: str = "auto",
-    name: Optional[str] = None,
-    # deprecated params, reserve for backward compatible
-    color_space: Optional[str] = None,
-    interp_type: Optional[str] = None,
-    resize_shorter: int = 0,
-    resize_x: int = 0,
-    resize_y: int = 0,
-) -> Union[
-    oneflow._oneflow_internal.BlobDesc, Sequence[oneflow._oneflow_internal.BlobDesc]
-]:
-    r"""Resize images to target size.
-
-    Args:
-        image: A `Tensor` consisting of images to be resized.
-        target_size: A list or tuple when `keep_aspect_ratio` is false, or an int when `keep_aspect_ratio` is true. When `keep_aspect_ratio` is false, `target_size` takes the form `(target_width, target_height)` that the image will be resized to. When `keep_aspect_ratio` is true, the longer or shorter side of the image will be resized to the target size.
-        min_size: An int, optional. Only works when `keep_aspect_ratio` is true and `resize_side` is "longer". If `min_size` is not None, the shorter side must be greater than or equal to `min_size`. Default is None.
-        max_size: An int, optional. Only works when `keep_aspect_ratio` is true and `resize_side` is "shorter". If `max_size` is not None, the longer side must be less than or equal to `max_size`. Default is None.
-        keep_aspect_ratio: A bool. If false, the image will be resized to a fixed width and height; otherwise it will be resized while keeping its aspect ratio.
-        resize_side: A str of "longer" or "shorter". Only works when `keep_aspect_ratio` is True. If `resize_side` is "longer", the longer side of the image will be resized to `target_size`. If `resize_side` is "shorter", the shorter side of the image will be resized to `target_size`.
-        channels: An int. The number of channels an image has.
-        dtype: `oneflow.dtype`. Indicates the data type of the resized output image.
-        interpolation_type: A str of "auto", "bilinear", "nearest_neighbor", "bicubic" or "area". Indicates the interpolation method used to resize the image.
-        name: A str, optional. Name for the operation.
-        color_space: Deprecated, a str of "RGB", "BGR" or "GRAY". Please use `channels` instead.
-        interp_type: Deprecated, a str of "Linear", "Cubic" or "NN". Please use `interpolation_type` instead.
-        resize_shorter: Deprecated, an int. Indicates the target size that the shorter side of the image will be resized to. Please use `target_size` and `resize_side` instead.
-        resize_x: Deprecated, an int. Indicates the target size that the width of the image will be resized to. Please use `target_size` instead.
-        resize_y: Deprecated, an int. Indicates the target size that the height of the image will be resized to. Please use `target_size` instead.
-
-    Returns:
-        Tuple of resized images `Blob`, width and height scales `Blob` and new width and height `Blob`
-        (new width and height `Blob` will be None when keep_aspect_ratio is false).
-        If deprecated params are used, a single resized images `Blob` will be returned.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        from typing import Tuple
-
-
-        @flow.global_function(type="predict")
-        def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]:
-            batch_size = 16
-            color_space = "RGB"
-            # our ofrecord file path is "./dataset/part-0"
-            ofrecord = flow.data.ofrecord_reader(
-                "./imgdataset",
-                batch_size=batch_size,
-                data_part_num=1,
-                part_name_suffix_length=-1,
-                part_name_prefix='part-',
-                random_shuffle=True,
-                shuffle_after_epoch=True,
-            )
-            image = flow.data.OFRecordImageDecoderRandomCrop(
-                    ofrecord, "encoded", color_space=color_space
-                )
-            res_image, scale, new_size = flow.image.Resize(
-                    image, target_size=(224, 224)
-                )
-            label = flow.data.OFRecordRawDecoder(
-                ofrecord, "class/label", shape=(1, ), dtype=flow.int32
-            )
-
-            return res_image, label
-
-        if __name__ == "__main__":
-            images, labels = ofrecord_reader_job()
-            # image.shape (16, 224, 224, 3)
-
-    """
-    # process deprecated params
-    deprecated_param_used = False
-    if color_space is not None:
-        print("WARNING: color_space has been deprecated. Please use channels instead.")
-        print(traceback.format_stack()[-2])
-        deprecated_param_used = True
-        assert isinstance(color_space, str)
-        if color_space.upper() == "RGB" or color_space.upper() == "BGR":
-            channels = 3
-        elif color_space.upper() == "GRAY":
-            channels = 1
-        else:
-            raise ValueError("invalid color_space")
-
-    if interp_type is not None:
-        print(
-            "WARNING: interp_type has been deprecated. Please use interpolation_type instead."
-        )
-        print(traceback.format_stack()[-2])
-        deprecated_param_used = True
-        assert isinstance(interp_type, str)
-        if interp_type == "Linear":
-            interpolation_type = "bilinear"
-        elif interp_type == "NN":
-            interpolation_type = "nearest_neighbor"
-        elif interp_type == "Cubic":
-            interpolation_type = "bicubic"
-        else:
-            raise ValueError("invalid interp_type")
-
-    if resize_x > 0 and resize_y > 0:
-        print(
-            "WARNING: resize_x and resize_y has been deprecated. Please use target_size instead."
-        )
-        print(traceback.format_stack()[-2])
-        deprecated_param_used = True
-        target_size = (resize_x, resize_y)
-        keep_aspect_ratio = False
-
-    if resize_shorter > 0:
-        print(
-            "WARNING: resize_shorter has been deprecated. Please use target_size instead."
-        )
-        print(traceback.format_stack()[-2])
-        deprecated_param_used = True
-        target_size = resize_shorter
-        keep_aspect_ratio = True
-        resize_side = "shorter"
-
-    if name is None:
-        name = id_util.UniqueStr("ImageResize_")
-
-    if keep_aspect_ratio:
-        if not isinstance(target_size, int):
-            raise ValueError(
-                "target_size must be an int when keep_aspect_ratio is True"
-            )
-
-        if min_size is None:
-            min_size = 0
-
-        if max_size is None:
-            max_size = 0
-
-        if resize_side == "shorter":
-            resize_longer = False
-        elif resize_side == "longer":
-            resize_longer = True
-        else:
-            raise ValueError('resize_side must be "shorter" or "longer"')
-
-        op = (
-            flow.user_op_builder(name)
-            .Op("image_resize_keep_aspect_ratio")
-            .Input("in", [image])
-            .Output("out")
-            .Output("size")
-            .Output("scale")
-            .Attr("target_size", target_size)
-            .Attr("min_size", min_size)
-            .Attr("max_size", max_size)
-            .Attr("resize_longer", resize_longer)
-            .Attr("interpolation_type", interpolation_type)
-            .Build()
-        )
-        res_image, new_size, scale = op.InferAndTryRun().RemoteBlobList()
-        scale = flow.tensor_buffer_to_tensor(
-            scale, dtype=flow.float32, instance_shape=(2,)
-        )
-        new_size = flow.tensor_buffer_to_tensor(
-            new_size, dtype=flow.int32, instance_shape=(2,)
-        )
-
-    else:
-        if (
-            not isinstance(target_size, (list, tuple))
-            or len(target_size) != 2
-            or not all(isinstance(size, int) for size in target_size)
-        ):
-            raise ValueError(
-                "target_size must be a form like (width, height) when keep_aspect_ratio is False"
-            )
-
-        if dtype is None:
-            dtype = flow.uint8
-
-        target_w, target_h = target_size
-        op = (
-            flow.user_op_builder(name)
-            .Op("image_resize_to_fixed")
-            .Input("in", [image])
-            .Output("out")
-            .Output("scale")
-            .Attr("target_width", target_w)
-            .Attr("target_height", target_h)
-            .Attr("channels", channels)
-            .Attr("data_type", dtype)
-            .Attr("interpolation_type", interpolation_type)
-            .Build()
-        )
-        res_image, scale = op.InferAndTryRun().RemoteBlobList()
-        new_size = None
-
-    if deprecated_param_used:
-        return res_image
-
-    return res_image, scale, new_size
-
-
-@oneflow_export("image.target_resize", "image_target_resize")
-def api_image_target_resize(
-    images: oneflow._oneflow_internal.BlobDesc,
-    target_size: int,
-    min_size: Optional[int] = None,
-    max_size: Optional[int] = None,
-    resize_side: str = "shorter",
-    interpolation_type: str = "auto",
-    name: Optional[str] = None,
-) -> Sequence[oneflow._oneflow_internal.BlobDesc]:
-    """This operator resizes image to target size.
-
-    Args:
-        images (oneflow._oneflow_internal.BlobDesc): The input Blob. Its type should be `kTensorBuffer`. For more details, please refer to the code example.
-        target_size (int): An int, the target size.
-        min_size (Optional[int], optional): If `min_size` is not None, the shorter side must be greater than or equal to `min_size`. Defaults to None.
-        max_size (Optional[int], optional): If `max_size` is not None, the longer side must be less than or equal to `max_size`. Defaults to None.
-        resize_side (str, optional): A str of "longer" or "shorter". If `resize_side` is "longer", the longer side of the image will be resized to `target_size`. If `resize_side` is "shorter", the shorter side of the image will be resized to `target_size`. Defaults to "shorter".
-        interpolation_type (str, optional): A str of "auto", "bilinear", "nearest_neighbor", "bicubic" or "area". Indicate interpolation method used to resize image. Defaults to "auto".
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        Sequence[oneflow._oneflow_internal.BlobDesc]: A Sequence containing the resized images Blob, the new size Blob, and the scale Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        from typing import Tuple
-        import numpy as np
-        import cv2
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return [np.expand_dims(image, axis=0) for image in images]
-
-
-        def _get_images_static_shape(images):
-            image_shapes = [image.shape for image in images]
-            image_static_shape = np.amax(image_shapes, axis=0)
-            assert isinstance(
-                image_static_shape, np.ndarray
-            ), "image_shapes: {}, image_static_shape: {}".format(
-                str(image_shapes), str(image_static_shape)
-            )
-            image_static_shape = image_static_shape.tolist()
-            assert image_static_shape[0] == 1, str(image_static_shape)
-            image_static_shape[0] = len(image_shapes)
-            return image_static_shape
-
-        def _of_image_target_resize(images, image_static_shape, target_size, max_size):
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def image_target_resize_job(images_def: tp.ListListNumpy.Placeholder(shape=image_static_shape, dtype=flow.float)
-            ) -> Tuple[tp.ListListNumpy, tp.ListNumpy, tp.ListNumpy]:
-                # The input Blob type should be "kTensorBuffer"
-                # So we use oneflow.tensor_list_to_tensor_buffer to convert
-                images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
-
-                resized_images_buffer, size, scale = flow.image_target_resize(
-                    images_buffer,
-                    target_size=target_size,
-                    max_size=max_size,
-                    resize_side="shorter",
-                )
-                # We convert back to "tensorlist" type
-                resized_images = flow.tensor_buffer_to_tensor_list(
-                    resized_images_buffer,
-                    shape=(target_size, max_size, image_static_shape[-1]),
-                    dtype=flow.float,
-                )
-                return resized_images, size, scale
-
-            resized_images, size, scale = image_target_resize_job([images])
-            size = size[0]
-            scale = scale[0]
-
-            return resized_images, size, scale
-
-        if __name__ == "__main__":
-            img = _read_images_by_cv(['./img/1.jpg'])
-            img_shape = _get_images_static_shape(img) # In example is [1, 349, 367, 3]
-            target_size = 256
-            max_size = 512
-            resized_images, size, scale = _of_image_target_resize(img, tuple(img_shape), target_size, max_size)
-            # Here the shorter side is "349", we resize it to target_size(256)
-            # The scale is 256 / 349 = 0.73
-            # The longer side will be resized to 367 * scale = 269
-            # get the first element from the resized_images (its type is `list.list`)
-            print(resized_images[0][0].shape) # (1, 256, 269, 3)
-
-    """
-    if name is None:
-        name = id_util.UniqueStr("ImageTargetResize_")
-
-    res_image, scale, new_size = api_image_resize(
-        images,
-        target_size=target_size,
-        min_size=min_size,
-        max_size=max_size,
-        keep_aspect_ratio=True,
-        resize_side=resize_side,
-        interpolation_type=interpolation_type,
-        name=name,
-    )
-    return res_image, new_size, scale
-
-
-@oneflow_export("image.CropMirrorNormalize", "image.crop_mirror_normalize")
-def CropMirrorNormalize(
-    input_blob: oneflow._oneflow_internal.BlobDesc,
-    mirror_blob: Optional[oneflow._oneflow_internal.BlobDesc] = None,
-    color_space: str = "BGR",
-    output_layout: str = "NCHW",
-    crop_h: int = 0,
-    crop_w: int = 0,
-    crop_pos_y: float = 0.5,
-    crop_pos_x: float = 0.5,
-    mean: Sequence[float] = [0.0],
-    std: Sequence[float] = [1.0],
-    output_dtype: flow.dtype = flow.float,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator performs the cropping, normalization, and horizontal flip for input Blob.
-
-    If `crop_h` and `crop_w` are provided, the image cropping position is specified by "crop_pos_y" and "crop_pos_x".
-
-    The position is computed as follows:
-
-    .. math::
-
-        & crop\_x = crop\_pos\_x*(Width-crop\_w)
-
-        & crop\_y = crop\_pos\_y*(Height-crop\_h)
-
-    The `Width` and `Height` are the width and height of the input Blob.
-
-    Args:
-        input_blob (oneflow._oneflow_internal.BlobDesc): The input Blob.
-        mirror_blob (Optional[oneflow._oneflow_internal.BlobDesc], optional): The operation for horizontal flip, if it is `None`, the operator will not perform the horizontal flip. Defaults to None.
-        color_space (str, optional): The color space for input Blob. Defaults to "BGR".
-        output_layout (str, optional): The output format. Defaults to "NCHW".
-        crop_h (int, optional): The image cropping window height. Defaults to 0.
-        crop_w (int, optional): The image cropping window width. Defaults to 0.
-        crop_pos_y (float, optional): The vertical position of the image cropping window, the value range is normalized to (0.0, 1.0). Defaults to 0.5.
-        crop_pos_x (float, optional): The horizontal position of the image cropping window, the value range is normalized to (0.0, 1.0). Defaults to 0.5.
-        mean (Sequence[float], optional): The mean value for normalization. Defaults to [0.0].
-        std (Sequence[float], optional): The standard deviation values for normalization. Defaults to [1.0].
-        output_dtype (flow.dtype, optional): The datatype of output Blob. Defaults to flow.float.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Raises:
-        NotImplementedError: The data type of input Blob should be `tensor_buffer` or `uint8`
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        from typing import Tuple
-
-
-        @flow.global_function(type="predict")
-        def crop_mirror_job() -> Tuple[tp.Numpy, tp.Numpy]:
-            batch_size = 1
-            color_space = "RGB"
-            # our ofrecord file path is "./dataset/part-0"
-            ofrecord = flow.data.ofrecord_reader(
-                "./imgdataset",
-                batch_size=batch_size,
-                data_part_num=1,
-                part_name_suffix_length=-1,
-                part_name_prefix='part-',
-                shuffle_after_epoch=True,
-            )
-            image = flow.data.OFRecordImageDecoder(
-                    ofrecord, "encoded", color_space=color_space
-                )
-            res_image, scale, new_size = flow.image.Resize(
-                    image, target_size=(512, 512)
-                )
-            label = flow.data.OFRecordRawDecoder(
-                ofrecord, "class/label", shape=(1, ), dtype=flow.int32
-            )
-            rng = flow.random.CoinFlip(batch_size=batch_size)
-            normal = flow.image.CropMirrorNormalize(
-                    res_image,
-                    mirror_blob=rng,
-                    color_space=color_space,
-                    crop_h= 256,
-                    crop_w= 256,
-                    crop_pos_y=0.5,
-                    crop_pos_x=0.5,
-                    mean=[123.68, 116.779, 103.939],
-                    std=[58.393, 57.12, 57.375],
-                    output_dtype=flow.float,
-                )
-
-            return normal, label
-
-        if __name__ == "__main__":
-            images, labels = crop_mirror_job()
-            # images.shape (1, 3, 256, 256)
-
-    """
-    if name is None:
-        name = id_util.UniqueStr("CropMirrorNormalize_")
-    op_type_name = ""
-    if input_blob.dtype is flow.tensor_buffer:
-        op_type_name = "crop_mirror_normalize_from_tensorbuffer"
-    elif input_blob.dtype is flow.uint8:
-        op_type_name = "crop_mirror_normalize_from_uint8"
-    else:
-        print(
-            "ERROR! oneflow.data.crop_mirror_normalize op does NOT support input data type:",
-            input_blob.dtype,
-        )
-        raise NotImplementedError
-
-    op = flow.user_op_builder(name).Op(op_type_name).Input("in", [input_blob])
-    if mirror_blob is not None:
-        op = op.Input("mirror", [mirror_blob])
-    return (
-        op.Output("out")
-        .Attr("color_space", color_space)
-        .Attr("output_layout", output_layout)
-        .Attr("mean", mean)
-        .Attr("std", std)
-        .Attr("crop_h", crop_h)
-        .Attr("crop_w", crop_w)
-        .Attr("crop_pos_y", crop_pos_y)
-        .Attr("crop_pos_x", crop_pos_x)
-        .Attr("output_dtype", output_dtype)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("image.random_crop", "image_random_crop")
-def api_image_random_crop(
-    input_blob: oneflow._oneflow_internal.BlobDesc,
-    num_attempts: int = 10,
-    seed: Optional[int] = None,
-    random_area: Sequence[float] = None,
-    random_aspect_ratio: Sequence[float] = None,
-    name: str = "ImageRandomCrop",
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator crops the input image randomly.
-
-    Args:
-        input_blob (oneflow._oneflow_internal.BlobDesc): The input Blob.
-        num_attempts (int, optional): The maximum number of random cropping attempts. Defaults to 10.
-        seed (Optional[int], optional): The random seed. Defaults to None.
-        random_area (Sequence[float], optional): The range of the random cropping area (as a fraction of the original area). Defaults to None, which is treated as [0.08, 1.0].
-        random_aspect_ratio (Sequence[float], optional): The range of the random aspect ratio. Defaults to None, which is treated as [0.75, 1.333333].
-        name (str, optional): The name for the operation. Defaults to "ImageRandomCrop".
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-        import cv2
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return [np.expand_dims(image, axis=0) for image in images]
-
-
-        def _get_images_static_shape(images):
-            image_shapes = [image.shape for image in images]
-            image_static_shape = np.amax(image_shapes, axis=0)
-            assert isinstance(
-                image_static_shape, np.ndarray
-            ), "image_shapes: {}, image_static_shape: {}".format(
-                str(image_shapes), str(image_static_shape)
-            )
-            image_static_shape = image_static_shape.tolist()
-            assert image_static_shape[0] == 1, str(image_static_shape)
-            image_static_shape[0] = len(image_shapes)
-            return image_static_shape
-
-        def _of_image_random_crop(images, image_static_shape):
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def image_random_crop_job(images_def: tp.ListListNumpy.Placeholder(shape=image_static_shape, dtype=flow.float)
-            ) -> tp.ListListNumpy:
-                # The input Blob type should be "kTensorBuffer"
-                # So we use oneflow.tensor_list_to_tensor_buffer to convert
-                images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
-                # Do the random crop
-                random_crop_buffer = flow.image.random_crop(
-                    images_buffer,
-                    random_area=[0.15, 0.80],
-                    random_aspect_ratio=[0.75, 1.55],
-                )
-                # We convert back to "tensorlist" type
-                random_crop_images = flow.tensor_buffer_to_tensor_list(
-                    random_crop_buffer,
-                    shape=(image_static_shape[1], image_static_shape[2], image_static_shape[-1]),
-                    dtype=flow.float,
-                )
-                return random_crop_images
-
-            random_crop_images = image_random_crop_job([images])
-
-            return random_crop_images
-
-        if __name__ == "__main__":
-            img = _read_images_by_cv(['./img/1.jpg'])
-            img_shape = _get_images_static_shape(img) # In example is (1, 234, 346, 3)
-            random_crop_images = _of_image_random_crop(img, tuple(img_shape))
-            # random_crop_images.shape is (234, 346, 3)
-
-    """
-    assert isinstance(name, str)
-    if seed is not None:
-        assert name is not None
-    if random_area is None:
-        random_area = [0.08, 1.0]
-    if random_aspect_ratio is None:
-        random_aspect_ratio = [0.75, 1.333333]
-    module = flow.find_or_create_module(
-        name,
-        lambda: ImageRandomCropModule(
-            num_attempts=num_attempts,
-            random_seed=seed,
-            random_area=random_area,
-            random_aspect_ratio=random_aspect_ratio,
-            name=name,
-        ),
-    )
-    return module(input_blob)
-
-
-class ImageRandomCropModule(module_util.Module):
-    def __init__(
-        self,
-        num_attempts: int,
-        random_seed: Optional[int],
-        random_area: Sequence[float],
-        random_aspect_ratio: Sequence[float],
-        name: str,
-    ):
-        module_util.Module.__init__(self, name)
-        seed, has_seed = flow.random.gen_seed(random_seed)
-        self.op_module_builder = (
-            flow.user_op_module_builder("image_random_crop")
-            .InputSize("in", 1)
-            .Output("out")
-            .Attr("num_attempts", num_attempts)
-            .Attr("random_area", random_area)
-            .Attr("random_aspect_ratio", random_aspect_ratio)
-            .Attr("has_seed", has_seed)
-            .Attr("seed", seed)
-            .CheckAndComplete()
-        )
-        self.op_module_builder.user_op_module.InitOpKernel()
-
-    def forward(self, input: oneflow._oneflow_internal.BlobDesc):
-        if self.call_seq_no == 0:
-            name = self.module_name
-        else:
-            name = id_util.UniqueStr("ImageRandomCrop_")
-
-        return (
-            self.op_module_builder.OpName(name)
-            .Input("in", [input])
-            .Build()
-            .InferAndTryRun()
-            .SoleOutputBlob()
-        )
-
-
-@oneflow_export("random.CoinFlip", "random.coin_flip")
-def api_coin_flip(
-    batch_size: int = 1,
-    seed: Optional[int] = None,
-    probability: float = 0.5,
-    name: str = "CoinFlip",
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator performs the horizontal flip.
-
-    Args:
-        batch_size (int, optional): The batch size. Defaults to 1.
-        seed (Optional[int], optional): The random seed. Defaults to None.
-        probability (float, optional): The flip probability. Defaults to 0.5.
-        name (str, optional): The name for the operation. Defaults to "CoinFlip".
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: A Blob of random 0/1 values with shape (batch_size,).
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        from typing import Tuple
-
-
-        @flow.global_function(type="predict")
-        def coin_flip_job() -> Tuple[tp.Numpy, tp.Numpy]:
-            batch_size = 1
-            color_space = "RGB"
-            # our ofrecord file path is "./dataset/part-0"
-            ofrecord = flow.data.ofrecord_reader(
-                "./imgdataset",
-                batch_size=batch_size,
-                data_part_num=1,
-                part_name_suffix_length=-1,
-                part_name_prefix='part-',
-                shuffle_after_epoch=True,
-            )
-            image = flow.data.OFRecordImageDecoder(
-                    ofrecord, "encoded", color_space=color_space
-                )
-            res_image, scale, new_size = flow.image.Resize(
-                    image, target_size=(512, 512)
-                )
-            label = flow.data.OFRecordRawDecoder(
-                ofrecord, "class/label", shape=(1, ), dtype=flow.int32
-            )
-            coin_flip = flow.random.CoinFlip(
-                batch_size=batch_size,
-                probability=0.8
-            )
-            normal = flow.image.CropMirrorNormalize(
-                    res_image,
-                    mirror_blob=coin_flip,
-                    color_space=color_space,
-                    crop_h= 256,
-                    crop_w= 256,
-                    crop_pos_y=0.5,
-                    crop_pos_x=0.5,
-                    mean=[123.68, 116.779, 103.939],
-                    std=[58.393, 57.12, 57.375],
-                    output_dtype=flow.float,
-                )
-
-            return normal, label
-
-        if __name__ == "__main__":
-            images, labels = coin_flip_job()
-
-    """
-    assert isinstance(name, str)
-    if seed is not None:
-        assert name is not None
-    module = flow.find_or_create_module(
-        name,
-        lambda: CoinFlipModule(
-            batch_size=batch_size, probability=probability, random_seed=seed, name=name,
-        ),
-    )
-    return module()
-
-
-class CoinFlipModule(module_util.Module):
-    def __init__(
-        self,
-        batch_size: int,
-        probability: float,
-        random_seed: Optional[int],
-        name: str,
-    ):
-        module_util.Module.__init__(self, name)
-        seed, has_seed = flow.random.gen_seed(random_seed)
-        self.op_module_builder = (
-            flow.user_op_module_builder("coin_flip")
-            .Output("out")
-            .Attr("batch_size", batch_size)
-            .Attr("probability", probability)
-            .Attr("has_seed", has_seed)
-            .Attr("seed", seed)
-            .CheckAndComplete()
-        )
-        self.op_module_builder.user_op_module.InitOpKernel()
-
-    def forward(self):
-        if self.call_seq_no == 0:
-            name = self.module_name
-        else:
-            name = id_util.UniqueStr("CoinFlip_")
-
-        return (
-            self.op_module_builder.OpName(name)
-            .Build()
-            .InferAndTryRun()
-            .SoleOutputBlob()
-        )
-
-
-@oneflow_export("image.decode", "image_decode")
-def image_decode(
-    images_bytes_buffer: oneflow._oneflow_internal.BlobDesc,
-    dtype: flow.dtype = flow.uint8,
-    color_space: str = "BGR",
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator decode the image.
-
-    Args:
-        images_bytes_buffer (oneflow._oneflow_internal.BlobDesc): The input Blob. Its type should be `kTensorBuffer`. For more details, please refer to the code example.
-        dtype (flow.dtype, optional): The data type. Defaults to flow.uint8.
-        color_space (str, optional): The color space. Defaults to "BGR".
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The decoded image list.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-        from PIL import Image
-
-
-        def _of_image_decode(images):
-            image_files = [open(im, "rb") for im in images]
-            images_bytes = [imf.read() for imf in image_files]
-            static_shape = (len(images_bytes), max([len(bys) for bys in images_bytes]))
-            for imf in image_files:
-                imf.close()
-
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def image_decode_job(
-                images_def: tp.ListListNumpy.Placeholder(shape=static_shape, dtype=flow.int8)
-            )->tp.ListListNumpy:
-                # convert to tensor buffer
-                images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
-                decoded_images_buffer = flow.image_decode(images_buffer)
-                # Remember to set a shape
-                # convert back to tensor list
-                return flow.tensor_buffer_to_tensor_list(
-                    decoded_images_buffer, shape=(640, 640, 3), dtype=flow.uint8
-                )
-
-            images_np_arr = [
-                np.frombuffer(bys, dtype=np.byte).reshape(1, -1) for bys in images_bytes
-            ]
-            decoded_images = image_decode_job([images_np_arr])
-            return decoded_images[0]
-
-
-        if __name__ == "__main__":
-            img = _of_image_decode(['./img/1.jpg'])
-            print(img[0].shape) # Our image shape is (1, 349, 367, 3)
-
-    """
-    # TODO: check that color_space is valid
-    if name is None:
-        name = id_util.UniqueStr("ImageDecode_")
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("image_decode")
-        .Input("in", [images_bytes_buffer])
-        .Output("out")
-        .Attr("color_space", color_space)
-        .Attr("data_type", dtype)
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("image.batch_align", "image_batch_align")
-def image_batch_align(
-    images: oneflow._oneflow_internal.BlobDesc,
-    shape: Sequence[int],
-    dtype: flow.dtype,
-    alignment: int,
-    dynamic_out: bool = True,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    r"""This operator aligns the shape for a batch of images.
-
-    The aligned shape is computed as:
-
-    .. math::
-
-        & shape_{width} = int(\frac{(shape_{width}+alignment-1)}{alignment})*alignment
-
-        & shape_{height} = int(\frac{(shape_{height}+alignment-1)}{alignment})*alignment
-
-    Args:
-        images (oneflow._oneflow_internal.BlobDesc): The images.
-        shape (Sequence[int]): The maximum static shape of input images.
-        dtype (flow.dtype): The data type.
-        alignment (int): The align factor.
-        dynamic_out (bool, optional): Whether the output Blob has a dynamic shape. Defaults to True.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import cv2
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        def _of_image_batch_align(images, input_shape, output_shape, alignment):
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def image_batch_align_job(
-                images_def: tp.ListListNumpy.Placeholder(shape=input_shape, dtype=flow.float)
-            ) -> tp.ListNumpy:
-                # Convert to tensor buffer
-                images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
-                image = flow.image_batch_align(
-                    images_buffer, shape=output_shape[1:], dtype=flow.float, alignment=alignment
-                )
-                return image
-
-            image = image_batch_align_job([images])
-            return image[0]
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return [np.expand_dims(image, axis=0) for image in images]
-
-
-        def _get_images_static_shape(images):
-            image_shapes = [image.shape for image in images]
-            image_static_shape = np.amax(image_shapes, axis=0)
-            assert isinstance(
-                image_static_shape, np.ndarray
-            ), "image_shapes: {}, image_static_shape: {}".format(
-                str(image_shapes), str(image_static_shape)
-            )
-            image_static_shape = image_static_shape.tolist()
-            assert image_static_shape[0] == 1, str(image_static_shape)
-            image_static_shape[0] = len(image_shapes)
-            return image_static_shape
-
-        def _roundup(x, n):
-            # compute the aligned shape
-            return int((x + n - 1) / n) * n
-
-        if __name__ == "__main__":
-            img = _read_images_by_cv(['./img/1.jpg', './img/2.jpg', './img/3.jpg'])
-            img_shape = _get_images_static_shape(img) # In example is [3, 349, 367, 3]
-            alignment = 16 # alignment factor
-            aligned_image_shape = [
-                img_shape[0],
-                _roundup(img_shape[1], alignment),
-                _roundup(img_shape[2], alignment),
-                img_shape[3],
-            ]
-            image = _of_image_batch_align(img, tuple(img_shape), aligned_image_shape, alignment)
-
-    """
-    if name is None:
-        name = id_util.UniqueStr("ImageBatchAlign_")
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("image_batch_align")
-        .Input("in", [images])
-        .Output("out")
-        .Attr("shape", shape)
-        .Attr("data_type", dtype)
-        .Attr("alignment", alignment)
-        .Attr("dynamic_out", dynamic_out)
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("image.normalize", "image_normalize")
-def image_normalize(
-    image: oneflow._oneflow_internal.BlobDesc,
-    std: Sequence[float],
-    mean: Sequence[float],
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator normalizes the image.
-
-    Args:
-        image (oneflow._oneflow_internal.BlobDesc): The input image.
-        std (Sequence[float]): The standard deviation of the images.
-        mean (Sequence[float]): The mean value of the images.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import cv2
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        def _of_image_normalize(images, image_shape, std, mean):
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def image_normalize_job(
-                images_def: tp.ListListNumpy.Placeholder(shape=image_shape, dtype=flow.float)
-            ) -> tp.ListListNumpy:
-                # Convert to tensor buffer
-                images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
-                # Normalize the images
-                norm_images = flow.image_normalize(images_buffer, std, mean)
-                # Convert back to tensor list
-                return flow.tensor_buffer_to_tensor_list(
-                    norm_images, shape=image_shape[1:], dtype=flow.float
-                )
-
-            image_tensor = image_normalize_job([images])
-            return image_tensor[0]
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return [np.expand_dims(image, axis=0) for image in images]
-
-
-        def _get_images_static_shape(images):
-            image_shapes = [image.shape for image in images]
-            image_static_shape = np.amax(image_shapes, axis=0)
-            assert isinstance(
-                image_static_shape, np.ndarray
-            ), "image_shapes: {}, image_static_shape: {}".format(
-                str(image_shapes), str(image_static_shape)
-            )
-            image_static_shape = image_static_shape.tolist()
-            assert image_static_shape[0] == 1, str(image_static_shape)
-            image_static_shape[0] = len(image_shapes)
-            return image_static_shape
-
-        if __name__ == "__main__":
-            img = _read_images_by_cv(['./img/1.jpg', './img/2.jpg', './img/3.jpg'])
-            img_shape = _get_images_static_shape(img) # In example is [3, 349, 367, 3]
-            image = _of_image_normalize(img,
-                                        tuple(img_shape),
-                                        std=(102.9801, 115.9465, 122.7717),
-                                        mean=(1.0, 1.0, 1.0))
-
-    """
-    if name is None:
-        name = id_util.UniqueStr("ImageNormalize_")
-
-    assert isinstance(std, (list, tuple))
-    assert isinstance(mean, (list, tuple))
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("image_normalize")
-        .Input("in", [image])
-        .Output("out")
-        .Attr("std", std)
-        .Attr("mean", mean)
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("image.flip", "image_flip")
-def image_flip(
-    image: oneflow._oneflow_internal.BlobDesc,
-    flip_code: Union[int, oneflow._oneflow_internal.BlobDesc],
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator flips the images.
-
-    The flip code corresponds to the different flip mode:
-
-    0 (0x00): Non Flip
-
-    1 (0x01): Horizontal Flip
-
-    16 (0x10): Vertical Flip
-
-    17 (0x11): Both Horizontal and Vertical Flip
-
-    Args:
-        image (oneflow._oneflow_internal.BlobDesc): The input images.
-        flip_code (Union[int, oneflow._oneflow_internal.BlobDesc]): The flip code.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import cv2
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        def _of_image_flip(images, image_shape, flip_code):
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def image_flip_job(
-                images_def: tp.ListListNumpy.Placeholder(shape=image_shape, dtype=flow.float)
-            ) -> tp.ListListNumpy:
-                images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
-                flip_images = flow.image_flip(images_buffer, flip_code)
-                return flow.tensor_buffer_to_tensor_list(
-                    flip_images, shape=image_shape[1:], dtype=flow.float
-                )
-
-            image_tensor = image_flip_job([images])
-            return image_tensor[0]
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return [np.expand_dims(image, axis=0) for image in images]
-
-
-        def _get_images_static_shape(images):
-            image_shapes = [image.shape for image in images]
-            image_static_shape = np.amax(image_shapes, axis=0)
-            assert isinstance(
-                image_static_shape, np.ndarray
-            ), "image_shapes: {}, image_static_shape: {}".format(
-                str(image_shapes), str(image_static_shape)
-            )
-            image_static_shape = image_static_shape.tolist()
-            assert image_static_shape[0] == 1, str(image_static_shape)
-            image_static_shape[0] = len(image_shapes)
-            return image_static_shape
-
-        if __name__ == "__main__":
-            img = _read_images_by_cv(['./img/1.jpg', './img/2.jpg', './img/3.jpg'])
-            img_shape = _get_images_static_shape(img) # In example is [3, 349, 367, 3]
-            image = _of_image_flip(img,
-                           tuple(img_shape),
-                           flip_code=1)
-
-    """
-    assert isinstance(image, oneflow._oneflow_internal.BlobDesc)
-
-    if name is None:
-        name = id_util.UniqueStr("ImageFlip_")
-
-    if not isinstance(flip_code, oneflow._oneflow_internal.BlobDesc):
-        assert isinstance(flip_code, int)
-        flip_code = flow.constant(
-            flip_code,
-            shape=(image.shape[0],),
-            dtype=flow.int8,
-            name="{}_FlipCode_".format(name),
-        )
-    else:
-        assert image.shape[0] == flip_code.shape[0]
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("image_flip")
-        .Input("in", [image])
-        .Input("flip_code", [flip_code])
-        .Output("out")
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("detection.object_bbox_flip", "object_bbox_flip")
-def object_bbox_flip(
-    bbox: oneflow._oneflow_internal.BlobDesc,
-    image_size: oneflow._oneflow_internal.BlobDesc,
-    flip_code: Union[int, oneflow._oneflow_internal.BlobDesc],
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator flips the object bounding box.
-
-    The flip code corresponds to the different flip mode:
-
-    0 (0x00): Non Flip
-
-    1 (0x01): Horizontal Flip
-
-    16 (0x10): Vertical Flip
-
-    17 (0x11): Both Horizontal and Vertical Flip
-
-    Args:
-        bbox (oneflow._oneflow_internal.BlobDesc): The bounding box.
-        image_size (oneflow._oneflow_internal.BlobDesc): The size of input image.
-        flip_code (Union[int, oneflow._oneflow_internal.BlobDesc]): The flip code.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        def _of_object_bbox_flip(bbox_list, image_size, flip_code):
-            bbox_shape = _get_bbox_static_shape(bbox_list)
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def object_bbox_flip_job(
-                bbox_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(bbox_shape), dtype=flow.float
-                ),
-                image_size_def: tp.ListNumpy.Placeholder(
-                    shape=image_size.shape, dtype=flow.int32
-                ),
-            ) -> tp.ListListNumpy:
-                bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
-                flip_bbox = flow.object_bbox_flip(bbox_buffer, image_size_def, flip_code)
-                return flow.tensor_buffer_to_tensor_list(
-                    flip_bbox, shape=bbox_shape[1:], dtype=flow.float
-                )
-
-            input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
-            bbox_tensor = object_bbox_flip_job([input_bbox_list], [image_size])
-            return bbox_tensor[0]
-
-
-        def _get_bbox_static_shape(bbox_list):
-            bbox_shapes = [bbox.shape for bbox in bbox_list]
-            bbox_static_shape = np.amax(bbox_shapes, axis=0)
-            assert isinstance(
-                bbox_static_shape, np.ndarray
-            ), "bbox_shapes: {}, bbox_static_shape: {}".format(
-                str(bbox_shapes), str(bbox_static_shape)
-            )
-            bbox_static_shape = bbox_static_shape.tolist()
-            bbox_static_shape.insert(0, len(bbox_list))
-            return bbox_static_shape
-
-        if __name__ == "__main__":
-            bbox = np.array([[[20.0, 40.0, 80.0, 160.0],
-                            [30.0, 50.0, 70.0, 100.0]]]).astype(np.single) # [x1, y1, x2, y2]
-            image_size = np.array([[480, 620]]).astype(np.int32)
-            bbox_flip =  _of_object_bbox_flip(bbox,
-                                            image_size,
-                                            flip_code=1) # Horizontal Flip
-            print(bbox_flip[0][0])
-
-            # [[399.  40. 459. 160.]
-            #  [409.  50. 449. 100.]]
-    """
-    assert isinstance(bbox, oneflow._oneflow_internal.BlobDesc)
-    assert isinstance(image_size, oneflow._oneflow_internal.BlobDesc)
-    assert bbox.shape[0] == image_size.shape[0]
-
-    if name is None:
-        name = id_util.UniqueStr("ObjectBboxFlip_")
-
-    if not isinstance(flip_code, oneflow._oneflow_internal.BlobDesc):
-        assert isinstance(flip_code, int)
-        flip_code = flow.constant(
-            flip_code,
-            shape=(bbox.shape[0],),
-            dtype=flow.int8,
-            name="{}_FlipCode".format(name),
-        )
-    else:
-        assert bbox.shape[0] == flip_code.shape[0]
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("object_bbox_flip")
-        .Input("bbox", [bbox])
-        .Input("image_size", [image_size])
-        .Input("flip_code", [flip_code])
-        .Output("out")
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("detection.object_bbox_scale", "object_bbox_scale")
-def object_bbox_scale(
-    bbox: oneflow._oneflow_internal.BlobDesc,
-    scale: oneflow._oneflow_internal.BlobDesc,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator scales the input image and the corresponding bounding box. It returns the scaled bounding box.
-
-    Args:
-        bbox (oneflow._oneflow_internal.BlobDesc): The bounding box.
-        scale (oneflow._oneflow_internal.BlobDesc): The scale factor.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-        import cv2
-        from typing import Tuple
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return images
-
-
-        def _get_images_static_shape(images):
-            image_shapes = [image.shape for image in images]
-            image_static_shape = np.amax(image_shapes, axis=0)
-            assert isinstance(
-                image_static_shape, np.ndarray
-            ), "image_shapes: {}, image_static_shape: {}".format(
-                str(image_shapes), str(image_static_shape)
-            )
-            image_static_shape = image_static_shape.tolist()
-            image_static_shape.insert(0, len(image_shapes))
-            return image_static_shape
-
-
-        def _get_bbox_static_shape(bbox_list):
-            bbox_shapes = [bbox.shape for bbox in bbox_list]
-            bbox_static_shape = np.amax(bbox_shapes, axis=0)
-            assert isinstance(
-                bbox_static_shape, np.ndarray
-            ), "bbox_shapes: {}, bbox_static_shape: {}".format(
-                str(bbox_shapes), str(bbox_static_shape)
-            )
-            bbox_static_shape = bbox_static_shape.tolist()
-            bbox_static_shape.insert(0, len(bbox_list))
-            return bbox_static_shape
-
-
-        def _of_target_resize_bbox_scale(images, bbox_list, target_size, max_size):
-            image_shape = _get_images_static_shape(images)
-            bbox_shape = _get_bbox_static_shape(bbox_list)
-
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def target_resize_bbox_scale_job(
-                image_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(image_shape), dtype=flow.float
-                ),
-                bbox_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(bbox_shape), dtype=flow.float
-                ),
-            ) -> Tuple[tp.ListListNumpy, tp.ListNumpy]:
-                images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
-                resized_images_buffer, new_size, scale = flow.image_target_resize(
-                    images_buffer, target_size=target_size, max_size=max_size
-                )
-                bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
-                scaled_bbox = flow.object_bbox_scale(bbox_buffer, scale)
-                scaled_bbox_list = flow.tensor_buffer_to_tensor_list(
-                    scaled_bbox, shape=bbox_shape[1:], dtype=flow.float
-                )
-                return scaled_bbox_list, new_size
-
-            input_image_list = [np.expand_dims(image, axis=0) for image in images]
-            input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
-            output_bbox_list, output_image_size = target_resize_bbox_scale_job(
-                [input_image_list], [input_bbox_list]
-            )
-            return output_bbox_list[0], output_image_size[0]
-
-
-        if __name__ == "__main__":
-            images = _read_images_by_cv(['./img/1.jpg', './img/2.jpg'])
-            bbox = np.array([[[20.0, 40.0, 80.0, 160.0],
-                            [30.0, 50.0, 70.0, 100.0]],
-                            [[26.0, 40.0, 86.0, 160.0],
-                            [36.0, 56.0, 76.0, 106.0]]]).astype(np.single) # [x1, y1, x2, y2]
-            bbox, size = _of_target_resize_bbox_scale(images, bbox, 280, 350)
-            print(bbox[0])
-            print(bbox[1])
-
-            # [[[ 16.0218    32.09169   64.0872   128.36676 ]
-            #   [ 24.032698  40.114613  56.076298  80.229225]]]
-
-            # [[[ 24.186047  37.170418  80.       148.68167 ]
-            #   [ 33.488373  52.038586  70.69768   98.5016  ]]]
-
-    """
-    assert isinstance(bbox, oneflow._oneflow_internal.BlobDesc)
-    assert isinstance(scale, oneflow._oneflow_internal.BlobDesc)
-    assert bbox.shape[0] == scale.shape[0]
-
-    if name is None:
-        name = id_util.UniqueStr("ObjectBboxScale_")
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("object_bbox_scale")
-        .Input("bbox", [bbox])
-        .Input("scale", [scale])
-        .Output("out")
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export(
-    "detection.object_segmentation_polygon_flip", "object_segmentation_polygon_flip"
-)
-def object_segm_poly_flip(
-    poly: oneflow._oneflow_internal.BlobDesc,
-    image_size: oneflow._oneflow_internal.BlobDesc,
-    flip_code: Union[int, oneflow._oneflow_internal.BlobDesc],
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator flips the segmentation points in image.
-
-    The flip code corresponds to the different flip mode:
-
-    0 (0x00): Non Flip
-
-    1 (0x01): Horizontal Flip
-
-    16 (0x10): Vertical Flip
-
-    17 (0x11): Both Horizontal and Vertical Flip
-
-    Args:
-        poly (oneflow._oneflow_internal.BlobDesc): The poly segmentation points.
-        image_size (oneflow._oneflow_internal.BlobDesc): The image size.
-        flip_code (Union[int, oneflow._oneflow_internal.BlobDesc]): The flip code.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob
-
-    For example:
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-        import cv2
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return [np.expand_dims(image, axis=0) for image in images]
-
-
-        def _of_object_segm_poly_flip(poly_list, image_size, flip_code):
-            poly_shape = _get_segm_poly_static_shape(poly_list)
-
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def object_segm_poly_flip_job(
-                poly_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(poly_shape), dtype=flow.float
-                ),
-                image_size_def: tp.ListNumpy.Placeholder(
-                    shape=image_size.shape, dtype=flow.int32
-                ),
-            ) -> tp.ListListNumpy:
-                poly_buffer = flow.tensor_list_to_tensor_buffer(poly_def)
-                flip_poly = flow.object_segmentation_polygon_flip(
-                    poly_buffer, image_size_def, flip_code
-                )
-                return flow.tensor_buffer_to_tensor_list(
-                    flip_poly, shape=poly_shape[1:], dtype=flow.float
-                )
-
-            input_poly_list = [np.expand_dims(poly, axis=0) for poly in poly_list]
-            poly_tensor = object_segm_poly_flip_job([input_poly_list], [image_size])
-            return poly_tensor[0]
-
-
-        def _get_segm_poly_static_shape(poly_list):
-            poly_shapes = [poly.shape for poly in poly_list]
-            poly_static_shape = np.amax(poly_shapes, axis=0)
-            assert isinstance(
-                poly_static_shape, np.ndarray
-            ), "poly_shapes: {}, poly_static_shape: {}".format(
-                str(poly_shapes), str(poly_static_shape)
-            )
-            poly_static_shape = poly_static_shape.tolist()
-            poly_static_shape.insert(0, len(poly_list))
-            return poly_static_shape
-
-        if __name__ == "__main__":
-            segm_poly_list = []
-            segmentations = [[[20.0, 40.0], [80.0, 160.0], [100.0, 210.0]], # Image 1 segmentation point
-                            [[25.0, 45.0], [85.0, 165.0], [105.0, 215.0]]] # Image 2 segmentation point
-            for segmentation in segmentations:
-                polygon = []
-                for seg in segmentation:
-                    polygon.extend(seg)
-                poly_array = np.array(polygon, dtype=np.single).reshape(-1, 2) # Reshape it
-                segm_poly_list.append(poly_array)
-
-            image_size = np.array([[480, 620], # Image 1 size
-                                [640, 640]]).astype(np.int32) # Image 2 size
-            of_segm_poly_list = _of_object_segm_poly_flip(
-                segm_poly_list, image_size, flip_code=1
-            ) # Horizontal Flip
-            print(of_segm_poly_list[0])
-            print(of_segm_poly_list[1])
-
-            # of_segm_poly_list[0]
-            # [[[460.  40.]
-            #   [400. 160.]
-            #   [380. 210.]]]
-
-            # of_segm_poly_list[1]
-            # [[[615.  45.]
-            #   [555. 165.]
-            #   [535. 215.]]]
-
-    """
-    assert isinstance(poly, oneflow._oneflow_internal.BlobDesc)
-    assert isinstance(image_size, oneflow._oneflow_internal.BlobDesc)
-    assert poly.shape[0] == image_size.shape[0]
-
-    if name is None:
-        name = id_util.UniqueStr("ObjectSegmPolyFlip_")
-
-    if not isinstance(flip_code, oneflow._oneflow_internal.BlobDesc):
-        assert isinstance(flip_code, int)
-        flip_code = flow.constant(
-            flip_code,
-            shape=(poly.shape[0],),
-            dtype=flow.int8,
-            name="{}_FlipCode".format(name),
-        )
-    else:
-        assert poly.shape[0] == flip_code.shape[0]
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("object_segmentation_polygon_flip")
-        .Input("poly", [poly])
-        .Input("image_size", [image_size])
-        .Input("flip_code", [flip_code])
-        .Output("out")
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export(
-    "detection.object_segmentation_polygon_scale", "object_segmentation_polygon_scale"
-)
-def object_segm_poly_scale(
-    poly: oneflow._oneflow_internal.BlobDesc,
-    scale: oneflow._oneflow_internal.BlobDesc,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator scales the segmentation points in the images.
-
-    Args:
-        poly (oneflow._oneflow_internal.BlobDesc): The poly segmentation points.
-        scale (oneflow._oneflow_internal.BlobDesc): The image scale.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-        import cv2
-        from typing import Tuple
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return images
-
-
-        def _get_images_static_shape(images):
-            image_shapes = [image.shape for image in images]
-            image_static_shape = np.amax(image_shapes, axis=0)
-            assert isinstance(
-                image_static_shape, np.ndarray
-            ), "image_shapes: {}, image_static_shape: {}".format(
-                str(image_shapes), str(image_static_shape)
-            )
-            image_static_shape = image_static_shape.tolist()
-            image_static_shape.insert(0, len(image_shapes))
-            return image_static_shape
-
-
-        def _get_segm_poly_static_shape(poly_list):
-            poly_shapes = [poly.shape for poly in poly_list]
-            poly_static_shape = np.amax(poly_shapes, axis=0)
-            assert isinstance(
-                poly_static_shape, np.ndarray
-            ), "poly_shapes: {}, poly_static_shape: {}".format(
-                str(poly_shapes), str(poly_static_shape)
-            )
-            poly_static_shape = poly_static_shape.tolist()
-            poly_static_shape.insert(0, len(poly_list))
-            return poly_static_shape
-
-
-        def _get_bbox_static_shape(bbox_list):
-            bbox_shapes = [bbox.shape for bbox in bbox_list]
-            bbox_static_shape = np.amax(bbox_shapes, axis=0)
-            assert isinstance(
-                bbox_static_shape, np.ndarray
-            ), "bbox_shapes: {}, bbox_static_shape: {}".format(
-                str(bbox_shapes), str(bbox_static_shape)
-            )
-            bbox_static_shape = bbox_static_shape.tolist()
-            bbox_static_shape.insert(0, len(bbox_list))
-            return bbox_static_shape
-
-
-        def _of_object_segm_poly_scale(images, poly_list, target_size, max_size):
-            image_shape = _get_images_static_shape(images)
-            print(image_shape)
-            poly_shape = _get_segm_poly_static_shape(poly_list)
-            print("Poly shape is ", poly_shape)
-            func_config = flow.FunctionConfig()
-            func_config.default_data_type(flow.float)
-            func_config.default_logical_view(flow.scope.mirrored_view())
-
-            @flow.global_function(function_config=func_config)
-            def object_segm_poly_scale_job(
-                image_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(image_shape), dtype=flow.float
-                ),
-                poly_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(poly_shape), dtype=flow.float
-                ),
-            ) -> Tuple[tp.ListListNumpy, tp.ListNumpy]:
-                images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
-                resized_images_buffer, new_size, scale = flow.image_target_resize(
-                    images_buffer, target_size=target_size, max_size=max_size
-                )
-                poly_buffer = flow.tensor_list_to_tensor_buffer(poly_def)
-                scaled_poly = flow.object_segmentation_polygon_scale(poly_buffer, scale)
-                scaled_poly_list = flow.tensor_buffer_to_tensor_list(
-                    scaled_poly, shape=poly_shape[1:], dtype=flow.float
-                )
-                return scaled_poly_list, new_size
-
-            input_image_list = [np.expand_dims(image, axis=0) for image in images]
-            input_poly_list = [np.expand_dims(poly, axis=0) for poly in poly_list]
-
-            output_poly_list, output_image_size = object_segm_poly_scale_job(
-                [input_image_list], [input_poly_list]
-            )
-
-            return output_poly_list[0], output_image_size
-
-        if __name__ == "__main__":
-            images = _read_images_by_cv(['./img/1.jpg', './img/2.jpg'])
-            segm_poly_list = []
-            segmentations = [[[20.0, 40.0], [80.0, 160.0], [100.0, 210.0]], # Image 1 segmentation point
-                            [[25.0, 45.0], [85.0, 165.0], [105.0, 215.0]]] # Image 2 segmentation point
-
-            for segmentation in segmentations:
-                polygon = []
-                for seg in segmentation:
-                    polygon.extend(seg)
-                poly_array = np.array(polygon, dtype=np.single).reshape(-1, 2) # Reshape it
-                segm_poly_list.append(poly_array)
-
-            bbox, size = _of_object_segm_poly_scale(images, segm_poly_list, 280, 350)
-
-    """
-    assert isinstance(poly, oneflow._oneflow_internal.BlobDesc)
-    assert isinstance(scale, oneflow._oneflow_internal.BlobDesc)
-    assert poly.shape[0] == scale.shape[0]
-
-    if name is None:
-        name = id_util.UniqueStr("ObjectSegmPolyScale_")
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("object_segmentation_polygon_scale")
-        .Input("poly", [poly])
-        .Input("scale", [scale])
-        .Output("out")
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export(
-    "detection.object_segmentation_polygon_to_mask",
-    "object_segmentation_polygon_to_mask",
-)
-def object_segm_poly_to_mask(
-    poly: oneflow._oneflow_internal.BlobDesc,
-    poly_index: oneflow._oneflow_internal.BlobDesc,
-    image_size: oneflow._oneflow_internal.BlobDesc,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator converts the poly segment points to the segment mask array.
-
-    Args:
-        poly (oneflow._oneflow_internal.BlobDesc): The poly segment points.
-        poly_index (oneflow._oneflow_internal.BlobDesc): The poly segment index.
-        image_size (oneflow._oneflow_internal.BlobDesc): The input image size.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import numpy as np
-        import oneflow as flow
-        import oneflow.typing as tp
-        import cv2
-        from typing import Tuple
-
-
-        def _read_images_by_cv(image_files):
-            images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
-            return images
-
-
-        def _get_images_static_shape(images):
-            image_shapes = [image.shape for image in images]
-            image_static_shape = np.amax(image_shapes, axis=0)
-            assert isinstance(
-                image_static_shape, np.ndarray
-            ), "image_shapes: {}, image_static_shape: {}".format(
-                str(image_shapes), str(image_static_shape)
-            )
-            image_static_shape = image_static_shape.tolist()
-            image_static_shape.insert(0, len(image_shapes))
-            return image_static_shape
-
-
-        def _get_segm_poly_static_shape(poly_list, poly_index_list):
-            assert len(poly_list) == len(poly_index_list)
-            num_images = len(poly_list)
-            max_poly_elems = 0
-            for poly, poly_index in zip(poly_list, poly_index_list):
-                assert len(poly.shape) == 2
-                assert len(poly_index.shape) == 2, str(poly_index.shape)
-                assert poly.shape[0] == poly_index.shape[0]
-                assert poly.shape[1] == 2
-                assert poly_index.shape[1] == 3
-                max_poly_elems = max(max_poly_elems, poly.shape[0])
-            return [num_images, max_poly_elems, 2], [num_images, max_poly_elems, 3]
-
-        def _segm_poly_to_tensor(img_segm_poly_list):
-            poly_array_list = []
-            poly_index_array_list = []
-            for img_idx, segm_poly_list in enumerate(img_segm_poly_list):
-                img_poly_elem_list = []
-                img_poly_index_list = []
-
-                for obj_idx, poly_list in enumerate(segm_poly_list):
-                    for poly_idx, poly in enumerate(poly_list):
-                        img_poly_elem_list.extend(poly)
-                        for pt_idx, pt in enumerate(poly):
-                            if pt_idx % 2 == 0:
-                                img_poly_index_list.append([pt_idx / 2, poly_idx, obj_idx])
-
-                img_poly_array = np.array(img_poly_elem_list, dtype=np.single).reshape(-1, 2)
-                assert img_poly_array.size > 0, segm_poly_list
-                poly_array_list.append(img_poly_array)
-
-                img_poly_index_array = np.array(img_poly_index_list, dtype=np.int32)
-                assert img_poly_index_array.size > 0, segm_poly_list
-                poly_index_array_list.append(img_poly_index_array)
-
-            return poly_array_list, poly_index_array_list
-
-
-        def _of_poly_to_mask_pipeline(
-            images, poly_list, poly_index_list, num_segms_list, target_size, max_size
-        ):
-            print(len(images))
-            print(len(poly_list))
-
-            assert len(images) == len(poly_list)
-            assert len(poly_list) == len(poly_index_list)
-            image_shape = _get_images_static_shape(images)
-            poly_shape, poly_index_shape = _get_segm_poly_static_shape(
-                poly_list, poly_index_list
-            )
-            max_num_segms = max(num_segms_list)
-
-            func_config = flow.FunctionConfig()
-            func_config.default_logical_view(flow.scope.mirrored_view())
-            func_config.default_data_type(flow.float)
-
-
-            @flow.global_function(function_config=func_config)
-            def poly_to_mask_job(
-                image_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(image_shape), dtype=flow.float
-                ),
-                poly_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(poly_shape), dtype=flow.float
-                ),
-                poly_index_def: tp.ListListNumpy.Placeholder(
-                    shape=tuple(poly_index_shape), dtype=flow.int32
-                ),
-            ) -> Tuple[tp.ListListNumpy, tp.ListListNumpy]:
-                images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
-                resized_images_buffer, new_size, scale = flow.image_target_resize(
-                    images_buffer, target_size=target_size, max_size=max_size
-                )
-                poly_buffer = flow.tensor_list_to_tensor_buffer(poly_def)
-                poly_index_buffer = flow.tensor_list_to_tensor_buffer(poly_index_def)
-                scaled_poly_buffer = flow.object_segmentation_polygon_scale(poly_buffer, scale)
-                mask_buffer = flow.object_segmentation_polygon_to_mask(
-                    scaled_poly_buffer, poly_index_buffer, new_size
-                )
-                mask_list = flow.tensor_buffer_to_tensor_list(
-                    mask_buffer, shape=(max_num_segms, target_size, max_size), dtype=flow.int8
-                )
-                scaled_poly_list = flow.tensor_buffer_to_tensor_list(
-                    scaled_poly_buffer, shape=poly_shape[1:], dtype=flow.float
-                )
-                return mask_list, scaled_poly_list
-
-            input_image_list = [np.expand_dims(image, axis=0) for image in images]
-            input_poly_list = [np.expand_dims(poly, axis=0) for poly in poly_list]
-            input_poly_index_list = [
-                np.expand_dims(poly_index, axis=0) for poly_index in poly_index_list
-            ]
-
-            output_mask_list, output_poly_list = poly_to_mask_job(
-                [input_image_list], [input_poly_list], [input_poly_index_list]
-            )
-
-            return output_mask_list[0], output_poly_list[0]
-
-        if __name__ == "__main__":
-            images = _read_images_by_cv(['./img/1.jpg', './img/2.jpg'])
-            segm_poly_list = []
-
-            segmentations = [[[20.0, 40.0, 80.0, 160.0, 100.0, 210.0, 120.0, 215.0]], # Image 1 segmentation point
-                            [[24.0, 42.0, 86.0, 168.0, 103.0, 223.0, 125.0, 235.0]]] # Image 2 segmentation point
-
-            for segmentation in segmentations:
-                polygon = []
-                for seg in segmentation:
-                    polygon.extend(seg)
-
-                poly_array = np.array(polygon, dtype=np.single).reshape(-1, 2) # Reshape it
-                segm_poly_list.append([poly_array])
-
-            poly_list, poly_index_list = _segm_poly_to_tensor(segm_poly_list)
-            num_segms_list = [len(segm_polys) for segm_polys in segm_poly_list]
-            target_size = 280
-            max_size = 350
-            of_mask_list, of_scaled_poly_list = _of_poly_to_mask_pipeline(
-                images, poly_list, poly_index_list, num_segms_list, target_size, max_size
-            )
-            of_mask_list = [
-                mask_array.reshape(-1, mask_array.shape[-2], mask_array.shape[-1])
-                for mask_array in of_mask_list
-            ] # reshape it
-
-    """
-    assert isinstance(poly, oneflow._oneflow_internal.BlobDesc)
-    assert isinstance(poly_index, oneflow._oneflow_internal.BlobDesc)
-    assert isinstance(image_size, oneflow._oneflow_internal.BlobDesc)
-    assert poly.shape[0] == poly_index.shape[0]
-    assert poly.shape[0] == image_size.shape[0]
-
-    if name is None:
-        name = id_util.UniqueStr("ObjectSegmPolyToMask_")
-
-    op = (
-        flow.user_op_builder(name)
-        .Op("object_segmentation_polygon_to_mask")
-        .Input("poly", [poly])
-        .Input("poly_index", [poly_index])
-        .Input("image_size", [image_size])
-        .Output("out")
-        .Build()
-    )
-    return op.InferAndTryRun().SoleOutputBlob()
-
-
-@oneflow_export("data.coco_reader")
-def api_coco_reader(
-    annotation_file: str,
-    image_dir: str,
-    batch_size: int,
-    shuffle: bool = True,
-    random_seed: Optional[int] = None,
-    group_by_aspect_ratio: bool = True,
-    stride_partition: bool = True,
-    remove_images_without_annotations: bool = True,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
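-    """This operator creates a reader that loads a COCO format dataset.
-
-    It outputs seven blobs per batch: image, image_id, image_size, gt_bbox,
-    gt_label, gt_segm and gt_segm_index.
-
-    Args:
-        annotation_file (str): The path of the COCO annotation json file.
-        image_dir (str): The directory that contains the images.
-        batch_size (int): The batch size.
-        shuffle (bool, optional): Whether to shuffle the dataset after each epoch. Defaults to True.
-        random_seed (Optional[int], optional): The random seed used for shuffling. Defaults to None.
-        group_by_aspect_ratio (bool, optional): Whether to group images by aspect ratio. Defaults to True.
-        stride_partition (bool, optional): Whether to partition the data with a stride. Defaults to True.
-        remove_images_without_annotations (bool, optional): Whether to skip images without annotations. Defaults to True.
-        name (str): The name for the operation. Must not be None.
-
-    Returns:
-        The output Blobs of the reader.
-
-    For example (a minimal sketch; the annotation file and image directory below are placeholders):
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        @flow.global_function(type="predict")
-        def coco_reader_job() -> tp.Numpy:
-            outputs = flow.data.coco_reader(
-                annotation_file="/dataset/coco/annotations/instances_train2017.json",
-                image_dir="/dataset/coco/train2017",
-                batch_size=2,
-                shuffle=True,
-                name="COCOReader",
-            )
-            image, image_id, image_size, gt_bbox, gt_label, gt_segm, gt_segm_index = outputs
-            return image_id
-    """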
-    assert name is not None
-    module = flow.find_or_create_module(
-        name,
-        lambda: COCOReader(
-            annotation_file=annotation_file,
-            image_dir=image_dir,
-            batch_size=batch_size,
-            shuffle=shuffle,
-            random_seed=random_seed,
-            group_by_aspect_ratio=group_by_aspect_ratio,
-            remove_images_without_annotations=remove_images_without_annotations,
-            stride_partition=stride_partition,
-            name=name,
-        ),
-    )
-    return module()
-
-
-class COCOReader(module_util.Module):
-    def __init__(
-        self,
-        annotation_file: str,
-        image_dir: str,
-        batch_size: int,
-        shuffle: bool = True,
-        random_seed: Optional[int] = None,
-        group_by_aspect_ratio: bool = True,
-        remove_images_without_annotations: bool = True,
-        stride_partition: bool = True,
-        name: Optional[str] = None,
-    ):
-        assert name is not None
-        if random_seed is None:
-            random_seed = random.randrange(sys.maxsize)
-        module_util.Module.__init__(self, name)
-        self.op_module_builder = (
-            flow.consistent_user_op_module_builder("COCOReader")
-            .Output("image")
-            .Output("image_id")
-            .Output("image_size")
-            .Output("gt_bbox")
-            .Output("gt_label")
-            .Output("gt_segm")
-            .Output("gt_segm_index")
-            .Attr("session_id", flow.current_scope().session_id)
-            .Attr("annotation_file", annotation_file)
-            .Attr("image_dir", image_dir)
-            .Attr("batch_size", batch_size)
-            .Attr("shuffle_after_epoch", shuffle)
-            .Attr("random_seed", random_seed)
-            .Attr("group_by_ratio", group_by_aspect_ratio)
-            .Attr(
-                "remove_images_without_annotations", remove_images_without_annotations
-            )
-            .Attr("stride_partition", stride_partition)
-            .CheckAndComplete()
-        )
-        self.op_module_builder.user_op_module.InitOpKernel()
-
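-    # The first call reuses the module name so the reader op keeps a stable name;
-    # subsequent calls are given a fresh unique name.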
-    def forward(self):
-        if self.call_seq_no == 0:
-            name = self.module_name
-        else:
-            name = id_util.UniqueStr("COCOReader")
-        return (
-            self.op_module_builder.OpName(name)
-            .Build()
-            .InferAndTryRun()
-            .RemoteBlobList()
-        )
-
-
-@oneflow_export("data.ofrecord_image_classification_reader")
-def ofrecord_image_classification_reader(
-    ofrecord_dir: str,
-    image_feature_name: str,
-    label_feature_name: str,
-    batch_size: int = 1,
-    data_part_num: int = 1,
-    part_name_prefix: str = "part-",
-    part_name_suffix_length: int = -1,
-    random_shuffle: bool = False,
-    shuffle_buffer_size: int = 1024,
-    shuffle_after_epoch: bool = False,
-    color_space: str = "BGR",
-    decode_buffer_size_per_thread: int = 32,
-    num_decode_threads_per_machine: Optional[int] = None,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
-    """This operator creates a reader for image classification tasks.
-
-    Args:
-        ofrecord_dir (str): The directory of the OFRecord files.
-        image_feature_name (str): The name of the image feature.
-        label_feature_name (str): The name of the label feature.
-        batch_size (int, optional): The batch size. Defaults to 1.
-        data_part_num (int, optional): The number of data parts. Defaults to 1.
-        part_name_prefix (str, optional): The prefix of the data part name. Defaults to "part-".
-        part_name_suffix_length (int, optional): The length of the data part name suffix. Defaults to -1.
-        random_shuffle (bool, optional): Whether to randomly shuffle the data. Defaults to False.
-        shuffle_buffer_size (int, optional): The buffer size used for shuffling. Defaults to 1024.
-        shuffle_after_epoch (bool, optional): Whether to shuffle the data after each epoch. Defaults to False.
-        color_space (str, optional): The color space. Defaults to "BGR".
-        decode_buffer_size_per_thread (int, optional): The decode buffer size per thread. Defaults to 32.
-        num_decode_threads_per_machine (Optional[int], optional): The number of decode threads on each machine. Defaults to None.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob.
-
-    For example:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        from typing import Tuple
-
-
-        @flow.global_function(type="predict")
-        def image_classifier_job() -> Tuple[tp.Numpy, tp.Numpy]:
-            image, label = flow.data.ofrecord_image_classification_reader(
-                ofrecord_dir="./imgdataset",
-                image_feature_name="encoded",
-                label_feature_name="class/label",
-                batch_size=8,
-                data_part_num=1,
-                part_name_prefix="part-",
-                part_name_suffix_length=-1,
-                random_shuffle=False,
-                shuffle_after_epoch=False,
-                color_space="RGB",
-                decode_buffer_size_per_thread=16,
-            )
-            res_image, scale, new_size = flow.image.Resize(
-                    image, target_size=(224, 224)
-                )
-            return res_image, label
-
-
-        if __name__ == "__main__":
-            images, labels = image_classifier_job()
-            # images.shape (8, 224, 224, 3)
-
-    """
-    if name is None:
-        name = id_util.UniqueStr("OFRecordImageClassificationReader_")
-    (image, label) = (
-        flow.user_op_builder(name)
-        .Op("ofrecord_image_classification_reader")
-        .Output("image")
-        .Output("label")
-        .Attr("data_dir", ofrecord_dir)
-        .Attr("data_part_num", data_part_num)
-        .Attr("batch_size", batch_size)
-        .Attr("part_name_prefix", part_name_prefix)
-        .Attr("random_shuffle", random_shuffle)
-        .Attr("shuffle_buffer_size", shuffle_buffer_size)
-        .Attr("shuffle_after_epoch", shuffle_after_epoch)
-        .Attr("part_name_suffix_length", part_name_suffix_length)
-        .Attr("color_space", color_space)
-        .Attr("image_feature_name", image_feature_name)
-        .Attr("label_feature_name", label_feature_name)
-        .Attr("decode_buffer_size_per_thread", decode_buffer_size_per_thread)
-        .Attr("num_decode_threads_per_machine", num_decode_threads_per_machine or 0)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()
-    )
-    label = flow.tensor_buffer_to_tensor(label, dtype=flow.int32, instance_shape=[1])
-    label = flow.squeeze(label, axis=[-1])
-    return image, label
-
-
-@oneflow_export("data.OneRecDecoder", "data.onerec_decoder")
-def OneRecDecoder(
-    input_blob,
-    key,
-    dtype,
-    shape,
-    is_dynamic=False,
-    reshape=None,
-    batch_padding=None,
-    name=None,
-):
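-    """This operator decodes the field named `key` from OneRec records in `input_blob`
-    into a tensor with data type `dtype` and static shape `shape`.
-
-    Args:
-        input_blob: The input Blob that holds OneRec records.
-        key (str): The name of the field to decode.
-        dtype: The data type of the decoded tensor.
-        shape (Sequence[int]): The static shape of the decoded tensor.
-        is_dynamic (bool, optional): Whether the decoded tensor is dynamic. Defaults to False.
-        reshape (Optional[Sequence[int]], optional): If given, reshape the decoded tensor to this shape. Defaults to None.
-        batch_padding (Optional[Sequence[int]], optional): If given, padding applied to each batch. Defaults to None.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        The decoded Blob.
-    """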
-    if name is None:
-        name = id_util.UniqueStr("OneRecDecoder_")
-    if reshape is not None:
-        has_reshape = True
-    else:
-        has_reshape = False
-        reshape = shape
-    if batch_padding is not None:
-        has_batch_padding = True
-    else:
-        has_batch_padding = False
-        batch_padding = shape
-    return (
-        flow.user_op_builder(name)
-        .Op("onerec_decoder")
-        .Input("in", [input_blob])
-        .Output("out")
-        .Attr("key", key)
-        .Attr("data_type", dtype)
-        .Attr("static_shape", shape)
-        .Attr("is_dynamic", is_dynamic)
-        .Attr("has_reshape", has_reshape)
-        .Attr("reshape", reshape)
-        .Attr("has_batch_padding", has_batch_padding)
-        .Attr("batch_padding", batch_padding)
-        .Build()
-        .InferAndTryRun()
-        .RemoteBlobList()[0]
-    )
-
-
-@oneflow_export("data.megatron_gpt_mmap_data_loader", "data.MegatronGPTMMapDataLoader")
-def gpt_data_loader(
-    data_file_prefix: str,
-    seq_length: int,
-    num_samples: int,
-    batch_size: int,
-    dtype: flow.dtype = flow.int64,
-    shuffle: bool = True,
-    random_seed: Optional[int] = None,
-    split_sizes: Optional[Sequence[int]] = None,
-    split_index: Optional[int] = None,
-    parallel_distribution: Optional[Sequence[str]] = None,
-    start_from_saved_progress: bool = False,
-    name: Optional[str] = None,
-) -> oneflow._oneflow_internal.BlobDesc:
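-    """This operator creates a data loader that reads a Megatron-LM style mmap-format
-    GPT dataset and produces token sequences of `seq_length` tokens plus a label token.
-
-    Args:
-        data_file_prefix (str): The path prefix of the mmap data files.
-        seq_length (int): The sequence length of each sample.
-        num_samples (int): The total number of samples to generate.
-        batch_size (int): The batch size.
-        dtype (flow.dtype, optional): The data type of the output tokens. Defaults to flow.int64.
-        shuffle (bool, optional): Whether to shuffle the samples. Defaults to True.
-        random_seed (Optional[int], optional): The random seed for shuffling. Defaults to None.
-        split_sizes (Optional[Sequence[int]], optional): The relative sizes of the dataset splits. Defaults to None.
-        split_index (Optional[int], optional): The index of the split to read. Defaults to None.
-        parallel_distribution (Optional[Sequence[str]], optional): The distribution of the output Blob. Defaults to None.
-        start_from_saved_progress (bool, optional): Whether to resume from a saved iteration counter. Defaults to False.
-        name (Optional[str], optional): The name for the operation. Defaults to None.
-
-    Returns:
-        oneflow._oneflow_internal.BlobDesc: The result Blob of token ids.
-
-    For example (a minimal sketch; the data file prefix below is a placeholder):
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        @flow.global_function()
-        def gpt_loader_job() -> tp.Numpy:
-            tokens = flow.data.megatron_gpt_mmap_data_loader(
-                data_file_prefix="/dataset/gpt/my_corpus_text_document",
-                seq_length=1024,
-                num_samples=1024,
-                batch_size=8,
-                dtype=flow.int64,
-                shuffle=True,
-                name="GPTDataLoader",
-            )
-            return tokens
-    """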
-    if name is None:
-        name = (
-            "gpt_data_loader"
-            if start_from_saved_progress
-            else id_util.UniqueStr("gpt_data_loader_")
-        )
-
-    # label_length may be exposed as a parameter in the future
-    label_length = 1
-
-    if parallel_distribution is None:
-        parallel_distribution = []
-
-    if split_index is None:
-        split_index = 0
-
-    if split_sizes is None:
-        split_sizes = (1,)
-
-    if split_index >= len(split_sizes):
-        raise ValueError(
-            "split index {} is out of range, split_sizes {}".formart(
-                split_index, split_sizes
-            )
-        )
-
-    if random_seed is None:
-        from datetime import datetime
-
-        random_seed = int(datetime.utcnow().timestamp())
-
-    def distribute_to_str(dist):
-        if dist is None:
-            return ""
-        elif type(dist) is str:
-            return dist
-        elif type(dist) is oneflow._oneflow_internal.distribute.SplitDistribute:
-            return "S({})".format(dist.axis)
-        elif type(dist) is oneflow._oneflow_internal.distribute.BroadcastDistribute:
-            return "B"
-        else:
-            raise ValueError("unsupported distribute")
-
-    parallel_distribution = list(map(distribute_to_str, parallel_distribution))
-
-    if start_from_saved_progress:
-        iteration_name = "{}-iteration-sq{}-sa{}-bs{}-sd{}-sp{}-spi{}-{}".format(
-            name,
-            seq_length,
-            num_samples,
-            batch_size,
-            random_seed,
-            "_".join([str(s) for s in split_sizes]),
-            split_index,
-            "_".join(
-                [
-                    "S{}".format(p[2:-1]) if p.startswith("S") else p
-                    for p in parallel_distribution
-                ]
-            ),
-        )
-        iteration = flow.get_variable(
-            name=iteration_name,
-            shape=(1,),
-            dtype=flow.int64,
-            initializer=flow.constant_initializer(0, flow.int64),
-            model_name="iteration",
-            reuse=False,
-        )
-
-    op_builder = flow.user_op_builder(name).Op("megatron_gpt_mmap_data_loader")
-    if start_from_saved_progress:
-        op_builder.Input("iteration", [iteration])
-
-    op = (
-        op_builder.Output("out")
-        .Attr("data_file_prefix", data_file_prefix)
-        .Attr("seq_length", seq_length)
-        .Attr("label_length", label_length)
-        .Attr("num_samples", num_samples)
-        .Attr("batch_size", batch_size)
-        .Attr("dtype", dtype)
-        .Attr("shuffle", shuffle)
-        .Attr("random_seed", random_seed)
-        .Attr("split_sizes", split_sizes)
-        .Attr("split_index", split_index)
-        .Attr("parallel_distribution", parallel_distribution)
-        .Build()
-    )
-
-    return op.InferAndTryRun().SoleOutputBlob()
diff --git a/oneflow/python/ops/watch.py b/oneflow/python/ops/watch.py
deleted file mode 100644
index 161241d2221af4e1d7dc47ba8a2ead76e6ef4300..0000000000000000000000000000000000000000
--- a/oneflow/python/ops/watch.py
+++ /dev/null
@@ -1,439 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import absolute_import
-
-import uuid
-from typing import Callable, Optional, Union
-
-import oneflow.core.operator.op_conf_pb2 as op_conf_util
-import oneflow.python.framework.c_api_util as c_api_util
-import oneflow.python.framework.session_context as session_ctx
-import oneflow.python.framework.compile_context as compile_context
-import oneflow.python.framework.id_util as id_util
-import oneflow.python.framework.local_blob as local_blob_util
-import oneflow.python.framework.remote_blob as remote_blob_util
-import oneflow.python.framework.watcher as watcher_util
-import oneflow.python.framework.typing as oft
-import oneflow.python.framework.typing_util as oft_util
-import oneflow.python.lib.core.enable_if as enable_if
-import oneflow.python.framework.hob as hob
-from oneflow.core.job.lbi_diff_watcher_info_pb2 import LbiAndDiffWatcherUuidPair
-from oneflow.python.oneflow_export import oneflow_export
-import oneflow.python.eager as eager_util
-import oneflow
-import oneflow._oneflow_internal
-from oneflow._oneflow_internal import ConsistentBlob, MirroredBlob
-import inspect
-import numpy as np
-
-
-@oneflow_export("watch")
-def Watch(
-    blob_watched: oneflow._oneflow_internal.BlobDesc,
-    handler_or_prompt: Optional[Union[Callable, str]] = None,
-) -> None:
-    r"""Register callback for a blob. The callback function will be called after the computation produce the blob finishes. We can use it to watch the values of Blob.
-
-    Args:
-        blob_watched: a `Blob`
-        handler_or_prompt: a callable that takes the watched `Blob` value as its single argument, or a prompt string to print before the value
-
-    For example:
-
-    Example 1:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        def watch_handler(y: tp.Numpy):
-            print("out", y)
-
-
-        @flow.global_function()
-        def watch_Job() -> None:
-            init = flow.constant_initializer(2.5)
-            variable = flow.get_variable(
-                "variable-weight",
-                shape=(5, ),
-                initializer=init,
-                trainable=True
-            )
-            flow.watch(variable, watch_handler)
-
-
-        checkpoint = flow.train.CheckPoint()
-        checkpoint.init()
-        watch_Job()
-
-        # out [2.5 2.5 2.5 2.5 2.5]
-
-    Example 2:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-        def watch_handler(y: tp.Numpy):
-            print("out", y)
-
-
-        @flow.global_function()
-        def watch_Job(x: tp.Numpy.Placeholder((1, 3, 2, 2))
-        ) -> None:
-            initializer = flow.truncated_normal(0.1)
-            conv2d = flow.layers.conv2d(
-                x,
-                filters=3,
-                kernel_size=1,
-                strides=1,
-                padding='SAME',
-                kernel_initializer=initializer,
-                name="Conv2d"
-            )
-
-            flow.watch(conv2d, watch_handler)
-
-
-        checkpoint = flow.train.CheckPoint()
-        checkpoint.init()
-        x = np.ones(shape=(1, 3, 2, 2)).astype(np.float32)
-        watch_Job(x)
-
-        # out [[[[ 0.03757111  0.03757111]
-        #        [ 0.03757111  0.03757111]]
-
-        #       [[-0.36131713 -0.36131713]
-        #        [-0.36131713 -0.36131713]]
-
-        #       [[-0.12266113 -0.12266113]
-        #        [-0.12266113 -0.12266113]]]]
-
-    """
-    api = enable_if.unique([EagerWatch, LazyWatch])
-    return api(blob_watched, handler_or_prompt)
-
-
-@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
-def EagerWatch(blob_watched, handler_or_prompt=None):
-    handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt)
-    local_blob = local_blob_util.MakeLocalBlob4EagerBlob(blob_watched)
-    handler(oft_util.TransformWatchedBlob(local_blob, handler))
-
-
-@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
-def LazyWatch(blob_watched, handler_or_prompt=None):
-    handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt)
-    if isinstance(blob_watched, ConsistentBlob):
-        LazyConsistentWatch(blob_watched, handler)
-    elif isinstance(blob_watched, MirroredBlob):
-        handlers = _MakeSubConsistentBlobHandlers(blob_watched, handler)
-        for consistent_blob, sub_handler in zip(
-            blob_watched.sub_consistent_blob_list, handlers
-        ):
-            assert isinstance(consistent_blob, ConsistentBlob)
-            LazyConsistentWatch(consistent_blob, sub_handler)
-    else:
-        raise NotImplementedError
-
-
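-# Insert a foreign_watch op that consumes the watched blob on CPU and bind the
-# handler to a fresh uuid so the runtime can call back into Python with the value.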
-def LazyConsistentWatch(blob_watched, handler):
-    handler_uuid = str(uuid.uuid1())
-    op_conf = op_conf_util.OperatorConf()
-    op_conf.name = id_util.UniqueStr("ForeignWatch_")
-    setattr(op_conf.foreign_watch_conf, "in", blob_watched.unique_name)
-    op_conf.foreign_watch_conf.handler_uuid = handler_uuid
-    device_name = blob_watched.parallel_conf.device_name(0)
-    with oneflow.scope.placement("cpu", "0:0"):
-        compile_context.CurJobAddOp(op_conf)
-    watcher_util.BindUuidAndHandler(handler_uuid, blob_watched, handler)
-
-
-@oneflow_export("watch_diff")
-def WatchDiff(
-    blob_watched: oneflow._oneflow_internal.BlobDesc,
-    handler_or_prompt: Optional[Union[Callable, str]] = None,
-) -> None:
-    r"""Register callback for gradient of a blob. The callback will be called after the computation produce the gradient blob finishes.
-
-    Args:
-        blob_watched: a `Blob`
-        handler_or_prompt: a callable that takes the gradient `Blob` value as its single argument, or a prompt string to print before the value
-
-    For example:
-
-    Example 1:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-
-
-        BATCH_SIZE = 20
-
-        def watch_diff_handler(blob: tp.Numpy):
-            print("watch_diff_handler:", blob, blob.shape, blob.dtype)
-
-        @flow.global_function(type="train")
-        def train_job(
-            images: tp.Numpy.Placeholder((BATCH_SIZE, 1, 28, 28), dtype=flow.float),
-            labels: tp.Numpy.Placeholder((BATCH_SIZE,), dtype=flow.int32),
-        ) -> tp.Numpy:
-            initializer = flow.truncated_normal(0.1)
-            with flow.scope.placement("gpu", "0:0"):
-                reshape = flow.reshape(images, [images.shape[0], -1])
-                hidden = flow.layers.dense(
-                    reshape,
-                    512,
-                    activation=flow.nn.relu,
-                    kernel_initializer=initializer,
-                    name="hidden",
-                )
-                logits = flow.layers.dense(
-                    hidden, 10, kernel_initializer=initializer, name="output"
-                )
-                loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels, logits, name="softmax_loss")
-
-            lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
-            flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(loss)
-            flow.watch_diff(logits, watch_diff_handler)
-            return loss
-
-
-        if __name__ == "__main__":
-            checkpoint = flow.train.CheckPoint()
-            checkpoint.init()
-            (train_images, train_labels), (test_images, test_labels) = flow.data.load_mnist(
-                    BATCH_SIZE
-            )
-            for i, (images, labels) in enumerate(zip(train_images, train_labels)):
-                loss = train_job(images, labels)
-
-
-        # watch_diff_handler: [[-1.88834548e-01  2.71021971e-03  2.28271242e-02  7.17673637e-03
-        #                       4.10183379e-03  8.93106461e-02  2.23669074e-02  3.86103359e-03
-        #                       3.12465224e-02  5.23346756e-03] .....
-
-    Example 2:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-
-        BATCH_SIZE = 20
-
-        def watch_diff_handler(blob: tp.Numpy):
-            print("watch_diff_handler:", blob)
-
-
-        @flow.global_function(type="train")
-        def watch_matmul_diff_job(
-            images: tp.Numpy.Placeholder((3, 3), dtype=flow.float),
-        ) -> None:
-            with flow.scope.placement("cpu", "0:0"):
-                weight_initializer = flow.constant_initializer(2)
-                weight_shape = (3, BATCH_SIZE)
-                weight = flow.get_variable(
-                    "matmultest-weight",
-                    shape=weight_shape,
-                    initializer=weight_initializer)
-                output = flow.linalg.matmul(images, weight)
-
-            lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
-            flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(output)
-            flow.watch_diff(weight, watch_diff_handler)
-
-
-        if __name__ == "__main__":
-            check_point = flow.train.CheckPoint()
-            check_point.init()
-
-            x = np.array([[1, 1, 1],
-                        [1, 1, 1],
-                        [1, 1, 1]]).astype(np.float32)
-            watch_matmul_diff_job(x)
-
-        # watch_diff_handler: [[3. 3. 3.]
-        #                      [3. 3. 3.]
-        #                      [3. 3. 3.]]
-
-    Example 3:
-
-    .. code-block:: python
-
-        import oneflow as flow
-        import oneflow.typing as tp
-        import numpy as np
-
-
-        def watch_diff_handler(blob: tp.Numpy):
-            print("watch_diff_handler:", blob, blob.shape, blob.dtype)
-
-
-        @flow.global_function(type="train")
-        def watch_conv_diff_job(
-            images: tp.Numpy.Placeholder((1, 1, 4, 4), dtype=flow.float),
-        ) -> None:
-            with flow.scope.placement("gpu", "0:0"):
-                weight_shape = (1, 1, 3, 3)
-                weight_initializer = flow.truncated_normal(0.1)
-                weight = flow.get_variable(
-                    name="conv-weight",
-                    shape=weight_shape,
-                    initializer=weight_initializer
-                )
-                output = flow.nn.conv2d(images, weight, strides=1, padding="VALID")
-
-            lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
-            flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(output)
-            flow.watch_diff(weight, watch_diff_handler)
-
-
-        if __name__ == "__main__":
-            check_point = flow.train.CheckPoint()
-            check_point.init()
-
-            x = np.array([[[[ 1.,  2.,  3.,  4.],
-                            [ 5.,  6.,  7.,  8.],
-                            [ 9., 10., 11., 12.],
-                            [13., 14., 15., 16.]]]]).astype(np.float32)
-
-            watch_conv_diff_job(x)
-
-        # watch_diff_handler: [[[[14. 18. 22.]
-        #                        [30. 34. 38.]
-        #                        [46. 50. 54.]]]]
-
-    """
-    api = enable_if.unique([EagerWatchDiff, LazyWatchDiff])
-    return api(blob_watched, handler_or_prompt)
-
-
-@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
-def EagerWatchDiff(blob_watched, handler_or_prompt=None):
-    handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt)
-    handler_uuid = str(uuid.uuid1())
-    lbi_and_uuid = LbiAndDiffWatcherUuidPair()
-    # Copy cfg LBI to proto LBI
-    lbi_and_uuid.lbi.op_name = blob_watched.lbi.op_name()
-    lbi_and_uuid.lbi.blob_name = blob_watched.lbi.blob_name()
-    lbi_and_uuid.watcher_uuid = handler_uuid
-    c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair(lbi_and_uuid)
-    uuid2watch_handler = session_ctx.GetDefaultSession().uuid2watch_handler
-    uuid2watch_handler[handler_uuid] = lambda x: EagerWatch(x, handler_or_prompt)
-
-
-@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
-def LazyWatchDiff(blob_watched, handler_or_prompt=None):
-    handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt)
-    if isinstance(blob_watched, ConsistentBlob):
-        LazyConsistentWatchDiff(blob_watched, handler)
-    elif isinstance(blob_watched, MirroredBlob):
-        handlers = _MakeSubConsistentBlobHandlers(blob_watched, handler)
-        for consistent_blob, sub_handler in zip(
-            blob_watched.sub_consistent_blob_list, handlers
-        ):
-            assert isinstance(consistent_blob, ConsistentBlob)
-            LazyConsistentWatchDiff(consistent_blob, sub_handler)
-    else:
-        raise NotImplementedError
-
-
-def LazyConsistentWatchDiff(blob_watched, handler):
-    handler_uuid = str(uuid.uuid1())
-    lbi_and_uuid = LbiAndDiffWatcherUuidPair()
-    # Copy cfg LBI to proto LBI
-    lbi_and_uuid.lbi.op_name = blob_watched.lbi.op_name()
-    lbi_and_uuid.lbi.blob_name = blob_watched.lbi.blob_name()
-    lbi_and_uuid.watcher_uuid = handler_uuid
-    c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair(lbi_and_uuid)
-    watcher_util.BindUuidAndHandler(handler_uuid, blob_watched, handler)
-
-
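-# If handler_or_prompt is callable, validate its single annotated parameter against
-# the watched blob and return it; otherwise treat it as a prompt string and build a
-# default handler that prints the prompt (if any) followed by the blob value.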
-def _CheckOrMakeHandler(blob_watched, handler_or_prompt):
-    if callable(handler_or_prompt):
-        parameters = inspect.signature(handler_or_prompt).parameters
-        oft_util.CheckWatchCallbackParameterAnnotation(parameters)
-        annotation = parameters[list(parameters.keys())[0]].annotation
-        oft_util.CheckWatchedBlobByAnnotation(blob_watched, annotation)
-        return handler_or_prompt
-    prompt = handler_or_prompt
-
-    def Handler(x: GetTypeAnnotation(blob_watched)):
-        if prompt is not None:
-            print(str(prompt))
-        print(x)
-
-    return Handler
-
-
-def _MakeSubConsistentBlobHandlers(blob_watched, handler):
-    assert isinstance(blob_watched, MirroredBlob)
-    handler4parallel_id_and_local_blob = _MakeHandler4ParallelIdAndLocalBlob(
-        blob_watched, handler
-    )
-    return [
-        _WrapperHandler4ParallelIdAndLocalBlob(i, handler4parallel_id_and_local_blob)
-        for i in range(len(blob_watched.sub_consistent_blob_list))
-    ]
-
-
-def _WrapperHandler4ParallelIdAndLocalBlob(
-    parallel_id, handler4parallel_id_and_local_blob
-):
-    return lambda local_blob: handler4parallel_id_and_local_blob(
-        parallel_id, local_blob
-    )
-
-
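-# Build a handler that buffers the local blob of every parallel rank and invokes the
-# user handler once all sub blobs have arrived, concatenating them along axis 0 when
-# there is more than one.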
-def _MakeHandler4ParallelIdAndLocalBlob(blob_watched, handler):
-    parallel_id2consistent_local_blob = {}
-    len_sub_remote_blobs = len(blob_watched.sub_consistent_blob_list)
-
-    def HandlerParallelIdAndLocalBlob(parallel_id, local_blob):
-        assert parallel_id not in parallel_id2consistent_local_blob
-        parallel_id2consistent_local_blob[parallel_id] = local_blob
-        if len(parallel_id2consistent_local_blob) != len_sub_remote_blobs:
-            return
-        local_blob_list = [
-            parallel_id2consistent_local_blob[i]
-            for i in range(len_sub_remote_blobs)
-        ]
-        local_numpy = local_blob_list[0].numpy()
-        if len(local_blob_list) > 1:
-            print("WARNING: watch return tensor list will concat as axis = 0.")
-            local_numpy_list = [x.numpy() for x in local_blob_list]
-            local_numpy = np.concatenate(local_numpy_list, axis=0)
-        local_blob = local_blob_util.LocalBlob(local_numpy, blob_watched.is_dynamic)
-        handler(oft_util.TransformWatchedBlob(local_blob, handler))
-
-    return HandlerParallelIdAndLocalBlob
-
-
-def GetTypeAnnotation(blob_watched):
-    # TODO(chengcheng): oft.Numpy support dynamic
-    if not blob_watched.is_dynamic:
-        return oft.Numpy
-    else:
-        return oft.ListNumpy
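-
-
-# Hedged end-to-end sketch (not part of the original module): how the lazy
-# watch-diff path above is typically reached from user code. Shapes, names and
-# the optimizer setup are illustrative only.
-#
-#     import numpy as np
-#     import oneflow as flow
-#     import oneflow.typing as oft
-#
-#     def grad_cb(diff: oft.Numpy):
-#         # invoked with the gradient of `logits` as a numpy.ndarray
-#         print("mean |grad| =", np.abs(diff).mean())
-#
-#     @flow.global_function(type="train")
-#     def train_job(images: oft.Numpy.Placeholder((4, 784))) -> oft.Numpy:
-#         logits = flow.layers.dense(images, 10, name="fc")
-#         loss = flow.math.reduce_mean(logits)
-#         flow.watch_diff(logits, grad_cb)  # routed to LazyWatchDiff in lazy global mode
-#         flow.optimizer.SGD(
-#             flow.optimizer.PiecewiseConstantScheduler([], [0.01]), momentum=0
-#         ).minimize(loss)
-#         return loss
-#
-#     train_job(np.random.rand(4, 784).astype(np.float32))  # triggers grad_cb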
diff --git a/oneflow/python/summary/__init__.py b/oneflow/python/summary/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/oneflow/python/summary/summary_graph.py b/oneflow/python/summary/summary_graph.py
deleted file mode 100644
index e3b5ff318a6a44bcb7b782bfa0f2e0eba06c8cee..0000000000000000000000000000000000000000
--- a/oneflow/python/summary/summary_graph.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-import os
-import oneflow.core.summary.projector_pb2 as projector_pb2
-from oneflow.python.oneflow_export import oneflow_export
-import oneflow._oneflow_internal
-import time
-import logging
-
-import oneflow as flow
-
-
-@oneflow_export("summary.Graph")
-class Graph(object):
-    r"""The class of Graph
-
-    This class can write 'computing_graph' or 'structure_graph' into log file
-    """
-
-    def __init__(self, logdir=None):
-        r"""Create a Graph object
-
-        Args:
-            logdir: The log dir
-
-        Raises:
-            Exception: If log dir is None or illegal
-        """
-        if logdir is None:
-            raise Exception("logdir should not be None!")
-        logdir += "/graph"
-        if not os.path.exists(logdir):
-            os.makedirs(logdir)
-        self.logdir_ = logdir
-        self.structure_graph_filename_ = None
-        self.compute_graph_filename_ = None
-
-    def write_structure_graph(self):
-        if (self.structure_graph_filename_ is not None) and (
-            os.path.exists(self.structure_graph_filename_)
-        ):
-            raise OSError("You must create only one structure graph log file!")
-
-        self.structure_graph_filename_ = self.logdir_ + "/structure_graph.json"
-        struct_graph_str = oneflow._oneflow_internal.GetSerializedStructureGraph()
-        with open(self.structure_graph_filename_, "w", encoding="utf-8") as f:
-            f.write(str(struct_graph_str))
-            f.flush()
-
-    @property
-    def logdir(self):
-        return self.logdir_
-
-    @property
-    def structure_graph_filename(self):
-        return self.structure_graph_filename_
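-
-
-# Hedged usage sketch (not part of the original file): writing the structure
-# graph once a session is active. "./log" is a made-up directory.
-#
-#     graph_writer = flow.summary.Graph("./log")    # files land in ./log/graph
-#     graph_writer.write_structure_graph()          # writes structure_graph.json exactly once
-#     print(graph_writer.structure_graph_filename)  # ./log/graph/structure_graph.json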
diff --git a/oneflow/python/summary/summary_hparams.py b/oneflow/python/summary/summary_hparams.py
deleted file mode 100644
index 35e5c254fe71fa70a52fced884a05bc2594b171f..0000000000000000000000000000000000000000
--- a/oneflow/python/summary/summary_hparams.py
+++ /dev/null
@@ -1,356 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import six
-import hashlib
-import json
-import time
-
-import oneflow.core.summary.plugin_data_pb2 as plugin_data_pb2
-import oneflow.core.summary.summary_pb2 as summary_pb2
-import oneflow.core.summary.event_pb2 as event_pb2
-import oneflow.core.summary.tensor_pb2 as tensor_pb2
-import oneflow.core.summary.projector_pb2 as projector_pb2
-from oneflow.python.oneflow_export import oneflow_export
-
-
-import oneflow as flow
-
-
-@oneflow_export("summary.text")
-def text(text, tag=None):
-    r"""Add a text list to Summary
-
-    Args:
-        text: A str list
-        tag: The tag of summary
-
-    Returns:
-        A protobuf message [Summary]
-    """
-    if isinstance(text, (tuple, list)) and len(text) > 0:
-        if not isinstance(tag, str):
-            tag = "text"
-        text_size = len(text)
-        tensor_shape = tensor_pb2.TensorShapeProto()
-        dim = tensor_shape.dim.add()
-        dim.size = text_size
-
-        tensor = tensor_pb2.TensorProto(
-            dtype=tensor_pb2.DT_STRING, tensor_shape=tensor_shape,
-        )
-        for idx in range(text_size):
-            tensor.string_val.append(text[idx].encode("utf-8"))
-        summary = summary_pb2.Summary()
-        value = summary.value.add(
-            tag=tag,
-            metadata=summary_pb2.SummaryMetadata(
-                plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name="text")
-            ),
-            tensor=tensor,
-        )
-        return summary
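-
-
-# Hedged usage sketch (not part of the original file): building a text Summary
-# message; writing it to an event file is handled elsewhere in the summary package.
-#
-#     summary_msg = flow.summary.text(["first line", "second line"], tag="notes")
-#     assert summary_msg.value[0].metadata.plugin_data.plugin_name == "text"
-#     assert summary_msg.value[0].tensor.string_val[0] == b"first line"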
-
-
-def _get_tensor(values, dtype=None, shape=None):
-    array = np.empty(shape, dtype=np.float64)
-    tensor_shape = tensor_pb2.TensorShapeProto()
-    dim = tensor_shape.dim.add()
-    dim.size = 0
-
-    tensor_proto = tensor_pb2.TensorProto(
-        dtype=tensor_pb2.DT_FLOAT, tensor_shape=tensor_shape,
-    )
-    proto_values = array.ravel()
-    tensor_proto.float_val.extend([x.item() for x in proto_values])
-    return tensor_proto
-
-
-@oneflow_export("summary.hparams")
-def hparams(hparams):
-    r"""Add hparams to Summary
-
-    Args:
-        hparams: A dict of Hparams
-
-    Raises:
-        TypeError: If the type of an hparam is not in (str, int, float, bool)
-        TypeError: If the type of a metric is not in (float, int)
-
-    Returns:
-        A protobuf message [Summary]
-    """
-    hparams, metrics = _get_hparams_dict(hparams)
-    jparams = json.dumps(hparams, sort_keys=True, separators=(",", ":"))
-    group_name = hashlib.sha256(jparams.encode("utf-8")).hexdigest()
-
-    session_start_info = plugin_data_pb2.SessionStartInfo(
-        group_name=group_name, start_time_secs=time.time(),
-    )
-    for key in sorted(hparams):
-        value = hparams[key]
-        if isinstance(value, str):
-            session_start_info.hparams[key].string_value = value
-        elif isinstance(value, bool):
-            # bool must be checked before (float, int): bool is a subclass of int
-            session_start_info.hparams[key].bool_value = value
-        elif isinstance(value, (float, int)):
-            session_start_info.hparams[key].number_value = value
-        else:
-            raise TypeError("the type of value: %r is not supported!" % value)
-    for key in metrics:
-        value = metrics[key]
-        if isinstance(value, (float, int)):
-            session_start_info.metrics[key].number_value = value
-        else:
-            raise TypeError("the type of value: %r is not supported!" % value)
-
-    summary = summary_pb2.Summary()
-    summary_metadata = _get_metadata(
-        plugin_data_pb2.HParamsPluginData(session_start_info=session_start_info)
-    )
-    summary.value.add(
-        tag="_hparams_/session_start_info",
-        metadata=summary_metadata,
-        tensor=_get_tensor([], tensor_pb2.DT_FLOAT, (0,)),
-    )
-    return summary
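-
-
-# Hedged usage sketch (not part of the original file): plain string keys are
-# recorded as hparams, Metric keys as metrics, and HParam keys are unwrapped to
-# their names before serialization. The values below are made up.
-#
-#     hp = {
-#         flow.summary.Hparam("lr", flow.summary.RealRange(1e-5, 1.0)): 0.001,
-#         "batch_size": 64,
-#         "optimizer": "sgd",
-#         flow.summary.Metric("accuracy"): 0.0,
-#     }
-#     summary_msg = flow.summary.hparams(hp)
-#     assert summary_msg.value[0].tag == "_hparams_/session_start_info"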
-
-
-def _get_metadata(hparams_plugin_data):
-    plugin_data = plugin_data_pb2.HParamsPluginData()
-    plugin_data.CopyFrom(hparams_plugin_data)
-    plugin_data.version = 0
-    return summary_pb2.SummaryMetadata(
-        plugin_data=summary_pb2.SummaryMetadata.PluginData(
-            plugin_name="hparams", content=plugin_data.SerializeToString()
-        )
-    )
-
-
-def _get_hparams_dict(hparams):
-    hparams_dict = {}
-    metrics_dict = {}
-    for (key, value) in hparams.items():
-        if key in hparams_dict or key in metrics_dict:
-            raise ValueError("the key already exists: %r" % (key,))
-        if isinstance(key, HParam):
-            key = key.name
-        if isinstance(key, Metric):
-            metrics_dict[key.name] = _get_value(value)
-            continue
-        hparams_dict[key] = _get_value(value)
-    return hparams_dict, metrics_dict
-
-
-def _get_value(value):
-    if isinstance(value, np.generic):
-        return value.item()
-    else:
-        return value
-
-
-@oneflow_export("summary.Hparam")
-class HParam(object):
-    r"""The class of Hparam
-
-    This class describes the name and the type of Hparam
-    """
-
-    def __init__(self, name, dtype=None):
-        r"""Create a Hparam object
-
-        Args:
-            name: Hparam name
-            dtype: Hparam type
-
-        Raises:
-            ValueError: If Hparam type is not in (IntegerRange, RealRange, ValueSet)
-        """
-        self.name_ = name
-        self.dtype_ = dtype
-        if not isinstance(self.dtype_, (IntegerRange, RealRange, ValueSet, type(None))):
-            raise ValueError(
-                "Hparam dtype must be: (IntegerRange, RealRange, ValueSet) : %r"
-                % (self.dtype_,)
-            )
-
-    @property
-    def name(self):
-        return self.name_
-
-    @property
-    def dtype(self):
-        return self.dtype_
-
-
-@oneflow_export("summary.IntegerRange")
-class IntegerRange(object):
-    r"""The class of IntegerRange
-
-    This class takes an integer range between min_value and max_value
-    """
-
-    def __init__(self, min_value, max_value):
-        r"""Create an 'IntegerRange' object
-
-        Args:
-            min_value: The min value of the range
-            max_value: The max value of the range
-
-        Raises:
-            TypeError: If 'min_value' or 'max_value' is not an int
-            ValueError: If 'min_value' > 'max_value'
-        """
-        if not isinstance(max_value, int):
-            raise TypeError("max_value is not an integer value: %r" % (max_value,))
-        if not isinstance(min_value, int):
-            raise TypeError("min_value is not an integer value: %r" % (min_value,))
-        if min_value > max_value:
-            raise ValueError(
-                "min_value must not be bigger than max_value: %r > %r" % (min_value, max_value)
-            )
-        self.min_value_ = min_value
-        self.max_value_ = max_value
-
-    @property
-    def min_value(self):
-        return self.min_value_
-
-    @property
-    def max_value(self):
-        return self.max_value_
-
-
-@oneflow_export("summary.RealRange")
-class RealRange(object):
-    r"""The class of RealRange
-
-    This class takes a real-number range between min_value and max_value
-    """
-
-    def __init__(self, min_value, max_value):
-        r"""Create a 'RealRange' object
-
-        Args:
-            min_value: The min value of the range
-            max_value: The max value of the range
-
-        Raises:
-            TypeError: If 'min_value' or 'max_value' is not a float
-            ValueError: If 'min_value' > 'max_value'
-        """
-        if not isinstance(max_value, float):
-            raise TypeError("max_value is not a float value: %r" % (max_value,))
-        if not isinstance(min_value, float):
-            raise TypeError("min_value is not a float value: %r" % (min_value,))
-        if min_value > max_value:
-            raise ValueError(
-                "min_value must not be bigger than max_value: %r > %r" % (min_value, max_value)
-            )
-        self.min_value_ = min_value
-        self.max_value_ = max_value
-
-    @property
-    def min_value(self):
-        return self.min_value_
-
-    @property
-    def max_value(self):
-        return self.max_value_
-
-
-@oneflow_export("summary.ValueSet")
-class ValueSet(object):
-    r"""The class of ValueSet
-
-    This class takes a list of values
-    """
-
-    def __init__(self, values, dtype=None):
-        r"""Create a ValueSet object
-
-        Args:
-            values: a list of values
-            dtype: the value type
-
-        Raises:
-            ValueError: If the value type is not in (int, float, bool, str)
-            TypeError: If the values in the list are not all of the same type
-        """
-        self.values_ = list(values)
-        if dtype is None:
-            if self.values_:
-                dtype = type(self.values_[0])
-        if dtype not in (int, float, bool, str):
-            raise ValueError(
-                "Value type must in (int, float, bool, str), %r is not supported!"
-                % (dtype,)
-            )
-        self.dtype_ = dtype
-        for value in self.values_:
-            if not isinstance(value, self.dtype_):
-                raise TypeError(
-                    "The type of value is not supported! value: %r type: %s"
-                    % (value, self.dtype_.__name__)
-                )
-        self.values_.sort()
-
-    @property
-    def dtype(self):
-        return self.dtype_
-
-    @property
-    def values(self):
-        return list(self.values_)
-
-
-@oneflow_export("summary.Metric")
-class Metric(object):
-    r"""The class of Metric
-
-    This class takes an 'int' or 'float' value
-    """
-
-    def __init__(self, name, dtype=None):
-        r"""Create a Metric object
-
-        Args:
-            name: Metric name
-            dtype: Value type
-
-        Raises:
-            ValueError: If type is not 'int' or 'float'
-        """
-        self.name_ = name
-        if dtype is None:
-            dtype = float
-        if dtype not in (int, float):
-            raise ValueError(
-                "Value type must in (int, float), %r is not supported!" % (dtype,)
-            )
-        self.dtype_ = dtype
-
-    @property
-    def name(self):
-        return self.name_
-
-    @property
-    def dtype(self):
-        return self.dtype_
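-
-
-# Hedged usage sketch (not part of the original file): the domain classes above
-# only validate their arguments and travel with an HParam as metadata.
-#
-#     depth = flow.summary.Hparam("depth", flow.summary.IntegerRange(1, 50))
-#     act = flow.summary.Hparam("activation", flow.summary.ValueSet(["relu", "gelu"]))
-#     assert depth.dtype.max_value == 50
-#     assert act.dtype.values == ["gelu", "relu"]   # ValueSet sorts its values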
diff --git a/oneflow/python/summary/summary_projector.py b/oneflow/python/summary/summary_projector.py
deleted file mode 100644
index 4b8963d616faf674ffeebb6943738efd8a4640f5..0000000000000000000000000000000000000000
--- a/oneflow/python/summary/summary_projector.py
+++ /dev/null
@@ -1,160 +0,0 @@
-"""
-Copyright 2020 The OneFlow Authors. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-import os
-import oneflow.core.summary.projector_pb2 as projector_pb2
-from oneflow.python.oneflow_export import oneflow_export
-import time
-
-import oneflow as flow
-
-
-@oneflow_export("summary.Projector")
-class Projector(object):
-    r"""The class of Projector
-
-    This class can create an 'embedding_projector' or 'exception_projector'
-    """
-
-    def __init__(self, logdir=None):
-        r"""Create a Projector objector
-
-        Args:
-            logdir: The log dir
-
-        Raises:
-            Exception: If 'logdir' is None or illegal
-        """
-        if logdir is None:
-            raise Exception("logdir should not be None!")
-        logdir += "/projector"
-        if not os.path.exists(logdir):
-            os.makedirs(logdir)
-        self.logdir_ = logdir
-        self.embedding_filename_ = None
-        self.exception_filename_ = None
-
-    def create_embedding_projector(self):
-        if (self.embedding_filename_ is not None) and (
-            os.path.exists(self.embedding_filename_)
-        ):
-            raise OSError("You must create only one embedding projector!")
-        self.embedding_filename_ = (
-            self.logdir_ + "/projector." + str(int(time.time())) + ".log"
-        )
-
-    def create_exception_projector(self):
-        if (self.exception_filename_ is not None) and (
-            os.path.exists(self.exception_filename_)
-        ):
-            raise OSError("You must create only one embedding projector!")
-        self.exception_filename_ = (
-            self.logdir_ + "/projector.gradit." + str(int(time.time())) + ".log"
-        )
-
-    @property
-    def logdir(self):
-        return self.logdir_
-
-    @property
-    def exception_filename(self):
-        return self.exception_filename_
-
-    @property
-    def embedding_filename(self):
-        return self.embedding_filename_
-
-    def write_projector(self, filename=None, projector=None):
-        with open(filename, "wb") as f:
-            f.write(projector.SerializeToString())
-            f.flush()
-
-    def set_tensor(self, tensor: projector_pb2.Tensor, value):
-        for d in value.shape:
-            td = tensor.shape.dim.add()
-            td.size = d
-        tensor.dtype = str(value.dtype)
-        tensor.content = value.tobytes()
-
-    def set_projector(self, pro, tag, step, value, label=None):
-        pro.tag = str(tag)
-        pro.step = step
-        pro.WALL_TIME = time.time()
-        self.set_tensor(pro.value, value)
-        if label is not None:
-            self.set_tensor(pro.label, label)
-
-    def set_sample(self, sample, name, x, sample_type):
-        if name is not None:
-            sample.name = name
-        if sample_type == "image" or sample_type == "IMAGE":
-            sample.type = projector_pb2.Sample.SampleType.IMAGE
-        elif sample_type == "audio" or sample_type == "AUDIO":
-            sample.type = projector_pb2.Sample.SampleType.AUDIO
-        elif sample_type == "text" or sample_type == "TEXT":
-            sample.type = projector_pb2.Sample.SampleType.TEXT
-        else:
-            raise NotImplementedError
-        if x is not None:
-            self.set_tensor(sample.X, x)
-
-    def embedding_projector(
-        self,
-        value=None,
-        label=None,
-        tag=None,
-        step=None,
-        sample_name=None,
-        sample_type=None,
-        x=None,
-    ):
-        if tag is None:
-            tag = "embedding_projector"
-        summary_projector = projector_pb2.SummaryProjector()
-        summary_projector.metadata.type = projector_pb2.MetaData.ProjectorType.EMBEDDING
-        projector = summary_projector.projector.add()
-        self.set_projector(pro=projector, tag=tag, step=step, value=value, label=label)
-        if (sample_name is not None) and (sample_type is not None):
-            self.set_sample(
-                sample=summary_projector.sample,
-                name=sample_name,
-                x=x,
-                sample_type=sample_type,
-            )
-        self.write_projector(self.embedding_filename_, summary_projector)
-
-    def exception_projector(
-        self,
-        value=None,
-        tag=None,
-        step=None,
-        sample_name=None,
-        sample_type=None,
-        x=None,
-    ):
-        if tag is None:
-            tag = "exception_projector"
-        summary_projector = projector_pb2.SummaryProjector()
-        summary_projector.metadata.type = projector_pb2.MetaData.ProjectorType.EXCEPTION
-        projector = summary_projector.projector.add()
-        self.set_projector(pro=projector, tag=tag, step=step, value=value)
-        if (sample_name is not None) and (sample_type is not None):
-            self.set_sample(
-                sample=summary_projector.sample,
-                name=sample_name,
-                x=x,
-                sample_type=sample_type,
-            )
-        self.write_projector(self.exception_filename_, summary_projector)
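-
-
-# Hedged usage sketch (not part of the original file): logging an embedding with
-# the Projector. The arrays and the "./log" directory are made up;
-# create_embedding_projector() must be called once before embedding_projector()
-# so the target filename exists.
-#
-#     import numpy as np
-#
-#     proj = flow.summary.Projector("./log")   # files land in ./log/projector
-#     proj.create_embedding_projector()
-#     embeddings = np.random.rand(100, 16).astype(np.float32)
-#     labels = np.arange(100, dtype=np.int32)
-#     proj.embedding_projector(value=embeddings, label=labels, tag="word_vec", step=0)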