diff --git a/docs/source/advanced.rst b/docs/source/advanced.rst deleted file mode 100644 index ceac82031a96553d763e80ebd3276b910c7dc32b..0000000000000000000000000000000000000000 --- a/docs/source/advanced.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.advanced -=================================== -Advanced features ----------------------------------- -.. currentmodule:: oneflow.advanced -.. automodule:: oneflow.advanced - :members: - :imported-members: diff --git a/docs/source/config.rst b/docs/source/config.rst deleted file mode 100644 index 02311463f22d3ffc9685ef64ac1c901efbc72500..0000000000000000000000000000000000000000 --- a/docs/source/config.rst +++ /dev/null @@ -1,6 +0,0 @@ -oneflow.config -=================================== -.. currentmodule:: oneflow.config -.. automodule:: oneflow.config - :members: - :imported-members: diff --git a/docs/source/data.rst b/docs/source/data.rst deleted file mode 100644 index dc032532d54782eab3cbafbc12d4d03f1d1a517d..0000000000000000000000000000000000000000 --- a/docs/source/data.rst +++ /dev/null @@ -1,7 +0,0 @@ -oneflow.data -=================================== - -.. currentmodule:: oneflow.data -.. automodule:: oneflow.data - :members: - :imported-members: diff --git a/docs/source/deprecated.rst b/docs/source/deprecated.rst deleted file mode 100644 index 36867d9d3fbe6ad94a44558cffed516b3ee48ee6..0000000000000000000000000000000000000000 --- a/docs/source/deprecated.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.deprecated -=================================== -Deprecated APIs ----------------------------------- -.. currentmodule:: oneflow.deprecated -.. automodule:: oneflow.deprecated - :members: - :imported-members: diff --git a/docs/source/distribute.rst b/docs/source/distribute.rst deleted file mode 100644 index 4e1c3fc827f9384ad3b24a85ba6169900066dbbd..0000000000000000000000000000000000000000 --- a/docs/source/distribute.rst +++ /dev/null @@ -1,7 +0,0 @@ -oneflow.distribute -=================================== - -.. currentmodule:: oneflow.distribute -.. automodule:: oneflow.distribute - :members: - :imported-members: diff --git a/docs/source/env.rst b/docs/source/env.rst deleted file mode 100644 index 5c7d4791d38fb2d684d2640b471dd3582e3e7dc7..0000000000000000000000000000000000000000 --- a/docs/source/env.rst +++ /dev/null @@ -1,6 +0,0 @@ -oneflow.env -=================================== -.. currentmodule:: oneflow.env -.. automodule:: oneflow.env - :members: - :imported-members: diff --git a/docs/source/experimental.rst b/docs/source/experimental.rst deleted file mode 100644 index 4bd0cd5bc5b8f55fd091b57f71c51658a5fdbb2e..0000000000000000000000000000000000000000 --- a/docs/source/experimental.rst +++ /dev/null @@ -1,262 +0,0 @@ -oneflow.experimental -=================================== -Experimental features ----------------------------------- -.. currentmodule:: oneflow.experimental -.. autofunction:: oneflow.experimental.nn.ReLU -.. autofunction:: oneflow.experimental.nn.ReLU6 -.. autofunction:: oneflow.experimental.nn.LeakyReLU -.. autofunction:: oneflow.experimental.nn.Tanh -.. autofunction:: oneflow.experimental.tanh -.. autofunction:: oneflow.experimental.Tensor.tanh -.. autofunction:: oneflow.experimental.asin -.. autofunction:: oneflow.experimental.Tensor.asin -.. autofunction:: oneflow.experimental.arcsin -.. autofunction:: oneflow.experimental.Tensor.arcsin -.. autofunction:: oneflow.experimental.asinh -.. autofunction:: oneflow.experimental.Tensor.asinh -.. autofunction:: oneflow.experimental.sinh -.. 
autofunction:: oneflow.experimental.Tensor.sinh -.. autofunction:: oneflow.experimental.atan2 -.. autofunction:: oneflow.experimental.Tensor.atan2 -.. autofunction:: oneflow.experimental.softplus -.. autofunction:: oneflow.experimental.arcsinh -.. autofunction:: oneflow.experimental.Tensor.arcsinh -.. autofunction:: oneflow.experimental.nn.ELU -.. autofunction:: oneflow.experimental.nn.GELU -.. autofunction:: oneflow.experimental.gelu -.. autofunction:: oneflow.experimental.Tensor.gelu -.. autofunction:: oneflow.experimental.nn.Sigmoid -.. autofunction:: oneflow.experimental.sigmoid -.. autofunction:: oneflow.experimental.Tensor.sigmoid -.. autofunction:: oneflow.experimental.nn.Hardsigmoid -.. autofunction:: oneflow.experimental.softmax -.. autofunction:: oneflow.experimental.Tensor.softmax -.. autofunction:: oneflow.experimental.nn.LogSigmoid -.. autofunction:: oneflow.experimental.nn.Softplus -.. autofunction:: oneflow.experimental.nn.LogSoftmax -.. autofunction:: oneflow.experimental.nn.Mish -.. autofunction:: oneflow.experimental.mish -.. autofunction:: oneflow.experimental.Tensor.mish -.. autofunction:: oneflow.experimental.acosh -.. autofunction:: oneflow.experimental.Tensor.acosh -.. autofunction:: oneflow.experimental.arccosh -.. autofunction:: oneflow.experimental.Tensor.arccosh -.. autofunction:: oneflow.experimental.arange -.. autofunction:: oneflow.experimental.argwhere -.. autofunction:: oneflow.experimental.Tensor.argwhere -.. autofunction:: oneflow.experimental.argmax -.. autofunction:: oneflow.experimental.Tensor.argmax -.. autofunction:: oneflow.experimental.nn.BatchNorm1d -.. autofunction:: oneflow.experimental.nn.BatchNorm2d -.. autofunction:: oneflow.experimental.nn.InstanceNorm1d -.. autofunction:: oneflow.experimental.nn.InstanceNorm2d -.. autofunction:: oneflow.experimental.nn.InstanceNorm3d -.. autofunction:: oneflow.experimental.nn.LayerNorm -.. autofunction:: oneflow.experimental.cast -.. autofunction:: oneflow.experimental.Tensor.cast -.. autofunction:: oneflow.experimental.cat -.. autofunction:: oneflow.experimental.ones -.. autofunction:: oneflow.experimental.zeros -.. autofunction:: oneflow.experimental.zeros_like -.. autofunction:: oneflow.experimental.ones_like -.. autofunction:: oneflow.experimental.Tensor.new_ones -.. autofunction:: oneflow.experimental.nn.Module -.. autofunction:: oneflow.experimental.nn.Parameter -.. autofunction:: oneflow.experimental.nn.Sequential -.. autofunction:: oneflow.experimental.nn.ParameterList -.. autofunction:: oneflow.experimental.nn.ParameterDict -.. autofunction:: oneflow.experimental.nn.ModuleList -.. autofunction:: oneflow.experimental.nn.ModuleDict -.. autofunction:: oneflow.experimental.nn.Conv1d -.. autofunction:: oneflow.experimental.nn.Conv2d -.. autofunction:: oneflow.experimental.nn.ZeroPad2d -.. autofunction:: oneflow.experimental.nn.ReflectionPad2d -.. autofunction:: oneflow.experimental.nn.ReplicationPad2d -.. autofunction:: oneflow.experimental.nn.ConstantPad2d -.. autofunction:: oneflow.experimental.nn.ConstantPad3d -.. autofunction:: oneflow.experimental.nn.ConvTranspose2d -.. autofunction:: oneflow.experimental.nn.Dropout -.. autofunction:: oneflow.experimental.slice -.. autofunction:: oneflow.experimental.eq -.. autofunction:: oneflow.experimental.ne -.. autofunction:: oneflow.experimental.to -.. autofunction:: oneflow.experimental.Tensor.to -.. autofunction:: oneflow.experimental.equal -.. autofunction:: oneflow.experimental.Tensor.eq -.. autofunction:: oneflow.experimental.not_equal -.. 
autofunction:: oneflow.experimental.Tensor.ne -.. autofunction:: oneflow.experimental.exp -.. autofunction:: oneflow.experimental.Tensor.exp -.. autofunction:: oneflow.experimental.erf -.. autofunction:: oneflow.experimental.Tensor.erf -.. autofunction:: oneflow.experimental.erfc -.. autofunction:: oneflow.experimental.Tensor.erfc -.. autofunction:: oneflow.experimental.round -.. autofunction:: oneflow.experimental.Tensor.round -.. autofunction:: oneflow.experimental.Tensor.expand -.. autofunction:: oneflow.experimental.nn.Flatten -.. autofunction:: oneflow.experimental.flatten -.. autofunction:: oneflow.experimental.Tensor.flatten -.. autofunction:: oneflow.experimental.gt -.. autofunction:: oneflow.experimental.Tensor.gt -.. autofunction:: oneflow.experimental.ge -.. autofunction:: oneflow.experimental.Tensor.ge -.. autofunction:: oneflow.experimental.lt -.. autofunction:: oneflow.experimental.Tensor.lt -.. autofunction:: oneflow.experimental.le -.. autofunction:: oneflow.experimental.Tensor.le -.. autofunction:: oneflow.experimental.nn.Identity -.. autofunction:: oneflow.experimental.nn.PixelShuffle -.. autofunction:: oneflow.experimental.nn.Linear -.. autofunction:: oneflow.experimental.nn.CrossEntropyLoss -.. autofunction:: oneflow.experimental.nn.CTCLoss -.. autofunction:: oneflow.experimental.nn.L1Loss -.. autofunction:: oneflow.experimental.nn.BCELoss -.. autofunction:: oneflow.experimental.chunk -.. autofunction:: oneflow.experimental.Tensor.chunk -.. autofunction:: oneflow.experimental.nn.NLLLoss -.. autofunction:: oneflow.experimental.nn.KLDivLoss -.. autofunction:: oneflow.experimental.nn.MSELoss -.. autofunction:: oneflow.experimental.nn.MarginRankingLoss -.. autofunction:: oneflow.experimental.nn.BCEWithLogitsLoss -.. autofunction:: oneflow.experimental.nn.SmoothL1Loss -.. autofunction:: oneflow.experimental.masked_fill -.. autofunction:: oneflow.experimental.Tensor.masked_fill -.. autofunction:: oneflow.experimental.masked_select -.. autofunction:: oneflow.experimental.Tensor.masked_select -.. autofunction:: oneflow.experimental.sum -.. autofunction:: oneflow.experimental.Tensor.sum -.. autofunction:: oneflow.experimental.min -.. autofunction:: oneflow.experimental.Tensor.min -.. autofunction:: oneflow.experimental.max -.. autofunction:: oneflow.experimental.Tensor.max -.. autofunction:: oneflow.experimental.mul -.. autofunction:: oneflow.experimental.Tensor.mul -.. autofunction:: oneflow.experimental.mean -.. autofunction:: oneflow.experimental.Tensor.mean -.. autofunction:: oneflow.experimental.sub -.. autofunction:: oneflow.experimental.var -.. autofunction:: oneflow.experimental.Tensor.var -.. autofunction:: oneflow.experimental.Tensor.sub -.. autofunction:: oneflow.experimental.div -.. autofunction:: oneflow.experimental.Tensor.div -.. autofunction:: oneflow.experimental.reciprocal -.. autofunction:: oneflow.experimental.Tensor.reciprocal -.. autofunction:: oneflow.experimental.add -.. autofunction:: oneflow.experimental.Tensor.add -.. autofunction:: oneflow.experimental.Tensor.add_ -.. autofunction:: oneflow.experimental.sign -.. autofunction:: oneflow.experimental.Tensor.sign -.. autofunction:: oneflow.experimental.sin -.. autofunction:: oneflow.experimental.Tensor.sin -.. autofunction:: oneflow.experimental.atan -.. autofunction:: oneflow.experimental.Tensor.atan -.. autofunction:: oneflow.experimental.arctan -.. autofunction:: oneflow.experimental.Tensor.arctan -.. autofunction:: oneflow.experimental.cos -.. autofunction:: oneflow.experimental.Tensor.cos -.. 
autofunction:: oneflow.experimental.log -.. autofunction:: oneflow.experimental.Tensor.log -.. autofunction:: oneflow.experimental.sqrt -.. autofunction:: oneflow.experimental.Tensor.sqrt -.. autofunction:: oneflow.experimental.square -.. autofunction:: oneflow.experimental.Tensor.square -.. autofunction:: oneflow.experimental.std -.. autofunction:: oneflow.experimental.Tensor.std -.. autofunction:: oneflow.experimental.pow -.. autofunction:: oneflow.experimental.Tensor.pow -.. autofunction:: oneflow.experimental.cosh -.. autofunction:: oneflow.experimental.Tensor.cosh -.. autofunction:: oneflow.experimental.acos -.. autofunction:: oneflow.experimental.Tensor.acos -.. autofunction:: oneflow.experimental.matmul -.. autofunction:: oneflow.experimental.Tensor.matmul -.. autofunction:: oneflow.experimental.bmm -.. autofunction:: oneflow.experimental.Tensor.bmm -.. autofunction:: oneflow.experimental.negative -.. autofunction:: oneflow.experimental.neg -.. autofunction:: oneflow.experimental.Tensor.negative -.. autofunction:: oneflow.experimental.nn.AvgPool1d -.. autofunction:: oneflow.experimental.nn.AvgPool2d -.. autofunction:: oneflow.experimental.nn.AvgPool3d -.. autofunction:: oneflow.experimental.nn.AdaptiveAvgPool1d -.. autofunction:: oneflow.experimental.nn.AdaptiveAvgPool2d -.. autofunction:: oneflow.experimental.nn.AdaptiveAvgPool3d -.. autofunction:: oneflow.experimental.adaptive_avg_pool1d -.. autofunction:: oneflow.experimental.adaptive_avg_pool2d -.. autofunction:: oneflow.experimental.adaptive_avg_pool3d -.. autofunction:: oneflow.experimental.nn.MaxPool1d -.. autofunction:: oneflow.experimental.nn.MaxPool2d -.. autofunction:: oneflow.experimental.nn.MaxPool3d -.. autofunction:: oneflow.experimental.repeat -.. autofunction:: oneflow.experimental.Tensor.repeat -.. autofunction:: oneflow.experimental.tile -.. autofunction:: oneflow.experimental.Tensor.tile -.. autofunction:: oneflow.experimental.fmod -.. autofunction:: oneflow.experimental.Tensor.fmod -.. autofunction:: oneflow.experimental.view -.. autofunction:: oneflow.experimental.Tensor.view -.. autofunction:: oneflow.experimental.reshape -.. autofunction:: oneflow.experimental.Tensor.reshape -.. autofunction:: oneflow.experimental.squeeze -.. autofunction:: oneflow.experimental.Tensor.squeeze -.. autofunction:: oneflow.experimental.flip -.. autofunction:: oneflow.experimental.Tensor.flip -.. autofunction:: oneflow.experimental.transpose -.. autofunction:: oneflow.experimental.Tensor.transpose -.. autofunction:: oneflow.experimental.unsqueeze -.. autofunction:: oneflow.experimental.Tensor.unsqueeze -.. autofunction:: oneflow.experimental.where -.. autofunction:: oneflow.experimental.Tensor.where -.. autofunction:: oneflow.experimental.gather -.. autofunction:: oneflow.experimental.Tensor.gather -.. autofunction:: oneflow.experimental.nn.Embedding -.. autofunction:: oneflow.experimental.Tensor.permute -.. autofunction:: oneflow.experimental.nn.Hardswish -.. autofunction:: oneflow.experimental.nn.PReLU -.. autofunction:: oneflow.experimental.nn.Hardtanh -.. autofunction:: oneflow.experimental.nn.functional.interpolate -.. autofunction:: oneflow.experimental.nn.Upsample -.. autofunction:: oneflow.experimental.nn.UpsamplingNearest2d -.. autofunction:: oneflow.experimental.nn.UpsamplingBilinear2d -.. autofunction:: oneflow.experimental.linalg.norm -.. autofunction:: oneflow.experimental.linalg.vector_norm -.. autofunction:: oneflow.experimental.linalg.matrix_norm -.. autofunction:: oneflow.experimental.Tensor.norm -.. 
autofunction:: oneflow.experimental.floor -.. autofunction:: oneflow.experimental.Tensor.floor -.. autofunction:: oneflow.experimental.addmm -.. autofunction:: oneflow.experimental.Tensor.addmm -.. autofunction:: oneflow.experimental.clamp -.. autofunction:: oneflow.experimental.Tensor.clamp -.. autofunction:: oneflow.experimental.clip -.. autofunction:: oneflow.experimental.Tensor.clip -.. autofunction:: oneflow.experimental.atanh -.. autofunction:: oneflow.experimental.Tensor.atanh -.. autofunction:: oneflow.experimental.arctanh -.. autofunction:: oneflow.experimental.Tensor.arctanh -.. autofunction:: oneflow.experimental.tan -.. autofunction:: oneflow.experimental.Tensor.tan -.. autofunction:: oneflow.experimental.log1p -.. autofunction:: oneflow.experimental.Tensor.log1p -.. autofunction:: oneflow.experimental.ceil -.. autofunction:: oneflow.experimental.Tensor.ceil -.. autofunction:: oneflow.experimental.expm1 -.. autofunction:: oneflow.experimental.Tensor.expm1 -.. autofunction:: oneflow.experimental.meshgrid -.. autofunction:: oneflow.experimental.topk -.. autofunction:: oneflow.experimental.Tensor.topk -.. autofunction:: oneflow.experimental.diag -.. autofunction:: oneflow.experimental.Tensor.diag -.. autofunction:: oneflow.experimental.nn.GroupNorm -.. autofunction:: oneflow.experimental.gather_nd -.. autofunction:: oneflow.experimental.scatter_nd -.. autofunction:: oneflow.experimental.nn.image.flip -.. autofunction:: oneflow.experimental.tensor_buffer_to_tensor -.. autofunction:: oneflow.experimental.tensor_to_tensor_buffer -.. autofunction:: oneflow.experimental.Tensor.type_as -.. autofunction:: oneflow.experimental.Tensor.long -.. autofunction:: oneflow.experimental.bernoulli -.. autofunction:: oneflow.experimental.in_top_k -.. autofunction:: oneflow.experimental.Tensor.in_top_k \ No newline at end of file diff --git a/docs/source/image.rst b/docs/source/image.rst index 5d721249b8daabde002ff0847af5f2af5099db04..83a82d47be8067cca3a1dd35fa2c81a98aebf994 100644 --- a/docs/source/image.rst +++ b/docs/source/image.rst @@ -1,8 +1,16 @@ -oneflow.image -=================================== -Image processing ----------------------------------- -.. currentmodule:: oneflow.image -.. automodule:: oneflow.image - :members: - :imported-members: +oneflow.nn.image +====================================== +Image operations for neural networks +-------------------------------------- +.. currentmodule:: oneflow.nn.image +.. automodule:: oneflow.nn.image + :members: ImageBatchAlign, + ImageDecode, + ImageFlip, + ImageNormalize, + ImageResize, + Resize, + batch_align, + decode, + flip, + normalize \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index f365979eac874241a575b038f4e43baa7ae4b640..2d38ce6ae041d132d62f91ab3ae531ecf94a6e24 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -12,28 +12,12 @@ OneFlow API Reference :caption: OneFlow Python API oneflow - env - config - optimizer - losses - math + tensor nn - layers - data - distribute - advanced - typing - tensorrt - deprecated - experimental + module F - scope - sysconfig - random - system - regularizers + linalg image - train optim diff --git a/docs/source/layers.rst b/docs/source/layers.rst deleted file mode 100644 index d23b760d877b9235405ee683ffa1589ee3652ea9..0000000000000000000000000000000000000000 --- a/docs/source/layers.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.layers -=================================== -Operators with variables ----------------------------------- -.. 
currentmodule:: oneflow.layers -.. automodule:: oneflow.layers - :members: - :imported-members: diff --git a/docs/source/linalg.rst b/docs/source/linalg.rst new file mode 100644 index 0000000000000000000000000000000000000000..641ea31adcc5565e10cf0d578ce44ba3ef819738 --- /dev/null +++ b/docs/source/linalg.rst @@ -0,0 +1,8 @@ +oneflow.linalg +=================================== +OneFlow linear algebra operations. +---------------------------------- +.. currentmodule:: oneflow.linalg +.. autofunction:: oneflow.linalg.matrix_norm +.. autofunction:: oneflow.linalg.norm +.. autofunction:: oneflow.linalg.vector_norm diff --git a/docs/source/losses.rst b/docs/source/losses.rst deleted file mode 100644 index 84f1a7853be5ffcd5b9b30b7964beb4076384b47..0000000000000000000000000000000000000000 --- a/docs/source/losses.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.losses -=================================== -Operators for neural networks ----------------------------------- -.. currentmodule:: oneflow.losses -.. automodule:: oneflow.losses - :members: - :imported-members: diff --git a/docs/source/math.rst b/docs/source/math.rst deleted file mode 100644 index 43819c42e8393c16bc12cacc45b3aaf10522ed48..0000000000000000000000000000000000000000 --- a/docs/source/math.rst +++ /dev/null @@ -1,6 +0,0 @@ -oneflow.math -=================================== -.. currentmodule:: oneflow.math -.. automodule:: oneflow.math - :members: - :imported-members: diff --git a/docs/source/module.rst b/docs/source/module.rst new file mode 100644 index 0000000000000000000000000000000000000000..d86b2838136bac916636825941d73cda4e968f69 --- /dev/null +++ b/docs/source/module.rst @@ -0,0 +1,11 @@ +oneflow.nn.Module +================================================ +Module and its containers for neural networks +--------------------------------------------------- +.. currentmodule:: oneflow.nn +.. autoclass:: oneflow.nn.Module + :members: +.. autoclass:: oneflow.nn.ModuleDict + :members: +.. autoclass:: oneflow.nn.ModuleList + :members: diff --git a/docs/source/nn.rst b/docs/source/nn.rst index 02cec61e4dc251c1fdd0650b6c2cf934111c1bbb..53e6fba300236de411f2eb5b34936a8e9031c72c 100644 --- a/docs/source/nn.rst +++ b/docs/source/nn.rst @@ -4,5 +4,75 @@ Operators for neural networks ---------------------------------- .. currentmodule:: oneflow.nn .. 
automodule:: oneflow.nn - :members: - :imported-members: + :members: AdaptiveAvgPool1d, + AdaptiveAvgPool2d, + AdaptiveAvgPool3d, + AvgPool1d, + AvgPool2d, + AvgPool3d, + BCELoss, + BCEWithLogitsLoss, + BatchNorm1d, + BatchNorm2d, + COCOReader, + CTCLoss, + CoinFlip, + ConstantPad2d, + ConstantPad3d, + Conv1d, + Conv2d, + ConvTranspose2d, + CropMirrorNormalize, + CrossEntropyLoss, + Dropout, + ELU, + Embedding, + Flatten, + GELU, + GroupNorm, + Hardsigmoid, + Hardswish, + Hardtanh, + Identity, + InstanceNorm1d, + InstanceNorm2d, + InstanceNorm3d, + KLDivLoss, + L1Loss, + LayerNorm, + LeakyReLU, + Linear, + LogSigmoid, + LogSoftmax, + MSELoss, + MarginRankingLoss, + MaxPool1d, + MaxPool2d, + MaxPool3d, + Mish, + NLLLoss, + OFRecordImageDecoder, + OFRecordImageDecoderRandomCrop, + OfrecordRawDecoder, + OfrecordReader, + PReLU, + Parameter, + ParameterDict, + ParameterList, + PixelShuffle, + PixelShufflev2, + ReLU, + ReLU6, + ReflectionPad2d, + ReplicationPad2d, + Sequential, + Sigmoid, + SmoothL1Loss, + Softmax, + Softplus, + Tanh, + Upsample, + UpsamplingBilinear2d, + UpsamplingNearest2d, + ZeroPad2d, + ctc_greedy_decoder diff --git a/docs/source/oneflow.rst b/docs/source/oneflow.rst index d2317c87ac34e313c82857c36bcc484a200c4612..e2e0a93bd67b50a77ac5722ab3f0758764d8efac 100644 --- a/docs/source/oneflow.rst +++ b/docs/source/oneflow.rst @@ -1,23 +1,42 @@ oneflow =================================== +oneflow +---------------------------------- .. currentmodule:: oneflow .. automodule:: oneflow - :members: - :imported-members: - -Types ----------------------------------- - -oneflow.double - -oneflow.float - -oneflow.float32 - -oneflow.float64 - -oneflow.int32 - -oneflow.int64 - -oneflow.int8 + :members: advanced, + argsort, + argwhere, + clamp, + clip, + diag, + enable_eager_execution, + expand, + flatten, + function_config, + gather, + gather_nd, + in_top_k, + load, + masked_fill, + matmul, + ones, + ones_like, + repeat, + reshape, + save, + saved_model, + scatter_nd, + slice, + slice_update, + sort, + squeeze, + stack, + tensor, + to, + transpose, + where, + zeros, + zeros_like + +.. autofunction:: oneflow.data.load_mnist(train_batch_size=100, test_batch_size=100, data_format='NCHW') \ No newline at end of file diff --git a/docs/source/optim.rst b/docs/source/optim.rst index 04a7f435dfa30bf24bd50ba8fd73e5857687fdde..b52c473db24633578058e8f095911e5a30465952 100644 --- a/docs/source/optim.rst +++ b/docs/source/optim.rst @@ -3,12 +3,16 @@ oneflow.optim Optimizers ---------------------------------- .. currentmodule:: oneflow.optim -.. autofunction:: oneflow.experimental.optim.Optimizer.zero_grad -.. autofunction:: oneflow.experimental.optim.Adam -.. autofunction:: oneflow.experimental.optim.AdamW -.. autofunction:: oneflow.experimental.optim.RMSprop -.. autofunction:: oneflow.experimental.optim.SGD -.. autofunction:: oneflow.experimental.optim.lr_scheduler.CosineAnnealingLR -.. autofunction:: oneflow.experimental.optim.lr_scheduler.StepLR -.. autofunction:: oneflow.experimental.optim.lr_scheduler.LambdaLR +.. automodule:: oneflow.optim + :members: Adam, + AdamW, + Optimizer, + RMSprop, + SGD, + lr_scheduler +.. 
automodule:: oneflow.optim.lr_scheduler + :members: CosineAnnealingLR, + LambdaLR, + LrScheduler, + StepLR diff --git a/docs/source/optimizer.rst b/docs/source/optimizer.rst deleted file mode 100644 index 558089fa03270c39fc813151ad0cc58e346f202c..0000000000000000000000000000000000000000 --- a/docs/source/optimizer.rst +++ /dev/null @@ -1,14 +0,0 @@ -oneflow.optimizer -=================================== -Optimizers ----------------------------------- -.. currentmodule:: oneflow.optimizer -.. automodule:: oneflow.optimizer - :members: - :imported-members: -.. automodule:: oneflow.optimizer.warmup - :members: - :imported-members: -.. automodule:: oneflow.optimizer.grad_clipping - :members: - :imported-members: \ No newline at end of file diff --git a/docs/source/random.rst b/docs/source/random.rst deleted file mode 100644 index 06febe1b3fd38c38c1497d99aec4303c2a4fd3e2..0000000000000000000000000000000000000000 --- a/docs/source/random.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.random -=================================== -Random ----------------------------------- -.. currentmodule:: oneflow.random -.. automodule:: oneflow.random - :members: - :imported-members: diff --git a/docs/source/regularizers.rst b/docs/source/regularizers.rst deleted file mode 100644 index b2350253cd3c929e25efa57e2b5f0b789b59cac0..0000000000000000000000000000000000000000 --- a/docs/source/regularizers.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.regularizers -=================================== -Regularizers ----------------------------------- -.. currentmodule:: oneflow.regularizers -.. automodule:: oneflow.regularizers - :members: - :imported-members: diff --git a/docs/source/scope.rst b/docs/source/scope.rst deleted file mode 100644 index 4d5ec092dc35550829b5a1d079b7d0641c857b1c..0000000000000000000000000000000000000000 --- a/docs/source/scope.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.scope -=================================== -Scope ----------------------------------- -.. currentmodule:: oneflow.scope -.. automodule:: oneflow.scope - :members: - :imported-members: diff --git a/docs/source/sysconfig.rst b/docs/source/sysconfig.rst deleted file mode 100644 index c5aa1753ec400d16d3d6a2e758c86061ad9bdb33..0000000000000000000000000000000000000000 --- a/docs/source/sysconfig.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.sysconfig -=================================== -System configurations ----------------------------------- -.. currentmodule:: oneflow.sysconfig -.. automodule:: oneflow.sysconfig - :members: - :imported-members: diff --git a/docs/source/system.rst b/docs/source/system.rst deleted file mode 100644 index 627ba9667a32991ab7092639bb2b790bdc899b61..0000000000000000000000000000000000000000 --- a/docs/source/system.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.system -=================================== -System configurations ----------------------------------- -.. currentmodule:: oneflow.system -.. automodule:: oneflow.system - :members: - :imported-members: diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst new file mode 100644 index 0000000000000000000000000000000000000000..a8e4c7637d20ef7591d22e9521ac005b3ad6a37e --- /dev/null +++ b/docs/source/tensor.rst @@ -0,0 +1,149 @@ +oneflow.Tensor +=================================== +OneFlow Tensor Class +---------------------------------- +.. currentmodule:: oneflow +.. 
autoclass:: oneflow.Tensor + :members: abs, + acos, + acosh, + add, + add_, + addmm, + arccosh, + arcsin, + arcsinh, + arctan, + arctanh, + argmax, + argsort, + argwhere, + asin, + asinh, + atan, + atan2, + atanh, + backward, + bmm, + cast, + ceil, + chunk, + clamp, + clip, + clone, + copy_, + cos, + cosh, + data, + data_ptr, + detach, + determine, + device, + diag, + dim, + div, + dtype, + element_size, + eq, + erf, + erfc, + exp, + expand, + expm1, + fill_, + flatten, + flip, + floor, + gather, + ge, + gelu, + get_device, + grad, + grad_fn, + gt, + in_top_k, + is_consistent, + is_contiguous, + is_cuda, + is_determined, + is_lazy, + is_leaf, + kaiming_normal_, + kaiming_uniform_, + le, + log, + log1p, + long, + lt, + masked_fill, + masked_select, + matmul, + max, + mean, + min, + mish, + mul, + ndim, + ndimension, + ne, + negative, + nelement, + new_ones, + norm, + normal_, + numel, + numpy, + permute, + placement, + pow, + reciprocal, + register_hook, + repeat, + requires_grad, + requires_grad_, + reshape, + retain_grad, + round, + rsqrt, + sbp, + set_data_initializer, + set_is_consistent, + set_is_lazy, + set_placement, + set_sbp, + shape, + sigmoid, + sign, + sin, + sin_, + sinh, + size, + softmax, + softplus, + sort, + sqrt, + square, + squeeze, + stack, + std, + storage_offset, + stride, + sub, + sum, + tan, + tanh, + tile, + to, + tolist, + topk, + transpose, + triu, + type_as, + uniform_, + unsqueeze, + var, + view, + where, + xavier_normal_, + xavier_uniform_, + zeros_ \ No newline at end of file diff --git a/docs/source/tensorrt.rst b/docs/source/tensorrt.rst deleted file mode 100644 index 8e5f5b6037a34a719f0e8bf08cef05a4bb91cf3b..0000000000000000000000000000000000000000 --- a/docs/source/tensorrt.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.tensorrt -=================================== -TensorRT integration ----------------------------------- -.. currentmodule:: oneflow.tensorrt -.. automodule:: oneflow.tensorrt - :members: - :imported-members: diff --git a/docs/source/train.rst b/docs/source/train.rst deleted file mode 100644 index d8c5ac8d637354dcb164c8dc0b0caf317279bc77..0000000000000000000000000000000000000000 --- a/docs/source/train.rst +++ /dev/null @@ -1,6 +0,0 @@ -oneflow.train -=================================== -.. currentmodule:: oneflow.train -.. automodule:: oneflow.train - :members: - :imported-members: diff --git a/docs/source/typing.rst b/docs/source/typing.rst deleted file mode 100644 index 1c22051049713216242695b24f5ebf785177df85..0000000000000000000000000000000000000000 --- a/docs/source/typing.rst +++ /dev/null @@ -1,8 +0,0 @@ -oneflow.typing -=================================== -Typing system ----------------------------------- -.. currentmodule:: oneflow.typing -.. automodule:: oneflow.typing - :members: - :imported-members: diff --git a/oneflow/python/experimental/load_mnist.py b/oneflow/python/experimental/load_mnist.py index 7083105bf49237bb62d3cbded44e3c1a18577210..c067de8f068d73e9bae4f6223d4506e145903548 100644 --- a/oneflow/python/experimental/load_mnist.py +++ b/oneflow/python/experimental/load_mnist.py @@ -81,7 +81,7 @@ def load_mnist( out_dir (str, optional): dir to save downloaded file. Defaults to "./". 
Returns: - [type]: (train_images, train_labels), (test_images, test_labels) + (train_images, train_labels), (test_images, test_labels) """ path = get_mnist_file(hash_check, url, out_dir) diff --git a/oneflow/python/framework/docstr/math_ops.py b/oneflow/python/framework/docstr/math_ops.py index 42fc5f47ab68a02fea71e6b4b77c543401a94fb1..ad805d078c2ca07032f2580d60a7168bfeed0707 100644 --- a/oneflow/python/framework/docstr/math_ops.py +++ b/oneflow/python/framework/docstr/math_ops.py @@ -34,9 +34,8 @@ add_docstr( .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x1 = flow.Tensor(np.array([-0.5461, 0.1347, -2.7266, -0.2746]).astype(np.float32)) >>> y1 = flow.F.sin(x1) >>> y1 @@ -67,9 +66,8 @@ add_docstr( .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x = np.array([1.4309, 1.2706, -0.8562, 0.9796]) >>> x = flow.Tensor(x, dtype=flow.float32) >>> y = flow.F.cos(x) diff --git a/oneflow/python/framework/docstr/random.py b/oneflow/python/framework/docstr/random.py index 17bfca5f336520168c77cc68cae3cd04b9a4c226..631180e589b59d25ac6f3e6657fc6bb6460ad910 100644 --- a/oneflow/python/framework/docstr/random.py +++ b/oneflow/python/framework/docstr/random.py @@ -37,9 +37,7 @@ add_docstr( .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> arr = np.array( ... [ ... [1.0, 1.0, 1.0], diff --git a/oneflow/python/framework/functional.py b/oneflow/python/framework/functional.py index 4a7e76f32f9a6f1e16cda96ed4285256f9489de5..af726befbca345249583d82f7bd427de36f04835 100644 --- a/oneflow/python/framework/functional.py +++ b/oneflow/python/framework/functional.py @@ -51,7 +51,6 @@ class Function: def RegisterFunctionalApis(): import inspect import oneflow.F - import oneflow.experimental.F for s in dir(oneflow._oneflow_internal.F): f = getattr(oneflow._oneflow_internal.F, s) @@ -60,9 +59,7 @@ def RegisterFunctionalApis(): if s in _function_name_aliases: func_name = _function_name_aliases[s] setattr(oneflow.F, func_name, Function(func_name, f)) - setattr(oneflow.experimental.F, func_name, Function(func_name, f)) setattr(oneflow.F, s, Function(func_name, f)) - setattr(oneflow.experimental.F, s, Function(func_name, f)) del inspect diff --git a/oneflow/python/framework/ops.py b/oneflow/python/framework/ops.py index a4721b4fdf0700752096761d75b19b1c37c6f8dd..685ec44b3f9f088a6364f9e7914ca35563379215 100644 --- a/oneflow/python/framework/ops.py +++ b/oneflow/python/framework/ops.py @@ -30,17 +30,6 @@ import oneflow._oneflow_internal from typing import Union, Optional, Sequence -@oneflow_export("repeat") -@stable_api -def api_repeat( - input: oneflow._oneflow_internal.BlobDesc, - repeat_num: int, - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - func = enable_if.unique([repeat]) - return func(input, repeat_num, name=name) - - @enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled) def repeat(input, repeat_num, name=None): assert not oneflow.eager_execution_enabled() diff --git a/oneflow/python/framework/tensor.py b/oneflow/python/framework/tensor.py index 91f24440fd9815a6b0bc1173de6837ceb84b21ef..ede8f8a6e775373675d38c0e7973098180b12e22 100644 --- a/oneflow/python/framework/tensor.py +++ b/oneflow/python/framework/tensor.py @@ -52,7 +52,7 @@ def 
register_local_tensor_method(name=None): def _local_tensor_numpy(eager_local_tensor): if eager_local_tensor.dtype == flow.tensor_buffer: shapes, dtypes = eager_local_tensor._tensor_buffer_shapes_and_dtypes - tensors = flow.experimental.tensor_buffer_to_list_of_tensors( + tensors = flow.tensor_buffer_to_list_of_tensors( Tensor(eager_local_tensor), shapes, dtypes ) return [t.numpy() for t in tensors] @@ -545,7 +545,7 @@ class Tensor: @register_local_tensor_method() def __rsub__(self, other): - return flow.experimental.sub(other, self) + return flow.sub(other, self) @register_local_tensor_method() def __truediv__(self, other): @@ -553,15 +553,15 @@ class Tensor: @register_local_tensor_method() def __rtruediv__(self, other): - return flow.experimental.div(other, self) + return flow.div(other, self) @register_local_tensor_method() def __neg__(self): - return flow.experimental.neg(self) + return flow.neg(self) @register_local_tensor_method() def __pow__(self, b): - return flow.experimental.pow(self, b) + return flow.pow(self, b) @register_local_tensor_method() def __mod__(self, other): diff --git a/oneflow/python/nn/graph.py b/oneflow/python/nn/graph.py index b3e46e415ee2d089b37c43815a9f29d02308aeba..b93cfed4b7935b9cf0070db015033b581bb0a8c7 100644 --- a/oneflow/python/nn/graph.py +++ b/oneflow/python/nn/graph.py @@ -23,7 +23,7 @@ import oneflow.python.framework.c_api_util as c_api_util import oneflow.python.framework.graph_build_util as graph_build_util import oneflow.python.framework.session_context as session_ctx from oneflow._oneflow_internal import Tensor as InternalTensor -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.multi_client_session import MultiClientSession from oneflow.python.nn.graph_block import Block, BlockType from oneflow.python.nn.graph_optimizer import OptimizerConfig @@ -34,7 +34,6 @@ from oneflow.python.framework.function_util import FunctionConfig @oneflow_export("nn.Graph", "nn.graph.Graph") -@experimental_api class Graph(object): _child_init_cnt = dict() @@ -255,7 +254,6 @@ class Graph(object): @oneflow_export("nn.graph.GraphConfig") -@experimental_api class GraphConfig(FunctionConfig): def __init__(self): super().__init__() diff --git a/oneflow/python/nn/graph_block.py b/oneflow/python/nn/graph_block.py index 925b4c5ddb08ec5082c594ae53679e515c22a408..23e7b7f77f5afb5e0d77350ceb1c1a92340b815d 100644 --- a/oneflow/python/nn/graph_block.py +++ b/oneflow/python/nn/graph_block.py @@ -20,7 +20,7 @@ from typing import Union, Optional, Iterator, Set import oneflow._oneflow_internal import oneflow.python.framework.graph_build_util as graph_build_util -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import Tensor from oneflow.python.nn.module import Module from oneflow.python.nn.parameter import Parameter @@ -35,7 +35,6 @@ class BlockType: @oneflow_export("nn.graph.Block") -@experimental_api class Block(object): def __init__( self, @@ -308,7 +307,6 @@ class Block(object): @oneflow_export("nn.graph.BlockConfig") -@experimental_api class BlockConfig(object): def __init__(self): self._stage_id = None diff --git a/oneflow/python/nn/graph_optimizer.py b/oneflow/python/nn/graph_optimizer.py index 8502885e2ca732a239ea1a9915205033a8aebdee..6ebced4b6e012d49ab830c1cf531c0b99404534e 100644 --- a/oneflow/python/nn/graph_optimizer.py +++ 
b/oneflow/python/nn/graph_optimizer.py @@ -16,11 +16,10 @@ limitations under the License. from __future__ import absolute_import from oneflow.python.nn.optimizer.optimizer import Optimizer -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export @oneflow_export("nn.graph.OptimizerConfig") -@experimental_api class OptimizerConfig(object): def __init__( self, diff --git a/oneflow/python/nn/modules/abs.py b/oneflow/python/nn/modules/abs.py index a3a09a6ad9ad0fd258fc2fec58dacfc5e83d2cff..cd50499d6ff50c94642fcc16853d3fc868de2ebc 100644 --- a/oneflow/python/nn/modules/abs.py +++ b/oneflow/python/nn/modules/abs.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op @@ -30,7 +30,6 @@ class Abs(Module): @oneflow_export("abs") @register_tensor_op("abs") -@experimental_api def abs_op(x): r"""Return the absolute value of each element in input tensor:math:`y = |x|` element-wise. @@ -41,9 +40,8 @@ def abs_op(x): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x = flow.Tensor(np.array([-1, 2, -3, 4]).astype(np.float32)) >>> flow.abs(x) diff --git a/oneflow/python/nn/modules/acos.py b/oneflow/python/nn/modules/acos.py index 3f5b40872fa09ea6f7bf417990a5fab49a140b77..64bd494444673f26b34ecc02a3e7fc7db0dd081b 100644 --- a/oneflow/python/nn/modules/acos.py +++ b/oneflow/python/nn/modules/acos.py @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op @@ -29,7 +29,6 @@ class Acos(Module): @oneflow_export("acos") @register_tensor_op("acos") -@experimental_api def acos_op(tensor): r""" Returns a new tensor with the inverse cosine of the elements of :attr:`input`. @@ -44,15 +43,14 @@ def acos_op(tensor): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> arr = np.array([0.5, 0.6, 0.7]) >>> input = flow.Tensor(arr, dtype=flow.float32) >>> output = flow.acos(input) - >>> print(output.numpy()) - [1.0471976 0.9272952 0.79539883] + >>> output + tensor([1.0472, 0.9273, 0.7954], dtype=oneflow.float32) """ diff --git a/oneflow/python/nn/modules/acosh.py b/oneflow/python/nn/modules/acosh.py index 8a8363a412b72d52fc28e9bc77beac5f54084750..57ded6279746d3b0061c6b56d452d4576798f4a0 100644 --- a/oneflow/python/nn/modules/acosh.py +++ b/oneflow/python/nn/modules/acosh.py @@ -15,7 +15,7 @@ limitations under the License. 
""" import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.module import Module @@ -29,7 +29,6 @@ class Acosh(Module): @oneflow_export("acosh") -@experimental_api def acosh_op(x): r"""Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`. @@ -44,9 +43,8 @@ def acosh_op(x): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x1 = flow.Tensor(np.array([2, 3, 4]).astype(np.float32)) >>> out1 = flow.acosh(x1) >>> out1 @@ -62,13 +60,12 @@ def acosh_op(x): @register_tensor_op("acosh") -@experimental_api def acosh_op_tensor(x): r""" acosh() -> Tensor - See :func:`oneflow.experimental.acosh` + See :func:`oneflow.acosh` """ @@ -76,11 +73,10 @@ def acosh_op_tensor(x): @oneflow_export("arccosh") -@experimental_api def arccosh_op(x): r""" - See :func:`oneflow.experimental.acosh` + See :func:`oneflow.acosh` """ @@ -88,13 +84,12 @@ def arccosh_op(x): @register_tensor_op("arccosh") -@experimental_api def arccosh_op_tensor(x): r""" arccosh() -> Tensor - See :func:`oneflow.experimental.acosh` + See :func:`oneflow.acosh` """ diff --git a/oneflow/python/nn/modules/activation.py b/oneflow/python/nn/modules/activation.py index c855a03345ec3290b4f72cdccdec165ece53bf32..7b3102284123f0a20be6579acaa2d07c8c4a630b 100644 --- a/oneflow/python/nn/modules/activation.py +++ b/oneflow/python/nn/modules/activation.py @@ -21,7 +21,7 @@ import oneflow as flow import oneflow._oneflow_internal from oneflow.python.nn.module import Module from oneflow.python.nn.modules.utils import _check_inplace_valid -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -45,7 +45,6 @@ def _softmax_need_transpose(x, axis): @oneflow_export("nn.PReLU") -@experimental_api class PReLU(Module): """Applies the element-wise function: @@ -81,9 +80,8 @@ class PReLU(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> m = flow.nn.PReLU() >>> input = flow.Tensor(np.asarray([[[[1, -2], [3, 4]]]]), dtype=flow.float32) >>> print(m(input).numpy()) @@ -105,7 +103,6 @@ class PReLU(Module): @oneflow_export("nn.ReLU") -@experimental_api class ReLU(Module): r"""Applies the rectified linear unit function element-wise: @@ -123,9 +120,8 @@ class ReLU(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> relu = flow.nn.ReLU() >>> ndarr = np.asarray([1, -2, 3]) >>> x = flow.Tensor(ndarr) @@ -149,7 +145,6 @@ class ReLU(Module): @oneflow_export("nn.ReLU6") -@experimental_api class ReLU6(Module): r"""Applies the element-wise function: @@ -174,9 +169,8 @@ class ReLU6(Module): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> relu6 = flow.nn.ReLU6() @@ -202,7 +196,6 @@ class ReLU6(Module): @oneflow_export("nn.Tanh") -@experimental_api class Tanh(Module): r"""This operator computes the hyperbolic tangent value of Tensor. @@ -223,9 +216,8 @@ class Tanh(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-1, 0, 1]).astype(np.float32) >>> input = flow.Tensor(x) >>> tanh = flow.nn.Tanh() @@ -244,7 +236,6 @@ class Tanh(Module): @oneflow_export("tanh") @register_tensor_op("tanh") -@experimental_api def tanh_op(x): r"""This operator computes the hyperbolic tangent value of Tensor. @@ -265,9 +256,8 @@ def tanh_op(x): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-1, 0, 1]).astype(np.float32) >>> input = flow.Tensor(x) >>> tanh = flow.nn.Tanh() @@ -280,7 +270,6 @@ def tanh_op(x): @oneflow_export("nn.ELU") -@experimental_api class ELU(Module): r"""Applies the element-wise function: @@ -306,9 +295,8 @@ class ELU(Module): >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> elu = flow.nn.ELU() @@ -336,7 +324,6 @@ class ELU(Module): @oneflow_export("nn.GELU") -@experimental_api class GELU(Module): r"""Gelu activation operator. @@ -356,9 +343,8 @@ class GELU(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> gelu = flow.nn.GELU() @@ -378,7 +364,6 @@ class GELU(Module): @oneflow_export("gelu") @register_tensor_op("gelu") -@experimental_api def gelu_op(x): r"""Gelu activation operator. @@ -398,9 +383,8 @@ def gelu_op(x): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> gelu = flow.nn.GELU() @@ -414,7 +398,6 @@ def gelu_op(x): @oneflow_export("nn.Sigmoid") -@experimental_api class Sigmoid(Module): r"""Applies the element-wise function: @@ -431,9 +414,8 @@ class Sigmoid(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = flow.Tensor(np.array([0.81733328, 0.43621480, 0.10351428])) >>> m = flow.nn.Sigmoid() >>> out = m(x) @@ -450,7 +432,6 @@ class Sigmoid(Module): @oneflow_export("sigmoid") @register_tensor_op("sigmoid") -@experimental_api def sigmoid_op(x): r"""Applies the element-wise function: @@ -467,9 +448,8 @@ def sigmoid_op(x): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = flow.Tensor(np.array([0.81733328, 0.43621480, 0.10351428])) >>> out = flow.sigmoid(x) >>> out @@ -480,7 +460,6 @@ def sigmoid_op(x): @oneflow_export("nn.Hardsigmoid") -@experimental_api class Hardsigmoid(Module): r"""Applies the element-wise function: @@ -504,9 +483,8 @@ class Hardsigmoid(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> hardsigmoid = flow.nn.Hardsigmoid() @@ -533,7 +511,6 @@ class Hardsigmoid(Module): @oneflow_export("nn.Softmax") -@experimental_api class Softmax(Module): def __init__(self, dim: Optional[int] = None): super().__init__() @@ -555,7 +532,6 @@ class Softmax(Module): @oneflow_export("softmax") @register_tensor_op("softmax") -@experimental_api def softmax_op(tensor, dim=None): r"""Applies the Softmax function to an n-dimensional input Tensor rescaling them so that the elements of the n-dimensional output Tensor @@ -587,9 +563,8 @@ def softmax_op(tensor, dim=None): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> m = flow.nn.Softmax(dim = 2) >>> x = flow.Tensor( ... np.array( @@ -606,7 +581,6 @@ def softmax_op(tensor, dim=None): @oneflow_export("nn.LogSoftmax") -@experimental_api class LogSoftmax(Module): r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional input Tensor. @@ -628,9 +602,8 @@ class LogSoftmax(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> m = flow.nn.LogSoftmax(dim=1) >>> x = flow.Tensor( ... np.array( @@ -673,7 +646,6 @@ class LogSoftmax(Module): @oneflow_export("nn.LogSigmoid") -@experimental_api class LogSigmoid(Module): r"""Applies the element-wise function: @@ -691,9 +663,8 @@ class LogSigmoid(Module): >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> logsigmoid = flow.nn.LogSigmoid() @@ -708,13 +679,12 @@ class LogSigmoid(Module): super().__init__() def forward(self, x): - sigmoid_res = flow.experimental.sigmoid(x) - res = flow.experimental.log(sigmoid_res) + sigmoid_res = flow.sigmoid(x) + res = flow.log(sigmoid_res) return res @oneflow_export("nn.Softplus") -@experimental_api class Softplus(Module): r"""Applies the element-wise function: @@ -741,9 +711,8 @@ class Softplus(Module): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> softplus = flow.nn.Softplus() @@ -759,12 +728,10 @@ class Softplus(Module): self.threshold = threshold def forward(self, x): - return flow.experimental.where( + return flow.where( x * self.beta > self.threshold, x, - 1 - / self.beta - * flow.experimental.log(1.0 + flow.experimental.exp(self.beta * x)), + 1 / self.beta * flow.log(1.0 + flow.exp(self.beta * x)), ) def extra_repr(self): @@ -772,7 +739,6 @@ class Softplus(Module): @oneflow_export("nn.Hardswish") -@experimental_api class Hardswish(Module): r"""Applies the hardswish function, element-wise, as described in the paper: `Searching for MobileNetV3`_. @@ -795,9 +761,8 @@ class Hardswish(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([-0.5, 0, 0.5]).astype(np.float32) >>> input = flow.Tensor(x) >>> hardswish = flow.nn.Hardswish() @@ -825,7 +790,6 @@ class Hardswish(Module): @oneflow_export("nn.Hardtanh") -@experimental_api class Hardtanh(Module): r""" Applies the HardTanh function element-wise @@ -861,9 +825,8 @@ class Hardtanh(Module): >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> m = flow.nn.Hardtanh() >>> arr = np.array([0.2, 0.3, 3.0, 4.0]) >>> x = flow.Tensor(arr) @@ -909,7 +872,6 @@ class Hardtanh(Module): @oneflow_export("nn.LeakyReLU") -@experimental_api class LeakyReLU(Module): r"""Applies the element-wise function: @@ -933,9 +895,8 @@ class LeakyReLU(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> m = flow.nn.LeakyReLU(0.1) >>> arr = np.array([0.2, 0.3, 3.0, 4.0]) >>> x = flow.Tensor(arr) @@ -961,7 +922,6 @@ class LeakyReLU(Module): @oneflow_export("nn.Mish") -@experimental_api class Mish(Module): r"""Applies the element-wise function: @@ -981,9 +941,8 @@ class Mish(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([1, 2, 3]).astype(np.float32) >>> input = flow.Tensor(x) >>> mish = flow.nn.Mish() @@ -998,11 +957,10 @@ class Mish(Module): super().__init__() def forward(self, x): - return x * flow.experimental.tanh(flow.experimental.softplus(x)) + return x * flow.tanh(flow.softplus(x)) @oneflow_export("mish") -@experimental_api def mish_op(x): r"""Applies the element-wise function: @@ -1012,18 +970,17 @@ def mish_op(x): .. 
note:: See `Mish: A Self Regularized Non-Monotonic Neural Activation Function <https://arxiv.org/abs/1908.08681>`_ - See :mod:`oneflow.experimental.nn.Mish` + See :mod:`oneflow.nn.Mish` """ return Mish()(x) @register_tensor_op("mish") -@experimental_api def mish_op_tensor(x): r""" mish() -> Tensor - See :func:`oneflow.experimental.mish` + See :func:`oneflow.mish` """ return Mish()(x) diff --git a/oneflow/python/nn/modules/adaptive_pool.py b/oneflow/python/nn/modules/adaptive_pool.py index c13c630b41fef26d1dba9b844a0f11aa0fcc61a0..4772a95c796e06c89843fcef47059e2f5ed2b1e1 100644 --- a/oneflow/python/nn/modules/adaptive_pool.py +++ b/oneflow/python/nn/modules/adaptive_pool.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export def _generate_output_size(input_size, output_size): @@ -41,7 +41,6 @@ def _generate_output_size(input_size, output_size): @oneflow_export("nn.AdaptiveAvgPool1d") -@experimental_api class AdaptiveAvgPool1d(Module): r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes. @@ -56,10 +55,9 @@ class AdaptiveAvgPool1d(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> import oneflow.experimental.nn as nn - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> import oneflow.nn as nn + >>> m = nn.AdaptiveAvgPool1d(5) >>> input = flow.Tensor(np.random.randn(1, 64, 8)) >>> output = m(input) @@ -84,11 +82,10 @@ class AdaptiveAvgPool1d(Module): @oneflow_export("adaptive_avg_pool1d") -@experimental_api def adaptive_avg_pool1d(input, output_size): r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes. - See :mod:`oneflow.experimental.nn.AdaptiveAvgPool1d` + See :mod:`oneflow.nn.AdaptiveAvgPool1d` Args: input: input tensor @@ -98,7 +95,6 @@ def adaptive_avg_pool1d(input, output_size): @oneflow_export("nn.AdaptiveAvgPool2d") -@experimental_api class AdaptiveAvgPool2d(Module): r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes. @@ -116,10 +112,9 @@ class AdaptiveAvgPool2d(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> import oneflow.experimental.nn as nn - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> import oneflow.nn as nn + >>> m = nn.AdaptiveAvgPool2d((5,7)) >>> input = flow.Tensor(np.random.randn(1, 64, 8, 9)) >>> output = m(input) @@ -151,11 +146,10 @@ class AdaptiveAvgPool2d(Module): @oneflow_export("adaptive_avg_pool2d") -@experimental_api def adaptive_avg_pool2d(input, output_size): r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes. - See :mod:`oneflow.experimental.nn.AdaptiveAvgPool2d` + See :mod:`oneflow.nn.AdaptiveAvgPool2d` Args: input: input tensor @@ -165,7 +159,6 @@ def adaptive_avg_pool2d(input, output_size): @oneflow_export("nn.AdaptiveAvgPool3d") -@experimental_api class AdaptiveAvgPool3d(Module): r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes. @@ -183,10 +176,9 @@ class AdaptiveAvgPool3d(Module): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> import oneflow.experimental.nn as nn - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> import oneflow.nn as nn + >>> m = nn.AdaptiveAvgPool3d((5,7,9)) >>> input = flow.Tensor(np.random.randn(1, 64, 8, 9, 10)) >>> output = m(input) @@ -218,11 +210,10 @@ class AdaptiveAvgPool3d(Module): @oneflow_export("adaptive_avg_pool3d") -@experimental_api def adaptive_avg_pool3d(input, output_size): r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes. - See :mod:`oneflow.experimental.nn.AdaptiveAvgPool3d` + See :mod:`oneflow.nn.AdaptiveAvgPool3d` Args: input: input tensor diff --git a/oneflow/python/nn/modules/arange.py b/oneflow/python/nn/modules/arange.py index 1b5344f75ed79a9eb447650d5432e1798a20248d..8b6eb487dfa5277186a8aa956dba71ef8cb2af16 100644 --- a/oneflow/python/nn/modules/arange.py +++ b/oneflow/python/nn/modules/arange.py @@ -17,7 +17,7 @@ from typing import Union import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -58,7 +58,6 @@ class Arange(Module): @oneflow_export("arange") -@experimental_api def arange_op( start: int = 0, end: int = None, @@ -89,9 +88,8 @@ def arange_op( .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> y = flow.arange(0, 5) >>> y tensor([0, 1, 2, 3, 4], dtype=oneflow.int64) diff --git a/oneflow/python/nn/modules/argmax.py b/oneflow/python/nn/modules/argmax.py index 8d1e4671eaee5c4c00a09275827f414936a9b167..d897775843fcaf451007766fb7f3ba12f06d7287 100644 --- a/oneflow/python/nn/modules/argmax.py +++ b/oneflow/python/nn/modules/argmax.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.ops.transpose_util import ( get_perm_when_transpose_axis_to_last_dim, @@ -40,13 +40,13 @@ class Argmax(Module): if axis == num_axes - 1: x = flow.F.argmax(input) if self.keepdim == True: - x = flow.experimental.unsqueeze(x, -1) + x = flow.unsqueeze(x, -1) return x else: perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis) x = flow.F.transpose(input, perm=perm) x = flow.F.argmax(x) - x = flow.experimental.unsqueeze(x, -1) + x = flow.unsqueeze(x, -1) x = flow.F.transpose(x, perm=get_inversed_perm(perm)) if self.keepdim == False: x = x.squeeze(dim=[axis]) @@ -55,7 +55,6 @@ class Argmax(Module): @oneflow_export("argmax") @register_tensor_op("argmax") -@experimental_api def argmax_op(input, dim: int = None, keepdim: bool = False): """The op computes the index with the largest value of a Tensor at specified axis. @@ -72,9 +71,8 @@ def argmax_op(input, dim: int = None, keepdim: bool = False): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array([[1, 3, 8, 7, 2], ... 
[1, 9, 4, 3, 2]], dtype=np.float32) diff --git a/oneflow/python/nn/modules/argsort.py b/oneflow/python/nn/modules/argsort.py index 19d0407fae4c6be371134081f30b3fe05e7fbb8c..a23cac68bcf092e94d752da7b521438caf107575 100644 --- a/oneflow/python/nn/modules/argsort.py +++ b/oneflow/python/nn/modules/argsort.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.ops.transpose_util import ( get_perm_when_transpose_axis_to_last_dim, @@ -51,7 +51,6 @@ class Argsort(Module): @oneflow_export("argsort") @register_tensor_op("argsort") -@experimental_api def argsort_op(input, dim: int = -1, descending: bool = False): """This operator sorts the input Tensor at the specified dim and returns the indices of the sorted Tensor. @@ -68,9 +67,7 @@ def argsort_op(input, dim: int = -1, descending: bool = False): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> x = np.array([[10, 2, 9, 3, 7], ... [1, 9, 4, 3, 2]]).astype("float32") >>> input = flow.Tensor(x) diff --git a/oneflow/python/nn/modules/argwhere.py b/oneflow/python/nn/modules/argwhere.py index 0ce94cefd0dd2d41e4bd0b38c81ad17808b40283..5b8d0d41098c119b8b492bc0ec355fcf006a254a 100644 --- a/oneflow/python/nn/modules/argwhere.py +++ b/oneflow/python/nn/modules/argwhere.py @@ -18,7 +18,7 @@ from typing import Optional import oneflow as flow import numpy as np from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -32,11 +32,10 @@ class Argwhere(Module): def forward(self, x): res, size = flow.F.argwhere(x, dtype=self.dtype) slice_tup_list = [[0, int(size.numpy()), 1]] - return flow.experimental.slice(res, slice_tup_list=slice_tup_list) + return flow.slice(res, slice_tup_list=slice_tup_list) @oneflow_export("argwhere") -@experimental_api def argwhere_op(x, dtype: Optional[flow.dtype] = None): """This operator finds the indices of input Tensor `x` elements that are non-zero. @@ -54,9 +53,7 @@ def argwhere_op(x, dtype: Optional[flow.dtype] = None): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> x = np.array([[0, 1, 0], ... [2, 0, 2]]).astype(np.float32) @@ -72,13 +69,12 @@ def argwhere_op(x, dtype: Optional[flow.dtype] = None): @register_tensor_op("argwhere") -@experimental_api def argwhere_tebsor_op(x, dtype: Optional[flow.dtype] = None): """ argwhere() -> Tensor - See :func:`oneflow.experimental.argwhere` + See :func:`oneflow.argwhere` """ return Argwhere(dtype=dtype)(x) diff --git a/oneflow/python/nn/modules/atan2.py b/oneflow/python/nn/modules/atan2.py index 96975cb3d7e4a222e17f2359b3185a1c8a7bab26..07af7adc62d53ef85c5e6c5f2b4331481060e1fe 100644 --- a/oneflow/python/nn/modules/atan2.py +++ b/oneflow/python/nn/modules/atan2.py @@ -15,7 +15,7 @@ limitations under the License.
""" import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.module import Module @@ -32,7 +32,6 @@ class Atan2(Module): @oneflow_export("atan2") -@experimental_api def atan2_op(input, other): r"""Element-wise arctangent of input{i}/other{i} with consideration of the quadrant. Returns a new tensor with the signed @@ -49,7 +48,7 @@ def atan2_op(input, other): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np >>> x1 = flow.Tensor(np.array([1,2,3])) @@ -59,7 +58,6 @@ def atan2_op(input, other): >>> x3 = flow.Tensor(np.array([1,0,-1])) >>> y3 = flow.Tensor(np.array([0,1,0])) - >>> flow.enable_eager_execution() >>> flow.atan2(x1,y1).numpy() array([0.32175055, 0.7853982 , 1.2490457 ], dtype=float32) >>> flow.atan2(x2,y2).numpy() @@ -72,13 +70,12 @@ def atan2_op(input, other): @register_tensor_op("atan2") -@experimental_api def atan2_op_tensor(input, other): r""" atan2(other) -> Tensor - See :func:`oneflow.experimental.atan2` + See :func:`oneflow.atan2` """ return Atan2()(input, other) diff --git a/oneflow/python/nn/modules/atanh.py b/oneflow/python/nn/modules/atanh.py index 0b729d1d61f10e93d004bb1f03548c807cc8035f..f8165adeb998da807a00f80f5d121d6925d08ed4 100644 --- a/oneflow/python/nn/modules/atanh.py +++ b/oneflow/python/nn/modules/atanh.py @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op @@ -28,7 +28,6 @@ class Atanh(Module): @oneflow_export("atanh") -@experimental_api def atanh_op(input): r"""Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`. @@ -42,9 +41,8 @@ def atanh_op(input): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> np_arr = np.array([0.5, 0.6, 0.7]).astype(np.float32) >>> input = flow.Tensor(np_arr) >>> output = flow.atanh(input) @@ -57,11 +55,10 @@ def atanh_op(input): @register_tensor_op("atanh") -@experimental_api def atanh_op_tensor(x): r""" atanh() -> Tensor - See :func:`oneflow.experimental.atanh` + See :func:`oneflow.atanh` """ @@ -69,22 +66,20 @@ def atanh_op_tensor(x): @oneflow_export("arctanh") -@experimental_api def arctanh_op(input): r""" - Alias for :func:`oneflow.experimental.atanh` + Alias for :func:`oneflow.atanh` """ return Atanh()(input) @register_tensor_op("arctanh") -@experimental_api def arctanh_op_tensor(input): r""" - Alias for :func:`oneflow.experimental.atanh` + Alias for :func:`oneflow.atanh` """ return Atanh()(input) diff --git a/oneflow/python/nn/modules/batchnorm.py b/oneflow/python/nn/modules/batchnorm.py index 67dbecbc0eb000e2cc2adf26d50e2c0f262b2bc2..cde5b0a178cd9de4c30c5df28eb50c39bf54f9b2 100644 --- a/oneflow/python/nn/modules/batchnorm.py +++ b/oneflow/python/nn/modules/batchnorm.py @@ -16,7 +16,7 @@ limitations under the License. 
from typing import Union import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module @@ -199,7 +199,6 @@ class _BatchNorm(_NormBase): @oneflow_export("nn.BatchNorm1d") -@experimental_api class BatchNorm1d(_BatchNorm): r"""Applies Batch Normalization over a 2D or 3D input (a mini-batch of 1D inputs with optional additional channel dimension) as described in the paper @@ -261,10 +260,9 @@ class BatchNorm1d(_BatchNorm): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = flow.Tensor(np.random.randn(20, 100)) >>> m = flow.nn.BatchNorm1d(100) >>> y = m(x) @@ -279,7 +277,6 @@ class BatchNorm1d(_BatchNorm): @oneflow_export("nn.BatchNorm2d") -@experimental_api class BatchNorm2d(_BatchNorm): r"""Applies Batch Normalization over a 4D input (a mini-batch of 2D inputs with additional channel dimension) as described in the paper @@ -341,10 +338,9 @@ class BatchNorm2d(_BatchNorm): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = flow.Tensor(np.random.randn(4, 2, 8, 3)) >>> m = flow.nn.BatchNorm2d(num_features=2, eps=1e-5, momentum=0.1) >>> y = m(x) diff --git a/oneflow/python/nn/modules/bmm.py b/oneflow/python/nn/modules/bmm.py index 71ba52cd1f9747d9d58d005459172826c55dae44..77cdb2b4b0706929bd992d7fde3c22e48651999e 100644 --- a/oneflow/python/nn/modules/bmm.py +++ b/oneflow/python/nn/modules/bmm.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -31,7 +31,6 @@ class BMM(Module): @oneflow_export("bmm") -@experimental_api def bmm_op(x, y): """ Performs a batch matrix-matrix product of matrices stored in input and mat2. @@ -48,9 +47,8 @@ def bmm_op(x, y): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> input1 = flow.Tensor(np.random.randn(10, 3, 4), dtype=flow.float32) >>> input2 = flow.Tensor(np.random.randn(10, 4, 5), dtype=flow.float32) >>> of_out = flow.bmm(input1, input2) @@ -61,13 +59,12 @@ def bmm_op(x, y): @register_tensor_op("bmm") -@experimental_api def bmm_op_tensor(x, y): r""" bmm() -> Tensor - See :func:`oneflow.experimental.bmm` + See :func:`oneflow.bmm` """ return BMM()(x, y) diff --git a/oneflow/python/nn/modules/broadcast_like.py b/oneflow/python/nn/modules/broadcast_like.py index 4a59a2ec316d6f9ae3bc1f45d1d16f067f08e1f6..a51a5c2925b41651eac5ccd3681aad711c69a17d 100644 --- a/oneflow/python/nn/modules/broadcast_like.py +++ b/oneflow/python/nn/modules/broadcast_like.py @@ -18,7 +18,7 @@ from typing import Optional, Sequence import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export def _calc_broadcast_axes(x, like_tensor): @@ -50,6 +50,5 @@ class BroadCastLike(Module): @oneflow_export("broadcast_like") -@experimental_api def broadcast_like_op(x, like_tensor, broadcast_axes: Optional[Sequence] = None): return BroadCastLike(broadcast_axes=broadcast_axes)(x, like_tensor) diff --git a/oneflow/python/nn/modules/cast.py b/oneflow/python/nn/modules/cast.py index 8b7a01bdc3848dda9d768ca1376718a2e8ba028d..324bf92d68210bb8286c0f335c4ad977a29f7a26 100644 --- a/oneflow/python/nn/modules/cast.py +++ b/oneflow/python/nn/modules/cast.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -30,7 +30,6 @@ class Cast(Module): @oneflow_export("cast") @register_tensor_op("cast") -@experimental_api def cast_op(x, dtype): r"""The operation takes input tensor `x` and casts it to the output with `dtype` @@ -45,10 +44,8 @@ def cast_op(x, dtype): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> np_arr = np.random.randn(2, 3, 4, 5).astype(np.float32) >>> input = flow.Tensor(np_arr, dtype=flow.float32) >>> output = flow.cast(input, flow.int8) diff --git a/oneflow/python/nn/modules/chunk.py b/oneflow/python/nn/modules/chunk.py index ab92a7b13414801e1d9d9e39c636f4eac6436a76..7bb9b124c0669e27c134936add29621133750c5e 100644 --- a/oneflow/python/nn/modules/chunk.py +++ b/oneflow/python/nn/modules/chunk.py @@ -17,7 +17,7 @@ from typing import Optional import oneflow as flow from oneflow.python.framework.tensor import Tensor -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.module import Module from oneflow.python.ops.array_ops import check_slice_tup_list @@ -87,12 +87,11 @@ class Chunk(Module): @oneflow_export("chunk") @register_tensor_op("chunk") -@experimental_api def chunk_op(input, chunks, dim): r"""Splits a tensor into a specific number of chunks. Each chunk is a view of the input tensor. 
Last chunk will be smaller if the tensor size along the given dimension dim is not divisible by chunks. Args: - input (oneflow.experimental.Tensor): The tensor to split. + input (oneflow.Tensor): The tensor to split. chunks (int): Number of chunks to return. dim (int): Dimension along which to split the tensor. @@ -103,10 +102,9 @@ def chunk_op(input, chunks, dim): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> np_arr = np.random.randn(5, 3, 6, 9).astype(np.float32) >>> input = flow.Tensor(np_arr) >>> of_out = [] diff --git a/oneflow/python/nn/modules/concat.py b/oneflow/python/nn/modules/concat.py index 1286fdceb8b67c4e48c5ee4d6e7e5f406f69d248..b6c7d62b06b9270ea898f947792aee710af3a7bb 100644 --- a/oneflow/python/nn/modules/concat.py +++ b/oneflow/python/nn/modules/concat.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import Tensor from oneflow.python.framework.tensor import register_tensor_op from typing import Optional, Sequence @@ -52,7 +52,6 @@ class Cat(Module): @oneflow_export("cat") -@experimental_api def concat_op(inputs, dim=0): r"""Concatenate two or more `Tensor` s at specified axis. @@ -69,8 +68,7 @@ def concat_op(inputs, dim=0): .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> import numpy as np >>> input1 = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32) diff --git a/oneflow/python/nn/modules/constant.py b/oneflow/python/nn/modules/constant.py index 4b8137217f02ea85885c1b9fe9f7b6747b4990cd..667a047f9b87268cd1d99ff1f821b7fb5a70a8bc 100644 --- a/oneflow/python/nn/modules/constant.py +++ b/oneflow/python/nn/modules/constant.py @@ -16,7 +16,7 @@ limitations under the License. import oneflow as flow from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.common_types import _size_any_t from oneflow.python.nn.modules.utils import _single @@ -64,7 +64,6 @@ class Ones(_ConstantBase): @oneflow_export("ones") -@experimental_api def ones_op( size: Union[_size_any_t, flow.Size], dtype: Optional[flow.dtype] = None, @@ -86,9 +85,7 @@ def ones_op( .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> y = flow.ones(5) >>> y tensor([1., 1., 1., 1., 1.], dtype=oneflow.float32) @@ -103,7 +100,6 @@ class Zeros(_ConstantBase): @oneflow_export("zeros") -@experimental_api def zeros_op( size: Union[_size_any_t, flow.Size], dtype: Optional[flow.dtype] = None, @@ -125,9 +121,7 @@ def zeros_op( .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> y = flow.zeros(5) >>> y tensor([0., 0., 0., 0., 0.], dtype=oneflow.float32) @@ -145,7 +139,6 @@ class ZerosLike(Module): @oneflow_export("zeros_like") -@experimental_api def zeros_like_op(other): r""" Returns a tensor filled with the scalar value 0, with the same size as input. @@ -158,12 +151,12 @@ def zeros_like_op(other): .. 
code-block:: python - import oneflow.experimental as flow - import numpy as np - - x = flow.Tensor(np.random.rand([5])) - y = flow.zeros_like(x) - # [0. 0. 0. 0. 0. ] + >>> import oneflow as flow + >>> import numpy as np + >>> x = flow.Tensor(np.random.rand(5)) + >>> y = flow.zeros_like(x) + >>> y + tensor([0., 0., 0., 0., 0.], dtype=oneflow.float32) """ return ZerosLike()(other) @@ -178,7 +171,6 @@ class OnesLike(Module): @oneflow_export("ones_like") -@experimental_api def ones_like_op(other): r""" Returns a tensor filled with the scalar value 1, with the same size as input. @@ -191,12 +183,12 @@ def ones_like_op(other): .. code-block:: python - import oneflow.experimental as flow - import numpy as np - - x = flow.Tensor(np.random.rand([5])) - y = flow.ones_like(x) - # [1. 1. 1. 1. 1. ] + >>> import oneflow as flow + >>> import numpy as np + >>> x = flow.Tensor(np.random.rand(5)) + >>> y = flow.ones_like(x) + >>> y + tensor([1., 1., 1., 1., 1.], dtype=oneflow.float32) """ return OnesLike()(other) @@ -254,7 +246,6 @@ class NewOnes(Module): @register_tensor_op("new_ones") -@experimental_api def new_ones_op(x, size=None, dtype=None, device=None, requires_grad=False): r""" @@ -271,9 +262,8 @@ def new_ones_op(x, size=None, dtype=None, device=None, requires_grad=False): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = flow.Tensor(np.ones((1, 2, 3))) >>> y = x.new_ones((2, 2)) >>> y diff --git a/oneflow/python/nn/modules/container.py b/oneflow/python/nn/modules/container.py index f36517b320a1823faa44ad03d70442c00819ef80..0404f2a0cfc0ad36be5d7f2a76dd7336b95104f6 100644 --- a/oneflow/python/nn/modules/container.py +++ b/oneflow/python/nn/modules/container.py @@ -20,7 +20,7 @@ from itertools import islice import operator import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from typing import ( @@ -40,7 +40,6 @@ T = TypeVar("T") @oneflow_export("nn.Sequential") -@experimental_api class Sequential(Module): r"""A sequential container. Modules will be added to it in the order they are passed in the constructor. @@ -50,7 +49,7 @@ class Sequential(Module): .. 
code-block:: python - >>> import oneflow.experimental.nn as nn + >>> import oneflow.nn as nn >>> nn.Sequential(nn.Conv2d(1,20,5), nn.ReLU(), nn.Conv2d(20,64,5), nn.ReLU()) #doctest: +ELLIPSIS Sequential( (0): Conv2d(1, 20, kernel_size=(5, 5), stride=(1, 1)) @@ -135,7 +134,6 @@ class Sequential(Module): @oneflow_export("nn.ParameterList") -@experimental_api class ParameterList(Module): def __init__(self, parameters: Optional[Iterable["Parameter"]] = None) -> None: super(ParameterList, self).__init__() @@ -247,7 +245,6 @@ class ParameterList(Module): @oneflow_export("nn.ParameterDict") -@experimental_api class ParameterDict(Module): def __init__(self, parameters: Optional[Mapping[str, "Parameter"]] = None) -> None: super(ParameterDict, self).__init__() @@ -291,7 +288,6 @@ class ParameterDict(Module): @oneflow_export("nn.ModuleList") -@experimental_api class ModuleList(Module): def __init__(self, modules: Optional[Iterable[Module]] = None) -> None: super(ModuleList, self).__init__() @@ -382,7 +378,6 @@ class ModuleList(Module): @oneflow_export("nn.ModuleDict") -@experimental_api class ModuleDict(Module): def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None: super(ModuleDict, self).__init__() diff --git a/oneflow/python/nn/modules/conv.py b/oneflow/python/nn/modules/conv.py index 58432a5784c82057cbc55bb57d8395632638686a..8c0250aa5aaf8434fd7ac7a40538632ee3ed4ce5 100644 --- a/oneflow/python/nn/modules/conv.py +++ b/oneflow/python/nn/modules/conv.py @@ -15,7 +15,7 @@ limitations under the License. """ import math import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.nn.modules.utils import _single, _pair from oneflow.python.nn.common_types import _size_1_t, _size_2_t @@ -58,7 +58,7 @@ def slice(x, begin, size): stop = b + s slice_tup_list.append((start, stop, step)) - return flow.experimental.slice(x, slice_tup_list) + return flow.slice(x, slice_tup_list) class ConvUtil(object): @@ -77,7 +77,6 @@ class ConvUtil(object): @oneflow_export("nn.Conv1d") -@experimental_api class Conv1d(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: https://pytorch.org/docs/master/generated/torch.nn.Conv1d.html#conv1d @@ -155,10 +154,9 @@ class Conv1d(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> import oneflow.experimental.nn as nn - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> import oneflow.nn as nn + >>> arr = np.random.randn(20, 16, 50) >>> input = flow.Tensor(arr) >>> m = nn.Conv1d(16, 33, 3, stride=2) @@ -245,7 +243,7 @@ class Conv1d(Module): groups=1, ) ) - res = flow.experimental.cat(out_list, dim=in_channel_axis) + res = flow.cat(out_list, dim=in_channel_axis) else: res = flow.F.conv1d( x, @@ -277,7 +275,6 @@ class Conv1d(Module): @oneflow_export("nn.Conv2d") -@experimental_api class Conv2d(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: https://pytorch.org/docs/master/generated/torch.nn.Conv2d.html#conv2d @@ -381,10 +378,9 @@ class Conv2d(Module): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> import oneflow.experimental.nn as nn - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> import oneflow.nn as nn + >>> arr = np.random.randn(20, 16, 50, 100) >>> input = flow.Tensor(arr) >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1)) @@ -472,7 +468,7 @@ class Conv2d(Module): groups=1, ) ) - res = flow.experimental.cat(out_list, dim=in_channel_axis) + res = flow.cat(out_list, dim=in_channel_axis) else: res = flow.F.conv2d( x, diff --git a/oneflow/python/nn/modules/dataset.py b/oneflow/python/nn/modules/dataset.py index 8b35ed603598ec983c9c1eecc90ece63277187ad..482f4cad030c43cb699bde216ffc0c23228f4778 100644 --- a/oneflow/python/nn/modules/dataset.py +++ b/oneflow/python/nn/modules/dataset.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.nn.modules.utils import ( _single, @@ -41,7 +41,6 @@ def mirrored_gen_random_seed(seed=None): @oneflow_export("nn.OfrecordReader") -@experimental_api class OfrecordReader(Module): def __init__( self, @@ -79,7 +78,6 @@ class OfrecordReader(Module): @oneflow_export("nn.OfrecordRawDecoder") -@experimental_api class OfrecordRawDecoder(Module): def __init__( self, @@ -115,7 +113,6 @@ class OfrecordRawDecoder(Module): @oneflow_export("nn.CoinFlip") -@experimental_api class CoinFlip(Module): def __init__( self, @@ -141,7 +138,6 @@ class CoinFlip(Module): @oneflow_export("nn.CropMirrorNormalize") -@experimental_api class CropMirrorNormalize(Module): def __init__( self, @@ -198,7 +194,6 @@ class CropMirrorNormalize(Module): @oneflow_export("nn.OFRecordImageDecoderRandomCrop") -@experimental_api class OFRecordImageDecoderRandomCrop(Module): def __init__( self, @@ -231,7 +226,6 @@ class OFRecordImageDecoderRandomCrop(Module): @oneflow_export("nn.OFRecordImageDecoder") -@experimental_api class OFRecordImageDecoder(Module): def __init__( self, blob_name: str, color_space: str = "BGR", @@ -271,7 +265,6 @@ class TensorBufferToListOfTensors(Module): @oneflow_export("tensor_buffer_to_list_of_tensors") -@experimental_api def tensor_buffer_to_list_of_tensors(tensor, out_shapes, out_dtypes): return TensorBufferToListOfTensors( [list(out_shape) for out_shape in out_shapes], out_dtypes, len(out_shapes) @@ -279,7 +272,6 @@ def tensor_buffer_to_list_of_tensors(tensor, out_shapes, out_dtypes): @oneflow_export("nn.image.Resize") -@experimental_api class ImageResize(Module): def __init__( self, @@ -413,10 +405,10 @@ class ImageResize(Module): res = self._op(input) res_image = res[0] if len(res) == 3: - new_size = flow.experimental.tensor_buffer_to_tensor( + new_size = flow.tensor_buffer_to_tensor( res[1], dtype=flow.int32, instance_shape=(2,) ) - scale = flow.experimental.tensor_buffer_to_tensor( + scale = flow.tensor_buffer_to_tensor( res[2], dtype=flow.float32, instance_shape=(2,) ) else: @@ -426,7 +418,6 @@ class ImageResize(Module): @oneflow_export("tmp.RawDecoder") -@experimental_api def raw_decoder( input_record, blob_name: str, @@ -453,7 +444,6 @@ def raw_decoder( @oneflow_export("tmp.OfrecordReader") -@experimental_api def get_ofrecord_handle( ofrecord_dir: str, batch_size: int = 1, @@ -479,7 +469,6 @@ def get_ofrecord_handle( @oneflow_export("nn.image.flip") -@experimental_api class ImageFlip(Module): 
r"""This operator flips the images. @@ -505,10 +494,9 @@ class ImageFlip(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> import oneflow.experimental.nn as nn - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> import oneflow.nn as nn + >>> arr = np.array([ ... [[[1, 2, 3], [3, 2, 1]], ... [[2, 3, 4], [4, 3, 2]]], @@ -542,7 +530,6 @@ class ImageFlip(Module): @oneflow_export("nn.image.decode") -@experimental_api class ImageDecode(Module): def __init__(self, dtype: flow.dtype = flow.uint8, color_space: str = "BGR"): super().__init__() @@ -560,7 +547,6 @@ class ImageDecode(Module): @oneflow_export("nn.image.normalize") -@experimental_api class ImageNormalize(Module): def __init__(self, std: Sequence[float], mean: Sequence[float]): super().__init__() @@ -578,7 +564,6 @@ class ImageNormalize(Module): @oneflow_export("nn.COCOReader") -@experimental_api class COCOReader(Module): def __init__( self, @@ -623,7 +608,6 @@ class COCOReader(Module): @oneflow_export("nn.image.batch_align") -@experimental_api class ImageBatchAlign(Module): def __init__(self, shape: Sequence[int], dtype: flow.dtype, alignment: int): super().__init__() diff --git a/oneflow/python/nn/modules/deconv.py b/oneflow/python/nn/modules/deconv.py index be572ba18da9262aff3f4753dd12c65e3df0e3bf..3e3756c57dd05c425e650be40fb70a6fde3a38ff 100644 --- a/oneflow/python/nn/modules/deconv.py +++ b/oneflow/python/nn/modules/deconv.py @@ -16,7 +16,7 @@ limitations under the License. import math import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.modules.utils import _pair from oneflow.python.nn.common_types import _size_2_t from oneflow.python.nn import init @@ -58,7 +58,7 @@ def slice(x, begin, size): stop = b + s slice_tup_list.append((start, stop, step)) - return flow.experimental.slice(x, slice_tup_list) + return flow.slice(x, slice_tup_list) class ConvUtil(object): @@ -77,7 +77,6 @@ class ConvUtil(object): @oneflow_export("nn.ConvTranspose2d") -@experimental_api class ConvTranspose2d(Module): r""" @@ -128,10 +127,9 @@ class ConvTranspose2d(Module): Examples:: >>> import numpy as np - >>> import oneflow.experimental as flow - >>> import oneflow.experimental.nn as nn - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> import oneflow.nn as nn + >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2) >>> # non-square kernels and unequal stride and with padding >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2)) @@ -234,7 +232,7 @@ class ConvTranspose2d(Module): ], )[0] ) - res = flow.experimental.cat(out_list, dim=in_channel_axis) + res = flow.cat(out_list, dim=in_channel_axis) else: res = self._op(x, self.weight)[0] if self._bias_add_op is not None: diff --git a/oneflow/python/nn/modules/diag.py b/oneflow/python/nn/modules/diag.py index 49ed84fa5e2f44fed0c4e3c28bd187b51db03c18..8d3658fb18efb38bf51b70c7bfe51965380a5927 100644 --- a/oneflow/python/nn/modules/diag.py +++ b/oneflow/python/nn/modules/diag.py @@ -15,7 +15,7 @@ limitations under the License. 
""" import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op @@ -30,7 +30,6 @@ class Diag(Module): @oneflow_export("diag") -@experimental_api def diag_op(input, diagonal=0): r""" If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal. @@ -48,10 +47,8 @@ def diag_op(input, diagonal=0): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> arr = np.array( ... [ ... [1.0, 2.0, 3.0], @@ -69,11 +66,10 @@ def diag_op(input, diagonal=0): @register_tensor_op("diag") -@experimental_api def diag_op_tensor(input, diagonal=0): r""" diag() -> Tensor - See :func:`oneflow.experimental.diag` + See :func:`oneflow.diag` """ diff --git a/oneflow/python/nn/modules/dropout.py b/oneflow/python/nn/modules/dropout.py index 3f44a1c8b7c85799bc9bffe667c77aa8fccd27c0..155cb37a0e1ab32bbe77a2a0d3980895e22ead70 100644 --- a/oneflow/python/nn/modules/dropout.py +++ b/oneflow/python/nn/modules/dropout.py @@ -17,7 +17,7 @@ import sys import random import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export import oneflow.python.framework.id_util as id_util @@ -40,7 +40,6 @@ class _DropoutNd(Module): @oneflow_export("nn.Dropout") -@experimental_api class Dropout(_DropoutNd): r"""During training, randomly zeroes some of the elements of the input tensor with probability :attr:`p` using samples from a Bernoulli @@ -69,9 +68,8 @@ class Dropout(_DropoutNd): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> m = flow.nn.Dropout(p=0) >>> arr = np.array( ... [ diff --git a/oneflow/python/nn/modules/eq.py b/oneflow/python/nn/modules/eq.py index 458a9fb913db379f1dc2f37ffaefc5bfb27f90a2..a2ee1e223918018817b05d2b828ce6a64cf2c89d 100644 --- a/oneflow/python/nn/modules/eq.py +++ b/oneflow/python/nn/modules/eq.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -44,7 +44,6 @@ class Eq(Module): @oneflow_export("eq", "equal") @register_tensor_op("eq") -@experimental_api def eq_op(input, other): r""" Computes element-wise equality. @@ -62,10 +61,9 @@ def eq_op(input, other): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> input = flow.Tensor(np.array([2, 3, 4, 5]), dtype=flow.float32) >>> other = flow.Tensor(np.array([2, 3, 4, 1]), dtype=flow.float32) diff --git a/oneflow/python/nn/modules/exp.py b/oneflow/python/nn/modules/exp.py index cbfbea2999e2b2772bb48f461a763fbab233bf6e..7ee79493d7b498995e48489f646217f7c9aa5ea3 100644 --- a/oneflow/python/nn/modules/exp.py +++ b/oneflow/python/nn/modules/exp.py @@ -15,7 +15,7 @@ limitations under the License. 
""" import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -29,7 +29,6 @@ class Exp(Module): @oneflow_export("exp") @register_tensor_op("exp") -@experimental_api def exp_op(x): """This operator computes the exponential of Tensor. @@ -50,9 +49,8 @@ def exp_op(x): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = flow.Tensor(np.array([1, 2, 3]).astype(np.float32)) >>> y = x.exp() >>> y diff --git a/oneflow/python/nn/modules/expand.py b/oneflow/python/nn/modules/expand.py index 11e9695392a2e897ca22e2577ba4a7477e3a45ff..05d815601b67c8a1a259b9e783c2598ab86e0216 100644 --- a/oneflow/python/nn/modules/expand.py +++ b/oneflow/python/nn/modules/expand.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from typing import Optional @@ -27,13 +27,12 @@ class Expand(Module): def forward(self, x): if x.dtype == flow.int8: - x = flow.experimental.cast(x, flow.int32) + x = flow.cast(x, flow.int32) return flow.F.expand(x, self.expand_size) @oneflow_export("expand") @register_tensor_op("expand") -@experimental_api def expand_op(x, *sizes): """This operator expand the input tensor to a larger size. @@ -54,10 +53,8 @@ def expand_op(x, *sizes): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> x = np.array([[[[0, 1]], ... [[2, 3]], ... [[4, 5]]]]).astype(np.int32) diff --git a/oneflow/python/nn/modules/flatten.py b/oneflow/python/nn/modules/flatten.py index a32949997b4c14aee506da62c653159389558f82..90bb369229825b87023025dbee3420feba9af34b 100644 --- a/oneflow/python/nn/modules/flatten.py +++ b/oneflow/python/nn/modules/flatten.py @@ -15,12 +15,11 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @oneflow_export("nn.Flatten") -@experimental_api class Flatten(Module): """Flattens a contiguous range of dims into a tensor. For use with: nn.Sequential. @@ -33,13 +32,12 @@ class Flatten(Module): .. code-block:: python - import oneflow.experimental as flow - - input = flow.Tensor(32, 1, 5, 5) - m = flow.nn.Flatten() - output = m(input) - output.size() - # out flow.Size([32, 25]) + >>> import oneflow as flow + >>> input = flow.Tensor(32, 1, 5, 5) + >>> m = flow.nn.Flatten() + >>> output = m(input) + >>> output.shape + flow.Size([32, 25]) """ @@ -57,7 +55,6 @@ class Flatten(Module): @oneflow_export("flatten") @register_tensor_op("flatten") -@experimental_api def _flow_flatten(input, start_dim: int = 0, end_dim: int = -1): """Flattens a contiguous range of dims into a tensor. @@ -70,12 +67,10 @@ def _flow_flatten(input, start_dim: int = 0, end_dim: int = -1): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> input = flow.Tensor(32, 1, 5, 5) >>> output = input.flatten(start_dim=1) - >>> output.size() + >>> output.shape flow.Size([32, 25]) """ diff --git a/oneflow/python/nn/modules/flip.py b/oneflow/python/nn/modules/flip.py index 6c61a6b836a16030ce55144b3f6b274f74598bb1..a89478955435dc5488e5efb0fbe4cffef0eb6c66 100644 --- a/oneflow/python/nn/modules/flip.py +++ b/oneflow/python/nn/modules/flip.py @@ -18,7 +18,7 @@ import collections from typing import Optional, Sequence, Union import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.modules.utils import _check_axis @@ -47,7 +47,6 @@ class Flip(Module): @oneflow_export("flip") -@experimental_api def flip_op(input, dims): r""" @@ -67,7 +66,7 @@ def flip_op(input, dims): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np >>> np_arr = np.arange(0, 8).reshape((2, 2, 2)).astype(np.float32) @@ -86,10 +85,9 @@ def flip_op(input, dims): @register_tensor_op("flip") -@experimental_api def flip_op_tensor(input, dims): r""" - See :func:`oneflow.experimental.flip` + See :func:`oneflow.flip` """ return Flip(dims)(input) diff --git a/oneflow/python/nn/modules/floor.py b/oneflow/python/nn/modules/floor.py index 853fc49fdd4da3836c22826b3f9e8e0399efb8fd..59135923157e03d8e385cc8c29a9608a4bd93291 100644 --- a/oneflow/python/nn/modules/floor.py +++ b/oneflow/python/nn/modules/floor.py @@ -18,7 +18,7 @@ import collections from typing import Optional, Sequence, Union import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.modules.utils import _check_axis @@ -33,7 +33,6 @@ class Floor(Module): @oneflow_export("floor") -@experimental_api def floor_op(x): r""" @@ -49,9 +48,8 @@ def floor_op(x): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> input = flow.Tensor(np.array([-0.5, 1.5, 0, 0.8]), dtype=flow.float32) >>> output = flow.floor(input) >>> output.shape @@ -73,10 +71,9 @@ def floor_op(x): @register_tensor_op("floor") -@experimental_api def floor_op_tensor(input): r""" - See :func:`oneflow.experimental.floor` + See :func:`oneflow.floor` """ return Floor()(input) diff --git a/oneflow/python/nn/modules/gather.py b/oneflow/python/nn/modules/gather.py index 7250f6f344b286622380be6c3cc9e7b412a5477f..5df57153df6cb5773f6853b38211f940ff5529e3 100644 --- a/oneflow/python/nn/modules/gather.py +++ b/oneflow/python/nn/modules/gather.py @@ -16,7 +16,7 @@ limitations under the License. 
import oneflow as flow from oneflow.python.framework.tensor import Tensor -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.module import Module @@ -52,11 +52,10 @@ class Gather(Module): @oneflow_export("gather") @register_tensor_op("gather") -@experimental_api def gather_op(input, index, dim=0, sparse_grad=False): r"""Gathers values along an axis specified by `dim`. - For a 3-D tensor the output is specified by: + For a 3-D tensor the output is specified by:: out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 @@ -76,10 +75,8 @@ def gather_op(input, index, dim=0, sparse_grad=False): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> input = np.random.randn(3, 4, 3, 5) >>> index = np.random.choice(np.arange(3), size=180, replace=True).reshape((3, 4, 3, 5)) >>> output = flow.gather(flow.Tensor(input), flow.Tensor(index, dtype=flow.int), dim=1) diff --git a/oneflow/python/nn/modules/gather_nd.py b/oneflow/python/nn/modules/gather_nd.py index 6386954c76160545146b50bffe215c570e4eea74..332748d28d5e8bd1c58b73881898938c36562d4a 100644 --- a/oneflow/python/nn/modules/gather_nd.py +++ b/oneflow/python/nn/modules/gather_nd.py @@ -16,7 +16,7 @@ limitations under the License. import oneflow as flow from oneflow.python.framework.tensor import Tensor -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module @@ -37,7 +37,6 @@ class Gather_nd(Module): @oneflow_export("gather_nd") -@experimental_api def gather_nd_op(input, index): r"""This operator is a high-dimensional extension of `gather`; `index` is a K-dimensional tensor, which is regarded as an index of the input Tensor `input`. @@ -57,10 +56,8 @@ def gather_nd_op(input, index): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> input = flow.Tensor(np.array([[1, 2,3], [4, 5,6],[7,8,9]]), dtype=flow.float) >>> index_1 = flow.Tensor(np.array([[0], [2]]), dtype=flow.int) >>> out_1 = flow.gather_nd(input,index_1) diff --git a/oneflow/python/nn/modules/greater.py b/oneflow/python/nn/modules/greater.py index f1771e7011641aa1eee8e8db8ded1c7d1076272c..51cba084b26e97087e2062240a3802ae85b7b4a1 100644 --- a/oneflow/python/nn/modules/greater.py +++ b/oneflow/python/nn/modules/greater.py @@ -15,7 +15,7 @@ limitations under the License.
""" import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -25,18 +25,17 @@ class Greater(Module): def forward(self, x, y): if x.dtype != flow.float32: - x = flow.experimental.cast(x, flow.float32) + x = flow.cast(x, flow.float32) if isinstance(y, int) or isinstance(y, float): y = flow.Tensor( [float(y)], dtype=flow.float32, device=flow.device(x.device.type) ) if y.dtype != flow.float32: - y = flow.experimental.cast(y, flow.float32) + y = flow.cast(y, flow.float32) return flow.F.broadcast_greater(x, y) @oneflow_export("gt") -@experimental_api def greater_op(x, y): r"""Returns the truth value of :math:`x > y` element-wise. @@ -52,9 +51,8 @@ def greater_op(x, y): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input1 = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32) >>> input2 = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32) @@ -67,13 +65,12 @@ def greater_op(x, y): @register_tensor_op("gt") -@experimental_api def greater_op_tensor(x, y): r""" gt() -> Tensor - See :func:`oneflow.experimental.gt` + See :func:`oneflow.gt` """ return Greater()(x, y) diff --git a/oneflow/python/nn/modules/greater_equal.py b/oneflow/python/nn/modules/greater_equal.py index 7cbddde52a44c11f66327d4a6d14e2e7fab9faa6..97b3d39887b6e3eed19088c0cf90a8f487e106a0 100644 --- a/oneflow/python/nn/modules/greater_equal.py +++ b/oneflow/python/nn/modules/greater_equal.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -25,18 +25,17 @@ class GreaterEqual(Module): def forward(self, x, y): if x.dtype != flow.float32: - x = flow.experimental.cast(x, flow.float32) + x = flow.cast(x, flow.float32) if isinstance(y, int) or isinstance(y, float): y = flow.Tensor( [float(y)], dtype=flow.float32, device=flow.device(x.device.type) ) if y.dtype != flow.float32: - y = flow.experimental.cast(y, flow.float32) + y = flow.cast(y, flow.float32) return flow.F.broadcast_greater_equal(x, y) @oneflow_export("ge") -@experimental_api def greater_equal_op(x, y): r"""Returns the truth value of :math:`x >= y` element-wise. @@ -52,9 +51,8 @@ def greater_equal_op(x, y): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input1 = flow.Tensor(np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32) >>> input2 = flow.Tensor(np.array([1, 1, 4]).astype(np.float32), dtype=flow.float32) @@ -67,13 +65,12 @@ def greater_equal_op(x, y): @register_tensor_op("ge") -@experimental_api def greater_equal_op_tensor(x, y): r""" ge() -> Tensor - See :func:`oneflow.experimental.ge` + See :func:`oneflow.ge` """ return GreaterEqual()(x, y) diff --git a/oneflow/python/nn/modules/in_top_k.py b/oneflow/python/nn/modules/in_top_k.py index 6cf752625fc219125adaa64755f5b3092d074af3..e261afd53c7e2466a557fe5eb455257ff5978f6c 100644 --- a/oneflow/python/nn/modules/in_top_k.py +++ b/oneflow/python/nn/modules/in_top_k.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.module import Module @@ -42,7 +42,6 @@ class InTopk(Module): @oneflow_export("in_top_k") -@experimental_api def in_top_k_op(targets, predictions, k): r"""Says whether the targets are in the top K predictions. @@ -58,9 +57,8 @@ def in_top_k_op(targets, predictions, k): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> targets1 = flow.Tensor(np.array([3, 1]), dtype=flow.int32) >>> predictions1 = flow.Tensor(np.array([[0.0, 1.0, 2.0, 3.0], [3.0, 2.0, 1.0, 0.0],]), dtype=flow.float32) >>> out1 = flow.in_top_k(targets1, predictions1, k=1) @@ -81,13 +79,12 @@ def in_top_k_op(targets, predictions, k): @register_tensor_op("in_top_k") -@experimental_api def in_top_k_op_tensor(targets, predictions, k): r""" in_top_k() -> Tensor - See :func:`oneflow.experimental.in_top_k` + See :func:`oneflow.in_top_k` """ diff --git a/oneflow/python/nn/modules/instancenorm.py b/oneflow/python/nn/modules/instancenorm.py index 7af7531e92623f8e2204f2c56e979e1e0e0b6497..54a9a3b432e84218812a1ed64c7d48ce55f3daf9 100644 --- a/oneflow/python/nn/modules/instancenorm.py +++ b/oneflow/python/nn/modules/instancenorm.py @@ -16,7 +16,7 @@ limitations under the License. import oneflow as flow from oneflow.python.nn.modules.batchnorm import _NormBase -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export class _InstanceNorm(_NormBase): @@ -41,7 +41,7 @@ class _InstanceNorm(_NormBase): mean = x.mean(2, keepdim=True) variance = x.var(2, keepdim=True) - normalized = (x - mean) / flow.experimental.sqrt(variance + self.eps) + normalized = (x - mean) / flow.sqrt(variance + self.eps) if self.weight and params_shape[0] == self.weight.nelement(): weight = self.weight.reshape(shape=nd_params_shape) @@ -65,7 +65,6 @@ class _InstanceNorm(_NormBase): @oneflow_export("nn.InstanceNorm1d") -@experimental_api class InstanceNorm1d(_InstanceNorm): r"""The interface is consistent with PyTorch. The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.InstanceNorm1d.html @@ -131,10 +130,8 @@ class InstanceNorm1d(_InstanceNorm): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> # Without Learnable Parameters >>> m = flow.nn.InstanceNorm1d(100) @@ -158,7 +155,6 @@ class InstanceNorm1d(_InstanceNorm): @oneflow_export("nn.InstanceNorm2d") -@experimental_api class InstanceNorm2d(_InstanceNorm): r"""The interface is consistent with PyTorch. The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.InstanceNorm2d.html @@ -224,10 +220,8 @@ class InstanceNorm2d(_InstanceNorm): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> # Without Learnable Parameters >>> m = flow.nn.InstanceNorm2d(100) @@ -244,7 +238,6 @@ class InstanceNorm2d(_InstanceNorm): @oneflow_export("nn.InstanceNorm3d") -@experimental_api class InstanceNorm3d(_InstanceNorm): r"""The interface is consistent with PyTorch. The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.InstanceNorm3d.html @@ -310,11 +303,8 @@ class InstanceNorm3d(_InstanceNorm): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - - >>> # Without Learnable Parameters >>> m = flow.nn.InstanceNorm3d(100) >>> # With Learnable Parameters diff --git a/oneflow/python/nn/modules/interpolate.py b/oneflow/python/nn/modules/interpolate.py index 5eb360a2bcb8c1d0ecaaeb562dd57137184d8416..3fe64cac26e759167c92bf348804f5b0f4d94133 100644 --- a/oneflow/python/nn/modules/interpolate.py +++ b/oneflow/python/nn/modules/interpolate.py @@ -17,7 +17,7 @@ import math import warnings import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from typing import Optional, Union, Tuple @@ -220,7 +220,6 @@ class Interpolate(Module): @oneflow_export("nn.functional.interpolate") -@experimental_api def interpolate( input, size=None, @@ -303,10 +302,9 @@ def interpolate( .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> input = flow.Tensor(np.arange(1, 5).reshape((1, 1, 4)), dtype=flow.float32) >>> output = flow.nn.functional.interpolate(input, scale_factor=2.0, mode="linear") >>> output diff --git a/oneflow/python/nn/modules/less.py b/oneflow/python/nn/modules/less.py index 86fef66871f867a000e895549ee39cb11fca9bdf..c635420171d9d9f0611ff7fe4bae5087de16954a 100644 --- a/oneflow/python/nn/modules/less.py +++ b/oneflow/python/nn/modules/less.py @@ -15,7 +15,7 @@ limitations under the License. 
""" import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -25,19 +25,18 @@ class Less(Module): def forward(self, x, y): if x.dtype != flow.float32: - x = flow.experimental.cast(x, flow.float32) + x = flow.cast(x, flow.float32) if isinstance(y, int) or isinstance(y, float): y = flow.Tensor( [float(y)], dtype=flow.float32, device=flow.device(x.device.type) ) if y.dtype != flow.float32: - y = flow.experimental.cast(y, flow.float32) + y = flow.cast(y, flow.float32) return flow.F.broadcast_less(x, y) @oneflow_export("lt") @register_tensor_op("lt") -@experimental_api def less_op(x, y): r"""Returns the truth value of :math:`x < y` element-wise. @@ -53,9 +52,8 @@ def less_op(x, y): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input1 = flow.Tensor(np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32) >>> input2 = flow.Tensor(np.array([1, 2, 4]).astype(np.float32), dtype=flow.float32) diff --git a/oneflow/python/nn/modules/less_equal.py b/oneflow/python/nn/modules/less_equal.py index 7f127f00a04cc015f65b3990e8b8d3c467793b97..f1b2ef2180a3156528c17161e6fe8731e788d250 100644 --- a/oneflow/python/nn/modules/less_equal.py +++ b/oneflow/python/nn/modules/less_equal.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -25,19 +25,18 @@ class LessEqual(Module): def forward(self, x, y): if x.dtype != flow.float32: - x = flow.experimental.cast(x, flow.float32) + x = flow.cast(x, flow.float32) if isinstance(y, int) or isinstance(y, float): y = flow.Tensor( [float(y)], dtype=flow.float32, device=flow.device(x.device.type) ) if y.dtype != flow.float32: - y = flow.experimental.cast(y, flow.float32) + y = flow.cast(y, flow.float32) return flow.F.broadcast_less_equal(x, y) @oneflow_export("le") @register_tensor_op("le") -@experimental_api def less_equal_op(x, y): r"""Returns the truth value of :math:`x <= y` element-wise. @@ -53,9 +52,8 @@ def less_equal_op(x, y): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input1 = flow.Tensor(np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32) >>> input2 = flow.Tensor(np.array([1, 1, 4]).astype(np.float32), dtype=flow.float32) diff --git a/oneflow/python/nn/modules/linear.py b/oneflow/python/nn/modules/linear.py index 3f9aee4cd52416c035aca8dae680ae3722de316a..9d3d80fed669116c69e88a4f94f9b88e405808c3 100644 --- a/oneflow/python/nn/modules/linear.py +++ b/oneflow/python/nn/modules/linear.py @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
""" import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import Tensor from oneflow.python.nn.module import Module from oneflow.python.nn.init import _calculate_fan_in_and_fan_out @@ -22,7 +22,6 @@ import math @oneflow_export("nn.Identity") -@experimental_api class Identity(Module): """A placeholder identity operator that is argument-insensitive. @@ -54,7 +53,6 @@ class Identity(Module): @oneflow_export("nn.Linear") -@experimental_api class Linear(Module): """Applies a linear transformation to the incoming data: :math:`y = xA^T + b` @@ -84,9 +82,8 @@ class Linear(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> m = flow.nn.Linear(20, 30, False) >>> input = flow.Tensor(np.random.randn(128, 20)) diff --git a/oneflow/python/nn/modules/log1p.py b/oneflow/python/nn/modules/log1p.py index 320bc8f6af6086b62f7b3332a2d1d4a031c2e733..0b9489edec8c4f01072a48b9f684719a7b6999c1 100644 --- a/oneflow/python/nn/modules/log1p.py +++ b/oneflow/python/nn/modules/log1p.py @@ -17,7 +17,7 @@ limitations under the License. import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -32,7 +32,6 @@ class Log1p(Module): @oneflow_export("log1p") @register_tensor_op("log1p") -@experimental_api def log1p_op(input): r"""Returns a new tensor with the natural logarithm of (1 + input). @@ -43,9 +42,8 @@ def log1p_op(input): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x = flow.Tensor(np.array([1.3, 1.5, 2.7])) >>> out = flow.log1p(x).numpy() >>> out diff --git a/oneflow/python/nn/modules/loss.py b/oneflow/python/nn/modules/loss.py index 848698eb7707c38d14d55e258f4ef23400bfccdf..6e1d60dbbe2d17e45ef460688b6f2b2cb9459630 100644 --- a/oneflow/python/nn/modules/loss.py +++ b/oneflow/python/nn/modules/loss.py @@ -17,13 +17,12 @@ from typing import Optional import oneflow as flow from oneflow.python.framework.tensor import Tensor -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.nn.modules.constant import _ConstantBase @oneflow_export("nn.L1Loss") -@experimental_api class L1Loss(Module): r"""This operator computes the L1 Loss between each element in `input` and `target`. @@ -48,21 +47,19 @@ class L1Loss(Module): output = \sum_{i=1}^n|Target_i - Input_i| Args: - input (oneflow.experimental.Tensor): The input Tensor. - target (oneflow.experimental.Tensor): The target Tensor. + input (oneflow.Tensor): The input Tensor. + target (oneflow.Tensor): The target Tensor. reduction (str): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean". Returns: - oneflow.experimental.Tensor: The result Tensor. + oneflow.Tensor: The result Tensor. For example: .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> input = flow.Tensor([[1, 1, 1], [2, 2, 2], [7, 7, 7]], dtype = flow.float32) >>> target = flow.Tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype = flow.float32) >>> m = flow.nn.L1Loss(reduction="none") @@ -99,17 +96,16 @@ class L1Loss(Module): input.shape == target.shape ), "The Input shape must be the same as Target shape" - l1_value = flow.experimental.abs(flow.experimental.sub(input, target)) + l1_value = flow.abs(flow.sub(input, target)) if self.reduction == "mean": - return flow.experimental.mean(l1_value) + return flow.mean(l1_value) elif self.reduction == "sum": - return flow.experimental.sum(l1_value) + return flow.sum(l1_value) else: return l1_value @oneflow_export("nn.CrossEntropyLoss") -@experimental_api class CrossEntropyLoss(Module): r"""This criterion combines :class:`~flow.nn.LogSoftmax` and :class:`~flow.nn.NLLLoss` in one single class. @@ -145,10 +141,9 @@ class CrossEntropyLoss(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> input = flow.Tensor( ... [[-0.1664078, -1.7256707, -0.14690138], ... [-0.21474946, 0.53737473, 0.99684894], @@ -206,19 +201,17 @@ class CrossEntropyLoss(Module): input, target, depth=input.shape[len(input.shape) - 1] ) if self.ignore_index is not None: - zeros = flow.experimental.zeros( - size=out.shape, dtype=out.dtype, device=out.device - ) - condition = flow.experimental.eq(target, self.ignore_index) - ones = flow.experimental.ones( + zeros = flow.zeros(size=out.shape, dtype=out.dtype, device=out.device) + condition = flow.eq(target, self.ignore_index) + ones = flow.ones( size=condition.shape, dtype=condition.dtype, device=condition.device ) condition = ones.sub(condition).reshape(tuple(out.shape)) - out = flow.experimental.where(condition, out, zeros) + out = flow.where(condition, out, zeros) if self.reduction == "mean": reduce_sum = out.sum() reduce_count = condition.argwhere().shape[0] - out = flow.experimental.mul(reduce_sum, 1.0 / reduce_count) + out = flow.mul(reduce_sum, 1.0 / reduce_count) if self.reduction == "mean": return out.mean() @@ -231,7 +224,6 @@ class CrossEntropyLoss(Module): @oneflow_export("nn.BCELoss") -@experimental_api class BCELoss(Module): r"""This operator computes the binary cross entropy loss. @@ -256,23 +248,21 @@ class BCELoss(Module): out = -\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i)) Args: - weight (oneflow.experimental.Tensor, optional): The manual rescaling weight to the loss. Default to None, whose corresponding weight value is 1. + weight (oneflow.Tensor, optional): The manual rescaling weight to the loss. Default to None, whose corresponding weight value is 1. reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean". Attention: The input value must be in the range of (0, 1). Or the loss function may return `nan` value. Returns: - oneflow.experimental.Tensor: The result Tensor. + oneflow.Tensor: The result Tensor. For example: .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> input = flow.Tensor(np.array([[1.2, 0.2, -0.3], [0.7, 0.6, -2]]).astype(np.float32)) >>> target = flow.Tensor(np.array([[0, 1, 0], [1, 0, 1]]).astype(np.float32)) >>> weight = flow.Tensor(np.array([[2, 2, 2], [2, 2, 2]]).astype(np.float32)) @@ -315,9 +305,8 @@ class BCELoss(Module): input.shape == target.shape ), "The Input shape must be the same as Target shape" - _cross_entropy_loss = flow.experimental.negative( - target * flow.experimental.log(input) - + (1 - target) * flow.experimental.log(1 - input) + _cross_entropy_loss = flow.negative( + target * flow.log(input) + (1 - target) * flow.log(1 - input) ) if self.weight is not None: @@ -329,15 +318,14 @@ class BCELoss(Module): _weighted_loss = _cross_entropy_loss if self.reduction == "mean": - return flow.experimental.mean(_weighted_loss) + return flow.mean(_weighted_loss) elif self.reduction == "sum": - return flow.experimental.sum(_weighted_loss) + return flow.sum(_weighted_loss) else: return _weighted_loss @oneflow_export("nn.NLLLoss") -@experimental_api class NLLLoss(Module): r""" The negative log likelihood loss. It is useful to train a classification problem with `C` classes. @@ -389,8 +377,7 @@ class NLLLoss(Module): .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> import numpy as np >>> input = flow.Tensor( @@ -461,19 +448,17 @@ class NLLLoss(Module): raise NotImplemented if self.ignore_index is not None: - zeros = flow.experimental.zeros( - size=res.shape, dtype=res.dtype, device=res.device - ) - condition = flow.experimental.eq(target, self.ignore_index) - ones = flow.experimental.ones( + zeros = flow.zeros(size=res.shape, dtype=res.dtype, device=res.device) + condition = flow.eq(target, self.ignore_index) + ones = flow.ones( size=condition.shape, dtype=condition.dtype, device=condition.device ) condition = ones.sub(condition).reshape(tuple(res.shape)) - res = flow.experimental.where(condition, res, zeros) + res = flow.where(condition, res, zeros) if self.reduction == "mean": res = res.sum() reduce_count = condition.argwhere().shape[0] - res = flow.experimental.mul(res, 1.0 / reduce_count) + res = flow.mul(res, 1.0 / reduce_count) if self.reduction == "none": return res @@ -484,7 +469,6 @@ class NLLLoss(Module): @oneflow_export("nn.KLDivLoss") -@experimental_api class KLDivLoss(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -553,10 +537,8 @@ class KLDivLoss(Module): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> input = flow.Tensor([-0.9021705, 0.08798598, 1.04686249], dtype=flow.float32) >>> target = flow.Tensor([1.22386942, -0.89729659, 0.01615712], dtype=flow.float32) >>> m = flow.nn.KLDivLoss(reduction="none", log_target=False) @@ -587,30 +569,29 @@ class KLDivLoss(Module): def forward(self, input: Tensor, target: Tensor) -> Tensor: if self.log_target: - _kl_div_loss = flow.experimental.exp(target) * (target - input) + _kl_div_loss = flow.exp(target) * (target - input) else: - _kl_div_out_loss = target * (flow.experimental.log(target) - input) - _zeros = flow.experimental.zeros( + _kl_div_out_loss = target * (flow.log(target) - input) + _zeros = flow.zeros( size=_kl_div_out_loss.shape, dtype=_kl_div_out_loss.dtype, device=_kl_div_out_loss.device, ) # when target < 0, we set to `0`, when target > 0, we set to `1`. - _condition = flow.experimental.gt(target, 0) + _condition = flow.gt(target, 0) # To avoid the `nan` value in log operation # We set those positions which `target` is less than zero as `0` - _kl_div_loss = flow.experimental.where(_condition, _kl_div_out_loss, _zeros) + _kl_div_loss = flow.where(_condition, _kl_div_out_loss, _zeros) if self.reduction == "mean": - return flow.experimental.mean(_kl_div_loss) + return flow.mean(_kl_div_loss) elif self.reduction == "sum": - return flow.experimental.sum(_kl_div_loss) + return flow.sum(_kl_div_loss) else: return _kl_div_loss @oneflow_export("nn.MSELoss") -@experimental_api class MSELoss(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -657,10 +638,8 @@ class MSELoss(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> input = flow.Tensor( ... [[-0.02557137, 0.03101675, 1.37493674], ... [0.25599439, -1.08372561, -0.21006816]], dtype=flow.float32) @@ -695,20 +674,17 @@ class MSELoss(Module): self.reduction = reduction def forward(self, input: Tensor, target: Tensor) -> Tensor: - mean_squared_difference = flow.experimental.square( - flow.experimental.sub(input, target) - ) + mean_squared_difference = flow.square(flow.sub(input, target)) if self.reduction == "mean": - return flow.experimental.mean(mean_squared_difference) + return flow.mean(mean_squared_difference) elif self.reduction == "sum": - return flow.experimental.sum(mean_squared_difference) + return flow.sum(mean_squared_difference) else: # Do no reduction return mean_squared_difference @oneflow_export("nn.MarginRankingLoss") -@experimental_api class MarginRankingLoss(Module): r"""Creates a criterion that measures the loss given inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`, @@ -739,10 +715,8 @@ class MarginRankingLoss(Module): .. 
code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> import numpy as np - >>> x1 = flow.Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=flow.float32) >>> x2 = flow.Tensor(np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]), dtype=flow.float32) >>> target = flow.Tensor(np.array([[1, -1, 1],[-1, 1, -1], [1, 1, 1]]), dtype=flow.float32) @@ -779,13 +753,9 @@ class MarginRankingLoss(Module): self.reduction = reduction def forward(self, input1, input2, target): - res = flow.experimental.clip( - flow.experimental.add( - self.margin, - flow.experimental.mul( - target, - flow.experimental.mul(-1, flow.experimental.sub(input1, input2)), - ), + res = flow.clip( + flow.add( + self.margin, flow.mul(target, flow.mul(-1, flow.sub(input1, input2)),), ), min=0.0, ) @@ -799,7 +769,6 @@ class MarginRankingLoss(Module): @oneflow_export("nn.CTCLoss") -@experimental_api class CTCLoss(Module): r"""The Connectionist Temporal Classification loss. The interface is consistent with PyTorch. @@ -863,8 +832,7 @@ class CTCLoss(Module): .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> import numpy as np >>> log_probs = np.array( ... [ @@ -931,7 +899,7 @@ class CTCLoss(Module): ) -> Tensor: loss, _ = self._op(log_probs, targets, input_lengths, target_lengths) if self.zero_infinity: - cond = flow.experimental.eq( + cond = flow.eq( loss, self.constant( size=loss.shape, @@ -940,33 +908,29 @@ class CTCLoss(Module): device=loss.device, )(), ) - loss = flow.experimental.where( + loss = flow.where( cond, - flow.experimental.zeros( - size=loss.shape, dtype=loss.dtype, device=loss.device - ), + flow.zeros(size=loss.shape, dtype=loss.dtype, device=loss.device), loss, ) if self.reduction == "mean": - return flow.experimental.mean( + return flow.mean( self._xdivy_op( loss, - flow.experimental.cast( - flow.experimental.clamp(target_lengths, min=1), - dtype=log_probs.dtype, + flow.cast( + flow.clamp(target_lengths, min=1), dtype=log_probs.dtype, ), )[0] ) elif self.reduction == "sum": - return flow.experimental.sum(loss) + return flow.sum(loss) else: return loss @oneflow_export("nn.BCEWithLogitsLoss") -@experimental_api class BCEWithLogitsLoss(Module): r"""This operator combines the `Sigmoid` and `BCELoss` together. For numerical stability, we apply some math tricks instead of using `Sigmoid` layer with `BCELoss`. @@ -1010,10 +974,7 @@ class BCEWithLogitsLoss(Module): .. 
code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - >>> import oneflow.typing as tp - + >>> import oneflow as flow >>> input = flow.Tensor([[1.2, 0.2, -0.3], [0.7, 0.6, -2], [0.7, 0.6, -2]], dtype=flow.float32) >>> target = flow.Tensor([[0, 1, 0], [1, 0, 1], [1, 0, 1]], dtype=flow.float32) >>> weight = flow.Tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=flow.float32) @@ -1069,25 +1030,19 @@ class BCEWithLogitsLoss(Module): ) ) - _neg_input = flow.experimental.negative(input) - _max_val = flow.experimental.clip(_neg_input, 0) - _neg_max_val = flow.experimental.negative(_max_val) + _neg_input = flow.negative(input) + _max_val = flow.clip(_neg_input, 0) + _neg_max_val = flow.negative(_max_val) if self.pos_weight: _log_weight = ((self.pos_weight - 1) * target) + 1 _loss = (1 - target) * input + _log_weight * ( - flow.experimental.log( - flow.experimental.exp(_neg_max_val) - + flow.experimental.exp(_neg_input - _max_val) - ) + flow.log(flow.exp(_neg_max_val) + flow.exp(_neg_input - _max_val)) + _max_val ) else: _loss = (1 - target) * input + _max_val - _loss += flow.experimental.log( - flow.experimental.exp(_neg_max_val) - + flow.experimental.exp(_neg_input - _max_val) - ) + _loss += flow.log(flow.exp(_neg_max_val) + flow.exp(_neg_input - _max_val)) if self.weight is not None: assert ( @@ -1098,16 +1053,15 @@ class BCEWithLogitsLoss(Module): _weighted_loss = _loss if self.reduction == "mean": - return flow.experimental.mean(_weighted_loss) + return flow.mean(_weighted_loss) elif self.reduction == "sum": - return flow.experimental.sum(_weighted_loss) + return flow.sum(_weighted_loss) else: # Do no reduction return _weighted_loss @oneflow_export("nn.SmoothL1Loss") -@experimental_api class SmoothL1Loss(Module): r"""Creates a criterion that uses a squared term if the absolute element-wise error falls below beta and an L1 term otherwise. @@ -1185,10 +1139,9 @@ class SmoothL1Loss(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = flow.Tensor(np.array([0.1, 0.4, 0.3, 0.5, 0.9]).astype(np.float32), dtype=flow.float32) >>> y = flow.Tensor(np.array([0.3, 0.9, 2.5, 0.4, 0.3]).astype(np.float32), dtype=flow.float32) >>> m = flow.nn.SmoothL1Loss(reduction="none") @@ -1244,9 +1197,9 @@ class SmoothL1Loss(Module): if self.reduction == "none": return loss elif self.reduction == "sum": - return flow.experimental.sum(loss) + return flow.sum(loss) elif self.reduction == "mean": - return flow.experimental.mean(loss) + return flow.mean(loss) if __name__ == "__main__": diff --git a/oneflow/python/nn/modules/masked_fill.py b/oneflow/python/nn/modules/masked_fill.py index 19701c42a0f4ac4aad83186772a2332224238f61..eb4dd5fe7e3fbdc1cb64d2e62c286f221128d2b5 100644 --- a/oneflow/python/nn/modules/masked_fill.py +++ b/oneflow/python/nn/modules/masked_fill.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -33,7 +33,6 @@ class MaskedFill(Module): @oneflow_export("masked_fill") @register_tensor_op("masked_fill") -@experimental_api def masked_fill_op(input, mask, value): r""" Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is True. 
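A minimal usage sketch of the migrated call style for ``masked_fill`` (values are illustrative; this assumes the operator behaves as documented above and, per the ``oneflow_export``/``register_tensor_op`` decorators in this hunk, is available both as ``flow.masked_fill`` and as a tensor method):

.. code-block:: python

    >>> import oneflow as flow
    >>> import numpy as np
    >>> x = flow.Tensor(np.array([[-0.5, 1.2, 0.3], [0.7, -2.0, 0.1]], dtype=np.float32))
    >>> mask = x.gt(0)                       # True where x is positive
    >>> y = flow.masked_fill(x, mask, 0.0)   # functional form: zero out the positive entries
    >>> z = x.masked_fill(mask, 0.0)         # equivalent tensor-method form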
@@ -47,9 +46,8 @@ def masked_fill_op(input, mask, value): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> in_arr = np.array( ... [[[-0.13169311, 0.97277078, 1.23305363, 1.56752789], ... [-1.51954275, 1.87629473, -0.53301206, 0.53006478], diff --git a/oneflow/python/nn/modules/masked_select.py b/oneflow/python/nn/modules/masked_select.py index 63733c63034c0064af546d5dfb51ca0f91d5eb4d..8a53f17904a5d5382fa92165a892d772651596bb 100644 --- a/oneflow/python/nn/modules/masked_select.py +++ b/oneflow/python/nn/modules/masked_select.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op @@ -38,29 +38,28 @@ class MaskedSelect(Module): broadcast_x_axes.append(i) if max_dim != mask.shape[i]: broadcast_mask_axes.append(i) - broadcast_like_tensor = flow.experimental.zeros( + broadcast_like_tensor = flow.zeros( tuple(broadcast_like_shape), dtype=flow.float32, device=x.device, ) broadcast_like_tensor.requires_grad = x.requires_grad or mask.requires_grad if len(broadcast_x_axes) != 0: - x = flow.experimental.broadcast_like( + x = flow.broadcast_like( x, broadcast_like_tensor, broadcast_axes=tuple(broadcast_x_axes) ) if len(broadcast_mask_axes) != 0: - mask = flow.experimental.broadcast_like( + mask = flow.broadcast_like( mask, broadcast_like_tensor, broadcast_axes=tuple(broadcast_mask_axes) ) mask = mask.to(dtype=x.dtype) res = flow.F.mul(x, mask) - indices = flow.experimental.argwhere(res) + indices = flow.argwhere(res) gather_res = flow.F.gather_nd(res, indices) return gather_res.flatten() @oneflow_export("masked_select") -@experimental_api def masked_select_op(x, mask): r""" @@ -76,10 +75,9 @@ def masked_select_op(x, mask): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = flow.Tensor(np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]), dtype=flow.float32) >>> mask = x.gt(0.05) >>> out = flow.masked_select(x, mask) @@ -90,11 +88,10 @@ def masked_select_op(x, mask): @register_tensor_op("masked_select") -@experimental_api def tensor_masked_select_op(x, mask): r""" - See :func:`oneflow.experimental.masked_select` + See :func:`oneflow.masked_select` """ return MaskedSelect()(x, mask) diff --git a/oneflow/python/nn/modules/math_ops.py b/oneflow/python/nn/modules/math_ops.py index f1e9951c8df0cd80fb3dcb782fe24e3ec8a395dd..f9760dbe919d4b0d80b92e9389558d2a9241c0c2 100644 --- a/oneflow/python/nn/modules/math_ops.py +++ b/oneflow/python/nn/modules/math_ops.py @@ -18,7 +18,7 @@ import collections from typing import Optional, Sequence, Union import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.modules.utils import _check_axis, _check_inplace_valid @@ -65,7 +65,6 @@ class BroadcastMul(Module): @oneflow_export("mul") @register_tensor_op("mul") -@experimental_api def _mul(input, other): r"""Computes the multiplication of input by other for each element, scalar and broadcast promotation are supported. @@ -79,9 +78,8 @@ def _mul(input, other): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + # element-wise multiply >>> input = flow.Tensor(np.random.randn(2,3)) >>> other = flow.Tensor(np.random.randn(2,3)) @@ -128,21 +126,16 @@ class Variance(Module): def forward(self, input): axis = _check_axis(self.dim, input.shape) if isinstance(axis, list) and len(axis) == 0: - return flow.experimental.zeros(size=input.shape) + return flow.zeros(size=input.shape) else: - return flow.experimental.sub( - flow.experimental.mean( - flow.experimental.square(input), axis, self.keepdim - ), - flow.experimental.square( - flow.experimental.mean(input, axis, self.keepdim) - ), + return flow.sub( + flow.mean(flow.square(input), axis, self.keepdim), + flow.square(flow.mean(input, axis, self.keepdim)), ) @oneflow_export("var") @register_tensor_op("var") -@experimental_api def variance_op(input, dim=None, keepdim=False): r"""Returns the variance of each row of the `input` tensor in the given dimension `dim`. @@ -163,9 +156,8 @@ def variance_op(input, dim=None, keepdim=False): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> np_arr = np.random.randn(2,3,4,5) >>> input = flow.Tensor(np_arr) >>> output = flow.var(input, 1, True) @@ -206,7 +198,6 @@ class ScalarAdd(Module): @oneflow_export("sub") @register_tensor_op("sub") -@experimental_api def _sub(input, other): r"""Computes the subtraction of input by other for each element, scalar and broadcast promotation are supported. The formula is: @@ -219,9 +210,8 @@ def _sub(input, other): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + # element-wise subtract >>> input = flow.Tensor(np.random.randn(2,3)) >>> other = flow.Tensor(np.random.randn(2,3)) @@ -276,7 +266,6 @@ class ScalarDivByTensor(Module): @oneflow_export("div") @register_tensor_op("div") -@experimental_api def _div(input, other): r"""Computes the division of input by other for each element, scalar and broadcast promotation are supported. The formula is: @@ -293,9 +282,8 @@ def _div(input, other): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + # element-wise divide >>> input = flow.Tensor(np.random.randn(2,3)) >>> other = flow.Tensor(np.random.randn(2,3)) @@ -320,7 +308,7 @@ def _div(input, other): """ if isinstance(input, (int, float)): - return ScalarMul(input)(flow.experimental.reciprocal(other)) + return ScalarMul(input)(flow.reciprocal(other)) elif isinstance(other, (int, float)): if other == 0 or other == 0.0: other = 0.0 @@ -345,7 +333,6 @@ class Reciprocal(Module): @oneflow_export("reciprocal") @register_tensor_op("reciprocal") -@experimental_api def _reciprocal(x): r"""Computes the safe reciprocal of x. If x is zero, the reciprocal will be also set to zero. @@ -355,9 +342,8 @@ def _reciprocal(x): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = flow.Tensor(np.array([[1, 2, 3], [4, 5, 6]])) >>> out = flow.reciprocal(x) >>> out.numpy() @@ -400,7 +386,6 @@ class BroadcastAdd(Module): @oneflow_export("add") @register_tensor_op("add") -@experimental_api def _add(x, y): r"""Computes the addition of x by y for each element, scalar and broadcast promotation are supported. The formula is: @@ -413,9 +398,8 @@ def _add(x, y): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + # element-wise add >>> x = flow.Tensor(np.random.randn(2,3)) >>> y = flow.Tensor(np.random.randn(2,3)) @@ -454,10 +438,9 @@ def _add(x, y): @register_tensor_op("add_") -@experimental_api def _add_inplace(x, y): r""" - In-place version of :func:`oneflow.experimental.Tensor.add`. + In-place version of :func:`oneflow.Tensor.add`. """ if isinstance(y, (int, float)): @@ -471,7 +454,7 @@ def _add_inplace(x, y): elif y.shape == (1,): return ScalarAddByTensor(inplace=True)(x, y) else: - y = flow.experimental.broadcast_like(y, x) + y = flow.broadcast_like(y, x) return ElementwiseAdd(inplace=True)(x, y) @@ -484,7 +467,6 @@ class Asin(Module): @oneflow_export("asin") -@experimental_api def asin_op(input): r""" Returns a new tensor with the arcsine of the elements of :attr:`input`. @@ -499,9 +481,8 @@ def asin_op(input): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> input = flow.Tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32) >>> output = flow.asin(input) >>> output.shape @@ -520,31 +501,28 @@ def asin_op(input): @register_tensor_op("asin") -@experimental_api def asin_op_tensor(input): r""" - See :func:`oneflow.experimental.asin` + See :func:`oneflow.asin` """ return Asin()(input) @oneflow_export("arcsin") -@experimental_api def arcsin_op(input): r""" - Alias for :func:`oneflow.experimental.asin` + Alias for :func:`oneflow.asin` """ return Asin()(input) @register_tensor_op("arcsin") -@experimental_api def arcsin_op_tensor(input): r""" - See :func:`oneflow.experimental.asin` + See :func:`oneflow.asin` """ return Asin()(input) @@ -558,7 +536,6 @@ class Asinh(Module): @oneflow_export("asinh") -@experimental_api def asinh_op(input): r""" Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`. @@ -573,9 +550,8 @@ def asinh_op(input): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> input = flow.Tensor(np.array([2, 3, 4]), dtype=flow.float32) >>> output = flow.asinh(input) >>> output.shape @@ -596,31 +572,28 @@ def asinh_op(input): @oneflow_export("arcsinh") -@experimental_api def arcsinh_op(input): r""" - Alias for :func:`oneflow.experimental.asinh` + Alias for :func:`oneflow.asinh` """ return Asinh()(input) @register_tensor_op("asinh") -@experimental_api def asinh_op_tensor(input): r""" - See :func:`oneflow.experimental.asinh` + See :func:`oneflow.asinh` """ return Asinh()(input) @register_tensor_op("arcsinh") -@experimental_api def arcsinh_op_tensor(input): r""" - See :func:`oneflow.experimental.asinh` + See :func:`oneflow.asinh` """ return Asinh()(input) @@ -637,7 +610,6 @@ class Sin(Module): @oneflow_export("sin") -@experimental_api def sin_op(tensor): r""" Returns a new tensor with the sine of the elements of :attr:`input`. @@ -653,9 +625,8 @@ def sin_op(tensor): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x1 = flow.Tensor(np.array([-0.5461, 0.1347, -2.7266, -0.2746]).astype(np.float32)) >>> out1 = flow.sin(x1) >>> out1 @@ -671,13 +642,12 @@ def sin_op(tensor): @register_tensor_op("sin") -@experimental_api def sin_op_tensor(tensor): r""" sin() -> Tensor - See :func:`oneflow.experimental.sin` + See :func:`oneflow.sin` """ @@ -685,10 +655,9 @@ def sin_op_tensor(tensor): @register_tensor_op("sin_") -@experimental_api def inplace_sin_op_tensor(x): r""" - In-place version of :func:`oneflow.experimental.sin` + In-place version of :func:`oneflow.sin` """ @@ -705,7 +674,6 @@ class Cos(Module): @oneflow_export("cos") @register_tensor_op("cos") -@experimental_api def cos_op(tensor): r""" Returns a new tensor with the cosine of the elements of :attr:`input`. @@ -720,9 +688,8 @@ def cos_op(tensor): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> arr = np.array([1.4309, 1.2706, -0.8562, 0.9796]) >>> input = flow.Tensor(arr, dtype=flow.float32) >>> output = flow.cos(input).numpy() @@ -740,7 +707,6 @@ class Atan(Module): @oneflow_export("atan") -@experimental_api def atan_op(tensor): r""" Returns a new tensor with the arctangent of the elements of :attr:`input`. @@ -755,9 +721,8 @@ def atan_op(tensor): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> input = flow.Tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32) >>> output = flow.atan(input) >>> output.shape @@ -768,32 +733,29 @@ def atan_op(tensor): @register_tensor_op("atan") -@experimental_api def atan_op_tensor(tensor): r""" - See :func:`oneflow.experimental.atan` + See :func:`oneflow.atan` """ return Atan()(tensor) @oneflow_export("arctan") -@experimental_api def arctan_op(tensor): r""" - Alias for :func:`oneflow.experimental.atan` + Alias for :func:`oneflow.atan` """ return Atan()(tensor) @register_tensor_op("arctan") -@experimental_api def arctan_op_tensor(tensor): r""" - See :func:`oneflow.experimental.arctan` + See :func:`oneflow.arctan` """ return Atan()(tensor) @@ -820,7 +782,6 @@ class FMod(Module): @oneflow_export("fmod") -@experimental_api def fmod_op(input, other): r""" fmod(input, other, *, out=None) -> Tensor @@ -842,8 +803,7 @@ def fmod_op(input, other): Example:: - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3]), 2) tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32) >>> flow.fmod(flow.tensor([1, 2, 3, 4, 5]), 1.5) @@ -856,11 +816,10 @@ def fmod_op(input, other): @register_tensor_op("fmod") -@experimental_api def fmod_op_tensor(input, other): r""" - See :func:`oneflow.experimental.fmod` + See :func:`oneflow.fmod` """ return FMod()(input, other) @@ -876,7 +835,6 @@ class Log(Module): @oneflow_export("log") @register_tensor_op("log") -@experimental_api def log_op(tensor): r""" Returns a new tensor with the natural logarithm of the elements of :attr:`input`. @@ -891,9 +849,8 @@ def log_op(tensor): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> arr = np.random.randn(2, 3, 4, 5) >>> input = flow.Tensor(arr, dtype=flow.float32) >>> output = flow.log(input) @@ -933,7 +890,6 @@ class Sqrt(Module): @oneflow_export("rsqrt") @register_tensor_op("rsqrt") -@experimental_api def rsqrt_op(input): r"""Returns a new tensor with the reciprocal of the square-root of each of the elements of :attr:`input`. @@ -948,10 +904,9 @@ def rsqrt_op(input): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> a = flow.Tensor(np.array([1.0, 2.0, 3.0])) >>> out = flow.rsqrt(a).numpy() >>> out @@ -970,7 +925,6 @@ class Rsqrt(Module): @oneflow_export("sqrt") @register_tensor_op("sqrt") -@experimental_api def sqrt_op(input): r"""Returns a new tensor with the square-root of the elements of :attr:`input`. @@ -984,10 +938,9 @@ def sqrt_op(input): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> arr = np.array([1.0, 2.0, 3.0]) >>> input = flow.Tensor(arr) >>> output = flow.sqrt(input).numpy() @@ -1007,7 +960,6 @@ class Square(Module): @oneflow_export("square") @register_tensor_op("square") -@experimental_api def square_op(input): r"""Returns a new tensor with the square of the elements of :attr:`input`. @@ -1021,10 +973,9 @@ def square_op(input): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> arr = np.array([1.0, 2.0, 3.0]) >>> input = flow.Tensor(arr) >>> output = flow.square(input).numpy() @@ -1049,7 +1000,7 @@ class Std(Module): def forward(self, x): self.axis = _check_axis(self.dim, x.shape) if isinstance(self.axis, list) and len(self.axis) == 0: - return flow.experimental.zeros(size=x.shape) + return flow.zeros(size=x.shape) else: if len(self.axis) == 0: self.reduce_count = x.nelement() @@ -1058,11 +1009,10 @@ class Std(Module): self.reduce_count *= x.shape[i] sum = ( - flow.experimental.sum(self.square_op(x), self.axis, self.keepdim) - / self.reduce_count + flow.sum(self.square_op(x), self.axis, self.keepdim) / self.reduce_count ) square = self.square_op( - flow.experimental.sum(x, self.axis, self.keepdim) / self.reduce_count + flow.sum(x, self.axis, self.keepdim) / self.reduce_count ) subtract = self.subtract_op(sum, square) res = self.sqrt_op(subtract) @@ -1071,7 +1021,6 @@ class Std(Module): @oneflow_export("std") @register_tensor_op("std") -@experimental_api def std_op(tensor, dim, unbiased=True, keepdim=False): r""" Returns the standard-deviation of each row of the :attr:`input` tensor in the @@ -1095,10 +1044,9 @@ def std_op(tensor, dim, unbiased=True, keepdim=False): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> arr = np.array([1.0, 2.0, 3.0]) >>> input = flow.Tensor(arr) >>> output = flow.std(input, dim=0).numpy() @@ -1122,7 +1070,6 @@ class Pow(Module): @oneflow_export("pow") @register_tensor_op("pow") -@experimental_api def pow_op(tensor, exponent): r"""Takes the power of each element in input with exponent and returns a tensor with the result. Exponent can be either a single float number, a single int number, or a tensor with the same shape as input. When exponent is a scalar value, the operation applied is: @@ -1146,10 +1093,9 @@ def pow_op(tensor, exponent): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = flow.Tensor(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])) >>> out = flow.pow(x, 2).numpy() >>> out @@ -1177,7 +1123,6 @@ class Addmm(Module): @oneflow_export("addmm") -@experimental_api def addmm_op(input, mat1, mat2, alpha=1, beta=1): r"""addmm(beta=1, input, alpha=1, mat1, mat2, out=None) -> Tensor @@ -1209,8 +1154,7 @@ def addmm_op(input, mat1, mat2, alpha=1, beta=1): For example: >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> input = flow.tensor(np.array([[1,2,4],[5,11,9.1]])) >>> mat1 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5]])) >>> mat2 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5],[3.7,2.2,8.1]])) @@ -1236,10 +1180,9 @@ def addmm_op(input, mat1, mat2, alpha=1, beta=1): @register_tensor_op("addmm") -@experimental_api def addmm_op_tensor(input, mat1, mat2, alpha=1, beta=1): r""" - See :func:`oneflow.experimental.addmm` + See :func:`oneflow.addmm` """ return Addmm()(input, mat1, mat2, alpha, beta) @@ -1291,7 +1234,6 @@ class Clamp(Module): @oneflow_export("clamp") -@experimental_api def clamp_op(tensor, min=None, max=None): r""" Clamp all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]` and return @@ -1318,9 +1260,8 @@ def clamp_op(tensor, min=None, max=None): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> arr = np.array([0.2, 0.6, -1.5, -0.3]) >>> input = flow.Tensor(arr) >>> output = flow.clamp(input, min=-0.5, max=0.5) @@ -1344,28 +1285,25 @@ def clamp_op(tensor, min=None, max=None): @register_tensor_op("clamp") -@experimental_api def clamp_op_tensor(tensor, min=None, max=None): r""" - See :func:`oneflow.experimental.clamp` + See :func:`oneflow.clamp` """ return Clamp(min, max)(tensor) @oneflow_export("clip") -@experimental_api def clip_op(tensor, min=None, max=None): r""" - Alias for :func:`oneflow.experimental.clamp` + Alias for :func:`oneflow.clamp` """ return Clamp(min, max)(tensor) @register_tensor_op("clip") -@experimental_api def clip_op_tensor(tensor, min=None, max=None): r""" - See :func:`oneflow.experimental.clamp` + See :func:`oneflow.clamp` """ return Clamp(min, max)(tensor) @@ -1380,7 +1318,6 @@ class Cosh(Module): @oneflow_export("cosh") @register_tensor_op("cosh") -@experimental_api def cosh_op(tensor): r""" Returns a new tensor with the hyperbolic cosine of the elements of :attr:`input`. @@ -1396,9 +1333,8 @@ def cosh_op(tensor): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> arr = np.array([ 0.1632, 1.1835, -0.6979, -0.7325]) >>> input = flow.Tensor(arr, dtype=flow.float32) >>> output = flow.cosh(input).numpy() @@ -1419,7 +1355,6 @@ class Erf(Module): @oneflow_export("erf") @register_tensor_op("erf") -@experimental_api def erf_op(input): r"""Computes the error function of each element. The error function is defined as follows: @@ -1436,10 +1371,9 @@ def erf_op(input): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = flow.Tensor(np.array([0, -1., 10.]), dtype=flow.float32) >>> out = flow.erf(x) >>> out.shape @@ -1469,10 +1403,9 @@ def erf_op(input): @register_tensor_op("erf") -@experimental_api def erf_op_tensor(input): r""" - See :func:`oneflow.experimental.erf` + See :func:`oneflow.erf` """ return Erf()(input) @@ -1488,7 +1421,6 @@ class Erfc(Module): @oneflow_export("erfc") @register_tensor_op("erfc") -@experimental_api def erfc_op(input): r"""Computes the complementary error function of each element of input. The complementary error function is defined as follows: @@ -1506,10 +1438,9 @@ def erfc_op(input): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = flow.Tensor(np.array([0, -1., 10.]), dtype=flow.float32) >>> out = flow.erfc(x) >>> out.shape @@ -1539,10 +1470,9 @@ def erfc_op(input): @register_tensor_op("erfc") -@experimental_api def erfc_op_tensor(input): r""" - See :func:`oneflow.experimental.erfc` + See :func:`oneflow.erfc` """ return Erfc()(input) @@ -1556,7 +1486,6 @@ class Ceil(Module): @oneflow_export("ceil") -@experimental_api def ceil_op(x): r"""Returns a new tensor with the ceil of the elements of :attr:`x`, the smallest integer greater than or equal to each element. @@ -1577,10 +1506,8 @@ def ceil_op(x): .. code-block:: python - >>> import oneflow.experimental as flow - >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> import numpy as np >>> x = flow.Tensor(np.array([0.1, -2, 3.4]).astype(np.float32)) >>> y = flow.ceil(x) >>> print(y.shape) @@ -1617,10 +1544,9 @@ def ceil_op(x): @register_tensor_op("ceil") -@experimental_api def ceil_op_tensor(x): r""" - See :func:`oneflow.experimental.ceil` + See :func:`oneflow.ceil` """ return Ceil()(x) @@ -1635,7 +1561,6 @@ class Expm1(Module): @oneflow_export("expm1") -@experimental_api def expm1_op(x): """Returns a new tensor with the exponential of the elements minus 1 of :attr:`x`. @@ -1656,25 +1581,23 @@ def expm1_op(x): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> x = flow.Tensor(np.array([1, 2, 3]).astype(np.float32)) >>> y = flow.expm1(x) - >>> print(y.shape) + >>> y.shape flow.Size([3]) - >>> print(y.numpy()) - [ 1.7182817 6.389056 19.085537 ] + >>> y + tensor([ 1.7183, 6.3891, 19.0855], dtype=oneflow.float32) >>> x = flow.Tensor(np.array([[2, 4, 6],[7, 8, 9]]).astype(np.float32)) >>> y = x.expm1() - >>> print(y.shape) + >>> y.shape flow.Size([2, 3]) - >>> print(y.numpy()) - [[6.3890562e+00 5.3598152e+01 4.0242880e+02] - [1.0956332e+03 2.9799580e+03 8.1020840e+03]] + >>> y + tensor([[6.3891e+00, 5.3598e+01, 4.0243e+02], + [1.0956e+03, 2.9800e+03, 8.1021e+03]], dtype=oneflow.float32) @@ -1695,10 +1618,9 @@ def expm1_op(x): @register_tensor_op("expm1") -@experimental_api def expm1_op_tensor(x): r""" - See :func:`oneflow.experimental.expm1` + See :func:`oneflow.expm1` """ return Expm1()(x) @@ -1731,24 +1653,23 @@ class Topk(Module): if self.largest: indices = self._op_topk_last_dim(input)[0] else: - neg_input = flow.experimental.mul(input, -1) + neg_input = flow.mul(input, -1) indices = self._op_topk_last_dim(neg_input)[0] - return (flow.experimental.gather(input, indices, dim=axis), indices) + return (flow.gather(input, indices, dim=axis), indices) else: perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis) x = flow.F.transpose(input, perm=perm) if self.largest: indices = self._op_topk_last_dim(x)[0] else: - neg_input = flow.experimental.mul(x, -1) + neg_input = flow.mul(x, -1) indices = self._op_topk_last_dim(neg_input)[0] indices = flow.F.transpose(indices, perm=get_inversed_perm(perm)) - return (flow.experimental.gather(input, indices, dim=axis), indices) + return (flow.gather(input, indices, dim=axis), indices) @oneflow_export("topk") @register_tensor_op("topk") -@experimental_api def topk_op(input, k, dim: int = None, largest: bool = True, sorted: bool = True): r"""Finds the values and indices of the k largest entries at specified axis. @@ -1766,9 +1687,8 @@ def topk_op(input, k, dim: int = None, largest: bool = True, sorted: bool = True .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x = np.array([[1, 3, 8, 7, 2], [1, 9, 4, 3, 2]], dtype=np.float32) >>> (values, indices) = flow.topk(flow.Tensor(x), k=3, dim=1) >>> values diff --git a/oneflow/python/nn/modules/matmul.py b/oneflow/python/nn/modules/matmul.py index 61c443444f127b3aea5969026998dc9ba21b9b08..fb85253aa3704b210230ad110eea0b1dc45eaacb 100644 --- a/oneflow/python/nn/modules/matmul.py +++ b/oneflow/python/nn/modules/matmul.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op import oneflow.python.framework.id_util as id_util from typing import Optional, Sequence @@ -46,7 +46,6 @@ class MatMul(Module): @oneflow_export("matmul") @register_tensor_op("matmul") -@experimental_api def matmul_op(input, other): r"""This operator applies matrix multiplication to two Tensor. @@ -61,9 +60,8 @@ def matmul_op(input, other): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> input1 = flow.Tensor(np.random.randn(2, 6), dtype=flow.float32) >>> input2 = flow.Tensor(np.random.randn(6, 5), dtype=flow.float32) >>> of_out = flow.matmul(input1, input2) diff --git a/oneflow/python/nn/modules/meshgrid.py b/oneflow/python/nn/modules/meshgrid.py index 437a4ce768a506d85e06cc71d909000a38273bb6..0c5ee7e5ff06c0c2f820c3e61b114edf820dd639 100644 --- a/oneflow/python/nn/modules/meshgrid.py +++ b/oneflow/python/nn/modules/meshgrid.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export class MeshGrid(Module): @@ -49,7 +49,6 @@ class MeshGrid(Module): @oneflow_export("meshgrid") -@experimental_api def meshgrid_op(*inputs): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -73,9 +72,8 @@ def meshgrid_op(*inputs): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input1 = flow.Tensor(np.array([1, 2, 3]), dtype=flow.float32) >>> input2 = flow.Tensor(np.array([4, 5, 6]), dtype=flow.float32) >>> of_x, of_y = flow.meshgrid(input1, input2) diff --git a/oneflow/python/nn/modules/ne.py b/oneflow/python/nn/modules/ne.py index 74ad5552a661bd73949b3cb2cda24d2b854e31ab..305c14dc41bae81979f067bddd42d480c9a18e4d 100644 --- a/oneflow/python/nn/modules/ne.py +++ b/oneflow/python/nn/modules/ne.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -44,7 +44,6 @@ class Ne(Module): @oneflow_export("ne", "not_equal") @register_tensor_op("ne") -@experimental_api def ne_op(input, other): r""" Computes element-wise not equality. @@ -62,10 +61,9 @@ def ne_op(input, other): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> input = flow.Tensor(np.array([2, 3, 4, 5]), dtype=flow.float32) >>> other = flow.Tensor(np.array([2, 3, 4, 1]), dtype=flow.float32) diff --git a/oneflow/python/nn/modules/negative.py b/oneflow/python/nn/modules/negative.py index 6721814f4c9d93f4478f06d01d79617cb3b526ae..3d09fafe9705c597f7040868b565ab62da8ac8c1 100644 --- a/oneflow/python/nn/modules/negative.py +++ b/oneflow/python/nn/modules/negative.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -29,7 +29,6 @@ class Negative(Module): @oneflow_export("negative", "neg") @register_tensor_op("negative") -@experimental_api def negative_op(x): """This operator computes the negative value of Tensor. @@ -44,9 +43,8 @@ def negative_op(x): .. 
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input = flow.Tensor( ... np.array([1.0, -1.0, 2.3]).astype(np.float32), dtype=flow.float32 ... ) diff --git a/oneflow/python/nn/modules/norm.py b/oneflow/python/nn/modules/norm.py index 2290d5b9e042a5af5a1581f831602054b747ae1a..cb0ffff0c2f750d867dd0651f9bb236f16cac082 100644 --- a/oneflow/python/nn/modules/norm.py +++ b/oneflow/python/nn/modules/norm.py @@ -17,7 +17,7 @@ limitations under the License. import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -47,9 +47,9 @@ def check_dim(num_dims, input_dim): def _norm_min_max(input, ord, dim, keepdim): if ord > 0: - return flow.experimental.max(input, dim=dim, keepdim=keepdim) + return flow.max(input, dim=dim, keepdim=keepdim) else: - return flow.experimental.min(input, dim=dim, keepdim=keepdim) + return flow.min(input, dim=dim, keepdim=keepdim) class Vector_Norm(Module): @@ -71,24 +71,14 @@ class Vector_Norm(Module): def _vector_norm(self, x, ord, dim, keepdim=False): if ord == 0: # TODO: fix error when input are all zero vector - return flow.experimental.cast( - flow.tensor([flow.experimental.argwhere(x).shape[0]]), flow.float32 - ) + return flow.cast(flow.tensor([flow.argwhere(x).shape[0]]), flow.float32) elif ord == float("inf"): - return flow.experimental.max( - flow.experimental.abs(x), dim=dim, keepdim=keepdim - ) + return flow.max(flow.abs(x), dim=dim, keepdim=keepdim) elif ord == float("-inf"): - return flow.experimental.min( - flow.experimental.abs(x), dim=dim, keepdim=keepdim - ) + return flow.min(flow.abs(x), dim=dim, keepdim=keepdim) else: - return flow.experimental.pow( - flow.experimental.sum( - flow.experimental.pow(flow.experimental.abs(x), ord), - dim=dim, - keepdim=keepdim, - ), + return flow.pow( + flow.sum(flow.pow(flow.abs(x), ord), dim=dim, keepdim=keepdim,), 1.0 / ord, ) @@ -142,29 +132,21 @@ class Matrix_Norm(Module): if ord == "nuc": raise NotImplementedError elif ord == "fro": - return flow.experimental.sqrt( - flow.experimental.sum( - flow.experimental.square(x), dim=dim, keepdim=keepdim - ) - ) + return flow.sqrt(flow.sum(flow.square(x), dim=dim, keepdim=keepdim)) elif ord in [float("inf"), float("-inf")]: dim_0, dim_1 = dim[0], dim[1] dim_0, dim_1 = dim_1, dim_0 if dim_1 > dim_0 and not keepdim: dim_1 -= 1 - res = flow.experimental.sum( - flow.experimental.abs(x), dim=dim_0, keepdim=keepdim - ) + res = flow.sum(flow.abs(x), dim=dim_0, keepdim=keepdim) return _norm_min_max(res, ord, dim_1, keepdim) elif ord in [1, -1]: dim_0, dim_1 = dim[0], dim[1] if dim_1 > dim_0 and not keepdim: dim_1 -= 1 - res = flow.experimental.sum( - flow.experimental.abs(x), dim=dim_0, keepdim=keepdim - ) + res = flow.sum(flow.abs(x), dim=dim_0, keepdim=keepdim) return _norm_min_max(res, ord, dim_1, keepdim) elif ord in [2, -2]: raise NotImplementedError @@ -208,7 +190,6 @@ class Norm(Module): @oneflow_export("linalg.norm") -@experimental_api def norm_op(input, ord=None, dim=None, keepdim=False): r"""linalg.norm(input, ord=None, dim=None, keepdim=False, *, out=None) -> Tensor @@ -262,10 +243,9 @@ def norm_op(input, ord=None, dim=None, keepdim=False): Examples:: - >>> import oneflow.experimental as flow - >>> from oneflow.experimental import linalg as LA + >>> import oneflow 
as flow + >>> from oneflow import linalg as LA >>> import numpy as np - >>> flow.enable_eager_execution() >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4) >>> a tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32) @@ -329,16 +309,14 @@ def norm_op(input, ord=None, dim=None, keepdim=False): @register_tensor_op("norm") -@experimental_api def norm_tensor_op(input, ord=None, dim=None, keepdim=False): r""" - See :func:`oneflow.experimental.linalg.norm` + See :func:`oneflow.linalg.norm` """ return Norm(ord, dim, keepdim)(input) @oneflow_export("linalg.vector_norm") -@experimental_api def vector_norm_tensor_op(input, ord=2, dim=None, keepdim=False): r""" linalg.vector_norm(input, ord=2, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor @@ -385,10 +363,9 @@ def vector_norm_tensor_op(input, ord=2, dim=None, keepdim=False): Examples:: - >>> import oneflow.experimental as flow - >>> from oneflow.experimental import linalg as LA + >>> import oneflow as flow + >>> from oneflow import linalg as LA >>> import numpy as np - >>> flow.enable_eager_execution() >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4) >>> a tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32) @@ -406,7 +383,6 @@ def vector_norm_tensor_op(input, ord=2, dim=None, keepdim=False): @oneflow_export("linalg.matrix_norm") -@experimental_api def matrix_norm_tensor_op(input, ord="fro", dim=(-2, -1), keepdim=False): r""" linalg.matrix_norm(input, ord='fro', dim=(-2, -1), keepdim=False, *, dtype=None, out=None) -> Tensor @@ -450,10 +426,9 @@ def matrix_norm_tensor_op(input, ord="fro", dim=(-2, -1), keepdim=False): Examples:: - >>> import oneflow.experimental as flow - >>> from oneflow.experimental import linalg as LA + >>> import oneflow as flow + >>> from oneflow import linalg as LA >>> import numpy as np - >>> flow.enable_eager_execution() >>> a = flow.tensor(np.arange(9, dtype=np.float32)).reshape((3,3)) >>> a tensor([[0., 1., 2.], diff --git a/oneflow/python/nn/modules/normalization.py b/oneflow/python/nn/modules/normalization.py index 0b53993465892f14641d611b2b0f69064a843245..0e8ed903b9cfbac4875932ae88b761deb2b86df3 100644 --- a/oneflow/python/nn/modules/normalization.py +++ b/oneflow/python/nn/modules/normalization.py @@ -16,7 +16,7 @@ limitations under the License. import oneflow as flow from oneflow.python.nn import init from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import Tensor from typing import Tuple, Union @@ -24,7 +24,6 @@ _shape_t = Union[int, Tuple[int], flow._oneflow_internal.Size] @oneflow_export("nn.GroupNorm") -@experimental_api class GroupNorm(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -64,10 +63,8 @@ class GroupNorm(Module): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> input = flow.Tensor(np.random.randn(20, 6, 10, 10)) >>> # Separate 6 channels into 3 groups >>> m = flow.nn.GroupNorm(3, 6) @@ -112,22 +109,20 @@ class GroupNorm(Module): input.shape[1] == self.num_channels ), "The channels of input tensor must equal num_channels" origin_shape = input.shape - reshape_to_1d = flow.experimental.reshape( + reshape_to_1d = flow.reshape( input, shape=[origin_shape[0], self.num_groups, -1] ) - mean = flow.experimental.mean(reshape_to_1d, dim=2, keepdim=True) - variance = flow.experimental.var(reshape_to_1d, dim=2, keepdim=True) - normalized = (reshape_to_1d - mean) / flow.experimental.sqrt( - variance + self.eps - ) - normalized = flow.experimental.reshape( + mean = flow.mean(reshape_to_1d, dim=2, keepdim=True) + variance = flow.var(reshape_to_1d, dim=2, keepdim=True) + normalized = (reshape_to_1d - mean) / flow.sqrt(variance + self.eps) + normalized = flow.reshape( normalized, shape=[origin_shape[0], self.num_channels, -1] ) if self.weight: normalized = normalized * self.weight if self.bias: normalized = normalized + self.bias - res = flow.experimental.reshape(normalized, shape=tuple(input.shape)) + res = flow.reshape(normalized, shape=tuple(input.shape)) return res @@ -138,7 +133,6 @@ class GroupNorm(Module): @oneflow_export("nn.LayerNorm") -@experimental_api class LayerNorm(Module): r"""Applies Layer Normalization over a mini-batch of inputs as described in the paper `Layer Normalization <https://arxiv.org/abs/1607.06450>`__ @@ -186,9 +180,8 @@ class LayerNorm(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input_arr = np.array( ... [ ... [ diff --git a/oneflow/python/nn/modules/padding.py b/oneflow/python/nn/modules/padding.py index edbea801ea479f3d9727d5ab180323dfa164d4fd..7e11d483dba67c46c9919dd0222cf604bb8474b5 100644 --- a/oneflow/python/nn/modules/padding.py +++ b/oneflow/python/nn/modules/padding.py @@ -16,12 +16,11 @@ limitations under the License. from typing import Union import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module @oneflow_export("nn.ReplicationPad2d") -@experimental_api class ReplicationPad2d(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -44,9 +43,8 @@ class ReplicationPad2d(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> replicationpad_layer_0 = flow.nn.ReplicationPad2d((2, 2, 1, 1)) >>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)) >>> input_int = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.int32)) @@ -113,7 +111,6 @@ class ReplicationPad2d(Module): @oneflow_export("nn.ReflectionPad2d") -@experimental_api class ReflectionPad2d(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -140,9 +137,8 @@ class ReflectionPad2d(Module): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)), dtype=flow.float32) >>> m = flow.nn.ReflectionPad2d((2, 2, 1, 1)) >>> out = m(input) @@ -191,7 +187,6 @@ class ReflectionPad2d(Module): @oneflow_export("nn.ConstantPad2d") -@experimental_api class ConstantPad2d(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -216,9 +211,8 @@ class ConstantPad2d(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> constantpad_layer_0 = flow.nn.ConstantPad2d((2, 2, 1, 1), 1) >>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)) >>> input_int = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.int32)) @@ -275,7 +269,6 @@ class ConstantPad2d(Module): @oneflow_export("nn.ConstantPad3d") -@experimental_api class ConstantPad3d(Module): r"""Pads the input tensor boundaries with a constant value. The interface is consistent with PyTorch, and referenced from: @@ -304,7 +297,7 @@ class ConstantPad3d(Module): Examples:: - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np >>> input = flow.tensor(np.arange(8).reshape(1,1,2,2,2).astype(np.int32)) diff --git a/oneflow/python/nn/modules/permute.py b/oneflow/python/nn/modules/permute.py index f65736b6975853e26667ebb04df6c36b0e86d3a9..fd4465a76744fba7ef22236fcb20e82c659ff4f9 100644 --- a/oneflow/python/nn/modules/permute.py +++ b/oneflow/python/nn/modules/permute.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from typing import Optional, Sequence @@ -39,7 +39,6 @@ class Permute(Module): @register_tensor_op("permute") -@experimental_api def permute_op(tensor, *dims): r"""Returns a view of the original tensor with its dimensions permuted. @@ -51,9 +50,8 @@ def permute_op(tensor, *dims): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32) >>> out = input.permute(1, 0, 2, 3).shape >>> out diff --git a/oneflow/python/nn/modules/pixelshuffle.py b/oneflow/python/nn/modules/pixelshuffle.py index 00fd7910defc43a45227c9530291088318ba9298..be4b56d0fc7c1e2f26871a894ee5a5298a83f8ec 100644 --- a/oneflow/python/nn/modules/pixelshuffle.py +++ b/oneflow/python/nn/modules/pixelshuffle.py @@ -16,12 +16,11 @@ limitations under the License. from typing import Optional from oneflow.python.framework.tensor import Tensor -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module @oneflow_export("nn.PixelShuffle") -@experimental_api class PixelShufflev2(Module): r""" Part of the documentation is referenced from: @@ -69,20 +68,18 @@ class PixelShufflev2(Module): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> m = flow.nn.PixelShuffle(upscale_factor=2) >>> x = flow.Tensor(np.random.randn(3, 4, 5, 5)) >>> y = m(x) - >>> print(y.size()) + >>> y.shape flow.Size([3, 1, 10, 10]) >>> m = flow.nn.PixelShuffle(h_upscale_factor=3, w_upscale_factor=4) >>> x = flow.Tensor(np.random.randn(1, 24, 2, 2)) >>> y = m(x) - >>> print(y.size()) + >>> y.shape flow.Size([1, 2, 6, 8]) .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network: diff --git a/oneflow/python/nn/modules/pooling.py b/oneflow/python/nn/modules/pooling.py index b183d196283efdc530cd926e3db53650ed231635..fc61b3771be2167eab021ac2bd8a7896bcc83e2e 100644 --- a/oneflow/python/nn/modules/pooling.py +++ b/oneflow/python/nn/modules/pooling.py @@ -16,7 +16,7 @@ limitations under the License. from typing import Optional import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.nn.modules.utils import _getint, _single, _pair, _triple from oneflow.python.nn.common_types import _size_1_t, _size_2_t, _size_3_t @@ -24,7 +24,6 @@ from oneflow.python.ops.nn_ops import calc_pool_padding, get_dhw_offset, _GetSeq @oneflow_export("nn.AvgPool1d") -@experimental_api class AvgPool1d(Module): r"""Applies a 1D average pooling over an input signal composed of several input planes. @@ -69,7 +68,6 @@ class AvgPool1d(Module): @oneflow_export("nn.AvgPool2d") -@experimental_api class AvgPool2d(Module): r"""Performs the 2d-average pooling on the input. @@ -92,7 +90,7 @@ class AvgPool2d(Module): .. code-block:: python - import oneflow.experimental as flow + import oneflow as flow import numpy as np @@ -158,7 +156,6 @@ class AvgPool2d(Module): @oneflow_export("nn.AvgPool3d") -@experimental_api class AvgPool3d(Module): r"""Applies a 3D average pooling over an input signal composed of several input planes. @@ -202,10 +199,9 @@ class AvgPool3d(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> m = flow.nn.AvgPool3d(kernel_size=(2,2,2),padding=(0,0,0),stride=(1,1,1)) >>> x = flow.Tensor(np.random.randn(9, 7, 11, 32, 20)) >>> y = m(x) @@ -264,7 +260,6 @@ class AvgPool3d(Module): @oneflow_export("nn.MaxPool1d") -@experimental_api class MaxPool1d(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool1d.html#torch.nn.MaxPool1d @@ -348,7 +343,6 @@ class MaxPool1d(Module): @oneflow_export("nn.MaxPool2d") -@experimental_api class MaxPool2d(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d @@ -402,10 +396,8 @@ class MaxPool2d(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> kernel_size, stride, padding = (3, 4), (1, 1), (1, 2) >>> m = flow.nn.MaxPool2d(kernel_size, stride, padding) >>> np.random.seed(0) @@ -469,7 +461,6 @@ class MaxPool2d(Module): @oneflow_export("nn.MaxPool3d") -@experimental_api class MaxPool3d(Module): r"""The interface is consistent with PyTorch. 
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.MaxPool3d.html#torch.nn.MaxPool3d @@ -530,10 +521,8 @@ class MaxPool3d(Module): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> kernel_size, stride, padding = (3, 3, 4), (1, 1, 1), (1, 1, 2) >>> m = flow.nn.MaxPool3d(kernel_size, stride, padding) >>> np.random.seed(0) diff --git a/oneflow/python/nn/modules/random_ops.py b/oneflow/python/nn/modules/random_ops.py index 884b235cd7bdd3d179d1ace43d7c48f642e63e3e..7ad718dec4f75edbddbede1e2a1d7e3c0fbb9519 100644 --- a/oneflow/python/nn/modules/random_ops.py +++ b/oneflow/python/nn/modules/random_ops.py @@ -17,11 +17,10 @@ import sys import random import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export @oneflow_export("bernoulli") -@experimental_api def bernoulli(input, *, generator=None, out=None): r"""This operator returns a Tensor with binaray random numbers (0 / 1) from a Bernoulli distribution. @@ -39,9 +38,8 @@ def bernoulli(input, *, generator=None, out=None): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> arr = np.array( ... [ ... [1.0, 1.0, 1.0], diff --git a/oneflow/python/nn/modules/reduce_ops.py b/oneflow/python/nn/modules/reduce_ops.py index d4a34b3fe943fad3c9b55d551d14c861bcba99a6..08ebf0c8986bc6f1a116cc55d81ef64dcc211e4f 100644 --- a/oneflow/python/nn/modules/reduce_ops.py +++ b/oneflow/python/nn/modules/reduce_ops.py @@ -18,7 +18,7 @@ import collections from typing import Optional, Sequence, Union import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.modules.utils import _check_axis @@ -52,7 +52,6 @@ class Sum(Module): @oneflow_export("sum") @register_tensor_op("sum") -@experimental_api def _sum(input, dim=None, keepdim=False): r"""Computes the sum of row of elements in a tensor in the given axis, if the axis is None, sum of all elements will be caculated. @@ -60,8 +59,7 @@ def _sum(input, dim=None, keepdim=False): .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]]) >>> flow.sum(input) tensor([21.], dtype=oneflow.float32) @@ -93,7 +91,6 @@ class Mean(Module): @oneflow_export("mean") @register_tensor_op("mean") -@experimental_api def _mean(input, dim=None, keepdim=False): r"""Computes the mean of row of elements in a tensor in the given axis, if the axis is None, mean of all elements will be caculated. @@ -101,8 +98,7 @@ def _mean(input, dim=None, keepdim=False): .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]]) >>> flow.mean(input) tensor([3.5], dtype=oneflow.float32) @@ -135,7 +131,6 @@ class Min(Module): @oneflow_export("min") @register_tensor_op("min") -@experimental_api def _min(input, dim=None, keepdim=False): r"""Computes the minimum value of all elements in the input tensor. 
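`bernoulli` also moves to the top-level namespace. A minimal sketch based on the docstring above; with an all-ones probability tensor the sample is deterministic:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.ones((3, 3)).astype(np.float32))
    y = flow.bernoulli(x)
    print(y.numpy())  # every probability is 1.0, so every sampled entry is 1.0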
@@ -143,8 +138,7 @@ def _min(input, dim=None, keepdim=False): .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> input = flow.Tensor([[4, 1, 5], [2, 6, 3]]) >>> flow.min(input) tensor([1.], dtype=oneflow.float32) @@ -177,7 +171,6 @@ class Max(Module): @oneflow_export("max") @register_tensor_op("max") -@experimental_api def _max(input, dim=None, keepdim=False): r"""Computes the maximum value of all elements in the input tensor. @@ -185,8 +178,7 @@ def _max(input, dim=None, keepdim=False): .. code-block:: python - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() + >>> import oneflow as flow >>> input = flow.Tensor([[4, 1, 5], [2, 6, 3]]) >>> flow.max(input) tensor([6.], dtype=oneflow.float32) diff --git a/oneflow/python/nn/modules/repeat.py b/oneflow/python/nn/modules/repeat.py index e151d4aa2510415d0f5b935609233f5076d7e6f6..de5d11acd5b9d6b9f7fa7cd72b256ab7e49302d7 100644 --- a/oneflow/python/nn/modules/repeat.py +++ b/oneflow/python/nn/modules/repeat.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -55,15 +55,14 @@ class Repeat(Module): expand_dim.insert(0, repeat[i]) out_reshape.insert(0, repeat[i]) - new_tensor = flow.experimental.reshape(input, in_reshape) + new_tensor = flow.reshape(input, in_reshape) tmp_tensor = new_tensor.expand(*expand_dim) - out = flow.experimental.reshape(tmp_tensor, out_reshape) + out = flow.reshape(tmp_tensor, out_reshape) return out @oneflow_export("repeat") @register_tensor_op("repeat") -@experimental_api def repeat_op(x, sizes): """This operator repeat the input tensor to a larger size along the specified dimensions. @@ -78,10 +77,8 @@ def repeat_op(x, sizes): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> x = np.array([[[[0, 1]], ... [[2, 3]], ... [[4, 5]]]]).astype(np.int32) diff --git a/oneflow/python/nn/modules/reshape.py b/oneflow/python/nn/modules/reshape.py index 9996e69e035165069a467360969abe0a00d92539..e3aa1441f113713a41f2606bb47679770bad3835 100644 --- a/oneflow/python/nn/modules/reshape.py +++ b/oneflow/python/nn/modules/reshape.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from typing import Sequence @@ -31,7 +31,6 @@ class Reshape(Module): @oneflow_export("reshape") @register_tensor_op("reshape") -@experimental_api def reshape_op(x, shape: Sequence[int] = None): """This operator reshapes a Tensor. @@ -48,9 +47,7 @@ def reshape_op(x, shape: Sequence[int] = None): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> x = np.array( ... [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] ... 
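The reductions above are exported both as free functions and as tensor methods via `register_tensor_op`; a small sketch of the migrated calls:

.. code-block:: python

    import oneflow as flow

    x = flow.Tensor([[4, 1, 5], [2, 6, 3]])
    print(flow.min(x))  # tensor([1.], dtype=oneflow.float32)
    print(flow.max(x))  # tensor([6.], dtype=oneflow.float32)
    print(x.sum())      # tensor([21.], dtype=oneflow.float32), via the registered tensor method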
).astype(np.float32) @@ -66,7 +63,6 @@ def reshape_op(x, shape: Sequence[int] = None): @oneflow_export("view") @register_tensor_op("view") -@experimental_api def view_op(x, shape: Sequence[int] = None): r""" The interface is consistent with PyTorch. @@ -103,9 +99,8 @@ def view_op(x, shape: Sequence[int] = None): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = np.array( ... [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] ... ).astype(np.float32) diff --git a/oneflow/python/nn/modules/round.py b/oneflow/python/nn/modules/round.py index ff5356cdeb43ec381bf4b5bcd0f2cdb42d634630..a3e0cf7a6f7d7acb75df8a483866ce7e40960739 100644 --- a/oneflow/python/nn/modules/round.py +++ b/oneflow/python/nn/modules/round.py @@ -16,7 +16,7 @@ limitations under the License. import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -29,7 +29,6 @@ class Round(Module): @oneflow_export("round") -@experimental_api def round_op(x): """This operator rounds the value of Blob to the nearest integer. Args: @@ -40,9 +39,8 @@ def round_op(x): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x1 = flow.Tensor(np.array([1.49999, 1.500001, 2.7]).astype(np.float32)) >>> out1 = flow.round(x1) >>> out1.numpy() @@ -58,12 +56,11 @@ def round_op(x): @register_tensor_op("round") -@experimental_api def round_op_tensor(x): r""" round() -> Tensor - See :func:`oneflow.experimental.round` + See :func:`oneflow.round` """ diff --git a/oneflow/python/nn/modules/scatter_nd.py b/oneflow/python/nn/modules/scatter_nd.py index 8efc3a969099c89172e441cd2f6a364dba5fcb1e..4027a5e5a0c9d2419f715b536b9deba804025db3 100644 --- a/oneflow/python/nn/modules/scatter_nd.py +++ b/oneflow/python/nn/modules/scatter_nd.py @@ -16,13 +16,33 @@ limitations under the License. import oneflow as flow from oneflow.python.framework.tensor import Tensor -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module +class ScatterNd(Module): + def __init__(self, shape: list): + super().__init__() + if not isinstance(shape, list): + raise ValueError("shape must be list!") + self.shape = shape + + def forward(self, index, updates): + self._op = ( + flow.builtin_op("scatter_nd") + .Input("indices") + .Input("updates") + .Output("out") + .Attr("shape", self.shape) + .Build() + ) + + res = self._op(index, updates)[0] + return res + + @oneflow_export("scatter_nd") -@experimental_api -class Scatter_nd(Module): +def _scatter_nd_op(index, update, shape): r"""This operator inserts the elements in `updates` according to the `index` and create a new Tensor. Args: @@ -34,37 +54,16 @@ class Scatter_nd(Module): .. 
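The `scatter_nd` hunk above replaces the exported module class with an internal `ScatterNd` module plus a thin functional wrapper, so callers no longer build a layer first. A sketch of the before/after call styles, with values taken from the docstring:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    index = flow.Tensor(np.array([[1], [6], [4]]), dtype=flow.int)
    update = flow.Tensor(np.array([10.2, 5.1, 12.7]), dtype=flow.float)

    # before: layer = flow.scatter_nd([8]); out = layer(index, update)
    # after:  one functional call, which constructs ScatterNd([8]) internally
    out = flow.scatter_nd(index, update, [8])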
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - - >>> scatter_nd_layer = flow.scatter_nd([8]) >>> index = flow.Tensor(np.array([[1], [6], [4]]), dtype=flow.int) >>> update = flow.Tensor(np.array([10.2,5.1,12.7]), dtype=flow.float) - >>> out = scatter_nd_layer(index,update) + >>> out = flow.scatter_nd(index,update, [8]) >>> out tensor([ 0. , 10.2, 0. , 0. , 12.7, 0. , 5.1, 0. ], dtype=oneflow.float32) """ - - def __init__(self, shape: list): - super().__init__() - if not isinstance(shape, list): - raise ValueError("shape must be list!") - self.shape = shape - - def forward(self, index, updates): - self._op = ( - flow.builtin_op("scatter_nd") - .Input("indices") - .Input("updates") - .Output("out") - .Attr("shape", self.shape) - .Build() - ) - - res = self._op(index, updates)[0] - return res + return ScatterNd(shape)(index, update) if __name__ == "__main__": diff --git a/oneflow/python/nn/modules/sign.py b/oneflow/python/nn/modules/sign.py index bbcdde77612c5ff4a2486694a8edc9cd0512b88b..9db6b8aedfb3725f90d6d07b825ca969f1ecc487 100644 --- a/oneflow/python/nn/modules/sign.py +++ b/oneflow/python/nn/modules/sign.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.module import Module @@ -29,7 +29,6 @@ class Sign(Module): @oneflow_export("sign") -@experimental_api def sign_op(x): r"""Computes the sign of Tensor. @@ -44,9 +43,8 @@ def sign_op(x): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x1 = flow.Tensor(np.array([-2, 0, 2]).astype(np.float32)) >>> out1 = flow.sign(x1) >>> out1.numpy() @@ -61,13 +59,12 @@ def sign_op(x): @register_tensor_op("sign") -@experimental_api def sign_op_tensor(x): r""" sign() -> Tensor - See :func:`oneflow.experimental.sign` + See :func:`oneflow.sign` """ diff --git a/oneflow/python/nn/modules/sinh.py b/oneflow/python/nn/modules/sinh.py index 9708cb65dd1862d8c09c19e515feb87d392b2629..4922864cdd68421735786eaff293d444a770bf22 100644 --- a/oneflow/python/nn/modules/sinh.py +++ b/oneflow/python/nn/modules/sinh.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -28,7 +28,6 @@ class Sinh(Module): @oneflow_export("sinh") -@experimental_api def sinh_op(x): r"""Returns a new tensor with the hyperbolic sine of the elements of :attr:`input`. @@ -43,13 +42,12 @@ def sinh_op(x): .. 
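`sign` follows the same pattern: the functional export and the registered tensor method both live on the top-level namespace now. A minimal sketch:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.array([-2, 0, 2]).astype(np.float32))
    print(flow.sign(x).numpy())  # [-1.  0.  1.]
    print(x.sign().numpy())      # same result via register_tensor_op("sign")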
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> x1 = flow.Tensor(np.array([1, 2, 3])) >>> x2 = flow.Tensor(np.array([1.53123589,0.54242598,0.15117185])) >>> x3 = flow.Tensor(np.array([1,0,-1])) - >>> flow.enable_eager_execution() >>> flow.sinh(x1).numpy() array([ 1.1752012, 3.6268604, 10.017875 ], dtype=float32) >>> flow.sinh(x2).numpy() @@ -63,13 +61,12 @@ def sinh_op(x): @register_tensor_op("sinh") -@experimental_api def sinh_op_tensor(x): r""" sinh() -> Tensor - See :func:`oneflow.experimental.sinh` + See :func:`oneflow.sinh` """ diff --git a/oneflow/python/nn/modules/slice.py b/oneflow/python/nn/modules/slice.py index 2ec0420330ade3abd0f5920cedc5984f15f46b2a..7b7f6583e0bcb3dcad0743611ef0025ca024c003 100644 --- a/oneflow/python/nn/modules/slice.py +++ b/oneflow/python/nn/modules/slice.py @@ -16,7 +16,7 @@ limitations under the License. import numpy as np import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.ops.array_ops import check_slice_tup_list, GetSliceAttrs from typing import Sequence, Tuple @@ -35,7 +35,6 @@ class Slice(Module): @oneflow_export("slice") -@experimental_api def slice_op(x, slice_tup_list: Sequence[Tuple[int, int, int]]): r"""Extracts a slice from a tensor. The `slice_tup_list` assigns the slice indices in each dimension, the format is (start, stop, step). @@ -50,9 +49,7 @@ def slice_op(x, slice_tup_list: Sequence[Tuple[int, int, int]]): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> input = flow.Tensor(np.random.randn(3, 6, 9).astype(np.float32)) >>> tup_list = [[None, None, None], [0, 5, 2], [0, 6, 3]] >>> y = flow.slice(input, slice_tup_list=tup_list) @@ -79,7 +76,6 @@ class SliceUpdate(Module): @oneflow_export("slice_update") -@experimental_api def slice_update_op(x, update, slice_tup_list: Sequence[Tuple[int, int, int]]): r"""Update a slice of tensor `x`. Like `x[start:stop:step] = update`. @@ -93,9 +89,7 @@ def slice_update_op(x, update, slice_tup_list: Sequence[Tuple[int, int, int]]): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> input = flow.Tensor(np.array([1, 1, 1, 1, 1]).astype(np.float32)) >>> update = flow.Tensor(np.array([2, 3, 4]).astype(np.float32)) >>> y = flow.slice_update(input, update, slice_tup_list=[[1, 4, 1]]) @@ -123,9 +117,8 @@ class LogicalSliceAssign(Module): ) -# NOTE: conflict with existing userop: flow.experimental.logical_slice_assign, so use tmp.logical_slice_assign +# NOTE: conflict with existing userop: flow.logical_slice_assign, so use tmp.logical_slice_assign @oneflow_export("tmp.logical_slice_assign") -@experimental_api def logical_slice_assign_op(x, update, slice_tup_list: Sequence[Tuple[int, int, int]]): r"""Update a slice of tensor `x`(in-place). Like `x[start:stop:step] = update`. @@ -139,9 +132,8 @@ def logical_slice_assign_op(x, update, slice_tup_list: Sequence[Tuple[int, int, .. 
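A sketch of the migrated `slice` example, showing how the per-dimension `(start, stop, step)` tuples determine the output shape:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.random.randn(3, 6, 9).astype(np.float32))
    # one (start, stop, step) tuple per dimension; None keeps the whole axis
    tup_list = [[None, None, None], [0, 5, 2], [0, 6, 3]]
    y = flow.slice(x, slice_tup_list=tup_list)
    print(y.shape)  # flow.Size([3, 3, 2]): indices 0,2,4 on dim 1 and 0,3 on dim 2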
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input = flow.Tensor(np.array([1, 1, 1, 1, 1]).astype(np.float32)) >>> update = flow.Tensor(np.array([2, 3, 4]).astype(np.float32)) >>> y = flow.tmp.logical_slice_assign(input, update, slice_tup_list=[[1, 4, 1]]) diff --git a/oneflow/python/nn/modules/softplus.py b/oneflow/python/nn/modules/softplus.py index 68cac8ddb10cd2bf609fae9722f2ac82c025df7d..98d0f66ac6b910941052d28b610da47877d50d26 100644 --- a/oneflow/python/nn/modules/softplus.py +++ b/oneflow/python/nn/modules/softplus.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -29,7 +29,6 @@ class Softplus(Module): @oneflow_export("softplus") @register_tensor_op("softplus") -@experimental_api def softplus_op(x): r"""Applies the element-wise function: @@ -50,13 +49,12 @@ def softplus_op(x): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> x1 = flow.Tensor(np.array([1, 2, 3])) >>> x2 = flow.Tensor(np.array([1.53123589,0.54242598,0.15117185])) >>> x3 = flow.Tensor(np.array([1,0,-1])) - >>> flow.enable_eager_execution() >>> flow.softplus(x1).numpy() array([1.3132616, 2.126928 , 3.0485873], dtype=float32) >>> flow.softplus(x2).numpy() diff --git a/oneflow/python/nn/modules/sort.py b/oneflow/python/nn/modules/sort.py index a6e1d3267585e5c0cb4238e2ae8582627d651a97..8b42cde38b3ae16f8f2dd73dc5cd0161ac780f36 100644 --- a/oneflow/python/nn/modules/sort.py +++ b/oneflow/python/nn/modules/sort.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.ops.transpose_util import ( get_perm_when_transpose_axis_to_last_dim, @@ -42,18 +42,17 @@ class Sort(Module): assert 0 <= dim < num_dims, "dim out of range" if dim == num_dims - 1: indices = self._argsort_op(input)[0] - return (flow.experimental.gather(input, indices, dim), indices) + return (flow.gather(input, indices, dim), indices) else: perm = get_perm_when_transpose_axis_to_last_dim(num_dims, dim) x = flow.F.transpose(input, perm=perm) indices = self._argsort_op(x)[0] indices = flow.F.transpose(indices, perm=get_inversed_perm(perm)) - return (flow.experimental.gather(input, indices, dim), indices) + return (flow.gather(input, indices, dim), indices) @oneflow_export("sort") @register_tensor_op("sort") -@experimental_api def sort_op(input, dim: int = -1, descending: bool = False): """Sorts the elements of the input tensor along a given dimension in ascending order by value. @@ -71,10 +70,8 @@ def sort_op(input, dim: int = -1, descending: bool = False): .. 
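A minimal sketch of the migrated `softplus` call; the values match log(1 + exp(x)) element-wise:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.array([1, 2, 3]))
    print(flow.softplus(x).numpy())
    # [1.3132616 2.126928  3.0485873]  == log(1 + exp(x)) element-wise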
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> x = np.array([[1, 3, 8, 7, 2], [1, 9, 4, 3, 2]], dtype=np.float32) >>> input = flow.Tensor(x) >>> (values, indices) = flow.sort(input) diff --git a/oneflow/python/nn/modules/sparse.py b/oneflow/python/nn/modules/sparse.py index eb95d58bbc8965b0eb512b5a0eda9b93d548efa8..9f5f3dbb46ee558c4771db8878ace2db92bfc4da 100644 --- a/oneflow/python/nn/modules/sparse.py +++ b/oneflow/python/nn/modules/sparse.py @@ -44,9 +44,8 @@ class Embedding(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> indices = flow.Tensor([[1, 2, 4, 5], [4, 3, 2, 9]], dtype=flow.int) >>> m = flow.nn.Embedding(10, 3) >>> y = m(indices) diff --git a/oneflow/python/nn/modules/squeeze.py b/oneflow/python/nn/modules/squeeze.py index a293e7fa15972973ee88142d00fdc3c51b28bfb0..764a7508cfbcd630c0da80cf01f19c41564c4adf 100644 --- a/oneflow/python/nn/modules/squeeze.py +++ b/oneflow/python/nn/modules/squeeze.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op import oneflow.python.framework.id_util as id_util from typing import Optional, Sequence @@ -34,7 +34,6 @@ class Squeeze(Module): @oneflow_export("squeeze") @register_tensor_op("squeeze") -@experimental_api def squeeze_op(input, dim: Optional[Sequence[int]] = None): """This operator removes the specified dimention which size is 1 of the input Tensor. If the `dim` is not specified, this operator will remove all the dimention which size is 1 of the input Tensor. @@ -52,10 +51,8 @@ def squeeze_op(input, dim: Optional[Sequence[int]] = None): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - >>> input = flow.Tensor(np.array([[[[1, 1, 1]]]]).astype(np.int32)) >>> out = flow.squeeze(input, dim=[1, 2]).shape >>> out diff --git a/oneflow/python/nn/modules/stack.py b/oneflow/python/nn/modules/stack.py index e763ae53645efa9bf1f0a841b1f941fbe0de54c3..fd22e75e16cbcbad1830645fe84a61fd3c25819b 100644 --- a/oneflow/python/nn/modules/stack.py +++ b/oneflow/python/nn/modules/stack.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from typing import List, Tuple -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import Tensor @@ -45,12 +45,11 @@ class Stack(Module): current_shape ) unsqueezed.append(inputs[i].unsqueeze(dim=self.dim)) - return flow.experimental.cat(unsqueezed, dim=self.dim) + return flow.cat(unsqueezed, dim=self.dim) @oneflow_export("stack") @register_tensor_op("stack") -@experimental_api def stack(inputs: Tensor, dim: int = 0) -> None: r"""Concatenates a sequence of tensors along a new dimension. The returned tensor shares the same underlying data with input tensors. @@ -70,9 +69,8 @@ def stack(inputs: Tensor, dim: int = 0) -> None: .. 
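A sketch of the migrated `sort`, which returns the sorted values together with their original indices:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.array([[1, 3, 8, 7, 2], [1, 9, 4, 3, 2]], dtype=np.float32))
    values, indices = flow.sort(x)
    # values:  [[1. 2. 3. 7. 8.]    indices: [[0 4 1 3 2]
    #           [1. 2. 3. 4. 9.]]              [0 4 3 2 1]]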
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> x = flow.Tensor(np.random.rand(1, 3, 5)) >>> y = flow.Tensor(np.random.rand(1, 3, 5)) >>> out = flow.stack([x, y], dim = -1) diff --git a/oneflow/python/nn/modules/tan.py b/oneflow/python/nn/modules/tan.py index 1f6b91899080ca5f35b7789541363ce945b8dc33..9f3b7067f0c24d8bea6a020a355e3501f7cbf448 100644 --- a/oneflow/python/nn/modules/tan.py +++ b/oneflow/python/nn/modules/tan.py @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op @@ -29,7 +29,6 @@ class Tan(Module): @oneflow_export("tan") -@experimental_api def tan_op(input): r"""Returns the tan value of the elements of :attr:`input`. @@ -43,9 +42,8 @@ def tan_op(input): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> np_arr = np.array([-1/4*np.pi, 0, 1/4*np.pi]).astype(np.float32) >>> input = flow.Tensor(np_arr) >>> output = flow.tan(input) @@ -58,11 +56,10 @@ def tan_op(input): @register_tensor_op("tan") -@experimental_api def tan_op_tensor(input): r""" tan() -> Tensor - See :func:`oneflow.experimental.tan` + See :func:`oneflow.tan` """ diff --git a/oneflow/python/nn/modules/tensor_buffer.py b/oneflow/python/nn/modules/tensor_buffer.py index af6819ae5d2daf2bea9b6ad1bf213ba03df5c2b4..8b2fe8fd99e0f14ae8384a0ee83846d071fa3c26 100644 --- a/oneflow/python/nn/modules/tensor_buffer.py +++ b/oneflow/python/nn/modules/tensor_buffer.py @@ -17,7 +17,7 @@ from typing import Sequence, Optional import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export class TensorBufferToTensor(Module): @@ -37,7 +37,6 @@ class TensorBufferToTensor(Module): @oneflow_export("tensor_buffer_to_tensor") -@experimental_api def tensor_buffer_to_tensor_op(x, dtype: flow.dtype, instance_shape: Sequence[int]): """This operator converts the Tensor's type from TensorBuffer to original type. Some operator's output data type is `TensorBuffer`, you can use this operator to convert back @@ -59,9 +58,7 @@ def tensor_buffer_to_tensor_op(x, dtype: flow.dtype, instance_shape: Sequence[in .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> x = np.random.randn(4, 16, 64, 64).astype(np.float32) >>> x = flow.Tensor(x) >>> x = flow.tensor_to_tensor_buffer(x, instance_dims=2) @@ -89,7 +86,6 @@ class TensorToTensorBuffer(Module): @oneflow_export("tensor_to_tensor_buffer") -@experimental_api def tensor_to_tensor_buffer(x, instance_dims: int): """This operator converts the Tensor's type to TensorBuffer. @@ -108,9 +104,7 @@ def tensor_to_tensor_buffer(x, instance_dims: int): .. 
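A minimal sketch of the migrated `stack` example; the new dimension is appended at `dim=-1`:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.random.rand(1, 3, 5))
    y = flow.Tensor(np.random.rand(1, 3, 5))
    out = flow.stack([x, y], dim=-1)
    print(out.shape)  # flow.Size([1, 3, 5, 2]): a new trailing axis of size 2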
code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> x = np.random.randn(4, 16, 64, 64).astype(np.float32) >>> x = flow.Tensor(x) >>> x = flow.tensor_to_tensor_buffer(x, instance_dims=2) @@ -141,7 +135,6 @@ class GenTensorBuffer(Module): @oneflow_export("gen_tensor_buffer") -@experimental_api def gen_tensor_buffer( shape: Sequence[int], shape_list: Sequence[Sequence[int]], diff --git a/oneflow/python/nn/modules/tensor_ops.py b/oneflow/python/nn/modules/tensor_ops.py index b27130d520ab5a9ff056feefd81358427935567c..ef9b36b40b2477eba2147f4e28d725da90665b25 100644 --- a/oneflow/python/nn/modules/tensor_ops.py +++ b/oneflow/python/nn/modules/tensor_ops.py @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import experimental_api from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op @@ -28,7 +27,6 @@ class TypeAs(Module): @register_tensor_op("type_as") -@experimental_api def type_as_op(input, target): r"""Returns this tensor cast to the type of the given tensor. This is a no-op if the tensor is already of the correct type. @@ -41,10 +39,9 @@ def type_as_op(input, target): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.float32) >>> target = flow.Tensor(np.random.randn(4, 5, 6), dtype = flow.int32) >>> input = input.type_as(target) @@ -64,7 +61,6 @@ class Long(Module): @register_tensor_op("long") -@experimental_api def long_op(input): r"""`Tensor.long()` is equivalent to `Tensor.to(flow.int64)`. See to(). @@ -75,10 +71,9 @@ def long_op(input): .. code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.float32) >>> input = input.long() >>> input.dtype diff --git a/oneflow/python/nn/modules/tile.py b/oneflow/python/nn/modules/tile.py index 0e583c44272eb3f4498ad6b1a9e613fb6a05d378..7b0c52dd6795452f020b52511240ffe4ce8ae01d 100644 --- a/oneflow/python/nn/modules/tile.py +++ b/oneflow/python/nn/modules/tile.py @@ -16,7 +16,7 @@ limitations under the License. from typing import Union import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import Tensor, register_tensor_op @@ -40,7 +40,6 @@ class Tile(Module): @oneflow_export("tile") @register_tensor_op("tile") -@experimental_api def tile_op(x, reps): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -69,10 +68,9 @@ def tile_op(x, reps): .. 
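A sketch of the tensor-buffer round trip under the migrated exports, assuming the eager `tensor_buffer_to_tensor` keeps the `dtype`/`instance_shape` signature shown above:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.random.randn(4, 16, 64, 64).astype(np.float32))
    buf = flow.tensor_to_tensor_buffer(x, instance_dims=2)  # pack the last two axes
    y = flow.tensor_buffer_to_tensor(buf, dtype=flow.float32, instance_shape=(64, 64))
    print(y.shape)  # (4, 16, 64, 64): the packed axes are restored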
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = np.array([1, 2]).astype(np.int32) >>> input = flow.Tensor(x, dtype=flow.int32) >>> out = input.tile(reps=(2,)) diff --git a/oneflow/python/nn/modules/to.py b/oneflow/python/nn/modules/to.py index d265e7704e1acb9e93c829c0a3f51161097d8ed5..222643d340d97b426e743e47c30474d36a85e4ff 100644 --- a/oneflow/python/nn/modules/to.py +++ b/oneflow/python/nn/modules/to.py @@ -60,9 +60,8 @@ def to_op(input, *args, **kwargs): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> arr = np.random.randint(1, 9, size=(1, 2, 3, 4)) >>> input = flow.Tensor(arr) >>> output = input.to(dtype=flow.float32) diff --git a/oneflow/python/nn/modules/transpose.py b/oneflow/python/nn/modules/transpose.py index 5984e221ee0ca527ca3eea9fa81a9fe1787a3081..fe57cba043e022c6f84c8a7e1eac191c48d393ec 100644 --- a/oneflow/python/nn/modules/transpose.py +++ b/oneflow/python/nn/modules/transpose.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from typing import Optional, Sequence @@ -60,7 +60,6 @@ class Transpose(Module): @oneflow_export("transpose") @register_tensor_op("transpose") -@experimental_api def transpose_op(tensor, dim0, dim1): r"""Returns a tensor that is a transposed version of input. The given dimensions dim0 and dim1 are swapped. @@ -78,9 +77,7 @@ def transpose_op(tensor, dim0, dim1): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> input = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32) >>> out = flow.transpose(input, 0, 1).shape >>> out diff --git a/oneflow/python/nn/modules/triu.py b/oneflow/python/nn/modules/triu.py index 37381307044f5c4be6dc5d174025d0fe74c4f896..ece3cfd12e23a48897f1748c3bc4a60f5aa17b44 100644 --- a/oneflow/python/nn/modules/triu.py +++ b/oneflow/python/nn/modules/triu.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module from oneflow.python.framework.tensor import register_tensor_op @@ -31,7 +31,6 @@ class Triu(Module): @oneflow_export("triu") @register_tensor_op("triu") -@experimental_api def triu_op(x, diagonal=0): r"""Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices input, the other elements of the result tensor out are set to 0. @@ -44,10 +43,9 @@ def triu_op(x, diagonal=0): .. 
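A minimal sketch of the migrated `transpose` example; the two named dimensions are swapped:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
    print(flow.transpose(x, 0, 1).shape)  # flow.Size([6, 2, 5, 3])
    print(x.transpose(0, 1).shape)        # same call via the registered tensor method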
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() - + >>> x = flow.Tensor(np.ones(shape=(3, 3)).astype(np.float32)) >>> flow.triu(x) tensor([[1., 1., 1.], diff --git a/oneflow/python/nn/modules/unsqueeze.py b/oneflow/python/nn/modules/unsqueeze.py index cc1008039cff48d9e53157d152647a93c3ecdd67..5cd41eaaf254411ae1e5e80817da1acc5fc85b0b 100644 --- a/oneflow/python/nn/modules/unsqueeze.py +++ b/oneflow/python/nn/modules/unsqueeze.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -36,7 +36,6 @@ class Unsqueeze(Module): @oneflow_export("unsqueeze") @register_tensor_op("unsqueeze") -@experimental_api def unsqueeze_op(input, dim): r"""Returns a new tensor with a dimension of size one inserted at the specified position. @@ -56,9 +55,8 @@ def unsqueeze_op(input, dim): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> x = flow.Tensor(np.random.rand(2, 3, 4)) >>> y = x.unsqueeze(2) >>> y.shape diff --git a/oneflow/python/nn/modules/upsampling.py b/oneflow/python/nn/modules/upsampling.py index 729acbe487c8fa440b0264b7a6a68479cf0ec005..0686e81a322c50ed5159185cf667addb7ce6b6e9 100644 --- a/oneflow/python/nn/modules/upsampling.py +++ b/oneflow/python/nn/modules/upsampling.py @@ -15,13 +15,12 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op from typing import Optional, Union, Tuple @oneflow_export("nn.Upsample") -@experimental_api class Upsample(Module): r"""The interface is consistent with PyTorch. @@ -85,7 +84,7 @@ class Upsample(Module): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> input = flow.Tensor(np.arange(1, 5).reshape((1, 1, 2, 2)), dtype=flow.float32) >>> input = input.to("cuda") @@ -112,7 +111,7 @@ class Upsample(Module): self.align_corners = align_corners def forward(self, x): - return flow.experimental.nn.functional.interpolate( + return flow.nn.functional.interpolate( x, size=self.size, scale_factor=self.scale_factor, @@ -130,7 +129,6 @@ class Upsample(Module): @oneflow_export("nn.UpsamplingNearest2d") -@experimental_api class UpsamplingNearest2d(Upsample): r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input channels. @@ -163,9 +161,8 @@ class UpsamplingNearest2d(Upsample): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input = flow.Tensor(np.arange(1, 5).reshape((1, 1, 2, 2)), dtype=flow.float32) >>> input = input.to("cuda") >>> m = flow.nn.UpsamplingNearest2d(scale_factor=2.0) @@ -186,7 +183,6 @@ class UpsamplingNearest2d(Upsample): @oneflow_export("nn.UpsamplingBilinear2d") -@experimental_api class UpsamplingBilinear2d(Upsample): r"""Applies a 2D bilinear upsampling to an input signal composed of several input channels. 
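Short sketches of the migrated `triu` and `unsqueeze` examples:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    x = flow.Tensor(np.ones(shape=(3, 3)).astype(np.float32))
    print(flow.triu(x).numpy())
    # [[1. 1. 1.]
    #  [0. 1. 1.]
    #  [0. 0. 1.]]

    y = flow.Tensor(np.random.rand(2, 3, 4))
    print(y.unsqueeze(2).shape)  # flow.Size([2, 3, 1, 4]): size-1 axis inserted at position 2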
@@ -220,9 +216,8 @@ class UpsamplingBilinear2d(Upsample): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow + >>> input = flow.Tensor(np.arange(1, 5).reshape((1, 1, 2, 2)), dtype=flow.float32) >>> input = input.to("cuda") >>> m = flow.nn.UpsamplingBilinear2d(scale_factor=2.0) diff --git a/oneflow/python/nn/modules/where.py b/oneflow/python/nn/modules/where.py index 51f42e07b21d1d57336e3ac318004f11494fe0be..c0cd273f31fae5895e79f80c942d527945dacf5b 100644 --- a/oneflow/python/nn/modules/where.py +++ b/oneflow/python/nn/modules/where.py @@ -15,7 +15,7 @@ limitations under the License. """ import oneflow as flow from oneflow.python.nn.module import Module -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.framework.tensor import register_tensor_op @@ -66,26 +66,26 @@ class Where(Module): if max_dim != y.shape[i]: broadcast_y_axes.append(i) - broadcast_like_tensor = flow.experimental.zeros( + broadcast_like_tensor = flow.zeros( tuple(broadcast_like_shape), dtype=flow.float32 ) broadcast_like_tensor = broadcast_like_tensor.to(x.device.type) broadcast_like_tensor.requires_grad = x.requires_grad or y.requires_grad if len(broadcast_condition_axes) != 0: - condition = flow.experimental.cast(condition, flow.float32) - broadcast_cond = flow.experimental.broadcast_like( + condition = flow.cast(condition, flow.float32) + broadcast_cond = flow.broadcast_like( condition, broadcast_like_tensor, tuple(broadcast_condition_axes) ) - broadcast_cond = flow.experimental.cast(broadcast_cond, flow.int32) + broadcast_cond = flow.cast(broadcast_cond, flow.int32) if len(broadcast_x_axes) != 0: - broadcast_x = flow.experimental.broadcast_like( + broadcast_x = flow.broadcast_like( x, broadcast_like_tensor, broadcast_axes=tuple(broadcast_x_axes) ) if len(broadcast_y_axes) != 0: - broadcast_y = flow.experimental.broadcast_like( + broadcast_y = flow.broadcast_like( y, broadcast_like_tensor, broadcast_axes=tuple(broadcast_y_axes) ) @@ -94,7 +94,6 @@ class Where(Module): @oneflow_export("where") @register_tensor_op("where") -@experimental_api def where_op(condition, x, y): """Return a tensor of elements selected from either :attr:`x` or :attr:`y`, depending on :attr:`condition`. If the element in condition is larger than 0, @@ -120,9 +119,7 @@ def where_op(condition, x, y): .. code-block:: python >>> import numpy as np - >>> import oneflow.experimental as flow - >>> flow.enable_eager_execution() - + >>> import oneflow as flow >>> x = flow.Tensor( ... np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]), ... dtype=flow.float32, diff --git a/oneflow/python/nn/modules/zeropad2d.py b/oneflow/python/nn/modules/zeropad2d.py index 19b862a491ca2ff1bc1a10b7f3328b702b8819c2..3b442e12aab14f3c1584d2e680d98cb8c1bc4eda 100644 --- a/oneflow/python/nn/modules/zeropad2d.py +++ b/oneflow/python/nn/modules/zeropad2d.py @@ -18,12 +18,11 @@ from __future__ import absolute_import from typing import Union import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.module import Module @oneflow_export("nn.ZeroPad2d") -@experimental_api class ZeroPad2d(Module): r"""The interface is consistent with PyTorch. The documentation is referenced from: @@ -46,9 +45,8 @@ class ZeroPad2d(Module): .. 
code-block:: python - >>> import oneflow.experimental as flow + >>> import oneflow as flow >>> import numpy as np - >>> flow.enable_eager_execution() >>> zeropad_layer_int = flow.nn.ZeroPad2d(2) >>> zeropad_layer_tuple = flow.nn.ZeroPad2d((1,2,2,0)) >>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)) diff --git a/oneflow/python/nn/optimizer/adam.py b/oneflow/python/nn/optimizer/adam.py index 7d253b1be38d4553884bbaa144a5f57f845e5db0..4a01b085168514d3653646a8e00ad623ce961528 100644 --- a/oneflow/python/nn/optimizer/adam.py +++ b/oneflow/python/nn/optimizer/adam.py @@ -19,13 +19,12 @@ import collections import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.parameter import Parameter from oneflow.python.nn.optimizer.optimizer import Optimizer, ParamGroup @oneflow_export("optim.Adam") -@experimental_api class Adam(Optimizer): r"""Implements Adam algorithm. @@ -106,8 +105,8 @@ class Adam(Optimizer): for param in param_group.parameters: assert param.is_leaf, "parameters must be leaf tensor" self._state[param] = dict() - self._state[param]["exp_avg"] = flow.experimental.zeros_like(param) - self._state[param]["exp_avg_sq"] = flow.experimental.zeros_like(param) + self._state[param]["exp_avg"] = flow.zeros_like(param) + self._state[param]["exp_avg_sq"] = flow.zeros_like(param) self._op = ( flow.builtin_op("adam_update") diff --git a/oneflow/python/nn/optimizer/adamw.py b/oneflow/python/nn/optimizer/adamw.py index 60900a315f92755551061037da32ce2f2055f181..9f532995d14e7b784838db48dc908e34089d8c9d 100644 --- a/oneflow/python/nn/optimizer/adamw.py +++ b/oneflow/python/nn/optimizer/adamw.py @@ -19,13 +19,12 @@ import collections import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.parameter import Parameter from oneflow.python.nn.optimizer.optimizer import ParamGroup, Optimizer @oneflow_export("optim.AdamW") -@experimental_api class AdamW(Optimizer): r"""Implements AdamW algorithm. @@ -109,8 +108,8 @@ class AdamW(Optimizer): for param in param_group.parameters: assert param.is_leaf, "parameters must be leaf tensor" self._state[param] = dict() - self._state[param]["exp_avg"] = flow.experimental.zeros_like(param) - self._state[param]["exp_avg_sq"] = flow.experimental.zeros_like(param) + self._state[param]["exp_avg"] = flow.zeros_like(param) + self._state[param]["exp_avg_sq"] = flow.zeros_like(param) self._op = ( flow.builtin_op("adam_update") diff --git a/oneflow/python/nn/optimizer/cosine_annealing_lr.py b/oneflow/python/nn/optimizer/cosine_annealing_lr.py index d3e050bc0a151cab09660c2bafc8f0a57f95b1cf..0086b771da0658715fa5858f02e8fc0d09541555 100644 --- a/oneflow/python/nn/optimizer/cosine_annealing_lr.py +++ b/oneflow/python/nn/optimizer/cosine_annealing_lr.py @@ -16,12 +16,11 @@ limitations under the License. import math -from oneflow.python.oneflow_export import experimental_api, oneflow_export +from oneflow.python.oneflow_export import oneflow_export from .lr_scheduler import LrScheduler @oneflow_export("optim.lr_scheduler.CosineAnnealingLR") -@experimental_api class CosineAnnealingLR(LrScheduler): r"""This operator creates a Cosine decayed learning rate scheduler. @@ -56,7 +55,7 @@ class CosineAnnealingLR(LrScheduler): .. code-block:: python - import oneflow.experimental as flow + import oneflow as flow ... 
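A minimal sketch of the migrated `ZeroPad2d` usage from the docstring above; the tuple form is assumed to follow the same `(left, right, top, bottom)` order as the other 2d padding modules in this patch:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    zeropad_int = flow.nn.ZeroPad2d(2)               # pad 2 on all four sides
    zeropad_tuple = flow.nn.ZeroPad2d((1, 2, 2, 0))  # (left, right, top, bottom)
    x = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32))
    print(zeropad_int(x).shape)    # flow.Size([1, 2, 7, 7])
    print(zeropad_tuple(x).shape)  # flow.Size([1, 2, 5, 6])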
cosine_annealing_lr = flow.optim.lr_scheduler.CosineAnnealingLR(optimizer, steps=100, alpha=0.0) diff --git a/oneflow/python/nn/optimizer/lambda_lr.py b/oneflow/python/nn/optimizer/lambda_lr.py index 3b3fb744bbb21e0f25cb6c8abbf50e363766564a..935d36b1a141934821effc3a0057ecdf511e6bf1 100644 --- a/oneflow/python/nn/optimizer/lambda_lr.py +++ b/oneflow/python/nn/optimizer/lambda_lr.py @@ -16,12 +16,11 @@ limitations under the License. import types -from oneflow.python.oneflow_export import experimental_api, oneflow_export +from oneflow.python.oneflow_export import oneflow_export from .lr_scheduler import LrScheduler @oneflow_export("optim.lr_scheduler.LambdaLR") -@experimental_api class LambdaLR(LrScheduler): r""" Sets the learning rate of each parameter group to the initial lr times a given function. @@ -42,7 +41,7 @@ class LambdaLR(LrScheduler): .. code-block:: python - import oneflow.experimental as flow + import oneflow as flow ... lambda1 = lambda step: step // 30 diff --git a/oneflow/python/nn/optimizer/lr_scheduler.py b/oneflow/python/nn/optimizer/lr_scheduler.py index 4b1ab959a99fa3eed879918232f678571e3f0e9b..9cf0486c447a6bf43e2e6b034468f7517551b1a8 100644 --- a/oneflow/python/nn/optimizer/lr_scheduler.py +++ b/oneflow/python/nn/optimizer/lr_scheduler.py @@ -14,12 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. """ -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from .optimizer import Optimizer @oneflow_export("optim.lr_scheduler._LRScheduler") -@experimental_api class LrScheduler(object): def __init__(self, optimizer, last_step=-1, verbose=False): if not isinstance(optimizer, Optimizer): diff --git a/oneflow/python/nn/optimizer/optimizer.py b/oneflow/python/nn/optimizer/optimizer.py index f6544969a99435a260f9ed4da6c017b2e459bac2..34897539b1c3b6cef8faff3f66b94a1aa7e264e3 100644 --- a/oneflow/python/nn/optimizer/optimizer.py +++ b/oneflow/python/nn/optimizer/optimizer.py @@ -18,7 +18,7 @@ import warnings from typing import Dict, Callable, Union, Any, Iterator import collections -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.parameter import Parameter from oneflow.python.framework.tensor import Tensor @@ -56,7 +56,6 @@ class ParamGroup(object): @oneflow_export("optim.Optimizer") -@experimental_api class Optimizer(object): def __init__(self): self.param_groups = list() diff --git a/oneflow/python/nn/optimizer/rmsprop.py b/oneflow/python/nn/optimizer/rmsprop.py index f775488bb6f33adf2da5d1859e964d9e81a6bd82..0e4cdcd5807b028c4b739fffdc3fc2df9155bf83 100644 --- a/oneflow/python/nn/optimizer/rmsprop.py +++ b/oneflow/python/nn/optimizer/rmsprop.py @@ -19,13 +19,12 @@ import collections import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.parameter import Parameter from oneflow.python.nn.optimizer.optimizer import ParamGroup, Optimizer @oneflow_export("optim.RMSprop") -@experimental_api class RMSprop(Optimizer): r"""Implements RMSprop algorithm. 
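The optimizers and LR schedulers above move from `oneflow.experimental.optim` to `oneflow.optim`. A hedged sketch of the migrated wiring; `flow.nn.Linear` and the PyTorch-style `SGD(parameters, lr=...)` constructor are assumptions for illustration, and only the `CosineAnnealingLR` call is taken from the docstring above:

.. code-block:: python

    import oneflow as flow

    model = flow.nn.Linear(4, 4)                            # assumed module, for illustration only
    optimizer = flow.optim.SGD(model.parameters(), lr=0.1)  # assumed PyTorch-style constructor

    # scheduler construction as shown in the CosineAnnealingLR docstring
    cosine_annealing_lr = flow.optim.lr_scheduler.CosineAnnealingLR(optimizer, steps=100, alpha=0.0)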
@@ -123,9 +122,9 @@ class RMSprop(Optimizer): for param in param_group.parameters: assert param.is_leaf, "parameters must be leaf tensor" self._state[param] = dict() - self._state[param]["square_avg"] = flow.experimental.zeros_like(param) + self._state[param]["square_avg"] = flow.zeros_like(param) if param_group["centered"]: - self._state[param]["grad_avg"] = flow.experimental.zeros_like(param) + self._state[param]["grad_avg"] = flow.zeros_like(param) self._centered_rmsprop = ( flow.builtin_op("rmsprop_update") diff --git a/oneflow/python/nn/optimizer/sgd.py b/oneflow/python/nn/optimizer/sgd.py index 0ed091efeda511cf11771e7578228b3d70480007..88d00cfbf697937794aa1b5e397451cf2bef8bed 100644 --- a/oneflow/python/nn/optimizer/sgd.py +++ b/oneflow/python/nn/optimizer/sgd.py @@ -20,13 +20,12 @@ import math import oneflow as flow -from oneflow.python.oneflow_export import oneflow_export, experimental_api +from oneflow.python.oneflow_export import oneflow_export from oneflow.python.nn.parameter import Parameter from .optimizer import Optimizer, ParamGroup @oneflow_export("optim.SGD") -@experimental_api class SGD(Optimizer): r"""Implements SGD algorithm. @@ -88,9 +87,7 @@ class SGD(Optimizer): assert param.is_leaf, "parameters must be leaf tensor" self._state[param] = dict() if param_group["momentum"] != 0.0: - self._state[param]["momentum_buf"] = flow.experimental.zeros_like( - param - ) + self._state[param]["momentum_buf"] = flow.zeros_like(param) self._momentum_sgd = ( flow.builtin_op("momentum_update") diff --git a/oneflow/python/nn/optimizer/step_lr.py b/oneflow/python/nn/optimizer/step_lr.py index ba0c9a1b105cdf156948e5f85de373d305f7f3cb..7e546f688ea7dd2054b5e2e5e736072a6ac4e184 100644 --- a/oneflow/python/nn/optimizer/step_lr.py +++ b/oneflow/python/nn/optimizer/step_lr.py @@ -14,12 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. """ -from oneflow.python.oneflow_export import experimental_api, oneflow_export +from oneflow.python.oneflow_export import oneflow_export from .lr_scheduler import LrScheduler @oneflow_export("optim.lr_scheduler.StepLR") -@experimental_api class StepLR(LrScheduler): r""" Decays the learning rate of each parameter group by gamma every step_size steps. @@ -37,7 +36,7 @@ class StepLR(LrScheduler): .. code-block:: python - import oneflow.experimental as flow + import oneflow as flow ... step_lr = flow.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1) diff --git a/oneflow/python/ops/constant_op.py b/oneflow/python/ops/constant_op.py index 8433a5e8459f36a0351e5925275ac39de9bf28a5..21f75689bb41f9aea07bca7d4814017892b95913 100644 --- a/oneflow/python/ops/constant_op.py +++ b/oneflow/python/ops/constant_op.py @@ -221,95 +221,3 @@ def constant_like( setattr(out_lbi, "op_name", op_conf.name) setattr(out_lbi, "blob_name", "out") return remote_blob_util.RemoteBlob(out_lbi) - - -@oneflow_export("ones_like") -@stable_api -def ones_like( - like: oneflow._oneflow_internal.BlobDesc, - dtype: Optional[flow.dtype] = None, - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - """This operator creates a Blob with all elements set to `1` that has the same shape as `like`. - - Args: - like (oneflow._oneflow_internal.BlobDesc): A Blob. - dtype (Optional[flow.dtype], optional): The data type of Blob. Defaults to None. - name (Optional[str], optional): The name for the operation. Defaults to None. - - Returns: - oneflow._oneflow_internal.BlobDesc: The result Blob. - - For example: - - .. 
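The optimizer hunks above replace `flow.experimental.zeros_like` with the top-level `flow.zeros_like` when allocating state buffers; a minimal sketch of that call:

.. code-block:: python

    import oneflow as flow
    import numpy as np

    param = flow.Tensor(np.random.randn(2, 3))
    momentum_buf = flow.zeros_like(param)  # eager zero-filled buffer with param's shape and dtype
    print(momentum_buf.shape)              # flow.Size([2, 3])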
code-block:: python - - import oneflow as flow - import numpy as np - import oneflow.typing as tp - - - @flow.global_function() - def ones_like_Job() -> tp.Numpy: - constant_blob = flow.constant(value=1.5, - shape=(1, 3, 3), - dtype=flow.float) - ones_like_blob = flow.ones_like(like=constant_blob, - dtype=flow.float) - return ones_like_blob - - - out = ones_like_Job() - - # out [[[1. 1. 1.] - # [1. 1. 1.] - # [1. 1. 1.]]] - - """ - return constant_like(like, 1, dtype=dtype, name=name) - - -@oneflow_export("zeros_like") -@stable_api -def zeros_like( - like: oneflow._oneflow_internal.BlobDesc, - dtype: Optional[flow.dtype] = None, - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - """This operator creates a Blob that has the same shape as `like` whose all elements are set to `0`. - - Args: - like (oneflow._oneflow_internal.BlobDesc): A Blob. - dtype (Optional[flow.dtype], optional): The data type of Blob. Defaults to None. - name (Optional[str], optional): The name for the operation. Defaults to None. - - Returns: - oneflow._oneflow_internal.BlobDesc: The result Blob. - - For example: - - .. code-block:: python - - import oneflow as flow - import numpy as np - import oneflow.typing as tp - - - @flow.global_function() - def zeros_like_Job() -> tp.Numpy: - constant_blob = flow.constant(value=1.5, - shape=(1, 3, 3), - dtype=flow.float) - zeros_like_blob = flow.zeros_like(like=constant_blob, - dtype=flow.float) - return zeros_like_blob - - - out = zeros_like_Job() - - # out [[[0. 0. 0.] - # [0. 0. 0.] - # [0. 0. 0.]]] - - """ - return constant_like(like, 0, dtype=dtype, name=name) diff --git a/oneflow/python/ops/diag_ops.py b/oneflow/python/ops/diag_ops.py deleted file mode 100644 index 2c75e748f9ea72c8a5ab0a86c99ec605d261209a..0000000000000000000000000000000000000000 --- a/oneflow/python/ops/diag_ops.py +++ /dev/null @@ -1,74 +0,0 @@ -""" -Copyright 2020 The OneFlow Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" - -import oneflow as flow -import oneflow.python.framework.id_util as id_util -from oneflow.python.oneflow_export import oneflow_export, stable_api -import oneflow.python.framework.remote_blob as remote_blob_util -from typing import Optional -import oneflow._oneflow_internal - - -@oneflow_export("diag") -@stable_api -def diag( - input: oneflow._oneflow_internal.BlobDesc, - diagonal: Optional[int] = 0, - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - """This operator compute diagonal. - - If input is a vector, then returns a square matrix with the elements of input as the diagonal. - If input is a matrix, then returns a vector with the diagonal elements of input. - - Args: - input (remote_blob_util.BlobDef): The input Blob - diagonal (Optional[int], 0): The diagonal to consider. If diagonal = 0, it is the main diagonal. If diagonal > 0, it is above the main diagonal. If diagonal < 0, it is below the main diagonal. Defaults to 0. - - Returns: - remote_blob_util.BlobDef: The result Blob. - - For example: - - .. 
code-block:: python - - import oneflow as flow - import numpy as np - import oneflow.typing as tp - - - @flow.global_function() - def Diag_Job(input: tp.Numpy.Placeholder((3, 3), dtype=flow.float32),) -> tp.Numpy: - return flow.diag(input) - - - input = np.array([[1.0, 2.0, 3.0], - [4.0, 5.0, 6.0], - [7.0, 8.0, 9.0],], dtype=np.float32) - out = Diag_Job(input) - # out [1. 5. 9.] - - """ - return ( - flow.user_op_builder(name if name is not None else id_util.UniqueStr("Diag_")) - .Op("diag") - .Input("in", [input]) - .Attr("diagonal", int(diagonal)) - .Output("out") - .Build() - .InferAndTryRun() - .RemoteBlobList()[0] - ) diff --git a/oneflow/python/ops/linalg.py b/oneflow/python/ops/linalg.py index d52ba7825846f60a6a4d6736dd2f24fe9044576a..c75ba4a9c23171264eb4dad2f44e7b7852b43ffb 100644 --- a/oneflow/python/ops/linalg.py +++ b/oneflow/python/ops/linalg.py @@ -26,110 +26,3 @@ import oneflow.python.framework.remote_blob as remote_blob_util from oneflow.python.oneflow_export import oneflow_export, stable_api import oneflow._oneflow_internal from typing import Optional - - -@oneflow_export("matmul", "linalg.matmul") -@stable_api -def matmul( - a: oneflow._oneflow_internal.BlobDesc, - b: oneflow._oneflow_internal.BlobDesc, - transpose_a: bool = False, - transpose_b: bool = False, - alpha: float = 1.0, - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - r"""This operator applies matrix multiplication to two Blobs. - - Args: - a (oneflow._oneflow_internal.BlobDesc): A Blob - b (oneflow._oneflow_internal.BlobDesc): A Blob - transpose_a (bool, optional): Whether to transpose A Blob. Defaults to False. - transpose_b (bool, optional): Whether to transpose B Blob. Defaults to False. - name (Optional[str], optional): The name for the operation. Defaults to None. - - Returns: - oneflow._oneflow_internal.BlobDesc: The result Blob - - For example: - - .. code-block:: python - - import oneflow as flow - import numpy as np - import oneflow.typing as tp - - - @flow.global_function() - def matmul_Job(A: tp.Numpy.Placeholder((3, 3)), - B: tp.Numpy.Placeholder((3, 3)) - ) -> tp.Numpy: - return flow.linalg.matmul(A, B) - - - A = np.array([[1, 0, 0], - [0, 1, 1], - [0, 0, 1]]).astype(np.float32) - B = np.array([[3, 4, 5], - [6, 7, 8], - [9, 10, 11]]).astype(np.float32) - out = matmul_Job(A, B) - - # output [[ 3. 4. 5.] - # [15. 17. 19.] - # [ 9. 10. 
11.]] - - """ - if name is None: - name = id_util.UniqueStr("Matmul_") - - assert len(a.shape) >= 2 - assert len(b.shape) >= 2 - - if len(a.shape) == len(b.shape): - if len(a.shape) == 2: - op = ( - flow.user_op_builder(name) - .Op("matmul") - .Input("a", [a]) - .Input("b", [b]) - .Output("out") - .Attr("transpose_a", transpose_a) - .Attr("transpose_b", transpose_b) - .Attr("alpha", float(alpha)) - .Build() - ) - else: - op = ( - flow.user_op_builder(name) - .Op("batch_matmul") - .Input("a", [a]) - .Input("b", [b]) - .Output("out") - .Attr("transpose_a", transpose_a) - .Attr("transpose_b", transpose_b) - .Attr("alpha", float(alpha)) - .Build() - ) - else: - # NOTE: support broadcast b to a only for now - if len(b.shape) != 2: - raise ValueError( - "don't support number of dimensions of a being less than number of dimensions of b" - ) - - if transpose_a: - raise ValueError("don't support tensor a to be tranpose") - - op = ( - flow.user_op_builder(name) - .Op("broadcast_matmul") - .Input("a", [a]) - .Input("b", [b]) - .Output("out") - .Attr("transpose_a", transpose_a) - .Attr("transpose_b", transpose_b) - .Attr("alpha", float(alpha)) - .Build() - ) - - return op.InferAndTryRun().SoleOutputBlob() diff --git a/oneflow/python/ops/sort_ops.py b/oneflow/python/ops/sort_ops.py index d0b49f2de0b108743e8fa9b940e4bbdb4937bcec..afd9344615a5564eeaf99ce970bd46ef0088bff7 100644 --- a/oneflow/python/ops/sort_ops.py +++ b/oneflow/python/ops/sort_ops.py @@ -44,61 +44,6 @@ def _sort_at_last_dim( ) -@oneflow_export("sort") -@stable_api -def sort( - input: oneflow._oneflow_internal.BlobDesc, - axis: int = -1, - direction: str = "ASCENDING", - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - """This operator sorts the input Blob at specified axis. - - Args: - input (oneflow._oneflow_internal.BlobDesc): A Blob - axis (int, optional): dimension to be sorted. Defaults to the last dim (-1) - direction (str, optional): The direction in which to sort the Blob values. If the direction is "ASCENDING", The order of input will be sorted as ascending, else, the order of input will be sorted as descending. Defaults to "ASCENDING". - name (Optional[str], optional): The name for the operation. Defaults to None. - - Returns: - oneflow._oneflow_internal.BlobDesc: The sorted Blob - - For example: - - .. code-block:: python - - import oneflow as flow - import numpy as np - import oneflow.typing as tp - - - @flow.global_function() - def sort_Job(x: tp.Numpy.Placeholder((5, )) - ) -> tp.Numpy: - return flow.sort(input=x) - - x = np.array([10, 2, 9, 3, 7]).astype("float32") - out = sort_Job(x) - - # out [ 2. 3. 7. 9. 10.] 
- - """ - assert direction in ["ASCENDING", "DESCENDING"] - name = name if name is not None else id_util.UniqueStr("Sort_") - num_axes = len(input.shape) - axis = axis if axis >= 0 else axis + num_axes - assert 0 <= axis < num_axes, "axis out of range" - if axis == num_axes - 1: - return _sort_at_last_dim(input, direction, name) - else: - perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis) - x = flow.transpose(input, perm, False, True, name + "_transpose") - x = _sort_at_last_dim(x, direction, name) - return flow.transpose( - x, get_inversed_perm(perm), False, True, name + "_inverse_transpose" - ) - - def _argsort_at_last_dim( input: oneflow._oneflow_internal.BlobDesc, direction: str = "ASCENDING", @@ -117,58 +62,3 @@ def _argsort_at_last_dim( .InferAndTryRun() .RemoteBlobList()[0] ) - - -@oneflow_export("argsort") -@stable_api -def argsort( - input: oneflow._oneflow_internal.BlobDesc, - axis: int = -1, - direction: str = "ASCENDING", - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - """This operator sorts the input Blob at specified axis and return the indices of the sorted Blob. - - Args: - input (oneflow._oneflow_internal.BlobDesc): A Blob - axis (int, optional): dimension to be sorted. Defaults to the last dim (-1) - direction (str, optional): The direction in which to sort the Blob values. If the direction is "ASCENDING", The order of input will be sorted as ascending, else, the order of input will be sorted as descending. Defaults to "ASCENDING". - name (Optional[str], optional): The name for the operation. Defaults to None. - - Returns: - oneflow._oneflow_internal.BlobDesc: The indices of the sorted Blob - - For example: - - .. code-block:: python - - import oneflow as flow - import numpy as np - import oneflow.typing as tp - - - @flow.global_function() - def argsort_Job(x: tp.Numpy.Placeholder((5, )) - ) -> tp.Numpy: - return flow.argsort(input=x) - - x = np.array([10, 2, 9, 3, 7]).astype("float32") - out = argsort_Job(x) - - # out [1 3 4 2 0] - - """ - assert direction in ["ASCENDING", "DESCENDING"] - name = name if name is not None else id_util.UniqueStr("ArgSort_") - num_axes = len(input.shape) - axis = axis if axis >= 0 else axis + num_axes - assert 0 <= axis < num_axes, "axis out of range" - if axis == num_axes - 1: - return _argsort_at_last_dim(input, direction, name) - else: - perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis) - x = flow.transpose(input, perm, False, True, name + "_transpose") - x = _argsort_at_last_dim(x, direction, name) - return flow.transpose( - x, get_inversed_perm(perm), False, True, name + "_inverse_transpose" - ) diff --git a/oneflow/python/ops/tensor_buffer_ops.py b/oneflow/python/ops/tensor_buffer_ops.py index eb08529ee22c6653cbc30c9649f9dfcac84012c7..4bc172aa9826d532455cabec8dd622bd1313f0bd 100644 --- a/oneflow/python/ops/tensor_buffer_ops.py +++ b/oneflow/python/ops/tensor_buffer_ops.py @@ -23,229 +23,3 @@ import oneflow._oneflow_internal import oneflow.python.framework.id_util as id_util from oneflow.python.oneflow_export import oneflow_export, stable_api from typing import Optional, Sequence, List - - -@oneflow_export("tensor_buffer_to_tensor") -@stable_api -def tensor_buffer_to_tensor( - x: oneflow._oneflow_internal.BlobDesc, - dtype: flow.dtype, - instance_shape: Sequence[int], - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - r"""This operator converts the Blob's type from TensorBuffer to Tensor. 
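# [editor's note] The sort and argsort wrappers removed from sort_ops.py above share one
# technique: when `axis` is not the last dimension, permute it to the end, sort along the
# last dimension, then apply the inverse permutation. A numpy sketch of that idea follows;
# `sort_along` is a hypothetical helper written only to illustrate the removed code path.
import numpy as np

def sort_along(x, axis, descending=False):
    # Mirror of the transpose -> _sort_at_last_dim -> inverse-transpose steps above.
    perm = [i for i in range(x.ndim) if i != axis] + [axis]
    inv_perm = np.argsort(perm)
    y = np.sort(np.transpose(x, perm), axis=-1)
    if descending:
        y = y[..., ::-1]
    return np.transpose(y, inv_perm)

x = np.array([[3.0, 1.0], [0.0, 2.0]])
print(sort_along(x, axis=0))    # [[0. 1.] [3. 2.]]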
- Some operator's output data type is `TensorBuffer`, you can use this operator to convert back - to `Tensor`. - - Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_ - for more about TensorBuffer. - - - Args: - x (oneflow._oneflow_internal.BlobDesc): Input `Blob`. - dtype (flow.dtype): The data dtype. - instance_shape (Sequence[int]): The shape of each TensorBuffer instance. - name (Optional[str], optional): The name for the operation. Defaults to None. - - Returns: - oneflow._oneflow_internal.BlobDesc: A `Blob`. - - For example: - - .. code-block:: python - - import oneflow as flow - import numpy as np - import oneflow.typing as tp - - - @flow.global_function() - def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32), - ) -> tp.Numpy: - x = flow.tensor_to_tensor_buffer(x, - instance_dims=2) - return flow.tensor_buffer_to_tensor(x, - instance_shape=(64, 64), - dtype=flow.float) - - x = np.random.randn(4, 16, 64, 64).astype(np.float32) - out = tensor_buffer_to_tensor_Job(x) - - # out.shape (4, 16, 64, 64) - - """ - if name is None: - name = id_util.UniqueStr("TensorBufferToTensor_") - return ( - flow.user_op_builder(name) - .Op("tensor_buffer_to_tensor") - .Input("in", [x]) - .Output("out") - .Attr("dtype", dtype) - .Attr("instance_shape", instance_shape) - .Build() - .InferAndTryRun() - .RemoteBlobList()[0] - ) - - -@oneflow_export("tensor_to_tensor_buffer") -@stable_api -def tensor_to_tensor_buffer( - x: oneflow._oneflow_internal.BlobDesc, - instance_dims: int, - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - r"""This operator converts the Blob's type from Tensor to TensorBuffer. - - Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_ - for more about TensorBuffer. - - - Args: - x (oneflow._oneflow_internal.BlobDesc): Input `Blob`. - instance_dims (int): The dimensions of dynamic tensor instance. - name (Optional[str], optional): The name for the operation. Defaults to None. - - Returns: - oneflow._oneflow_internal.BlobDesc: The result Blob. - - For example: - - .. code-block:: python - - import oneflow as flow - import numpy as np - import oneflow.typing as tp - - - @flow.global_function() - def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32), - ) -> tp.Numpy: - x = flow.tensor_to_tensor_buffer(x, - instance_dims=2) - return flow.tensor_buffer_to_tensor(x, - instance_shape=(64, 64), - dtype=flow.float) - - x = np.random.randn(4, 16, 64, 64).astype(np.float32) - out = tensor_buffer_to_tensor_Job(x) - - # out.shape (4, 16, 64, 64) - - """ - if name is None: - name = id_util.UniqueStr("TensorToTensorBuffer_") - return ( - flow.user_op_builder(name) - .Op("tensor_to_tensor_buffer") - .Input("in", [x]) - .Output("out") - .Attr("instance_dims", instance_dims) - .Build() - .InferAndTryRun() - .RemoteBlobList()[0] - ) - - -@oneflow_export("gen_tensor_buffer") -@stable_api -def gen_tensor_buffer( - shape: Sequence[int], - shape_list: Sequence[Sequence[int]], - value_list: Sequence[float], - data_type: Optional[flow.dtype] = flow.float32, - dynamic_out: Optional[bool] = False, - name: Optional[str] = None, -) -> oneflow._oneflow_internal.BlobDesc: - r"""This operator generates a tensor buffer blob. 
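# [editor's note] The two conversions removed from tensor_buffer_ops.py above round-trip a
# dense blob through TensorBuffer: tensor_to_tensor_buffer folds the trailing `instance_dims`
# dimensions into one buffer per remaining element, and tensor_buffer_to_tensor restores them
# when given a matching `instance_shape` and dtype. A numpy sketch of the shape bookkeeping
# in the docstring example; illustration only, no TensorBuffer is actually built here.
import numpy as np

x = np.random.randn(4, 16, 64, 64).astype(np.float32)
instance_dims = 2
buffer_shape = x.shape[:-instance_dims]     # (4, 16): one TensorBuffer instance per element
instance_shape = x.shape[-instance_dims:]   # (64, 64): what each instance stores
print(buffer_shape, instance_shape)         # (4, 16) (64, 64)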
- - Args: - shape (Sequence[int]): shape of output blob - shape_list ( Sequence[Sequence[int]]): shapes for tensor buffer in output blob - value_list (Sequence[float]): values for tensor buffer in output blob - data_type (Optional[flow.dtype]): data type for tensor buffer in output blob - dynamic_out (Optional[bool]): if output is a dynamic blob - name (Optional[str]): The name for the operation. Defaults to None. - - Returns: - BlobDesc: The result Blob. - - For example: - - .. code-block:: python - - import oneflow as flow - - @flow.global_function(function_config=func_config) - def GenTensorBufferJob(): - with flow.scope.placement("cpu", "0:0"): - x = flow.gen_tensor_buffer([(2,)], [(2, 1), (1, 2)], [0.0, 1.0]) - y = flow.tensor_buffer_to_list_of_tensors(x, (100, 100), flow.float, True) - return y - - # y_0.shape (2, 1), y_1.shape (1. 2) - - """ - return ( - flow.user_op_builder( - name if name is not None else id_util.UniqueStr("GenTensorBuffer_") - ) - .Op("gen_tensor_buffer") - .Output("out") - .Attr("shape", shape) - .Attr("shape_list", shape_list) - .Attr("value_list", value_list) - .Attr("data_type", data_type) - .Attr("dynamic_out", dynamic_out) - .Build() - .InferAndTryRun() - .RemoteBlobList()[0] - ) - - -@oneflow_export("tensor_buffer_to_list_of_tensors") -@stable_api -def tensor_buffer_to_list_of_tensors( - x: oneflow._oneflow_internal.BlobDesc, - out_shape: Sequence[int], - out_dtype: flow.dtype, - dynamic_out: Optional[bool] = False, - name: Optional[str] = None, -) -> List[oneflow._oneflow_internal.BlobDesc]: - r"""This operator converts the Blob of TensorBuffer to list of Tensors. Every element in x will be converted - to a Tensor and output will be flatten to a list. - - Args: - x (BlobDesc): Input `Blob`, data type must be tensor buffer. - out_shape (Sequence[int]): max shape for a tensor buffer in x - out_dtype (flow.dtype,): output data type - dynamic_out (Optional[bool]): if output is dynamic blob. Default to False. - name (Optional[str]): The name for the operation. Default to None. - - Returns: - List[BlobDesc]: result blobs - - For example: - - .. code-block:: python - - # the same with `gen_tensor_buffer` op - - """ - return ( - flow.user_op_builder( - name - if name is not None - else id_util.UniqueStr("TensorBufferToListOfTensors_") - ) - .Op("tensor_buffer_to_list_of_tensors") - .Input("in", [x]) - .Output("out", functools.reduce(operator.mul, x.shape, 1)) - .Attr("out_dtype", out_dtype) - .Attr("out_shape", out_shape) - .Attr("dynamic_out", dynamic_out) - .Build() - .InferAndTryRun() - .RemoteBlobList() - ) diff --git a/oneflow/python/test/dataloader/test_numpy_dataset.py b/oneflow/python/test/dataloader/test_numpy_dataset.py index 2289be657560ba3f3c41e066fc1d5cc1f46ef009..56e5c44ac1d8a2ff819b65947f223bcd51596971 100644 --- a/oneflow/python/test/dataloader/test_numpy_dataset.py +++ b/oneflow/python/test/dataloader/test_numpy_dataset.py @@ -16,7 +16,7 @@ limitations under the License. import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow import oneflow.python.utils.data as Data diff --git a/oneflow/python/test/dataloader/test_tensor_dataset.py b/oneflow/python/test/dataloader/test_tensor_dataset.py index 6566b39f62c6715078f8ad660119c37143ce7783..85699a695662cd097d7c82df22cb85d9fe2c001a 100644 --- a/oneflow/python/test/dataloader/test_tensor_dataset.py +++ b/oneflow/python/test/dataloader/test_tensor_dataset.py @@ -16,9 +16,9 @@ limitations under the License. 
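# [editor's note] gen_tensor_buffer and tensor_buffer_to_list_of_tensors, removed from
# tensor_buffer_ops.py above, are a pair: the first builds a buffer blob of static shape
# `shape` whose elements take their instance shapes and fill values from `shape_list` and
# `value_list`; the second flattens it into prod(shape) separate tensors, which is what the
# `Output("out", functools.reduce(operator.mul, x.shape, 1))` line counts. A numpy sketch of
# the shapes in the docstring example; illustration only, not the real ops.
import operator
from functools import reduce

import numpy as np

shape, shape_list, value_list = (2,), [(2, 1), (1, 2)], [0.0, 1.0]
num_outputs = reduce(operator.mul, shape, 1)    # 2 tensors come back, one per buffer element
tensors = [np.full(s, v, dtype=np.float32) for s, v in zip(shape_list, value_list)]
print(num_outputs, [t.shape for t in tensors])  # 2 [(2, 1), (1, 2)]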
import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow import oneflow.python.utils.data as Data -import oneflow.experimental.nn as nn +import oneflow.nn as nn class LinearNet(nn.Module): diff --git a/oneflow/python/test/graph/test_forward_graph.py b/oneflow/python/test/graph/test_forward_graph.py index 02177e77bb52136b0869c8de34c9db0d989c2241..6bd7460c7b60249be98aa65c7d71e3818225ad2e 100644 --- a/oneflow/python/test/graph/test_forward_graph.py +++ b/oneflow/python/test/graph/test_forward_graph.py @@ -17,7 +17,7 @@ import unittest import os import oneflow -import oneflow.experimental as flow +import oneflow as flow class SubModule(flow.nn.Module): diff --git a/oneflow/python/test/graph/test_graph.py b/oneflow/python/test/graph/test_graph.py index 0a07ec04bce3b32602d25e9565418ea3becaf8b5..d2387354705b2df17fb8fefd2e0a1521839a2588 100644 --- a/oneflow/python/test/graph/test_graph.py +++ b/oneflow/python/test/graph/test_graph.py @@ -19,7 +19,7 @@ import os import numpy as np import oneflow -import oneflow.experimental as flow +import oneflow as flow import oneflow.python.framework.graph_build_util as graph_build_util diff --git a/oneflow/python/test/graph/test_graph_optimizer.py b/oneflow/python/test/graph/test_graph_optimizer.py index 6bc78f94d1c7c9b28fca8e0dd7343b651d240a63..952b8ebf3f94484b6c59849f13522ef9b88cb645 100644 --- a/oneflow/python/test/graph/test_graph_optimizer.py +++ b/oneflow/python/test/graph/test_graph_optimizer.py @@ -20,7 +20,7 @@ import os import numpy as np import oneflow -import oneflow.experimental as flow +import oneflow as flow @flow.unittest.skip_unless_1n1d() diff --git a/oneflow/python/test/graph/test_input_op_expr.py b/oneflow/python/test/graph/test_input_op_expr.py index f5e40544ee51db7c0b1ab6f67c0f3e68c8e14057..4cb52209f03f9f3dc485ac40be66118e44730bfb 100644 --- a/oneflow/python/test/graph/test_input_op_expr.py +++ b/oneflow/python/test/graph/test_input_op_expr.py @@ -19,7 +19,7 @@ import numpy as np import os import oneflow -import oneflow.experimental as flow +import oneflow as flow import oneflow.python.framework.session_context as session_ctx import oneflow._oneflow_internal from oneflow.python.framework.multi_client_session import MultiClientSession diff --git a/oneflow/python/test/graph/test_multi_client_session.py b/oneflow/python/test/graph/test_multi_client_session.py index 6d06cdc147cdff2ccddc26b8cf7b18183d1c4ad4..c7caeefb134dad6fcbafd93623bb04da8bf4c799 100644 --- a/oneflow/python/test/graph/test_multi_client_session.py +++ b/oneflow/python/test/graph/test_multi_client_session.py @@ -17,7 +17,7 @@ import unittest import os import oneflow -import oneflow.experimental as flow +import oneflow as flow import oneflow.python.framework.session_context as session_ctx from oneflow.python.framework.multi_client_session import MultiClientSession diff --git a/oneflow/python/test/graph/test_output_op_expr.py b/oneflow/python/test/graph/test_output_op_expr.py index cf650216eeb2e8a5807d080c20f4bbfcb4d93cb2..84780bc13c60dfc39f49bec62a0f1102304f0119 100644 --- a/oneflow/python/test/graph/test_output_op_expr.py +++ b/oneflow/python/test/graph/test_output_op_expr.py @@ -19,7 +19,7 @@ import numpy as np import os import oneflow -import oneflow.experimental as flow +import oneflow as flow import oneflow.python.framework.session_context as session_ctx import oneflow._oneflow_internal from oneflow.python.framework.multi_client_session import MultiClientSession diff --git a/oneflow/python/test/graph/test_user_op_expr.py 
b/oneflow/python/test/graph/test_user_op_expr.py index 2a55ce5947082bc7093ae568be621fed7c1a11e9..ac06a750e5a249c1af3e64f725063b3d4fff4c77 100644 --- a/oneflow/python/test/graph/test_user_op_expr.py +++ b/oneflow/python/test/graph/test_user_op_expr.py @@ -19,7 +19,7 @@ import numpy as np import os import oneflow -import oneflow.experimental as flow +import oneflow as flow import oneflow.python.framework.session_context as session_ctx import oneflow._oneflow_internal from oneflow.python.framework.multi_client_session import MultiClientSession diff --git a/oneflow/python/test/graph/test_variable_op_expr.py b/oneflow/python/test/graph/test_variable_op_expr.py index 363d562fc4280471b254da2a63dbf7a4e6565520..a5d3b0576587cab80817db7bf5b95d46aba4f104 100644 --- a/oneflow/python/test/graph/test_variable_op_expr.py +++ b/oneflow/python/test/graph/test_variable_op_expr.py @@ -19,7 +19,7 @@ import numpy as np import os import oneflow -import oneflow.experimental as flow +import oneflow as flow import oneflow.python.framework.session_context as session_ctx import oneflow._oneflow_internal from oneflow.python.framework.multi_client_session import MultiClientSession diff --git a/oneflow/python/test/modules/resnet50_model.py b/oneflow/python/test/modules/resnet50_model.py index 883289bb9fee7505dce83441e81aac4113ee57c2..b63ca9fda3fe5e73ab5a96a63adfed7bfb81a1f1 100644 --- a/oneflow/python/test/modules/resnet50_model.py +++ b/oneflow/python/test/modules/resnet50_model.py @@ -13,9 +13,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import oneflow.experimental as flow -import oneflow.experimental.nn as nn -from oneflow.experimental import Tensor +import oneflow as flow +import oneflow.nn as nn +from oneflow import Tensor from typing import Type, Any, Callable, Union, List, Optional diff --git a/oneflow/python/test/modules/test_abs.py b/oneflow/python/test/modules/test_abs.py index 14a5e6a8b571023aafac7a113261dd95edc0b209..8dbeac7b312e8b7ce1bd52869ff1f7031012f131 100644 --- a/oneflow/python/test/modules/test_abs.py +++ b/oneflow/python/test/modules/test_abs.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_acos.py b/oneflow/python/test/modules/test_acos.py index 7d96154838a394b46c296501468b64d023ac6536..1d17fe24be7d60609e95654b3b2a6be22b582d30 100644 --- a/oneflow/python/test/modules/test_acos.py +++ b/oneflow/python/test/modules/test_acos.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_acosh.py b/oneflow/python/test/modules/test_acosh.py index 91c559e0021500cbddbfc9409690a045ad3d977f..cb5909ff55091c866f9b8fc24bb832b05769b35c 100644 --- a/oneflow/python/test/modules/test_acosh.py +++ b/oneflow/python/test/modules/test_acosh.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_activation.py b/oneflow/python/test/modules/test_activation.py index daf366e18faf64c30d2c08d1dc9b1e5da02b2ab3..af3a23be28df514ce610990772ad67c97bf504f9 100644 --- a/oneflow/python/test/modules/test_activation.py +++ 
b/oneflow/python/test/modules/test_activation.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np from scipy import special -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_adaptive_pool.py b/oneflow/python/test/modules/test_adaptive_pool.py index 6b05177e8aa303cd994a9686466942c207d6c264..0baac1c056f4cfc24f4f8571e45a21a238ad4fdd 100644 --- a/oneflow/python/test/modules/test_adaptive_pool.py +++ b/oneflow/python/test/modules/test_adaptive_pool.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_add.py b/oneflow/python/test/modules/test_add.py index 268039afd783e75f8522a7ba30648f15b71d497b..870f5752f4f22b4d064564ef00ec2cb4578284dd 100644 --- a/oneflow/python/test/modules/test_add.py +++ b/oneflow/python/test/modules/test_add.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_addmm.py b/oneflow/python/test/modules/test_addmm.py index 787a0303309c79267628e696dfacc618541f0241..9e7584dd98a73a74896a46073f3ad3d8ca24c267 100644 --- a/oneflow/python/test/modules/test_addmm.py +++ b/oneflow/python/test/modules/test_addmm.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_allreduce.py b/oneflow/python/test/modules/test_allreduce.py index 840cffe8e300cdb40f668e0697ce80d25d85287b..635e5dc2b7dd7420e5a05c8f725b1a376388886a 100644 --- a/oneflow/python/test/modules/test_allreduce.py +++ b/oneflow/python/test/modules/test_allreduce.py @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
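# [editor's note] From this point on the patch is dominated by one mechanical change in the
# test suite: the experimental namespace is being retired, so every `oneflow.experimental`
# import is replaced by its top-level counterpart. The recurring substitution, summarized:
#
#   import oneflow.experimental as flow       ->  import oneflow as flow
#   import oneflow.experimental.nn as nn      ->  import oneflow.nn as nn
#   from oneflow.experimental import Tensor   ->  from oneflow import Tensor
#
# The replacement imports, exactly as they appear in the updated tests:
import oneflow as flow
import oneflow.nn as nn
from oneflow import Tensor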
""" import unittest -import oneflow.experimental as flow +import oneflow as flow import numpy as np diff --git a/oneflow/python/test/modules/test_arange.py b/oneflow/python/test/modules/test_arange.py index 5466fc72c93bdd004945c30ee575800ad68871d0..462ec7921504387f09486baa7b484fc2adbe1c48 100644 --- a/oneflow/python/test/modules/test_arange.py +++ b/oneflow/python/test/modules/test_arange.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_argmax.py b/oneflow/python/test/modules/test_argmax.py index 2f6e97661d6adf0677afe950e239b03a88c32db5..f49e4719ef50554b1ddebe877a744d30d34e50dc 100644 --- a/oneflow/python/test/modules/test_argmax.py +++ b/oneflow/python/test/modules/test_argmax.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList @@ -75,7 +75,7 @@ def _test_argmax_dim_equal_none(test_case, device): test_case.assertTrue(np.array_equal(of_out.numpy().flatten(), np_out.flatten())) -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestArgmax(flow.unittest.TestCase): def test_argmax(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_argsort.py b/oneflow/python/test/modules/test_argsort.py index 88c44be03c97ccc9f05385087e153968cbb5933a..d4e076f954e010ba5b03d0e5e7597b6844b35066 100644 --- a/oneflow/python/test/modules/test_argsort.py +++ b/oneflow/python/test/modules/test_argsort.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList, type_name_to_flow_type diff --git a/oneflow/python/test/modules/test_argwhere.py b/oneflow/python/test/modules/test_argwhere.py index dfcbe189e133d00a43aaca19a2487e0cc60af11a..6ec0cc0185dce15a7f1fe085b37ab14a4ace153e 100644 --- a/oneflow/python/test/modules/test_argwhere.py +++ b/oneflow/python/test/modules/test_argwhere.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_atan.py b/oneflow/python/test/modules/test_atan.py index 29f9a695bcca90670e2023f089880c74765b65d1..9897a8d0015856a2e2756e030bbeec835705ddfd 100644 --- a/oneflow/python/test/modules/test_atan.py +++ b/oneflow/python/test/modules/test_atan.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_atan2.py b/oneflow/python/test/modules/test_atan2.py index 5617fe90fa28c0705e0af78c136145385902635f..35fbbbe598578852e8fc89b657d93e563f881edf 100644 --- a/oneflow/python/test/modules/test_atan2.py +++ b/oneflow/python/test/modules/test_atan2.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np from test_util import GenArgList -import oneflow.experimental as flow +import oneflow as flow from automated_test_util import * @@ -102,7 +102,7 @@ def _test_atan2_backward(test_case, device): test_y_grad() -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestAtan2(flow.unittest.TestCase): def test_atan2_forward(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_atanh.py 
b/oneflow/python/test/modules/test_atanh.py index 701d6cc40e85bb0f5e59546042ddec46c82951d9..d28dc640d9b20cb7fab38745c4055365bb01764e 100644 --- a/oneflow/python/test/modules/test_atanh.py +++ b/oneflow/python/test/modules/test_atanh.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_autograd.py b/oneflow/python/test/modules/test_autograd.py index 7e84ccbcce050f7068ddf8e8b631d0fa45a15704..6eca2808d63a3fcd6c43ef6d9c464111ced687ca 100644 --- a/oneflow/python/test/modules/test_autograd.py +++ b/oneflow/python/test/modules/test_autograd.py @@ -18,7 +18,7 @@ import unittest from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_avgpool.py b/oneflow/python/test/modules/test_avgpool.py index c3f6bf7e95241de857d382e87265fa703aa8b797..594ebde3b919ed863aa992b652fec0c698a70c88 100644 --- a/oneflow/python/test/modules/test_avgpool.py +++ b/oneflow/python/test/modules/test_avgpool.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np import math -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_avgpool2d.py b/oneflow/python/test/modules/test_avgpool2d.py index 6d06de0f55b546d24fe24e41eadd33412f8d9e80..08880a2e125b878cf474551aa07a4b89c0c8bc2b 100644 --- a/oneflow/python/test/modules/test_avgpool2d.py +++ b/oneflow/python/test/modules/test_avgpool2d.py @@ -17,7 +17,7 @@ import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow from oneflow.python.nn.modules.utils import ( _single, _pair, diff --git a/oneflow/python/test/modules/test_batchnorm.py b/oneflow/python/test/modules/test_batchnorm.py index 70416a886cfc0fe248a0331172e1d4ad8a215a46..cda8d0a4e64dec82ed30b8b816c005d951aef53f 100644 --- a/oneflow/python/test/modules/test_batchnorm.py +++ b/oneflow/python/test/modules/test_batchnorm.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_bce_loss.py b/oneflow/python/test/modules/test_bce_loss.py index c51aff4ba23c5f1e3e2d253aeecf6e4915dacafd..85d476ed352fd030a35c4e5ab9b665d33f8869e7 100644 --- a/oneflow/python/test/modules/test_bce_loss.py +++ b/oneflow/python/test/modules/test_bce_loss.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_bcewithlogitsloss.py b/oneflow/python/test/modules/test_bcewithlogitsloss.py index 77eb94b3e445f36f821e413b981183212d272d54..87e82d69f86a4c08b6d41e9a7c5ab3899ab9b04e 100644 --- a/oneflow/python/test/modules/test_bcewithlogitsloss.py +++ b/oneflow/python/test/modules/test_bcewithlogitsloss.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_bernoulli.py b/oneflow/python/test/modules/test_bernoulli.py index df0e41067a9cd0b245372062427f8c793493eeae..d7eb7f2db04e4b9f1263e4ea703f805049c7c2de 100644 --- 
a/oneflow/python/test/modules/test_bernoulli.py +++ b/oneflow/python/test/modules/test_bernoulli.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_bmm.py b/oneflow/python/test/modules/test_bmm.py index 96155b0d773ae1b3a669ac335ac28af70f1f5a0e..3703b8fd9c0b8e4753da481b77776fccfed80bdb 100644 --- a/oneflow/python/test/modules/test_bmm.py +++ b/oneflow/python/test/modules/test_bmm.py @@ -18,7 +18,7 @@ from collections import OrderedDict import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_broadcast_like.py b/oneflow/python/test/modules/test_broadcast_like.py index 8198727b1784ded020907b54eaf2e38bdbc48041..76bb0f16a3d637335dba63624cfa6108d688a734 100644 --- a/oneflow/python/test/modules/test_broadcast_like.py +++ b/oneflow/python/test/modules/test_broadcast_like.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_cast.py b/oneflow/python/test/modules/test_cast.py index 186581e24dc8f1fa4244cea5fa85820e5e19cb4a..df220430aa3bdb11387e40f1fd5b631e74cd2f51 100644 --- a/oneflow/python/test/modules/test_cast.py +++ b/oneflow/python/test/modules/test_cast.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_ceil.py b/oneflow/python/test/modules/test_ceil.py index 17fd1ee16f5fae7d577a9b8775c0318cbb3c6c9f..ebc26b01b9d2ee975e138b26c0350a0e5b4dd720 100644 --- a/oneflow/python/test/modules/test_ceil.py +++ b/oneflow/python/test/modules/test_ceil.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_chunk.py b/oneflow/python/test/modules/test_chunk.py index 2c759830c8f62eccf490c4c0073e48c09f80beba..11eecee739c5ca4c35cb85f8da3804c1e95843d5 100644 --- a/oneflow/python/test/modules/test_chunk.py +++ b/oneflow/python/test/modules/test_chunk.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_clamp.py b/oneflow/python/test/modules/test_clamp.py index 4c0a671deaa26caad10fcba4856fc5cfd47ed87c..1a94cf539c04cb7eb506360615f98cc771054444 100644 --- a/oneflow/python/test/modules/test_clamp.py +++ b/oneflow/python/test/modules/test_clamp.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_concat.py b/oneflow/python/test/modules/test_concat.py index d0b2f5af8eebfff5a7d2749e6b88105de60c95fc..9b294788be65d8971e578d9ece3c4c3d95878118 100644 --- a/oneflow/python/test/modules/test_concat.py +++ b/oneflow/python/test/modules/test_concat.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList @@ -120,7 +120,7 @@ def 
_test_concat_grad_and_no_grad(test_case, device): ) -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestModule(flow.unittest.TestCase): def test_concat(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_constant.py b/oneflow/python/test/modules/test_constant.py index caf582641f162c96047236cd47355ae4cdbeb8b4..ce043716e2a5ba21e60264377c3d667db67a5a9c 100644 --- a/oneflow/python/test/modules/test_constant.py +++ b/oneflow/python/test/modules/test_constant.py @@ -20,7 +20,7 @@ from oneflow.python.framework.tensor import register_tensor_op import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_constantpad.py b/oneflow/python/test/modules/test_constantpad.py index 22391dbb728f52c38a5ffb481473a0b8858adc70..5df0db2459b71e6192877c186bcc9eea086f2914 100644 --- a/oneflow/python/test/modules/test_constantpad.py +++ b/oneflow/python/test/modules/test_constantpad.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from automated_test_util import * from test_util import ( GenArgList, diff --git a/oneflow/python/test/modules/test_conv.py b/oneflow/python/test/modules/test_conv.py index 922aebfdfba8699321e99f0c5e0caf568e9020a8..347f41f07cecdd95f87c9eda6f5effaaab7d62c8 100644 --- a/oneflow/python/test/modules/test_conv.py +++ b/oneflow/python/test/modules/test_conv.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_conv1d.py b/oneflow/python/test/modules/test_conv1d.py index a0e70002934fb0bde88006de1ca6d29a043a5c31..de939d9232b9b5f8ae1ab836a300f13a51420383 100644 --- a/oneflow/python/test/modules/test_conv1d.py +++ b/oneflow/python/test/modules/test_conv1d.py @@ -18,8 +18,8 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow -import oneflow.experimental.nn as nn +import oneflow as flow +import oneflow.nn as nn from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_crossentropyloss.py b/oneflow/python/test/modules/test_crossentropyloss.py index 000366cfe2cfa7204b21e070be6f3980ac4ad390..d9f7483353a0c0ee17808b0b757bf67856966cf8 100644 --- a/oneflow/python/test/modules/test_crossentropyloss.py +++ b/oneflow/python/test/modules/test_crossentropyloss.py @@ -17,7 +17,7 @@ import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow from automated_test_util import * diff --git a/oneflow/python/test/modules/test_ctc_loss.py b/oneflow/python/test/modules/test_ctc_loss.py index 5dd4fbffb5acadd853564d09a09f996f1975df29..4b2295b4ed2021b32a9f250517d5ba68ae3062ff 100644 --- a/oneflow/python/test/modules/test_ctc_loss.py +++ b/oneflow/python/test/modules/test_ctc_loss.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList ninf = -float("inf") diff --git a/oneflow/python/test/modules/test_dataset.py b/oneflow/python/test/modules/test_dataset.py index 533559c0469f1f17c9e301fd6467a03aa28439f9..48b68fcb27af25b1543a2e139386f966cb3efaf7 100644 --- a/oneflow/python/test/modules/test_dataset.py +++ b/oneflow/python/test/modules/test_dataset.py @@ -20,7 +20,7 @@ import os import 
cv2 import numpy as np -import oneflow.experimental as flow +import oneflow as flow @flow.unittest.skip_unless_1n1d() diff --git a/oneflow/python/test/modules/test_deconv.py b/oneflow/python/test/modules/test_deconv.py index f93572de10bc42dfac5d549cf324c4f25d55a8dd..752812fd8905c1f8e124c1d6cbee9be3e17de2c4 100644 --- a/oneflow/python/test/modules/test_deconv.py +++ b/oneflow/python/test/modules/test_deconv.py @@ -18,8 +18,8 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow -import oneflow.experimental.nn as nn +import oneflow as flow +import oneflow.nn as nn from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_diag.py b/oneflow/python/test/modules/test_diag.py index a1ff854a3397f78449f5e790ac9645e011ce8afd..2e478e1c253987a6716b04cc5149affe52b030d8 100644 --- a/oneflow/python/test/modules/test_diag.py +++ b/oneflow/python/test/modules/test_diag.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_div.py b/oneflow/python/test/modules/test_div.py index 860276c4e7b45c3b79a24f42b7646551b495b5ea..4a3d156f3d73b29f46bf032054eb23eb95495a04 100644 --- a/oneflow/python/test/modules/test_div.py +++ b/oneflow/python/test/modules/test_div.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_dropout.py b/oneflow/python/test/modules/test_dropout.py index 3135c06b316efe9b2a1cffd648f27cb48584eea4..a04abad84b32cd0c77d2f5d777a7b7d7b83213c6 100644 --- a/oneflow/python/test/modules/test_dropout.py +++ b/oneflow/python/test/modules/test_dropout.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_eq.py b/oneflow/python/test/modules/test_eq.py index 99c94f4f24ab1d3fe5b9d81edfea99f69b4169ab..a2df493ad0c7197e6bfd522f69dad6bd7e966326 100644 --- a/oneflow/python/test/modules/test_eq.py +++ b/oneflow/python/test/modules/test_eq.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_erf.py b/oneflow/python/test/modules/test_erf.py index cd1d2df6cfc6f1ba7f50eb0894e9f29e08dabecd..0e0de88320cf37328f05661bb847f2b70c4b7313 100644 --- a/oneflow/python/test/modules/test_erf.py +++ b/oneflow/python/test/modules/test_erf.py @@ -15,10 +15,10 @@ limitations under the License. """ import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow from scipy import special from collections import OrderedDict -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_erfc.py b/oneflow/python/test/modules/test_erfc.py index 28f776e92560ffeab138f2626c1969cbe39fcbf0..6b2c5df03ed4a023153c60ab75874e10d8e8933b 100644 --- a/oneflow/python/test/modules/test_erfc.py +++ b/oneflow/python/test/modules/test_erfc.py @@ -15,10 +15,10 @@ limitations under the License. 
""" import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow from scipy import special from collections import OrderedDict -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_exp.py b/oneflow/python/test/modules/test_exp.py index 5584a1e0c9ec952eece87b0b0b19818175f307f1..7c92d705509119fda7cd43a7eba60697f97f9fe5 100644 --- a/oneflow/python/test/modules/test_exp.py +++ b/oneflow/python/test/modules/test_exp.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList @@ -37,7 +37,7 @@ def _test_exp_impl(test_case, shape, device): test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_out, 1e-4, 1e-4)) -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestExp(flow.unittest.TestCase): def test_exp(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_expand.py b/oneflow/python/test/modules/test_expand.py index a8afdf819af2661c12c40f8e3c5355fd70d21a18..1bd328cce9ee8c13c564c77e733b59b80d38784a 100644 --- a/oneflow/python/test/modules/test_expand.py +++ b/oneflow/python/test/modules/test_expand.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_expm1.py b/oneflow/python/test/modules/test_expm1.py index e98606eac112cfd0aa938d0373f20b27b31c356f..38220c7242b13e56d435def7e07d23783216729c 100644 --- a/oneflow/python/test/modules/test_expm1.py +++ b/oneflow/python/test/modules/test_expm1.py @@ -18,11 +18,9 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList -flow.enable_eager_execution() - def _test_expm1_impl(test_case, device, shape): x = flow.Tensor( diff --git a/oneflow/python/test/modules/test_flatten.py b/oneflow/python/test/modules/test_flatten.py index bb1ec5e93a37fe1d65dd97eca65b139070f41c98..e6cb0368711e6ea186378e9e40d6f38c53f48df9 100644 --- a/oneflow/python/test/modules/test_flatten.py +++ b/oneflow/python/test/modules/test_flatten.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_flip.py b/oneflow/python/test/modules/test_flip.py index a5ad28f215c31e99902b891f81b4199f910180cb..17a68bf0746b93697204c0f1ca2f866898e0635b 100644 --- a/oneflow/python/test/modules/test_flip.py +++ b/oneflow/python/test/modules/test_flip.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_floor.py b/oneflow/python/test/modules/test_floor.py index 092713ac056a3c04bf8077a23de0e99eb28c4cd6..31ebc5ab1fc94a1b83e2f0d3e2914f1be7a70f4d 100644 --- a/oneflow/python/test/modules/test_floor.py +++ b/oneflow/python/test/modules/test_floor.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_fmod.py 
b/oneflow/python/test/modules/test_fmod.py index 46348f0adcb0143b78aaad1695c392d8c8bd6076..407f6b636b9da960fd4768385e01ddd92d8ce854 100644 --- a/oneflow/python/test/modules/test_fmod.py +++ b/oneflow/python/test/modules/test_fmod.py @@ -19,7 +19,7 @@ import random as rd import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_functional_docstr.py b/oneflow/python/test/modules/test_functional_docstr.py index 8202d6bd77da3bf5299252b501e39f66ad19383b..e73c6c3c469d321be4c172c158575d0a8405b6ba 100644 --- a/oneflow/python/test/modules/test_functional_docstr.py +++ b/oneflow/python/test/modules/test_functional_docstr.py @@ -56,7 +56,7 @@ def _run_functional_doctest( class TestFunctionalDocstrModule(flow.unittest.TestCase): def test_functional_docstr(test_case): arg_dict = OrderedDict() - arg_dict["module"] = [flow.F, flow.experimental.F] + arg_dict["module"] = [flow.F] for arg in GenArgList(arg_dict): _run_functional_doctest( test_case, raise_on_error=True, verbose=None, module=arg[0] diff --git a/oneflow/python/test/modules/test_gather.py b/oneflow/python/test/modules/test_gather.py index c4c5add7d74a40f42f27ba5ef77423c0d6a76e62..d102800a6c5cc2b6d94c74eb9a2a851409bb8a49 100644 --- a/oneflow/python/test/modules/test_gather.py +++ b/oneflow/python/test/modules/test_gather.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_gather_nd.py b/oneflow/python/test/modules/test_gather_nd.py index 4677de7e6a43be1b8db7b64f46aa4d1ffeec67e7..720652ac5af3c0f07e269f2dccb4e8de10925648 100644 --- a/oneflow/python/test/modules/test_gather_nd.py +++ b/oneflow/python/test/modules/test_gather_nd.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_greater.py b/oneflow/python/test/modules/test_greater.py index 0fdb4f4237e2747f81f817113575e70d4239b3cc..842753c01523f3f6d2881426e76e9a09a8c52f76 100644 --- a/oneflow/python/test/modules/test_greater.py +++ b/oneflow/python/test/modules/test_greater.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_greater_equal.py b/oneflow/python/test/modules/test_greater_equal.py index e5435feca516580a47ab4bb6fa5f3e258be4dca2..e06dc33550039e5c4aa860a47f7aade9abbcabea 100644 --- a/oneflow/python/test/modules/test_greater_equal.py +++ b/oneflow/python/test/modules/test_greater_equal.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_groupnorm.py b/oneflow/python/test/modules/test_groupnorm.py index eaec60b0065f6caa46b7b954e64194a3e13eacd5..763823eb5b5860a275f37ed9ce5ff47b715784c4 100644 --- a/oneflow/python/test/modules/test_groupnorm.py +++ b/oneflow/python/test/modules/test_groupnorm.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git 
a/oneflow/python/test/modules/test_image_batch_align.py b/oneflow/python/test/modules/test_image_batch_align.py index 5dedd701faf7a3d523eb5453d4f49b04cda0e8f7..ad963a92ff08afff3864b2409e2a0b7cae00ff91 100644 --- a/oneflow/python/test/modules/test_image_batch_align.py +++ b/oneflow/python/test/modules/test_image_batch_align.py @@ -20,7 +20,7 @@ import cv2 import operator from functools import reduce -import oneflow.experimental as flow +import oneflow as flow def _read_images_by_cv(image_files): diff --git a/oneflow/python/test/modules/test_image_decode.py b/oneflow/python/test/modules/test_image_decode.py index 41438cf1919b66bcd1fb4239320972d1b8e27448..68c404deacd9a669f706013a63c6f79d0c958b71 100644 --- a/oneflow/python/test/modules/test_image_decode.py +++ b/oneflow/python/test/modules/test_image_decode.py @@ -18,7 +18,7 @@ import unittest import numpy as np import cv2 -import oneflow.experimental as flow +import oneflow as flow @flow.unittest.skip_unless_1n1d() diff --git a/oneflow/python/test/modules/test_image_flip.py b/oneflow/python/test/modules/test_image_flip.py index 05185b9677797d3c669d456ba1e34c4242788323..969260413c1b809e48de8d4487d1b9d321e596bb 100644 --- a/oneflow/python/test/modules/test_image_flip.py +++ b/oneflow/python/test/modules/test_image_flip.py @@ -16,7 +16,7 @@ limitations under the License. import unittest import cv2 import numpy as np -import oneflow.experimental as flow +import oneflow as flow def _of_image_flip(images, image_static_shape, flip_code): diff --git a/oneflow/python/test/modules/test_image_normalize.py b/oneflow/python/test/modules/test_image_normalize.py index 84e23cdf4dfa28fed31dd9203adbe96d8edd4ef5..f60f5c7aebc9582a535fe0459a614da68ccdda31 100644 --- a/oneflow/python/test/modules/test_image_normalize.py +++ b/oneflow/python/test/modules/test_image_normalize.py @@ -16,7 +16,7 @@ limitations under the License. import unittest import cv2 import numpy as np -import oneflow.experimental as flow +import oneflow as flow def _of_image_normalize(images, image_static_shape, std, mean): diff --git a/oneflow/python/test/modules/test_image_resize.py b/oneflow/python/test/modules/test_image_resize.py index cb9d66e4c0452328959537c4bd36c165a0521586..7c0de4cc0b70bbb791e575b456011f53a723af32 100644 --- a/oneflow/python/test/modules/test_image_resize.py +++ b/oneflow/python/test/modules/test_image_resize.py @@ -16,8 +16,8 @@ limitations under the License. 
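# [editor's note] Two hunks above go beyond the plain import swap: test_expm1.py drops its
# module-level flow.enable_eager_execution() call, and test_functional_docstr.py stops
# exercising flow.experimental.F next to flow.F. Both follow from the same migration: eager
# execution and the functional interface now live in the top-level namespace, so there is no
# opt-in call or experimental alias left to test. A minimal sketch of what the updated tests
# assume, using flow.exp only as a stand-in op; it presumes a OneFlow build from this branch.
import numpy as np
import oneflow as flow

x = flow.Tensor(np.array([0.0, 1.0], dtype=np.float32))
y = flow.exp(x)        # runs eagerly by default, no enable_eager_execution() needed
print(y.numpy())       # approximately [1. 2.7182817]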
import unittest import cv2 import numpy as np -import oneflow.experimental as flow -import oneflow.experimental.nn as nn +import oneflow as flow +import oneflow.nn as nn import image_test_util diff --git a/oneflow/python/test/modules/test_in_top_k.py b/oneflow/python/test/modules/test_in_top_k.py index 7e5107f279eb098c29c3e0e22ed63def19f670b3..01811431d3ca41d011f14fa8edfafc44cd8b08c8 100644 --- a/oneflow/python/test/modules/test_in_top_k.py +++ b/oneflow/python/test/modules/test_in_top_k.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_instancenorm.py b/oneflow/python/test/modules/test_instancenorm.py index 57e5cf454f8ba98ae1b7df1251d28a1dd52bf3dd..3a14d07a11ccc75aef1d24c8c5cce139c212f562 100644 --- a/oneflow/python/test/modules/test_instancenorm.py +++ b/oneflow/python/test/modules/test_instancenorm.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_instruction_replay.py b/oneflow/python/test/modules/test_instruction_replay.py index a12572fb4b73b1baa6576fab1f4f6382c1fde135..1717403e28bff60abd59e284ce6d129af7d4c22d 100644 --- a/oneflow/python/test/modules/test_instruction_replay.py +++ b/oneflow/python/test/modules/test_instruction_replay.py @@ -20,7 +20,7 @@ from collections import OrderedDict import oneflow import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_interpolate.py b/oneflow/python/test/modules/test_interpolate.py index 49dbc8cee565c575e359fdd70a3ccb2a211acc71..1b7001411aa3e3e928237b9f6b9b075e14a725e6 100644 --- a/oneflow/python/test/modules/test_interpolate.py +++ b/oneflow/python/test/modules/test_interpolate.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_kldivloss.py b/oneflow/python/test/modules/test_kldivloss.py index 75513010ceb3b39a4c027015d7e3af917e85c310..e93245ec45a299993c2be813299b2aede5f001ab 100644 --- a/oneflow/python/test/modules/test_kldivloss.py +++ b/oneflow/python/test/modules/test_kldivloss.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_l1loss.py b/oneflow/python/test/modules/test_l1loss.py index a79384ceab7253b77a3abf7aab7f28550474a3ff..7e485640f1c47ad6a4d49dc151b253b44b2957cf 100644 --- a/oneflow/python/test/modules/test_l1loss.py +++ b/oneflow/python/test/modules/test_l1loss.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_less.py b/oneflow/python/test/modules/test_less.py index 34c4968eee2f01eb3ac614bd1e421df3b7e1253d..000a66029209cb309f99fa7fcc0cc1dad386b62a 100644 --- a/oneflow/python/test/modules/test_less.py +++ b/oneflow/python/test/modules/test_less.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from 
automated_test_util import * diff --git a/oneflow/python/test/modules/test_less_equal.py b/oneflow/python/test/modules/test_less_equal.py index b59f0a955b04efe06dcebc0fac9d696795c7e91c..021143fc883f014c3c349b94034b1da5fb7235e1 100644 --- a/oneflow/python/test/modules/test_less_equal.py +++ b/oneflow/python/test/modules/test_less_equal.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_linear.py b/oneflow/python/test/modules/test_linear.py index 0085196765826546f33b44f42325fee742aed553..e2fe89924c6ec817b13ea31883b4bf076567be48 100644 --- a/oneflow/python/test/modules/test_linear.py +++ b/oneflow/python/test/modules/test_linear.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_log1p.py b/oneflow/python/test/modules/test_log1p.py index 5e60d9783060c186f1ec941ba2eeffedbf393bfb..19309ab94dfe1b0127cf03f27462a873e9fff9d5 100644 --- a/oneflow/python/test/modules/test_log1p.py +++ b/oneflow/python/test/modules/test_log1p.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_lr_scheduler.py b/oneflow/python/test/modules/test_lr_scheduler.py index e1d6d57ced1766a91d4a8268aa175325519d2c02..1bc26888435abf3f981ca58f7f8c1356ec8f3400 100644 --- a/oneflow/python/test/modules/test_lr_scheduler.py +++ b/oneflow/python/test/modules/test_lr_scheduler.py @@ -17,7 +17,7 @@ limitations under the License. 
import math import unittest -import oneflow.experimental as flow +import oneflow as flow from oneflow.python.nn.parameter import Parameter diff --git a/oneflow/python/test/modules/test_marginrankingloss.py b/oneflow/python/test/modules/test_marginrankingloss.py index d8733a789bd4fb98450665af08a38b7d0f174b4a..3dde442488a4313f291f712d63d30270696d1483 100644 --- a/oneflow/python/test/modules/test_marginrankingloss.py +++ b/oneflow/python/test/modules/test_marginrankingloss.py @@ -18,8 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow -from oneflow.python.ops.constant_op import zeros_like +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_masked_fill.py b/oneflow/python/test/modules/test_masked_fill.py index 040fa97e7d7d35c6f35764d4a3cdf39549e48325..f8c84d1041bb9f04a0b29af3d2ef00ff284f7a69 100644 --- a/oneflow/python/test/modules/test_masked_fill.py +++ b/oneflow/python/test/modules/test_masked_fill.py @@ -17,7 +17,7 @@ import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow from automated_test_util import * diff --git a/oneflow/python/test/modules/test_masked_select.py b/oneflow/python/test/modules/test_masked_select.py index 8e9c8f5bab460c8f8d8074fb12e9937b041ffa45..3d2ca55c6d30f2bce0be7c961ec0ec5f3c08438e 100644 --- a/oneflow/python/test/modules/test_masked_select.py +++ b/oneflow/python/test/modules/test_masked_select.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_math_ops.py b/oneflow/python/test/modules/test_math_ops.py index 0df4a804dbd1b9d37f204cd5b6e0a8ff80ea29d5..c8a1592646384b84861813b5cc917fcf3116ff41 100644 --- a/oneflow/python/test/modules/test_math_ops.py +++ b/oneflow/python/test/modules/test_math_ops.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type from automated_test_util import * diff --git a/oneflow/python/test/modules/test_matmul.py b/oneflow/python/test/modules/test_matmul.py index 7ccd5e69e27ae50a417ad9781c9311f829629a93..1dd4c2880a52526e1a12a8b033fbb364659498fb 100644 --- a/oneflow/python/test/modules/test_matmul.py +++ b/oneflow/python/test/modules/test_matmul.py @@ -19,7 +19,7 @@ import unittest import numpy as np import torch -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_mean.py b/oneflow/python/test/modules/test_mean.py index a43adea37019643b545818f5f1a7ab208213b056..87304c89368d83265092b191088fcbd3539e2550 100644 --- a/oneflow/python/test/modules/test_mean.py +++ b/oneflow/python/test/modules/test_mean.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_meshgrid.py b/oneflow/python/test/modules/test_meshgrid.py index e4a9612f1fb83b33645c6f6b787f9f39fa589b3a..22cb5f4c5f0118dbc415ea4892e74d8b7242e793 100644 --- a/oneflow/python/test/modules/test_meshgrid.py +++ b/oneflow/python/test/modules/test_meshgrid.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as 
np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_module.py b/oneflow/python/test/modules/test_module.py index 670b22c8e1a4a82331b213036fcf2caf526ec9bc..ca0efeea5df8ecdbac8d0a77cbc747f3f5ffedf9 100644 --- a/oneflow/python/test/modules/test_module.py +++ b/oneflow/python/test/modules/test_module.py @@ -21,7 +21,7 @@ import tempfile import numpy as np -import oneflow.experimental as flow +import oneflow as flow import oneflow.typing as tp diff --git a/oneflow/python/test/modules/test_module_to.py b/oneflow/python/test/modules/test_module_to.py index eb170e0cabe22dd7c2bdbb0539133f623812245a..974aa94f3b75fb8af7bb6b7e8f70056a6760e70e 100644 --- a/oneflow/python/test/modules/test_module_to.py +++ b/oneflow/python/test/modules/test_module_to.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList dummy_val = np.random.randn(2, 3) diff --git a/oneflow/python/test/modules/test_mseloss.py b/oneflow/python/test/modules/test_mseloss.py index bc505c1d6df29c205d48c4579441304fd6d75a04..2e82f9d89c2d5ed32ff6b5ee1e0ec8f36f008c0d 100644 --- a/oneflow/python/test/modules/test_mseloss.py +++ b/oneflow/python/test/modules/test_mseloss.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_mul.py b/oneflow/python/test/modules/test_mul.py index 58cd05ce84f4c3564ff94b4d575dbffa28be4cc0..54063d693a83c16f155e2ba41d614454faa13720 100644 --- a/oneflow/python/test/modules/test_mul.py +++ b/oneflow/python/test/modules/test_mul.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_ne.py b/oneflow/python/test/modules/test_ne.py index 4ba9839afdb7264c5b34288f824379da7b8d6787..f7db2d3a6a6466cb8c17deb1fee710293deed9aa 100644 --- a/oneflow/python/test/modules/test_ne.py +++ b/oneflow/python/test/modules/test_ne.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_negative.py b/oneflow/python/test/modules/test_negative.py index af06e774d756459bbb9a2f7fbc8e48066f8eadc0..b45f69a268ef1ca6605741afcaf46dc285fec1bc 100644 --- a/oneflow/python/test/modules/test_negative.py +++ b/oneflow/python/test/modules/test_negative.py @@ -18,7 +18,7 @@ from collections import OrderedDict import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList @@ -60,7 +60,7 @@ def _test_negative_backward(test_case, shape, device): ) -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestNegativeModule(flow.unittest.TestCase): def test_negative(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_nllloss.py b/oneflow/python/test/modules/test_nllloss.py index 2038af18667ed245fbbc5edc5c2ddefcfa6b0f96..d93c8d10edb328a72a3f054163dce5ccafa1c89e 100644 --- a/oneflow/python/test/modules/test_nllloss.py +++ b/oneflow/python/test/modules/test_nllloss.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import 
oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList @@ -316,7 +316,7 @@ def _test_nllloss_bert_sum(test_case, device): test_case.assertTrue(np.allclose(of_out.numpy(), np_out)) -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestNLLLossModule(flow.unittest.TestCase): def test_nllloss(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_nllloss_grad.py b/oneflow/python/test/modules/test_nllloss_grad.py index b511d694f7a5dc73e0e75fdbd4500c84e264b77b..67c7a07f79f0dd98a3f636ebe5e26ef4d90206a5 100644 --- a/oneflow/python/test/modules/test_nllloss_grad.py +++ b/oneflow/python/test/modules/test_nllloss_grad.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_norm.py b/oneflow/python/test/modules/test_norm.py index a7bd456b98b2e8992ed6c85b4e4edb084556fcdf..adacbfb54f13382b0c3bbd9ff21af75b9c8de615 100644 --- a/oneflow/python/test/modules/test_norm.py +++ b/oneflow/python/test/modules/test_norm.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_normalization.py b/oneflow/python/test/modules/test_normalization.py index be67e90c3535f1dead93abeab24ba64c780130d5..2a32d9e6d7546c18984f9dcbc6ee046c51f81881 100644 --- a/oneflow/python/test/modules/test_normalization.py +++ b/oneflow/python/test/modules/test_normalization.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_ones_like.py b/oneflow/python/test/modules/test_ones_like.py index 08402115052f665b0d6248f43d6972bf2256609f..abfb768835868236dcfa08167ee95fa4e8285d03 100644 --- a/oneflow/python/test/modules/test_ones_like.py +++ b/oneflow/python/test/modules/test_ones_like.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_optim_adam.py b/oneflow/python/test/modules/test_optim_adam.py index a5a893f798a2f95716bca2013ba19195465e3c99..85da39eb0e55d18a7f05d385db023e3decaeef2f 100644 --- a/oneflow/python/test/modules/test_optim_adam.py +++ b/oneflow/python/test/modules/test_optim_adam.py @@ -17,7 +17,7 @@ import unittest from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from oneflow.python.nn.parameter import Parameter diff --git a/oneflow/python/test/modules/test_optim_adamw.py b/oneflow/python/test/modules/test_optim_adamw.py index a6e5799d42737c78d725b4d59ef5b3e8a6ee1d76..2db04cd859701441e26f5ec0da5cb66f5b24990e 100644 --- a/oneflow/python/test/modules/test_optim_adamw.py +++ b/oneflow/python/test/modules/test_optim_adamw.py @@ -17,7 +17,7 @@ import unittest from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from oneflow.python.nn.parameter import Parameter diff --git a/oneflow/python/test/modules/test_optim_rmsprop.py b/oneflow/python/test/modules/test_optim_rmsprop.py index 
76a4716b2f4a8281c5c7c9b3d22fc03695f1bba7..2d839adbdc825bf34d58ff934793c0d6a9c88208 100644 --- a/oneflow/python/test/modules/test_optim_rmsprop.py +++ b/oneflow/python/test/modules/test_optim_rmsprop.py @@ -17,7 +17,7 @@ import unittest from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from oneflow.python.nn.parameter import Parameter diff --git a/oneflow/python/test/modules/test_optim_sgd.py b/oneflow/python/test/modules/test_optim_sgd.py index 09d85b5fcec3234aac31e00412079532cc272fde..29ec3281d2626419bbe1a361c3659b81901bb200 100644 --- a/oneflow/python/test/modules/test_optim_sgd.py +++ b/oneflow/python/test/modules/test_optim_sgd.py @@ -18,7 +18,8 @@ import unittest from collections import OrderedDict import numpy as np -import oneflow.experimental as flow + +import oneflow as flow from test_util import GenArgDict from oneflow.python.nn.parameter import Parameter diff --git a/oneflow/python/test/modules/test_permute.py b/oneflow/python/test/modules/test_permute.py index d4d86f23d460d2f3d8b439d80aede2b91f83a175..433c0097fb020f791fed5504a552a6e3713b35fa 100644 --- a/oneflow/python/test/modules/test_permute.py +++ b/oneflow/python/test/modules/test_permute.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_pixel_shuffle.py b/oneflow/python/test/modules/test_pixel_shuffle.py index 3e70c3980e35cc8404596187630d1c20feb81d56..873270490cf6daef2252f6cb3606d829e0b99bd4 100644 --- a/oneflow/python/test/modules/test_pixel_shuffle.py +++ b/oneflow/python/test/modules/test_pixel_shuffle.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_pooling.py b/oneflow/python/test/modules/test_pooling.py index 29301e1d40441c457b0e21ec487bd067d2a557db..2d1e31732da23c03277517724978422673e61b2d 100644 --- a/oneflow/python/test/modules/test_pooling.py +++ b/oneflow/python/test/modules/test_pooling.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np import math -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_pow.py b/oneflow/python/test/modules/test_pow.py index 6680acda8c0ea5e9b18cd19c03d5bb010152a306..09b0541b5a74025f2b8471e0830dabf181acbe4e 100644 --- a/oneflow/python/test/modules/test_pow.py +++ b/oneflow/python/test/modules/test_pow.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList @@ -90,7 +90,7 @@ def _test_pow_backward_impl(test_case, device): test_x_grad_scalar() -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestPow(flow.unittest.TestCase): def test_pow_forward(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_prelu.py b/oneflow/python/test/modules/test_prelu.py index 1cce91288a51c6731b5d90451e9b932f5fd8866e..60136177e52b4265002202e9858cb9e3160ceafb 100644 --- a/oneflow/python/test/modules/test_prelu.py +++ b/oneflow/python/test/modules/test_prelu.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow 
+import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_reciprocal.py b/oneflow/python/test/modules/test_reciprocal.py index e37602e9742f6ba2b70e0d7aa077726b341209c1..4f9f4f702b1fb9b9fedc54d0c3cc92e217f75b6b 100644 --- a/oneflow/python/test/modules/test_reciprocal.py +++ b/oneflow/python/test/modules/test_reciprocal.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_reduce_ops.py b/oneflow/python/test/modules/test_reduce_ops.py index 32a8ea6a6b1eddd0c0da644ca15d48abfff3ec23..8e077155673bfd5785243694b4606266bdcb3abb 100644 --- a/oneflow/python/test/modules/test_reduce_ops.py +++ b/oneflow/python/test/modules/test_reduce_ops.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_reflection_pad2d.py b/oneflow/python/test/modules/test_reflection_pad2d.py index bf2568118a8445d789f9365ba95115d157c41a0d..4845f7de5f142da088baf6ef56f96233898d6fb9 100644 --- a/oneflow/python/test/modules/test_reflection_pad2d.py +++ b/oneflow/python/test/modules/test_reflection_pad2d.py @@ -15,7 +15,7 @@ limitations under the License. """ import unittest from collections import OrderedDict -import oneflow.experimental as flow +import oneflow as flow import numpy as np from test_util import ( @@ -109,7 +109,7 @@ def _test_reflection_pad2d(test_case, shape, padding, device): test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-4, 1e-4)) -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestReflectionPad2dModule(flow.unittest.TestCase): def test_reflection_pad2d(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_repeat.py b/oneflow/python/test/modules/test_repeat.py index 09c9ccff182767259d4fce5ad856049cac51dc50..bfad6b5cd34a70ecef575b1d1b36301aaafa7673 100644 --- a/oneflow/python/test/modules/test_repeat.py +++ b/oneflow/python/test/modules/test_repeat.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_replicationpad2d.py b/oneflow/python/test/modules/test_replicationpad2d.py index a607cde5e82a8567db5c8671f7a61b4a1be6893a..0a99f6bfa6b4202575c09e02a8a9a1e70902d7aa 100644 --- a/oneflow/python/test/modules/test_replicationpad2d.py +++ b/oneflow/python/test/modules/test_replicationpad2d.py @@ -18,7 +18,8 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow + from test_util import ( GenArgList, diff --git a/oneflow/python/test/modules/test_reshape.py b/oneflow/python/test/modules/test_reshape.py index 0ff62a03c1725c996a1119eeaf3f36eae1871885..1f009ea3bd1d5c656179370b9e86bb4765ae352e 100644 --- a/oneflow/python/test/modules/test_reshape.py +++ b/oneflow/python/test/modules/test_reshape.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_resnet50_with_bn.py b/oneflow/python/test/modules/test_resnet50_with_bn.py index 
80b0dabe71a087b0c466b99325e1af8883d22768..efdd029e22b499073ab18af5db4744eda1c33266 100644 --- a/oneflow/python/test/modules/test_resnet50_with_bn.py +++ b/oneflow/python/test/modules/test_resnet50_with_bn.py @@ -15,7 +15,7 @@ limitations under the License. """ import os import unittest -import oneflow.experimental as flow +import oneflow as flow from resnet50_model import resnet50 diff --git a/oneflow/python/test/modules/test_resnet50_without_bn.py b/oneflow/python/test/modules/test_resnet50_without_bn.py index 2f3a9d3ac316ede2dcc675044277c6b29f6602fc..2a1aab49257f543493ac34ac18226c310e4b402c 100644 --- a/oneflow/python/test/modules/test_resnet50_without_bn.py +++ b/oneflow/python/test/modules/test_resnet50_without_bn.py @@ -18,7 +18,7 @@ import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow from resnet50_model import resnet50, FakeBN diff --git a/oneflow/python/test/modules/test_round.py b/oneflow/python/test/modules/test_round.py index d7e81fe3c6f6dffdba981d0dc2d30092176037b2..5c860f115362bb05922c97f7520cba373c0e6410 100644 --- a/oneflow/python/test/modules/test_round.py +++ b/oneflow/python/test/modules/test_round.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_scatter_nd.py b/oneflow/python/test/modules/test_scatter_nd.py index cecc8cedd3081d6093165fb01e00de8d21305bd9..2c675feab3e856c920367b24eb8dd656fa3474f5 100644 --- a/oneflow/python/test/modules/test_scatter_nd.py +++ b/oneflow/python/test/modules/test_scatter_nd.py @@ -18,12 +18,11 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList def _test_scatter_nd(test_case, device): - scatter_nd_layer = flow.scatter_nd([8]) indices = flow.Tensor( np.array([[1], [6], [4]]), dtype=flow.int, device=flow.device(device) ) @@ -32,13 +31,12 @@ def _test_scatter_nd(test_case, device): ) np_out = np.array([0.0, 10.2, 0.0, 0.0, 12.7, 0.0, 5.1, 0.0]) - output = scatter_nd_layer(indices, update) + output = flow.scatter_nd(indices, update, [8]) test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-4, 1e-4)) def _test_scatter_nd_t(test_case, device): - scatter_nd_layer = flow.scatter_nd([5, 3]) indices = flow.Tensor( np.array([[0], [4], [2]]), dtype=flow.int, device=flow.device(device) ) @@ -57,13 +55,12 @@ def _test_scatter_nd_t(test_case, device): [2.0, 2.0, 2.0], ] ) - output = scatter_nd_layer(indices, update) + output = flow.scatter_nd(indices, update, [5, 3]) test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-4, 1e-4)) def _test_scatter_nd_backward(test_case, device): - scatter_nd_layer = flow.scatter_nd([8]) indices = flow.Tensor( np.array([[1], [6], [4]]), dtype=flow.int, device=flow.device(device) ) @@ -76,7 +73,7 @@ def _test_scatter_nd_backward(test_case, device): np_out = np.array([0.0, 10.2, 0.0, 0.0, 12.7, 0.0, 5.1, 0.0]) np_grad = np.array([1.0, 1.0, 1.0]) - output = scatter_nd_layer(indices, of_update) + output = flow.scatter_nd(indices, of_update, [8]) out_sum = output.sum() out_sum.backward() diff --git a/oneflow/python/test/modules/test_sign.py b/oneflow/python/test/modules/test_sign.py index 6d3d6c66ecd0b96c08ea4fe1de7d30981df44b9d..152147e6fb32fffa84b96a71900bbb9283025a01 100644 --- a/oneflow/python/test/modules/test_sign.py +++ b/oneflow/python/test/modules/test_sign.py @@ -18,7 +18,7 @@ from collections import 
OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList @@ -38,7 +38,7 @@ def _test_sign_impl(test_case, shape, device): test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-4, 1e-4)) -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class TestSign(flow.unittest.TestCase): def test_sign(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_slice.py b/oneflow/python/test/modules/test_slice.py index 5f7cb3497ecf6461f56330969d41906a8bfbb777..4c49e13571f4a868d09cfd10e88dc2a4d4d167b8 100644 --- a/oneflow/python/test/modules/test_slice.py +++ b/oneflow/python/test/modules/test_slice.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_smoothl1loss.py b/oneflow/python/test/modules/test_smoothl1loss.py index e9689e3a1a45ff70798d72b3517af30d949ab591..1d34c090fbd41ba616d872f73bc36c9c90126eab 100644 --- a/oneflow/python/test/modules/test_smoothl1loss.py +++ b/oneflow/python/test/modules/test_smoothl1loss.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type diff --git a/oneflow/python/test/modules/test_softplus.py b/oneflow/python/test/modules/test_softplus.py index d18e3b20754391854c053c547865666574c6b6ed..f007021096f2164c68589c5ee06a63e912d92e54 100644 --- a/oneflow/python/test/modules/test_softplus.py +++ b/oneflow/python/test/modules/test_softplus.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np from test_util import GenArgList -import oneflow.experimental as flow +import oneflow as flow def _test_softplus_impl(test_case, shape, device): @@ -38,7 +38,7 @@ def _test_softplus_impl(test_case, shape, device): test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_x_grad, 1e-4, 1e-4)) -@flow.unittest.skip_unless_1n1d() +@flow.unittest.skip_unless_1n1d() class Testsoftplus(flow.unittest.TestCase): def test_softplus(test_case): arg_dict = OrderedDict() diff --git a/oneflow/python/test/modules/test_sort.py b/oneflow/python/test/modules/test_sort.py index f27bd58e58c671a2f03baf3a7091f733de444b88..c37fac6a33f6cbf363e2a87387e847b13522ced5 100644 --- a/oneflow/python/test/modules/test_sort.py +++ b/oneflow/python/test/modules/test_sort.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList, type_name_to_flow_type diff --git a/oneflow/python/test/modules/test_sparse.py b/oneflow/python/test/modules/test_sparse.py index 351b64f33c40f09d9d8018c23b4a18363b034d49..dbbd160cba5d80e1f15de04066318006693ae1bf 100644 --- a/oneflow/python/test/modules/test_sparse.py +++ b/oneflow/python/test/modules/test_sparse.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_squeeze.py b/oneflow/python/test/modules/test_squeeze.py index 3e1e4ee9c828c3e8a4a6473f92e7cdf8f64d26d8..a8c68c44960c880c35d436a79bc39733bbd7745a 100644 --- a/oneflow/python/test/modules/test_squeeze.py +++ b/oneflow/python/test/modules/test_squeeze.py @@ -18,7 +18,7 @@ from collections import OrderedDict 
import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_stack.py b/oneflow/python/test/modules/test_stack.py index 9fbeda76567a9e95c3bfef6d88f399e081de7484..13abe23251cfa186e2c039c47b103cbc9a777f07 100644 --- a/oneflow/python/test/modules/test_stack.py +++ b/oneflow/python/test/modules/test_stack.py @@ -27,7 +27,7 @@ def _test_stack(test_case, device, shape): x_tensor = flow.Tensor(x, dtype=flow.float32, device=flow.device(device)) y_tensor = flow.Tensor(y, dtype=flow.float32, device=flow.device(device)) out_np = np.stack([x, y], axis=1) - out_of = flow.experimental.stack([x_tensor, y_tensor], dim=1).numpy() + out_of = flow.stack([x_tensor, y_tensor], dim=1).numpy() test_case.assertTrue(np.allclose(out_np, out_of, 1e-5, 1e-5)) @@ -37,7 +37,7 @@ def _test_stack_tuple_input(test_case, device, shape): x_tensor = flow.Tensor(x, dtype=flow.float32, device=flow.device(device)) y_tensor = flow.Tensor(y, dtype=flow.float32, device=flow.device(device)) out_np = np.stack([x, y], axis=0) - out_of = flow.experimental.stack((x_tensor, y_tensor), dim=0).numpy() + out_of = flow.stack((x_tensor, y_tensor), dim=0).numpy() test_case.assertTrue(np.allclose(out_np, out_of, 1e-5, 1e-5)) @@ -46,7 +46,7 @@ def _test_stack_backward(test_case, device, shape): y = np.random.rand(*shape) x_tensor = flow.Tensor(x, device=flow.device(device), requires_grad=True) y_tensor = flow.Tensor(y, device=flow.device(device), requires_grad=True) - out_of = flow.experimental.stack([x_tensor, y_tensor]).sum() + out_of = flow.stack([x_tensor, y_tensor]).sum() out_of.backward() test_case.assertTrue( np.allclose(x_tensor.grad.numpy(), np.ones(x_tensor.shape), 1e-5, 1e-5) @@ -62,7 +62,7 @@ def _test_stack_different_dim(test_case, device, shape): x_tensor = flow.Tensor(x, device=flow.device(device)) y_tensor = flow.Tensor(y, device=flow.device(device)) for axis in range(-len(x.shape) - 1, len(x.shape) + 1): - out_of = flow.experimental.stack([x_tensor, y_tensor], dim=axis) + out_of = flow.stack([x_tensor, y_tensor], dim=axis) out_np = np.stack([x, y], axis=axis) test_case.assertTrue(np.allclose(out_np, out_of.numpy(), 1e-05, 1e-05)) @@ -77,7 +77,7 @@ def _test_stack_multi_input(test_case, device, shape): tmp = np.random.rand(*shape) x.append(tmp) x_tensor.append(flow.Tensor(tmp, device=flow.device(device))) - out_of = flow.experimental.stack(x_tensor, dim=-1) + out_of = flow.stack(x_tensor, dim=-1) out_np = np.stack(x, axis=-1) test_case.assertTrue(np.allclose(out_np, out_of.numpy(), 1e-05, 1e-05)) diff --git a/oneflow/python/test/modules/test_sub.py b/oneflow/python/test/modules/test_sub.py index b5e3c85f44e633d242a37d87cdcad38fd5412b4e..f7c9fee1742fca098ea6894e9caebf7b736177a5 100644 --- a/oneflow/python/test/modules/test_sub.py +++ b/oneflow/python/test/modules/test_sub.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_sum.py b/oneflow/python/test/modules/test_sum.py index 0f82d7de8476171d5869a9666cb97c94223b2b17..1c42105fea50e04b1597429fa8cb3ff4558cb6e6 100644 --- a/oneflow/python/test/modules/test_sum.py +++ b/oneflow/python/test/modules/test_sum.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util 
import GenArgList from automated_test_util import * diff --git a/oneflow/python/test/modules/test_tan.py b/oneflow/python/test/modules/test_tan.py index cf76dddb56672b4226639b5f3b21d569fde8b133..eef452d7787f1dd22c5b6b468231f08bb5cfc344 100644 --- a/oneflow/python/test/modules/test_tan.py +++ b/oneflow/python/test/modules/test_tan.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_tensor_buffer.py b/oneflow/python/test/modules/test_tensor_buffer.py index dbe8553e17d24638e0a3a0912dc703599bd39e88..c376ac90b9a1b92dbbb8c5d3bdaf0ac11b7ee156 100644 --- a/oneflow/python/test/modules/test_tensor_buffer.py +++ b/oneflow/python/test/modules/test_tensor_buffer.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList, type_name_to_flow_type diff --git a/oneflow/python/test/modules/test_tensor_ops.py b/oneflow/python/test/modules/test_tensor_ops.py index 31325c2a2a13eb7336becc7f4496093edd582fe4..2b0c5d7bf357e86b8c75666c6fd184485a387d1b 100644 --- a/oneflow/python/test/modules/test_tensor_ops.py +++ b/oneflow/python/test/modules/test_tensor_ops.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_tensor_to.py b/oneflow/python/test/modules/test_tensor_to.py index 946c14383e139d266a94c70af0c675162374e9eb..3e798dee08a584efa29ccabf3ceb4a2d60091e61 100644 --- a/oneflow/python/test/modules/test_tensor_to.py +++ b/oneflow/python/test/modules/test_tensor_to.py @@ -17,7 +17,7 @@ import unittest import numpy as np -import oneflow.experimental as flow +import oneflow as flow @flow.unittest.skip_unless_1n1d() diff --git a/oneflow/python/test/modules/test_tile.py b/oneflow/python/test/modules/test_tile.py index b26656b6b75bd06217a405e6ba5243adc87dfe85..4f2bb006a8a631f3e70692170c3d8cccf7f7e95d 100644 --- a/oneflow/python/test/modules/test_tile.py +++ b/oneflow/python/test/modules/test_tile.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_transpose.py b/oneflow/python/test/modules/test_transpose.py index 2206b8c5f0aea0f668f81809619e054f0beec91b..a642d64f68a80c78466bfe115e077a28a3f1aa18 100644 --- a/oneflow/python/test/modules/test_transpose.py +++ b/oneflow/python/test/modules/test_transpose.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_triu.py b/oneflow/python/test/modules/test_triu.py index 3595e1b658f87aaab1af10bc9e69244b5ddd1acf..8bc213a611d912f9d0bd221a4d34a84dd156e8d1 100644 --- a/oneflow/python/test/modules/test_triu.py +++ b/oneflow/python/test/modules/test_triu.py @@ -18,8 +18,8 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow -import oneflow.experimental.nn as nn +import oneflow as flow +import oneflow.nn as nn from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_unsqueeze.py b/oneflow/python/test/modules/test_unsqueeze.py index 
4aed31818681c585a539ad22b4fccdfeda6069b5..463161b56f3f22ba5694a598eb4e39fa18f7ce39 100644 --- a/oneflow/python/test/modules/test_unsqueeze.py +++ b/oneflow/python/test/modules/test_unsqueeze.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_upsample2d.py b/oneflow/python/test/modules/test_upsample2d.py index 49406d3bf56a9c5bbe3f9d9611850b29fcbf5217..6d98e9fe8b1cd21316585d7091a1c64f3ae98952 100644 --- a/oneflow/python/test/modules/test_upsample2d.py +++ b/oneflow/python/test/modules/test_upsample2d.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_view.py b/oneflow/python/test/modules/test_view.py index 297020ae3c8405bbcdf708aa3e45ab017e9cbebf..1a0a06db948cefb6a96cbf5655bf36cabb2ee4ab 100644 --- a/oneflow/python/test/modules/test_view.py +++ b/oneflow/python/test/modules/test_view.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_where.py b/oneflow/python/test/modules/test_where.py index 4624213d2e1a27786a80671c7b4ee466188cd59a..ab34c9e3afb5b3e9473a5f874eb720b5b812b1ba 100644 --- a/oneflow/python/test/modules/test_where.py +++ b/oneflow/python/test/modules/test_where.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import GenArgList diff --git a/oneflow/python/test/modules/test_zeropad2d.py b/oneflow/python/test/modules/test_zeropad2d.py index 6becbaa38550b0161aab86b02fe2039d0dd22c3b..cf55742b5822c6e2e4874a0d6b9ce7bbf98c55bc 100644 --- a/oneflow/python/test/modules/test_zeropad2d.py +++ b/oneflow/python/test/modules/test_zeropad2d.py @@ -18,7 +18,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow from test_util import ( GenArgList, FlattenArray, diff --git a/oneflow/python/test/tensor/test_tensor.py b/oneflow/python/test/tensor/test_tensor.py index 26233184fe0bdb65024e7015a82ed2041b116493..07de0ed10562512d4334ef1b29d9b4a9c8e870b2 100644 --- a/oneflow/python/test/tensor/test_tensor.py +++ b/oneflow/python/test/tensor/test_tensor.py @@ -19,7 +19,7 @@ from collections import OrderedDict import numpy as np -import oneflow.experimental as flow +import oneflow as flow import oneflow.typing as oft from automated_test_util import * diff --git a/oneflow/python/test_utils/automated_test_util/generators.py b/oneflow/python/test_utils/automated_test_util/generators.py index 9f478430c3f9ae33817be905819335135bec22d3..1ab24ec1dc19f2e4f2d8abc045dc7455e499b901 100644 --- a/oneflow/python/test_utils/automated_test_util/generators.py +++ b/oneflow/python/test_utils/automated_test_util/generators.py @@ -21,7 +21,7 @@ import random as random_util import os import numpy as np -import oneflow.experimental as flow +import oneflow as flow import torch import numpy as np diff --git a/oneflow/python/test_utils/automated_test_util/torch_flow_dual_object.py b/oneflow/python/test_utils/automated_test_util/torch_flow_dual_object.py index 8dd7da9620d85056bc97d7277ff3ae797b29117f..551a6b4c525e82603ac7c1d3a21994ae46381375 100644 --- 
a/oneflow/python/test_utils/automated_test_util/torch_flow_dual_object.py +++ b/oneflow/python/test_utils/automated_test_util/torch_flow_dual_object.py @@ -19,8 +19,7 @@ import functools import os import torch as torch_original -import oneflow as flow_stable -import oneflow.experimental as flow +import oneflow as flow import numpy as np from .generators import generator, random_tensor, Nothing @@ -233,7 +232,7 @@ def check_equality(dual_object: DualObject, rtol=1e-4, atol=1e-5): @equality_checker(torch_original.Tensor, flow.Tensor) -@equality_checker(torch_original.Tensor, flow_stable._oneflow_internal.Tensor) +@equality_checker(torch_original.Tensor, flow._oneflow_internal.Tensor) def check_tensor_equality(torch_tensor, flow_tensor, rtol=1e-4, atol=1e-5): # TODO: check dtype if torch_tensor.grad is not None: diff --git a/oneflow/python/utils/data/_utils/collate.py b/oneflow/python/utils/data/_utils/collate.py index 7b2291524ea215fb210f29ee12b4c6bd144d2e59..46c2610d43c85b9349d2e6236007b917c88c28a2 100644 --- a/oneflow/python/utils/data/_utils/collate.py +++ b/oneflow/python/utils/data/_utils/collate.py @@ -78,7 +78,7 @@ def default_collate(batch): elem_type = type(elem) if isinstance(elem, (flow.Tensor, flow._oneflow_internal.Tensor)): # TODO: tensor.storage()._new_shared(numel) - return flow.experimental.stack(batch, dim=0) + return flow.stack(batch, dim=0) elif ( elem_type.__module__ == "numpy" and elem_type.__name__ != "str_"
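Editor's note: a minimal sketch of the API surface this patch migrates to, assuming only the calls that appear in the hunks above (the top-level `import oneflow as flow` that replaces `import oneflow.experimental as flow`, `flow.stack`, and the functional `flow.scatter_nd`). The tensor values are chosen to match the expected outputs shown in the test hunks; default device placement is assumed.

    import numpy as np
    import oneflow as flow  # replaces `import oneflow.experimental as flow`

    # stack is now called from the top-level namespace (was flow.experimental.stack)
    x = flow.Tensor(np.random.rand(2, 3))
    y = flow.Tensor(np.random.rand(2, 3))
    stacked = flow.stack([x, y], dim=0)  # result shape: (2, 2, 3)

    # scatter_nd is now a plain function taking the output shape as its last
    # argument; the old builder form `flow.scatter_nd([8])(indices, update)`
    # shown on the removed lines is gone.
    indices = flow.Tensor(np.array([[1], [6], [4]]), dtype=flow.int)
    update = flow.Tensor(np.array([10.2, 5.1, 12.7]))
    out = flow.scatter_nd(indices, update, [8])
    # expected: [0.0, 10.2, 0.0, 0.0, 12.7, 0.0, 5.1, 0.0]

Passing the shape as an argument rather than pre-building a `scatter_nd` layer keeps the call site functional and stateless, which is consistent with how the rest of the promoted `oneflow` namespace is used throughout these tests.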