
    ёi3                       S SK r S SKrS SKJr  S SKrS SKrS SKJrJ	r	  S SK
Jr  S SKJrJrJrJrJrJrJrJrJr  S SKJr  S SKJr  S SKJrJrJr  S S	KJrJ r   S S
K!J"r"  / r#\     S'S j5       r$ S(S jr%\S)S j5       r&      S*S jr'\"" SSSSS9          S+S j5       r(          S,S jr)            S-S jr*            S.S jr+          S/S jr,\         S0S j5       r- S1S jr.             S2S jr/\S3S j5       r0 " S S5      r1\S4S j5       r2S4S  jr3S5S! jr4\1Rj                  \2l5        \1Rl                  \2l6                S6S" jr7\     S7S# j5       r8       S8S$ jr9 " S% S&5      r:g)9    N)reduce)coreunique_name)check_dtype)	ProgramVariabledefault_main_programin_dygraph_modein_dynamic_or_pir_modein_pir_mode
name_scopeprogram_guardstatic_only)	ParamAttr)signature_safe_contextmanager)LayerHelper
check_typecheck_variable_and_dtype)ConstantNormal)
deprecated   c           
      .         SS jnU" U UUUUUUS9$ )a|  

Fully-Connected layer can take a tensor or a list of tensor as its inputs.
It creates a 2-D weight tensor for each input tensor, which represents its
weight matrix from each input unit to each output unit. The fully connected
layer multiplies each input tensor with its corresponding weight to produce
an output tensor with shape :math:`[batch\_size, *, size]` , where :math:`*`
means any number of additional dimensions. If a list of tensor is given,
the results of multiple output tensors with shape :math:`[batch\_size, *, size]`
will be summed up. If :attr:`bias_attr` is not False, a 1-D bias tensor will
be created and added to the output. Finally, if :attr:`activation` is not None,
it will be applied to the output as well.

For a single input tensor :math:`X` , the equation is:

.. math::

    Out = Act({XW + b})

For a list of input tensor, the equation is:

.. math::

    Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})

where:

* :math:`N`: The number of the input tensors. :math:`N` equals to :math:`len(X)` if :math:`X` is list of tensor.
* :math:`X_i`: The i-th input tensor.
* :math:`W_i`: The i-th weight matrix corresponding i-th input tensor.
* :math:`b`: The bias created by this layer (if needed).
* :math:`Act`: The activation function.
* :math:`Out`: The output tensor.

.. code-block:: text

    # Case 1, input is a single tensor:
    x.data = [[[0.1, 0.2],
               [0.3, 0.4]]]
    x.shape = (1, 2, 2) # 1 is batch_size

    out = paddle.static.nn.fc(x=x, size=1, num_flatten_dims=2)

    # Get the output:
    out.data = [[0.83234344], [0.34936576]]
    out.shape = (1, 2, 1)

    # Case 2, input is a list of tensor:
    x0.data = [[[0.1, 0.2],
                [0.3, 0.4]]]
    x0.shape = (1, 2, 2) # 1 is batch_size

    x1.data = [[[0.1, 0.2, 0.3]]]
    x1.shape = (1, 1, 3)

    out = paddle.static.nn.fc(x=[x0, x1], size=2)

    # Get the output:
    out.data = [[0.18669507, 0.1893476]]
    out.shape = (1, 2)

Args:
    x (Tensor|list[Tensor]|tuple[Tensor]): A tensor or a list/tuple of tensors. The number of dimensions
        of each tensor is at least 2. The data type should be float16, float32 or float64.
    size (int): The number of output units in this layer, which also means the feature
        size of output tensor.
    num_flatten_dims (int, optional): The fc layer can accept an input tensor with more than
        two dimensions. If this happens, the multi-dimensional tensor will first be flattened
        into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input
        tensor is flattened: the first :math:`num\_flatten\_dims` (inclusive, index starts from 1)
        dimensions will be flatten to form the first dimension of the final matrix (height of
        the matrix), and the rest :math:`rank(x) - num\_flatten\_dims` dimensions are
        flattened to form the second dimension of the final matrix (width of the matrix).
        For example, assuming that :attr:`x` is a 5-dimensional tensor with a shape
        :math:`[2, 3, 4, 5, 6]` , and :attr:`num_flatten_dims` = 3.
        Then, the flattened matrix will have a shape :math:`[2 * 3 * 4, 5 * 6] = [24, 30]` .
        Default: 1.
    weight_attr (ParamAttr, optional): The attribute for the learnable weight.
        The default value is None, and the weight will be initialized to zero.
        For detailed information, please refer to :attr:`paddle.ParamAttr`.
        Warning, if x is a list of tensor, weight_attr should also be a list of same length.
    bias_attr (ParamAttr|bool, optional): The attribute of the learnable bias.
        If it is set to False, no bias will be added to the output.
        If it is set to None or one kind of ParamAttr, a bias parameter will
        be created according to ParamAttr. For detailed information, please refer
        to :attr:`paddle.ParamAttr`. The default value is None and the bias will be
        initialized to zero.
    activation (str, optional): Activation to be applied to the output of
        this layer, such as tanh, softmax, sigmoid, relu. For more information,
        please refer to :ref:`api_guide_activations_en` . Default: None.
    name (str, optional): The default value is None. Normally there is no need for user to set
        it. For more information, please refer to :ref:`api_guide_Name` .

Returns:
    Tensor, its shape is :math:`[batch\_size, *, size]` , and the data type is same with input.

Examples:
    .. code-block:: python

        >>> # doctest: +SKIP("This has diff in xdoctest env")
        >>> import paddle
        >>> paddle.enable_static()

        >>> # When input is a single tensor
        >>> x = paddle.static.data(name="x", shape=[1, 2, 2], dtype="float32")
        >>> out = paddle.static.nn.fc(
        ...     x=x,
        ...     size=1,
        ...     num_flatten_dims=2,
        ...     weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
        ...     bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
        >>> print(out)
        var fc_0.tmp_1 : DENSE_TENSOR.shape(1, 2, 1).dtype(float32).stop_gradient(False)

        >>> # When input is multiple tensors
        >>> x0 = paddle.static.data(name="x0", shape=[1, 2, 2], dtype="float32")
        >>> x1 = paddle.static.data(name="x1", shape=[1, 1, 3], dtype="float32")

        >>> out = paddle.static.nn.fc(
        ...     x=[x0, x1],
        ...     size=2,
        ...     weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
        ...     bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
        >>> print(out)
        var fc_1.tmp_3 : DENSE_TENSOR.shape(1, 2).dtype(float32).stop_gradient(False)

c           	         [        S0 [        5       D6n[        U S[        [        [
        [        R                  R                  4S5        [        U [        [        45      (       aO  [        U 5       H@  u  p[        U	S[        U5      -   S-   [
        [        R                  R                  4S5        MB     UR                  5       n
[        U
S/ SQS5        / nUR                  5        GH  u  pUR                  nUS:X  a  [!        U5      S-
  n[#        S XS  S5      U/nUR%                  X>U
S	S
9n['        5       (       a{  [!        UR                  5      S:  aK  / UR                  S U Q[(        R*                  " UR                  US  5      Pn[        R,                  " UU5      n[        R.                  " X5      nO*UR1                  U
5      nUR3                  SXS.SU0USS.S9  UR5                  U5        GM     [!        U5      S:X  a  US   nOM['        5       (       a  [        R6                  " U5      nO'UR1                  U
5      nUR3                  SSU0SU00 S9  UR9                  UUS9nUR;                  U5      $ )Nfcinputzinput[]float16uint16float32float64r   c                 
    X-  $ N )abs     W/var/www/html/banglarbhumi/venv/lib/python3.13/site-packages/paddle/static/nn/common.py<lambda>%fc.<locals>.fc_base.<locals>.<lambda>   s    AE    Fattrshapedtypeis_bias   mul)XYOut)x_num_col_dimsy_num_col_dimstypeinputsoutputsattrsr   sumr4   )	dim_start)r   )r   localsr   listtupler   paddlepirValue
isinstance	enumeratestrinput_dtyper   iter_inputs_and_paramsr/   lenr   create_parameterr   npprodreshapematmul"create_variable_for_type_inference	append_opappendadd_nappend_bias_opappend_activation)r   sizenum_flatten_dims
param_attr	bias_attractnamehelperiinput_xr0   mul_results	input_varinput_shapeparam_shapew	new_shapetmppre_biaspre_activations                       r)   fc_basefc.<locals>.fc_base   s    .VX.7T5(FJJ4D4DEt	
 edE]++'.
s1v%+vzz//0	 / ""$7G	
 %+%B%B%D!I#//K2%#&{#3a#7 );7H+I1MK ''% ( A }}y'!+!"):*:;!	0@0A BC!I !'y) DImmI1??F  !*3"CL*:*+	 !  s#= &E@ {q "1~H]]||K0H@@GH[))	    .. 0 / 
 ''77r,   )r   rW   rX   rY   rZ   r[   r\   r   NNNNr&   )xrW   rX   weight_attrrZ   
activationr\   ri   s           r)   r   r   0   s?    Z K8Z ) r,   c           	      (   [        U S/ SQS5        USL a  USL d   S5       e[        S0 [        5       D6nUR                  5       nU[        R
                  :X  a  [        R                  nU R                  n[        U R                  5      S:  d  [        U R                  5      S:  a%  [        S[        U R                  5       S	U S
35      eUS   nU/n	U(       aR  U(       aK  UR                  UR                  U	U[        S5      S9n
UR                  UR                  U	US[        S5      S9nUR                  USS9nUR                  USS9nUR                  U5      nSU 0nU(       a  U(       a
  W
US'   WUS'   UR                  SUUUUS.SU0S9  U$ )a
  

**Instance Normalization Layer**

Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:

DataLayout: NCHW `[batch, in_channels, in_height, in_width]`

Refer to `Instance Normalization: The Missing Ingredient for
Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
for more details.

:math:`input` is the input features over a mini-batch.

..  math::

    \mu_{\beta} &\gets \frac{1}{HW} \sum_{i=1}^{HW} x_i \qquad &//
    \ mean\ of\ one\ feature\ map\ in\ mini-batch \\
    \sigma_{\beta}^{2} &\gets \frac{1}{HW} \sum_{i=1}^{HW}(x_i -
    \mu_{\beta})^2 \qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\
    \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{
    \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
    y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

Note:
    `H` means height of feature map, `W` means width of feature map.

Args:
    input(Tensor): The rank of input tensor can be 2, 3, 4, 5.
        The data type is float32 or float64.
    epsilon(float, Default 1e-05): A value added to the denominator for
        numerical stability. Default is 1e-5.
    param_attr(ParamAttr|None|bool, optional): The parameter attribute for Parameter `scale`
        of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
        will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
        If the Initializer of the param_attr is not set, the parameter is initialized
        with Xavier. If the param_attr is set to False, instance_norm will not create param_attr.
        Default: None.
    bias_attr(ParamAttr|None|bool, optional): The parameter attribute for the bias of instance_norm.
        If it is set to None or one attribute of ParamAttr, instance_norm
        will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
        If the Initializer of the bias_attr is not set, the bias is initialized zero.
        If the bias_attr is set to False, instance_norm will not create bias_attr.
        Default: None.
    name(string, Default None): A name for this layer(optional). If set None, the layer
        will be named automatically.

Returns:
    A Tensor which is the result after applying instance normalization on the input,
    has same shape and data type with input.

Examples:

    .. code-block:: python

        >>> import paddle
        >>> paddle.enable_static()
        >>> x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
        >>> hidden1 = paddle.static.nn.fc(x, size=200)
        >>> hidden2 = paddle.static.nn.instance_norm(hidden1)
r   r    r   r!   r"   instance_normFzOparam_attr and bias_attr must be set to False at the same time in instance_normr2      )expected 2D or 3D or 4D or 5D input (got D input, input shape is: )r         ?r.   r/   r0   default_initializerT        r.   r/   r0   r1   rx   r0   stop_gradientr4   ScaleBias)r5   	SavedMeanSavedVarianceepsilonr9   )rq   )r   r   r@   rI   rC   r   r!   r/   rK   
ValueErrorrL   rY   r   rZ   rQ   rR   )r   r   rY   rZ   r\   r]   r0   rb   channel_numrc   scalebias
saved_meansaved_varianceinstance_norm_outr;   s                   r)   rq   rq     s   B 3	 UE! 	
]	
! 5FH5F E ++K
5;;!s5;;/!37EKK8H7IIbcnboopq
 	
 a.K-Ki''"" (	 ( 
 &&!! ( ' 
 ::4 ; J >>4 ? N AA%H5\Fiwv
"#+
 w
   r,   c                     [        S	0 [        5       D6nUR                  U R                  S9n[	        U S/ SQS5        UR                  SU /U/S.SU/0SU0S9  U$ )
a<  
**continuous_value_model layers**
Now, this OP is used in CTR project to remove or dispose show and click value in :attr:`input`.
:attr:`input` is an embedding vector including show and click value, whose shape is :math:`[N, D]` (N is batch size. D is `2 + embedding dim` ).
Show and click at first two dims of embedding vector D.
If :attr:`use_cvm` is True, it will calculate :math:`log(show)` and :math:`log(click)` , and output shape is :math:`[N, D]` .
If :attr:`use_cvm` is False, it will remove show and click from :attr:`input` , and output shape is :math:`[N, D - 2]` .
:attr:`cvm` is show_click info, whose shape is :math:`[N, 2]` .
Args:
    input (Variable): The input variable. A 2-D DenseTensor with shape :math:`[N, D]` , where N is the batch size, D is `2 + the embedding dim` . `lod level = 1` .
    A Tensor with type float32, float64.
    cvm (Variable): Show and click variable. A 2-D Tensor with shape :math:`[N, 2]` , where N is the batch size, 2 is show and click.
    A Tensor with type float32, float64.
    use_cvm  (bool):  Use show_click or not. if use, the output dim is the same as input.
                      if not use, the output dim is `input dim - 2` (remove show and click)
Returns:
    Variable: A 2-D DenseTensor with shape :math:`[N, M]` . if :attr:`use_cvm` = True, M is equal to input dim D. if False, M is equal to `D - 2`. \
    A Tensor with same type as input.
Examples:
    .. code-block:: python

        >>> import paddle

        >>> paddle.enable_static()
        >>> input = paddle.static.data(name="input", shape=[64, 1], dtype="int64")
        >>> label = paddle.static.data(name="label", shape=[64, 1], dtype="int64")
        >>> w0 = paddle.full(shape=(100, 1), fill_value=2).astype(paddle.float32)
        >>> embed = paddle.nn.functional.embedding(input, w0)
        >>> ones = paddle.full_like(label, 1, dtype="int64")
        >>> show_clk = paddle.cast(paddle.concat([ones, label], axis=1), dtype='float32')
        >>> show_clk.stop_gradient = True
        >>> input_with_cvm = paddle.static.nn.continuous_value_model(embed[:, 0], show_clk, True)
cvmr0   r   r   r!   r"   )r4   CVMr5   use_cvmr9   )r   )r   r@   create_variabler0   r   rR   )r   r   r   r]   outs        r)   continuous_value_modelr     sy    F +&(+F

 
 u{{
 
3Cw95 WcU+se'"	   Jr,   c           	         [        S0 [        5       D6nUR                  5       n	[        U S/ SQS5        SU 0n
U R                  n[        U5      S:  a  [        S[        U5       35      eUS:w  a  US:w  a  [        S	U-   S
-   5      eUS:X  a  US   OUS   nU/nU(       a)  UR                  UR                  UU	[        S5      S9nXS'   U(       a  UR                  UR                  XSS9nXS'   UR                  U	SS9nUR                  U	SS9nUR                  U	S9nUR                  SU
UUUS.UUUS.S9  UR                  U5      $ )a?  

**Group Normalization Layer**

Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .

Parameters:
    input(Tensor): Tensor with dimension greater than 1, the data type is float32 or float64.
    groups(int): The number of groups that divided from channels, the data type
        is int32.
    epsilon(float, optional): The small value added to the variance to prevent
        division by zero, the data type is float32. Default: 1e-05.
    param_attr(ParamAttr|bool, optional): ParamAttr object that specifies weight parameter
        attribute. If a bool type, only False is supported, which means there is no weight parameter.
        Default: None, the default weight parameter attribute is used. For more information, please
        refer to :ref:`api_guide_ParamAttr` .
    bias_attr(ParamAttr|bool, optional): ParamAttr object that specifies bias parameter
        attribute. If a bool type, only False is supported, which means there is no bias parameter.
        Default: None, the default bias parameter attribute is used. For more information, please
        refer to :ref:`api_guide_ParamAttr` .
    act(str, optional): Activation to be applied to the output of group normalization.
    data_layout(str, optional): Specify the data format of the input, and the data format of the output
        will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
        `[batch_size, input_channels, *]`.
    name (str, optional): The default value is None. Normally there is no need for user to set this
        property. For more information, please refer to :ref:`api_guide_Name` .

Returns:
    Tensor: A Tensor has same data type and data format with `input`.

Examples:
   .. code-block:: python

        >>> import paddle
        >>> paddle.enable_static()

        >>> data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
        >>> x = paddle.static.nn.group_norm(input=data, groups=4)
        >>> print(x.shape)
        (2, 8, 32, 32)

group_normr   r   r4   r2   zWThe dimensions of Op(static.nn.group_norm)'s input should be more than 1. But received NCHWNHWCzIParam(data_layout) of Op(static.nn.group_norm) got wrong value: received ! but only NCHW or NHWC supported.r   r#   rv   rw   r}   Tr-   r~   r{   r   r5   MeanVariance)r   groupsdata_layoutr9   )r   )r   r@   rI   r   r/   rK   r   rL   rY   r   rZ   r   rR   rV   )r   r   r   rY   rZ   r[   r   r\   r]   r0   r;   rb   r   rc   r   r   mean_outvariance_outgroup_norm_outs                      r)   r   r     s   h 22F E3	 5\F++K
;!efijufvewx
 	
 f!6W12
 	

 %06$9+a.{2K-K''"" (	 ( 
  w&&!!4 ' 
 v %%E%FH))T)JL++%+8N
$
 &
   ##N33r,   z3.0.0zpaddle.nn.Conv2DzwThis API will be deprecated in the future, because it's just for old statics mode, please use paddle.nn.Conv2D instead.)since	update_tolevelreasonc                   ^^ [        5       (       a   S5       e[        U S/ SQS5        [        U R                  5      S:w  a!  [	        S[        U R                  5       35      eU R                  S   m[        U	[        5      (       d  [	        SU	 S	35      eUS
;  a  [	        SU S35      eUS:H  nU(       a  U R                  S   OU R                  S   mTS:  a  [	        SU R                   ST S35      eUSLd   S5       eUc  TnO@US::  a  [	        SU 35      eTU-  S:w  a  [	        ST SU R                   SU 35      eTU-  nSnTU:X  a  UT-  S:X  a	  U	(       d  SnTU:X  a%  UT-  S:X  a  [        R                  " 5       (       a  Sn[        U40 [        5       D6nUR                  5       n[        R                  R                  TSS5      m[        R                  R                  USS5      n[        R                  R                  USS5      nS nSn[        U[        5      (       a>  UR!                  5       nUS;  a  [	        S U S!35      eUS":X  a  S"nSS/nOUS#:X  a  S#nSS/nU" XL5      nU[#        U5      /TQnUU4S$ jnUR%                  UR&                  UUU" 5       S%9nUR)                  U5      n[        R*                  " 5       (       a)  [        R,                  R/                  S&5      S&   (       a  Sn	UR1                  UU US'.S(U0UUUUU	SUUS).S*9  US+:X  a  UR3                  USSS,9nOUR3                  USSS,9nUR5                  U5      $ )-a&  
The convolution2D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input and
Output are in NCHW or NHWC format, where N is batch size, C is the number of
channels, H is the height of the feature, and W is the width of the feature.
Filter is in MCHW format, where M is the number of output image channels,
C is the number of input image channels, H is the height of the filter,
and W is the width of the filter. If the groups is greater than 1,
C will equal the number of input image channels divided by the groups.
Please refer to UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
for more details.
If bias attribution and activation type are provided, bias is added to the
output of the convolution, and the corresponding activation function is
applied to the final result.

For each input :math:`X`, the equation is:

.. math::

    Out = \sigma (W \\ast X + b)

Where:

* :math:`X`: Input value, a tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a tensor with MCHW format.
* :math:`\\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

Example:

    - Input:

      Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

      Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

    - Output:

      Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

    Where

    .. math::

        H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
        W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

Args:
    input (Tensor): The input is 4-D Tensor with shape [N, C, H, W], the data type
        of input is float16 or float32 or float64.
    num_filters(int): The number of filter. It is as same as the output
        image channel.
    filter_size (int|tuple): The filter size. If filter_size
        is a tuple, it must contain two integers, (filter_size_height,
        filter_size_width). Otherwise, filter_size_height = filter_size_width =\
        filter_size.
    stride (int|tuple, optional): The stride size. It means the stride in convolution.
        If stride is a tuple, it must contain two integers, (stride_height, stride_width).
        Otherwise, stride_height = stride_width = stride. Default: stride = 1.
    padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
        on both sides for each dimension.If `padding` is a string, either 'VALID' or
        'SAME' which is the padding algorithm. If padding size is a tuple or list,
        it could be in three forms: `[pad_height, pad_width]` or
        `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when
        `data_format` is `"NCHW"`, `padding` can be in the form `[[0,0], [0,0],
        [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
        when `data_format` is `"NHWC"`, `pool_padding` can be in the form
        `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
        Default: padding = 0.
    dilation (int|tuple, optional): The dilation size. It means the spacing between the kernel
        points. If dilation is a tuple, it must contain two integers, (dilation_height,
        dilation_width). Otherwise, dilation_height = dilation_width = dilation.
        Default: dilation = 1.
    groups (int, optional): The groups number of the Conv2d Layer. According to grouped
        convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
        the first half of the filters is only connected to the first half
        of the input channels, while the second half of the filters is only
        connected to the second half of the input channels. Default: groups=1.
    param_attr (ParamAttr|None, optional): The parameter attribute for learnable parameters/weights
        of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
        will create ParamAttr as param_attr. If the Initializer of the param_attr
        is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
        and the :math:`std` is :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
    bias_attr (ParamAttr|bool|None, optional): The parameter attribute for the bias of conv2d.
        If it is set to False, no bias will be added to the output units.
        If it is set to None or one attribute of ParamAttr, conv2d
        will create ParamAttr as bias_attr. If the Initializer of the bias_attr
        is not set, the bias is initialized zero. Default: None.
    use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
        library is installed. Default: True
    act (str, optional): Activation type, if it is set to None, activation is not appended.
        Default: None
    name(str|None, optional): For detailed information, please refer
       to :ref:`api_guide_Name`. Usually name is no need to set and
       None by default.
    data_format (str, optional): Specify the data format of the input, and the data format of the output
        will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
        `[batch_size, input_channels, input_height, input_width]`.

Returns:
    A Tensor representing the conv2d, whose data type is the
    same with input. If act is None, the tensor storing the convolution
    result, and if act is not None, the tensor storing convolution
    and non-linearity activation result.

Examples:
    .. code-block:: python

        >>> # doctest: +SKIP("env set will not work in ci check because import paddle in global_exec")
        >>> # set env var before import paddle to disable pir mode, following example code use os module.
        >>> import os
        >>> os.environ['FLAGS_enable_pir_api'] = '0'
        >>> import paddle
        >>> paddle.enable_static()

        >>> data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
        >>> conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
        >>> print(conv2d.shape)
        (-1, 2, 30, 30)
zpaddle.static.nn.conv2d is not supported in pir mode, please set the environment variable FLAGS_enable_pir_api=0 to switch old static mode.r   rp   conv2d   %Input size should be 4, but received r   CAttr(use_cudnn) should be True or False. Received Attr(use_cudnn): . r   r   zJAttr(data_format) should be 'NCHW' or 'NHWC'. Received Attr(data_format): .r      r   #The channel dimension of the input() should be defined. Received: F$param_attr should not be False here.Pthe groups of input must be greater than 0, but received the groups of input is zSthe channel of input must be divisible by groups,received: the channel of input is z, the shape of input is , the groups is depthwise_conv2dr2   filter_sizestridedilationc                    [        U [        [        45      (       Ga7  [        U 5      S:X  Ga'  [        U S   [        [        45      (       aM  US:X  aG  U S   SS/:X  a  U S   SS/:X  d  [	        SU  S35      eU SS n U  VVs/ s H  o"  H  o3PM     M     n nnOj[        U S   [        [        45      (       aL  US:X  aF  U S   SS/:X  a  U S	   SS/:X  d  [	        SU  S35      eU SS	 n U  VVs/ s H  o"  H  o3PM     M     n nn[
        R                  R                  U SS
5      n [
        R                  R                  U S5      (       a
  U S   U S   /n U $ [
        R                  R                  U SS
5      n U $ s  snnf s  snnf Nr   r   r   r   Non-zero padding(6) in the batch or channel dimensions is not supported.r2   r   r   padding	rF   rA   rB   rK   r   rC   utilsconvert_to_list_is_symmetric_paddingr   data_formata_listeles       r)   _update_paddingconv2d.<locals>._update_padding  s   ge}--#g,!2C'!*tUm44v%
q!f,1v1E$+G9 5, ,  "!A,-4GW633WGGAJu66v%
q!f,1v1E$+G9 5, ,  "!A,-4GW633WGll227AyIG||11'1=="1:wqz2
  ll227AyIG% H Hs    E7+E=EXPLICITSAMEVALIDUnknown padding: '$'. It can only be 'SAME' or 'VALID'.r   r   c                  r   > TS   TS   -  T-  n U S::  a  [        SU  S35      eSU -  S-  n[        SU5      $ Nr   r   FInvalid filter number, excepted number is larger than 0, but received /, please check the input shape and filter size.       @      ?ry   r   r   filter_elem_numstdr   num_channelss     r)   _get_default_param_initializer.conv2d.<locals>._get_default_param_initializerH  sb    %a.;q>9LHa,- . 
 _$,c3r,   rw   FLAGS_conv2d_disable_cudnnInputFilterOutput)stridespaddings	dilationsr   	use_cudnnfuse_relu_before_depthwise_convpadding_algorithmr   r9   r   r?   dim_end)r   r   rK   r/   r   rF   boolr   is_compiled_with_rocmr   r@   rI   rC   r   r   rH   upperintrL   rY   rQ   is_compiled_with_cudabase	get_flagsrR   rU   rV   )r   num_filtersr   r   r   r   r   rY   rZ   r   r[   r\   r   channel_lastnum_filter_channelsl_typer]   r0   r   r   filter_shaper   filter_paramrg   pre_actr   s     `                      @r)   r   r   >  s   b }}  	V wCX 5;;13C4D3EF
 	
 ;;q>Li&&  ){".
 	

 **""-a1
 	

 &(L%15;;q>u{{1~La1%++ ?%a)
 	
 U"J$JJ"~*	1339(<
 	

 & A%55ANBZ[`[f[fZg"6(, 
 +f4F,&!+# 	,&!+&&((#,68,F E,,..{A}MK\\))&!X>F||++HaDH> #'3--/++$WI-QR  g '!fG &!fGg3G%8!9HKHL	  **:<	 + L 88?H 	""$$KK!!">?(
 	
"
 8$!"/4!2&	
  & f''Aq'I''Aq'I##G,,r,   c                   ^^ SnUSLd   S5       e[        U40 [        5       D6nUR                  5       n[        U	[        5      (       d  [        SU	 S35      eUS;  a  [        SU S35      eUS	:H  n[        U R                  5      S
:w  a  [        SU R                   35      eU(       a  U R                  S   OU R                  S   mTS:  a  [        SU R                   ST S35      eUc  TnO4US::  a  [        SU 35      eTU-  S:w  a  [        ST SU S35      eTU-  n[        R                  R                  TSS5      m[        R                  R                  USS5      n[        R                  R                  USS5      nS nSn[        U[        5      (       a>  UR                  5       nUS;  a  [        SU S35      eUS:X  a  Sn/ SQnOUS :X  a  S n/ SQnU" XL5      nU R                  nUU/TQnUU4S! jnUR                  UR                  UUU" 5       S"9nUR                  U5      nUR!                  UU US#.S$U0UUUUU	UUS%.S&9  US':X  a  UR#                  USS(S)9nOUR#                  USS
S)9nUR%                  U5      $ )*a  

The convolution3D layer calculates the output based on the input, filter
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are in NCDHW or NDHWC format. Where N is batch size C is the number of
channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature. Convolution3D is similar with Convolution2D
but adds one dimension(depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.

For each input :math:`X`, the equation is:

.. math::

    Out = \sigma (W \ast X + b)

In the above equation:

* :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a tensor with MCDHW format.
* :math:`\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
* :math:`\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

Example:

    - Input:

      Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

      Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`

    - Output:
      Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

    Where

    .. math::

        D_{out}&= \frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\
        H_{out}&= \frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\
        W_{out}&= \frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1

Args:
    input (Tensor): The input is 5-D Tensor with shape [N, C, D, H, W], the data
        type of input is float16 or float32 or float64.
    num_filters(int): The number of filter. It is as same as the output
        image channel.
    filter_size (int|tuple): The filter size. If filter_size is a tuple,
        it must contain three integers, (filter_size_depth, filter_size_height,
        filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
        filter_size_width = filter_size.
    stride (int|tuple): The stride size. It means the stride in convolution. If stride is a
        tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
        Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
    padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
        on both sides for each dimension. If `padding` is a string, either 'VALID' or
        'SAME' which is the padding algorithm. If padding size is a tuple or list,
        it could be in three forms: `[pad_depth, pad_height, pad_width]` or
        `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
        and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
        `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
        when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
        `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
        Default: padding = 0.
    dilation (int|tuple): The dilation size. It means the spacing between the kernel points.
        If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
        dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
        Default: dilation = 1.
    groups (int): The groups number of the Conv3d Layer. According to grouped
        convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
        the first half of the filters is only connected to the first half
        of the input channels, while the second half of the filters is only
        connected to the second half of the input channels. Default: groups=1
    param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
        of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
        will create ParamAttr as param_attr. If it is set to None, the parameter
        is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
        :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
    bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.
        If it is set to False, no bias will be added to the output units.
        If it is set to None or one attribute of ParamAttr, conv3d
        will create ParamAttr as bias_attr. If the Initializer of the bias_attr
        is not set, the bias is initialized zero. Default: None.
    use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
        library is installed. Default: True
    act (str): Activation type, if it is set to None, activation is not appended.
        Default: None.
    name(str|None): For detailed information, please refer
       to :ref:`api_guide_Name`. Usually name is no need to set and
       None by default.
    data_format (str, optional): Specify the data format of the input, and the data format of the output
        will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
        `[batch_size, input_channels, input_height, input_width]`.

Returns:
    A Tensor representing the conv3d, whose data type is
    the same with input. If act is None, the tensor variable storing the
    convolution result, and if act is not None, the tensor variable storing
    convolution and non-linearity activation result.

Examples:
    .. code-block:: python

        >>> import paddle
        >>> import numpy as np

        >>> np.random.seed(1107)
        >>> paddle.seed(1107)
        >>> paddle.enable_static()
        >>> data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
        >>> param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
        >>> res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
        >>> place = paddle.CPUPlace()
        >>> exe = paddle.static.Executor(place)
        >>> exe.run(paddle.static.default_startup_program())
        >>> x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
        >>> output,  = exe.run(feed={"data": x}, fetch_list=[res])
        >>> print(output.shape)
        (1, 2, 10, 30, 30)
conv3dFr   r   r   NCDHWNDHWCzLAttr(data_format) should be 'NCDHW' or 'NDHWC'. Received Attr(data_format): r   r   rr   @Input should be 5D tensor, but received input with the shape of r   r   r   r   r   z@the groups of conv3d should be greater than 0. Received groups: z]The number of input channels must be divisible by Attr(groups). Received: number of channels(z
), groups(z).r   r   r   r   c                    [        U [        [        45      (       Ga;  [        U 5      S:X  Ga+  [        U S   [        [        45      (       aM  US:X  aG  U S   SS/:X  a  U S   SS/:X  d  [	        SU  S35      eU SS n U  VVs/ s H  o"  H  o3PM     M     n nnOj[        U S   [        [        45      (       aL  US:X  aF  U S   SS/:X  a  U S	   SS/:X  d  [	        SU  S35      eU SS	 n U  VVs/ s H  o"  H  o3PM     M     n nn[
        R                  R                  U S
S5      n [
        R                  R                  U S5      (       a  U S   U S   U S	   /n U $ [        U [        [        45      (       ae  [        U 5      S
:X  aV  [
        R                  R                  U S
S5      n [
        R                  R                  U S5      (       a  U S   U S   U S	   /n U $ [
        R                  R                  U SS5      n U $ s  snnf s  snnf Nrr   r   r   r   r   r   r2   r   r      r   r   r   r   s       r)   r   conv3d.<locals>._update_padding:  s   ge}--#g,!2C'!*tUm44w&
q!f,1v1E$+G9 5, ,  "!A,-4GW633WGGAJu66w&
q!f,1v1E$+G9 5, ,  "!A,-4GW633WGll227AyIG||11'1=="1:wqz71:>  $//CLA4Ell227AyIG||11'1=="1:wqz71:>  ll227AyIG+ H Hs    G;+Hr   r   r   r   r   )r   r   r   r   c                  ~   > TS   TS   -  TS   -  T-  n U S::  a  [        SU  S35      eSU -  S-  n[        SU5      $ )	Nr   r   r2   r   r   r   r   ry   r   r   s     r)   r   .conv3d.<locals>._get_default_param_initializero  sp    N[^+k!n<|K 	 a,- .  _$,c3r,   rw   r   r   )r   r   r   r   r   r   r   r9   r   r2   r   )r   r@   rI   rF   r   r   rK   r/   rC   r   r   rH   r   rL   rY   rQ   rR   rU   rV   )r   r   r   r   r   r   r   rY   rZ   r   r[   r\   r   r   r]   r0   r   r   r   r   rb   r   r   r   rg   r   r   s     `                       @r)   r   r     s/   X FU"J$JJ",68,F Ei&&  ){".
 	

 ,,""-a1
 	

 ')L
5;;1Nu{{m\
 	
 &25;;q>u{{1~La1%++ ?%a)
 	

 ~*	1NvhW
 	
 & A%00<~ZxrS  +f4,,..{A}MK\\))&!X>F||++HaDH D #'3--/++$WI-QR  g 'G &Gg3G++K!4C{CL  **:<	 + L 88?H
"
 8$!"!2&
  $ g''Aq'I''Aq'I##G,,r,   c                 
   USLd   S5       e[        U R                  5      S:w  a!  [        S[        U R                  5       35      eUS:X  a  [        S5      eUS;  a  [        SU-   S	-   5      eUS
:X  a  U R                  S   OU R                  S   nSnX:X  a  X:X  a	  U
(       d  Sn[        U40 [	        5       D6n[        U [        5      (       d  [        S5      e[        R                  R                  USS5      n[        R                  R                  USS5      n[        U
[        5      (       d  [        S5      eS nSn[        U[        5      (       a>  UR                  5       nUS;  a  [        SU S35      eUS:X  a  Sn/ SQnOUS:X  a  Sn/ SQnU" XM5      nUc  / nGOS[        U[        [        45      (       af  [        R                  R!                  U5      (       a   [        R                  R#                  U5      nO[        R                  R                  USS5      nO[        U[$        5      (       a"  [        R                  R                  USS5      nO[        U[        5      (       a{  ['        UR(                  SSS/S5        [        UR                  5      S:X  a=  UR                  S   S:X  d  UR                  S   S:X  a  UR                  S   S:X  a  X"/nO[        S5      e[        S 5      eUGcV  U/ :X  a  [        S!5      e[+        5       (       dE  [        U[        5      (       d$  [        R                  R!                  U5      (       a  [        S"5      eOR[        R                  R-                  U5      n[        U5      S:X  a$  [        R                  R                  US   SS5      nUS
:X  a  U R                  S   OU R                  S   nUS
:X  a  U R                  S#   OU R                  S   nUS   US-
  US   -  -
  US   -   US   -   S-
  US   -  S-   nUS   US-
  US   -  -
  US   -   US#   -   S-
  US   -  S-   nUU/nO![        R                  R                  USS$5      n[        U5      S:X  a/  [        R                  R/                  US5      (       a
  US   US   /nUc  SnOUS::  a  [        S%U 35      eXU-  /UQnUR1                  U R(                  UUR2                  S&9nUR5                  U R(                  S'9nUR7                  UU /U/S(.S)U0UUUUUUU
US*.S+9  US
:X  a  UR9                  USSS,9nOUR9                  US#SS,9nUR;                  U5      nU$ )-u"$  

The convolution2D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCHW or NHWC format. Where N is batch size, C is the number of channels,
H is the height of the feature, and W is the width of the feature.
Parameters(dilations, strides, paddings) are two elements. These two elements
represent height and width, respectively. The details of convolution transpose
layer, please refer to the following explanation and references
`therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.

For each input :math:`X`, the equation is:

.. math::

    Out = \sigma (W \ast X + b)

Where:

* :math:`X`: Input value, a 4-D Tensor with NCHW or NHWC format.
* :math:`W`: Filter value, a 4-D Tensor with MCHW format.
* :math:`\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\sigma`: Activation function.
* :math:`Out`: Output value, a 4-D Tensor with data format 'NCHW' or 'NHWC', the shape of :math:`Out` and :math:`X` may be different.

Example:

    - Input:

      Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

      Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`

    - Output:

      Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

    Where

    .. math::

       H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\
       W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\
       H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ] \\
       W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ]

    If `padding` = `"SAME"`:

    .. math::
        H^\prime_{out} &= \frac{(H_{in} + stride[0] - 1)}{stride[0]} \\
        W^\prime_{out} &= \frac{(H_{in} + stride[1] - 1)}{stride[1]}

    If `padding` = `"VALID"`:

    .. math::
        H^\prime_{out} &= (H_{in} - 1) * strides[0] + dilations[0] * (H_f - 1) + 1 \\
        W^\prime_{out} &= (W_{in} − 1) * strides[1] + dilations[1] * (W_f − 1) + 1

    If output_size is None, :math:`H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
    else, the :math:`H_{out}` of the output size must between :math:`H^\prime_{out}`
    and :math:`H^\prime_{out} + strides[0]`, and the :math:`W_{out}` of the output size must
    between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[1]`,

    Since transposed convolution can be treated as the inverse of convolution, and according to the input-output formula for convolution,
    different sized input feature layers may correspond to the same sized output feature layer,
    the size of the output feature layer for a fixed sized input feature layer is not unique to transposed convolution

    If `output_size` is specified, `conv2d_transpose` can compute the kernel size automatically.

Args:
    input(Tensor): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format where N is the batch_size,
        C is the input_channels, H is the input_height and W is the input_width.
        Its data type is float32 or float64.
    num_filters(int): The number of the filter. It is as same as the output
        image channel.
    output_size(int|tuple, optional): The output image size. If output size is a
        tuple, it must contain two integers, (image_height, image_width). None if use
        filter_size, padding, and stride to calculate output_size.
        If output_size and filter_size are specified at the same time, They
        should follow the formula above. Default: None. output_size and filter_size
        should not be None at the same time.
    filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
        it must contain two integers, (filter_size_height, filter_size_width).
        Otherwise, filter_size_height = filter_size_width = filter_size. None if
        use output size to calculate filter_size. Default: None. filter_size and
        output_size should not be None at the same time.
    padding(str|int|list|tuple, optional): The padding size. It means the number of zero-paddings
        on both sides for each dimension. If `padding` is a string, either 'VALID' or
        'SAME' which is the padding algorithm. If `padding` is a tuple or list,
        it could be in three forms:
        (1) Contains 4 binary groups: when `data_format` is `"NCHW"`, `padding` can be in the form
        `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
        when `data_format` is `"NHWC"`, `padding` can be in the form
        `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
        (2) Contains 4 integer values：`[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`
        (3) Contains 2 integer values：`[pad_height, pad_width]`, in this case, `padding_height_top = padding_height_bottom = padding_height`，
        `padding_width_left = padding_width_right = padding_width`. If an integer, `padding_height = padding_width = padding`. Default: padding = 0.
    stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
        If stride is a tuple, it must contain two integers, (stride_height, stride_width).
        Otherwise, stride_height = stride_width = stride. Default: stride = 1.
    dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
        If dilation is a tuple, it must contain two integers, (dilation_height, dilation_width).
        Otherwise, dilation_height = dilation_width = dilation. Default: dilation = 1.
    groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
        grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
        when group=2, the first half of the filters is only connected to the
        first half of the input channels, while the second half of the
        filters is only connected to the second half of the input channels.
        Default: groups = 1.
    param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
        of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
        will create ParamAttr as param_attr. If the Initializer of the param_attr
        is not set, the parameter is initialized with Xavier. Default: None.
    bias_attr (ParamAttr|bool, optional): Specifies the object for the bias parameter attribute.
        The default value is None, which means that the default bias parameter attribute is used.
        For detailed information, please refer to :ref:`api_paddle_ParamAttr`.
        The default bias initialisation for the conv2d_transpose operator is 0.0.
    use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
        library is installed. Default: True.
    act (str, optional): Activation type, if it is set to None, activation is not appended.
        Default: None.
    name(str, optional): For detailed information, please refer
       to :ref:`api_guide_Name`. Usually name is no need to set and
       None by default.
    data_format (str, optional): Specify the data format of the input, and the data format of the output
        will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
        `[batch_size, input_channels, input_height, input_width]`.

Returns:
    A Tensor representing the conv2d_transpose, whose
    data type is the same with input and shape is (num_batches, channels, out_h,
    out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor
    storing the transposed convolution result, and if act is not None, the
    tensor storing transposed convolution and non-linearity activation
    result.

Raises:
    ValueError: If the type of `use_cudnn` is not bool.
    ValueError: If `data_format` is not "NCHW" or "NHWC".
    ValueError: If `padding` is a string, but not "SAME" or "VALID".
    ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
        or the element corresponding to the input's channel is not 0.
    ValueError: If `output_size` and filter_size are None at the same time.
    ShapeError: If the input is not 4-D Tensor.
    ShapeError: If the input's dimension size and filter's dimension size not equal.
    ShapeError: If the dimension size of input minus the size of `stride` is not 2.
    ShapeError: If the number of input channels is not equal to filter's channels.
    ShapeError: If the size of `output_size` is not equal to that of `stride`.

Examples:
    .. code-block:: python

        >>> import paddle
        >>> paddle.enable_static()

        >>> data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
        >>> conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
        >>> print(conv2d_transpose.shape)
        (-1, 2, 34, 34)
Fz3param_attr should not be False in conv2d_transpose.r   r   r   znum of filters should not be 0.r   z\Attr(data_format) of Op(paddle.static.nn.layers.conv2d_transpose) got wrong value: received r   r   r   r#   conv2d_transposedepthwise_conv2d_transposez(Input of conv2d_transpose must be Tensorr2   r   r   !use_cudnn should be True or Falsec                    [        U [        [        45      (       Ga  [        U 5      S:X  a  [        U S   [        [        45      (       aM  US:X  aG  U S   SS/:X  a  U S   SS/:X  d  [	        SU  S35      eU SS n U  VVs/ s H  o"  H  o3PM     M     n nnOj[        U S   [        [        45      (       aL  US:X  aF  U S   SS/:X  a  U S	   SS/:X  d  [	        SU  S35      eU SS	 n U  VVs/ s H  o"  H  o3PM     M     n nn[
        R                  R                  U SS
5      n U $ [
        R                  R                  U SS
5      n U S   U S   U S   U S   /n U $ s  snnf s  snnf r   rF   rA   rB   rK   r   rC   r   r   r   s       r)   r   )conv2d_transpose.<locals>._update_paddingz  s   ge}--#g,!2C'!*tUm44v%
q!f,1v1E$+G9 5, ,  "!A,-4GW633WGGAJu66v%
q!f,1v1E$+G9 5, ,  "!A,-4GW633WGll227AyIG  ll227AyIGqz71:wqz71:FG H Hs   ?E*Er   r   r   r   r   )r   r   r   r   r   output_sizeint32int64z-output_size must contain one or two integers.z<output_size should be int, list[int] or tuple[int] or Tensor0output_size must be set when filter_size is Nonezafilter_size should not be None when output_size is Tensor or contain Tensor in static graph mode.r   zconv2d_transpose.filter_sizer   r0   r/   r.   r   r   r   r  r   r   r   r   r   r   r   r9   r   )rK   r/   r   r   r@   rF   r   	TypeErrorrC   r   r   r   rH   r   rA   rB   _contain_var_convert_to_tensor_listr   r   r0   r
   convert_shape_to_listr   rL   rY   rQ   rR   rU   rV   )r   r   r  r   r   r   r   r   rY   rZ   r   r[   r\   r   input_channelop_typer]   r   r   h_inw_infilter_size_hfilter_size_wr   
img_filterrg   r   r   s                               r)   r   r     s   j U" =" 5;;13C4D3EF
 	
 a:;;**j12
 	
 '2V&;EKKNRM G(.-FH-FeX&&BCC\\))&!X>F||++HaDHi&&<==8 #'3--/++$WI-QR  g '"G &"Gg3G	K$	/	/<<$$[11 ,,>>{KK ,,66QK 
K	%	%ll22M
 
K	*	*g		
 {  !Q&a A%):):1)=)B  #q(*8LMMJ
 	
 "OPP  +x00FLL4M4M5 5 !w 5 !,,<<[IK;1$$ll::NA} "-!6u{{1~EKKN!,!6u{{1~EKKN Nax6!9$%aj aj 	
 a[  Nax6!9$%aj aj 	
 a[  %m4ll22:
 7|qV\\??KK1:wqz*~	1339(<
 	

 "&#8G;GL((kkF4E4E ) J 88u{{8KH
ZL98$&!2!"&	
	    f''Aq'I''Aq'I

"
"7
+CJr,   c                 N   USLd   S5       eUS;  a  [        SU-   S-   5      eSn[        U40 [        5       D6n[        U [        5      (       d  [        S5      e[        U R                  5      S:w  a  [        S	U R                   35      eUS
:X  a  U R                  S   OU R                  S   n[        R                  R                  USS5      n[        R                  R                  USS5      n[        U
[        5      (       d  [        S5      eS nSn[        U[        5      (       a>  UR                  5       nUS;  a  [        SU S35      eUS:X  a  Sn/ SQnOUS:X  a  Sn/ SQnU" XM5      nUGc  Uc  [        S5      e[        U[        5      (       a  X"U/nUS
:X  a  U R                  S   OU R                  S   nUS
:X  a  U R                  S   OU R                  S   nUS
:X  a  U R                  S   OU R                  S   nUS   US-
  US   -  -
  US   -   US   -   S-
  US   -  S-   nUS   US-
  US   -  -
  US   -   US   -   S-
  US   -  S-   nUS   US-
  US   -  -
  US   -   US   -   S-
  US   -  S-   nUUU/nO![        R                  R                  USS5      n[        U5      S:X  a3  [        R                  R                  US5      (       a  US   US   US   /nUc  / nOM[        U[         ["        [        45      (       a"  [        R                  R                  USS5      nO[        S 5      eUc  SOUnUS::  a  [        S!U 35      eX-  S:w  a  [        S"U S#U 35      eUX-  /UQnUR%                  U R&                  UUR(                  S$9nUS
:X  a  S%nUS&:X  a  S'nUR+                  U R&                  S(9nUR-                  UU /U/S).S*U0UUUUUUU
US+.S,9  US%:X  a  UR/                  USSS-9nOUR/                  USSS-9nUR1                  U5      nU$ ).u%  

The convolution3D transpose layer calculates the output based on the input,
filter, and dilations, strides, paddings. Input(Input) and output(Output)
are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels,
D is the depth of the feature, H is the height of the feature, and W
is the width of the feature. Parameters(dilations, strides, paddings) are
two elements. These two elements represent height and width, respectively.
The details of convolution transpose layer, please refer to the following
explanation and references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
If bias attribution and activation type are provided, bias is added to
the output of the convolution, and the corresponding activation function
is applied to the final result.

For each input :math:`X`, the equation is:

.. math::

    Out = \sigma (W \ast X + b)

In the above equation:

* :math:`X`: Input value, a Tensor with NCDHW or NDHWC format.
* :math:`W`: Filter value, a Tensor with MCDHW format.
* :math:`\ast`: Convolution operation.
* :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
* :math:`\sigma`: Activation function.
* :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

Example:

    - Input:

      Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

      Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`

    - Output:

      Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

    Where

    .. math::

       D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\
       H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\
       W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\
       D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\
       H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\
       W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]

If `padding` = `"SAME"`:

    .. math::
        D^\prime_{out} &= \frac{(D_{in} + stride[0] - 1)}{stride[0]} \\
        H^\prime_{out} &= \frac{(H_{in} + stride[1] - 1)}{stride[1]} \\
        W^\prime_{out} &= \frac{(H_{in} + stride[2] - 1)}{stride[2]}

If `padding` = `"VALID"`:

.. math::
    D^\prime_{out} &= (D_{in} - 1) * strides[0] + dilations[0] * (D_f - 1) + 1 \\
    H^\prime_{out} &= (H_{in} - 1) * strides[1] + dilations[1] * (H_f - 1) + 1 \\
    W^\prime_{out} &= (W_{in} − 1) * strides[2] + dilations[2] * (W_f − 1) + 1

If `output_size` is None, :math:`D_{out} = D^\prime_{out}, :math:`H_{out} = \
H^\prime_{out}, W_{out} = W^\prime_{out}`; else, the specified `output_size_depth` (the depth of the output feature layer) :math:`D_{out}`
must between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`(not including :math:`D^\prime_{out} + strides[0]`),
the specified `output_size_height` (the height of the output feature layer) :math:`H_{out}` must between :math:`H^\prime_{out}`
and :math:`H^\prime_{out} + strides[1]`(not including :math:`H^\prime_{out} + strides[1]`),
and the specified `output_size_width` (the width of the output feature layer) :math:`W_{out}` must
between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`(not including :math:`W^\prime_{out} + strides[2]`).

Since transposed convolution can be treated as the inverse of convolution,
and since different sized input feature layers may correspond to the same sized output feature layer according to the input-output formula for convolution,
the size of the output feature layer for a fixed sized input feature layer is not unique to transposed convolution.

If `output_size` is specified, `conv3d_transpose` can compute the kernel size automatically.

Args:
    input(Tensor): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C], the data type
        of input is float32 or float64.
    num_filters(int): The number of the filter. It is as same as the output
        image channel.
    output_size(int|tuple, optional): The output image size. If output size is a
        tuple, it must contain three integers, (image_depth, image_height, image_width). This
        parameter only works when filter_size is None. If output_size and filter_size are
        specified at the same time, They should follow the formula above. Default: None.
        Output_size and filter_size should not be None at the same time.
    filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
        it must contain three integers, (filter_size_depth, filter_size_height,
        filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
        filter_size_width = filter_size. None if use output size to
        calculate filter_size. Default: None. filter_size and output_size should not be
        None at the same time.
    padding(int|list|str|tuple, optional): The padding size. The padding argument effectively
        adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
        either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
        is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
        `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
        and when `data_format` is `'NCDHW'`, `padding` can be in the form
        `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
        when `data_format` is `'NDHWC'`, `padding` can be in the form
        `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
        Default: padding = 0.
    stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
        If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
        stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
        Default: stride = 1.
    dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
        If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
        dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
        Default: dilation = 1.
    groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
        grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
        when group=2, the first half of the filters is only connected to the
        first half of the input channels, while the second half of the
        filters is only connected to the second half of the input channels.
        Default: groups=1
    param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
        of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
        will create ParamAttr as param_attr. If the Initializer of the param_attr
        is not set, the parameter is initialized with Xavier. Default: None.
    bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
        If it is set to False, no bias will be added to the output units.
        If it is set to None or one attribute of ParamAttr, conv3d_transpose
        will create ParamAttr as bias_attr. If the Initializer of the bias_attr
        is not set, the bias is initialized zero. Default: None.
    use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
        library is installed. Default: True
    act (str, optional): Activation type, if it is set to None, activation is not appended.
        Default: None.
    name(str, optional): For detailed information, please refer
       to :ref:`api_guide_Name`. Usually name is no need to set and
       None by default.
    data_format (str, optional): Specify the data format of the input, and the data format of the output
        will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
        The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
        `[batch_size, input_channels, input_height, input_width]`.

Returns:
    A Tensor representing the conv3d_transpose, whose data
    type is the same with input and shape is (num_batches, channels, out_d, out_h,
    out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
    variable storing the transposed convolution result, and if act is not None, the tensor
    variable storing transposed convolution and non-linearity activation result.

Examples:
    .. code-block:: python

        >>> import paddle
        >>> import numpy as np

        >>> paddle.seed(1107)
        >>> np.random.seed(1107)
        >>> paddle.enable_static()
        >>> data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
        >>> param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
        >>> res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
        >>> place = paddle.CPUPlace()
        >>> exe = paddle.static.Executor(place)
        >>> exe.run(paddle.static.default_startup_program())
        >>> x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
        >>> output = exe.run(feed={"data": x}, fetch_list=[res.mean()])
        >>> print(output)
        [array(0.5148856, dtype=float32)]
Fz3param_attr should not be False in conv3d_transpose.r   zVParam(data_format) of Op(paddle.static.nn.conv3d_transpose) got wrong value: received z# but only NCDHW or NDHWC supported.conv3d_transposez(Input of conv3d_transpose must be Tensorrr   r   r   r   r#   r   r   r   r  c                 t   [        U [        [        45      (       Ga  [        U 5      S:X  a  [        U S   [        [        45      (       aM  US:X  aG  U S   SS/:X  a  U S   SS/:X  d  [	        SU  S35      eU SS n U  VVs/ s H  o"  H  o3PM     M     n nnOj[        U S   [        [        45      (       aL  US:X  aF  U S   SS/:X  a  U S	   SS/:X  d  [	        SU  S35      eU SS	 n U  VVs/ s H  o"  H  o3PM     M     n nn[
        R                  R                  U S
S5      n U $ [        U [        [        45      (       a2  [        U 5      S
:X  a#  [
        R                  R                  U S
S5      n U $ [
        R                  R                  U SS5      n U S   U S   U S   U S   U S   U S   /n U $ s  snnf s  snnf r   r  r   s       r)   r   )conv3d_transpose.<locals>._update_padding  s   ge}--#g,!2C'!*tUm44w&
q!f,1v1E$+G9 5, ,  "!A,-4GW633WGGAJu66w&
q!f,1v1E$+G9 5, ,  "!A,-4GW633WGll227AyIG  $//CLA4Ell227AyIG  ll227AyIG





G 5 H Hs   ?F.*F4r   r   r   r   r   )r   r   r   r   r   r   r   r  r2   r   r   zconv3d_transpose.filter_sizer   r  z2output_size should be int, list[int] or tuple[int]zJthe groups of conv3d_transpose should be greater than 0. Received groups: zMAttr(num_filters) must be divisible by groups,Received: Attr(num_filters) is r   r	  r   r   r   r   r   r   r
  r9   r   )r   r   r@   rF   r   r  rK   r/   rC   r   r   r   rH   r   r   r   rA   rB   rL   r0   rY   rQ   rR   rU   rV   )r   r   r  r   r   r   r   r   rY   rZ   r   r[   r\   r   r   r]   r  r   r   d_inr  r  filter_size_dr  r  r   r  rg   r   r   s                                 r)   r  r    s6   p U" =" ,,d34
 	
  F,68,FeX&&BCC
5;;1Nu{{m\
 	
 &0Aekk"o  \\))&!X>F||++HaDHi&&<==%N #'3--/++$WI-QR  g '(G &(Gg3GOPPk3''&[AK!,!7u{{1~U[[^!,!7u{{1~U[[^!,!7u{{1~U[[^ Nax6!9$%aj aj 	
 a[  Nax6!9$%aj aj 	
 a[  Nax6!9$%aj aj 	
 a[  %m]Cll22:
 7|qV\\??KK1:wqz71:6	K$s!3	4	4ll22M
 MNN.QfF{XY_X`a
 	
 q ..9]:J6(T
 	

 ";#8G;GL((kkF4E4E ) J gg88u{{8KH
ZL98$&!2!"&	
	    f''Aq'I''Aq'I

"
"7
+CJr,   c                   ^^ [        U SSS/S5        [        USSS/S5        [        US[        R                  R                  [        S5      4S5        U R                  S:w  a  [        S	U R                   35      eU R                  S
   mUSLd   S5       e[        S 0 [        5       D6nUR                  5       n[        U [        R                  R                  5      (       d  [        S5      e[        U[        R                  R                  5      (       d  [        S5      eUc  TnO*US:X  a  [        S5      eTU-  S:w  a  [        S5      eTU-  n[        R                  R                  TSS5      m[        R                  R                  USS5      n[        R                  R                  USS5      n[        R                  R                  USS5      nU R                  nU[!        U5      /TQnUU4S jnUR#                  UR$                  UUU" 5       S9nUR'                  U5      nU(       a!  UR)                  SU UUUS.SU0UUUUU	U
S.S9  OUR)                  SU UUS.SU0UUUUU	U
S.S9  UR+                  US
SS9nU$ )!a  

**Deformable Convolution op**

Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:


Deformable Convolution v2:

.. math::

    y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}

Deformable Convolution v1:

.. math::

    y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}

Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
Which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

Example:
    - Input:

      Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

      Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

      Offset shape: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})`

      Mask shape: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})`

    - Output:

      Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

    Where

    .. math::

        H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
        W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

Args:
    input (Tensor): The input image with [N, C, H, W] format. A Tensor with type
        float32, float64.
    offset (Tensor): The input coordinate offset of deformable convolution layer.
        A Tensor with type float32, float64.
    Mask (Tensor, Optional): The input mask of deformable convolution layer.
        A Tensor with type float32, float64. It should be None when you use
        deformable convolution v1.
    num_filters(int): The number of filter. It is as same as the output
        image channel.
    filter_size (int|tuple): The filter size. If filter_size is a tuple,
        it must contain two integers, (filter_size_H, filter_size_W).
        Otherwise, the filter will be a square.
    stride (int|tuple): The stride size. If stride is a tuple, it must
        contain two integers, (stride_H, stride_W). Otherwise, the
        stride_H = stride_W = stride. Default: stride = 1.
    padding (int|tuple): The padding size. If padding is a tuple, it must
        contain two integers, (padding_H, padding_W). Otherwise, the
        padding_H = padding_W = padding. Default: padding = 0.
    dilation (int|tuple): The dilation size. If dilation is a tuple, it must
        contain two integers, (dilation_H, dilation_W). Otherwise, the
        dilation_H = dilation_W = dilation. Default: dilation = 1.
    groups (int): The groups number of the deformable conv layer. According to
        grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
        the first half of the filters is only connected to the first half
        of the input channels, while the second half of the filters is only
        connected to the second half of the input channels. Default: groups=1.
    deformable_groups (int): The number of deformable group partitions.
        Default: deformable_groups = 1.
    im2col_step (int): Maximum number of images per im2col computation;
        The total batch size should be devisable by this value or smaller
        than this value; if you face out of memory problem, you can try
        to use a smaller value here.
        Default: im2col_step = 64.
    param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
        of deformable conv. If it is set to None or one attribute of ParamAttr,
        deformable conv will create ParamAttr as param_attr.
        If the Initializer of the param_attr is not set, the parameter is
        initialized with :math:`Normal(0.0, std)`, and the
        :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
    bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
        deformable conv layer. If it is set to False, no bias will be added
        to the output units. If it is set to None or one attribute of ParamAttr, conv2d
        will create ParamAttr as bias_attr. If the Initializer of the bias_attr
        is not set, the bias is initialized zero. Default: None.
    modulated (bool): Make sure which version should be used between v1 and v2, where v2 is \
        used while True. Default: True.
    name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                    Generally, no setting is required. Default: None.
Returns:
    Tensor: The tensor variable storing the deformable convolution \
              result. A Tensor with type float32, float64.
Examples:
    .. code-block:: python

        >>> # deformable conv v2:
        >>> import paddle
        >>> paddle.enable_static()

        >>> C_in, H_in, W_in = 3, 32, 32
        >>> filter_size, deformable_groups = 3, 1
        >>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
        >>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
        >>> mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
        >>> out = paddle.static.nn.common.deformable_conv(input=data, offset=offset, mask=mask,
        ...                                     num_filters=2, filter_size=filter_size, padding=1, modulated=True)

        >>> # deformable conv v1:
        >>> import paddle
        >>> C_in, H_in, W_in = 3, 32, 32
        >>> filter_size, deformable_groups = 3, 1
        >>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
        >>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
        >>> out = paddle.static.nn.common.deformable_conv(input=data, offset=offset, mask=None,
        ...                                     num_filters=2, filter_size=filter_size, padding=1, modulated=False)
r   r!   r"   deformable_convoffsetmaskNr   z9The input should be of [N, C, H, W] format, but received r   Fr   z'Input of deformable_conv must be Tensorz.Input Offset of deformable_conv must be Tensorr   zgroups should not be 0.z)num_channels must be divisible by groups.r2   r   r   r   r   c                     > TS   TS   -  T-  n U S::  a  [        SU  S35      eSU -  S-  n[        R                  R                  R                  R                  SU5      $ r   )r   rC   nninitializernormalr   r   s     r)   r   7deformable_conv.<locals>._get_default_param_initializer9  sx    %a.;q>9LHa,- . 
 _$,yy$$++223<<r,   rw   )r   r   OffsetMaskr   )r   r   r   r   deformable_groupsim2col_stepr9   deformable_conv_v1)r   r   r%  r   r  )r   r   rC   staticr   r:   ndimr   r/   r   r@   rI   rF   r  r   r   r   rL   rY   rQ   rR   rU   )r   r  r  r   r   r   r   r   r   r'  r(  rY   rZ   	modulatedr\   r]   r0   r   rb   r   r   r   rg   outputr   s       `                   @r)   r  r    s   X wI.0A 9i02C fv}}--tDz:<M zzQG}U
 	
 ;;q>LU"J$JJ"7fh7F EeV]]3344ABBffmm4455HII~*Q;677& A%HII*f4,,..{A}MK\\))&!X>Fll**7AyAG||++HaDH++K%8!9HKHL	= **:<	 + L 88?H"& 	 x(!#% %6* 	 	
( 	%& 
 x(!#% %6* 	 	
$ ""8q!"DFMr,   c                 d    Uc  [        U UUUUUUUUU	U
UUSUS9$ [        U UUUUUUUUU	U
UUSUS9$ )a  

Compute 2-D deformable convolution on 4-D input.
Given input image x, output feature map y, the deformable convolution operation can be expressed as follow:


Deformable Convolution v2:

.. math::

    y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}

Deformable Convolution v1:

.. math::

    y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}

Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
Which :math:`\Delta m_k` is one in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
<https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.

Example:
    - Input:

      X shape: :math:`(N, C_{in}, H_{in}, W_{in})`

      Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

      Offset shape: :math:`(N, 2 * deformable\_groups * H_f * H_w, H_{in}, W_{in})`

      Mask shape: :math:`(N, deformable\_groups * H_f * H_w, H_{in}, W_{in})`

    - Output:

      Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

    Where

    .. math::

        H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
        W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

Args:
    x (Tensor): The input image with [N, C, H, W] format. A Tensor with type
        float32, float64.
    offset (Tensor): The input coordinate offset of deformable convolution layer.
        A Tensor with type float32, float64.
    mask (Tensor): The input mask of deformable convolution layer.
        A Tensor with type float32, float64. It should be None when you use
        deformable convolution v1.
    num_filters(int): The number of filter. It is as same as the output
        image channel.
    filter_size (int|list|tuple): The filter size. If filter_size is a list/tuple,
        it must contain two integers, (filter_size_H, filter_size_W).
        Otherwise, the filter will be a square.
    stride (int|list|tuple, Optional): The stride size. If stride is a list/tuple, it must
        contain two integers, (stride_H, stride_W). Otherwise, the
        stride_H = stride_W = stride. Default: stride = 1.
    padding (int|list|tuple, Optional): The padding size. If padding is a list/tuple, it must
        contain two integers, (padding_H, padding_W). Otherwise, the
        padding_H = padding_W = padding. Default: padding = 0.
    dilation (int|list|tuple, Optional): The dilation size. If dilation is a list/tuple, it must
        contain two integers, (dilation_H, dilation_W). Otherwise, the
        dilation_H = dilation_W = dilation. Default: dilation = 1.
    groups (int, Optional): The groups number of the deformable conv layer. According to
        grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
        the first half of the filters is only connected to the first half
        of the input channels, while the second half of the filters is only
        connected to the second half of the input channels. Default: groups=1.
    deformable_groups (int, Optional): The number of deformable group partitions.
        Default: deformable_groups = 1.
    im2col_step (int, Optional): Maximum number of images per im2col computation;
        The total batch size should be devisable by this value or smaller
        than this value; if you face out of memory problem, you can try
        to use a smaller value here.
        Default: im2col_step = 1.
    weight_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
        of deformable conv. If it is set to None or one attribute of ParamAttr,
        deformable conv will create ParamAttr as weight_attr.
        If the Initializer of the weight_attr is not set, the parameter is
        initialized with :math:`Normal(0.0, std)`, and the
        :math:`std` is :math:`(\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
    bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
        deformable conv layer. If it is set to False, no bias will be added
        to the output units. If it is set to None or one attribute of ParamAttr, conv2d
        will create ParamAttr as bias_attr. If the Initializer of the bias_attr
        is not set, the bias is initialized zero. Default: None.
    name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                    Generally, no setting is required. Default: None.

Returns:
    Tensor: The tensor storing the deformable convolution result. A Tensor with type float32, float64.

Examples:
    .. code-block:: python

        >>> # deformable conv v2:
        >>> import paddle
        >>> paddle.enable_static()

        >>> C_in, H_in, W_in = 3, 32, 32
        >>> filter_size, deformable_groups = 3, 1
        >>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
        >>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
        >>> mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
        >>> out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=mask,
        ...                                      num_filters=2, filter_size=filter_size, padding=1)

        >>> # deformable conv v1:
        >>> import paddle
        >>> paddle.enable_static()

        >>> C_in, H_in, W_in = 3, 32, 32
        >>> filter_size, deformable_groups = 3, 1
        >>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
        >>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
        >>> out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=None,
        ...                                      num_filters=2, filter_size=filter_size, padding=1)
F)r   r  r  r   r   r   r   r   r   r'  r(  rY   rZ   r-  r\   Tr*  )rl   r  r  r   r   r   r   r   r   r'  r(  rm   rZ   r\   s                 r)   deform_conv2dr0  x  s    V |##/#"
 	
$ ##/#"
 	
r,   c                 B   [        S0 [        5       D6nUR                  S5      n[        U R                  5      S:w  d  [        UR                  5      S:w  a%  [        SU R                   SUR                   35      eX R                  S   UR                  S   /n	UR                  UR                  XSS9n
UR                  US	9nXU
S
.nUR                  (       a#  SU/nUR                  UR                  XSS9nXS'   UR                  SUSU0S9  UR                  U5      $ )aA  
This layer performs bilinear tensor product on two inputs.

.. math::

   out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1

In this formula:
  - :math:`x`: the first input contains M elements, shape is [batch_size, M].
  - :math:`y`: the second input contains N elements, shape is [batch_size, N].
  - :math:`W_{i}`: the i-th learned weight, shape is [M, N].
  - :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
  - :math:`y^\mathrm{T}`: the transpose of :math:`y_{2}`.

Args:
    x (Tensor): 2-D input tensor with shape [batch_size, M]. Data type
        is float32 or float64.
    y (Tensor): 2-D input tensor with shape [batch_size, N]. Data type
        should be same as **x**.
    size (int): The dimension of this layer.
    act (str|None): Activation to be applied to the output of this layer. Default None.
    name(str|None): For detailed information, please refer to
        :ref:`api_guide_Name` . Usually name is no need to set and None by default.
    param_attr (ParamAttr|None): To specify the weight parameter attribute.
        Default: None, which means the default weight parameter property is
        used. See usage for details in :ref:`api_paddle_ParamAttr` .
    bias_attr (ParamAttr|None): To specify the bias parameter attribute.
        Default: None, which means the default bias parameter property is
        used. See usage for details in :ref:`api_paddle_ParamAttr` .

Returns:
    Tensor, A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**.

Examples:
    .. code-block:: python

        >>> import paddle
        >>> paddle.enable_static()

        >>> x = paddle.static.data("t1", shape=[-1, 5], dtype="float32")
        >>> y = paddle.static.data("t2", shape=[-1, 4], dtype="float32")
        >>> tensor = paddle.static.nn.bilinear_tensor_product(x, y, size=1000)

bilinear_tensor_productrl   r2   zDInput x and y should be 2D tensor, but received x with the shape of z, y with the shape of r   Fr-   r   )r4   r5   WeightTr~   r6   r:   r;   r<   )r2  )r   r@   rI   rK   r/   r   rL   rY   rQ   rZ   rR   rV   )rl   yrW   r[   r\   rY   rZ   r]   r0   rc   rd   r   r;   	bias_sizer   s                  r)   r2  r2  )	  s<   ^ ?fh?Fs#E
177|qCLA-RSTSZSZR[[qrsryryqz{
 	
 QWWQZ0Kk 	  	A 
3
3%
3
@C*FI	&&!! ' 
 v
&vs|  
 ##C((r,   c           
         USLd   S5       e[        S&0 [        5       D6n[        U S/ SQS5        UR                  5       nU[        R
                  :X  d  U[        R                  :X  a  [        R                  nU R                  n[        U R                  5      S:  d  [        U R                  5      S:  a%  [        S[        U R                  5       S	U S
35      eUS:X  a  US   nOUS:X  a  US   nO[        SU-   5      eU/nUR                  UR                  UU[        R                  R                  R                  S5      S9nUR                  UR                   UUSS9nUR                  [        R"                  " U
[        R                  R                  R                  S5      SUS9UUS9nSUl        UR                  [        R"                  " U[        R                  R                  R                  S5      SUS9UUS9nSUl        UnUn['        5       (       a  SnSn[)        U[        R*                  5      (       a  SnOSnSnU(       a  SUSUSUSUSU4
nO
SUSUSUSU4nU(       a0  [        R,                  R.                  " U UUUUUUU/UQ76 u  n        nO/[        R,                  R.                  " U UUUUSUU/UQ76 u  n        n[        R0                  R2                  R5                  UUS9$ UR7                  USS9nUR7                  USS9nSn U(       d  UR7                  UR                  5       SS9n U(       a  U OUR7                  U5      nU UUUUUUS .n!UUUSUS!.n"[)        U[        R8                  R:                  5      (       a  UU!S"'   OUU"S'   UUUUUS#.n#U b  U U#S$'   UR=                  SU!U#U"S%9  UR?                  U5      $ )'a  

**Batch Normalization Layer**

Can be used as a normalizer function for convolution or fully_connected operations.
The required data format for this layer is one of the following:

1. NHWC `[batch, in_height, in_width, in_channels]`

2. NCHW `[batch, in_channels, in_height, in_width]`

Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
for more details.

:math:`input` is the input features over a mini-batch.

..  math::

    \mu_{\beta} &\gets \frac{1}{m} \sum_{i=1}^{m} x_i \qquad &//
    \ mini-batch\ mean \\
    \sigma_{\beta}^{2} &\gets \frac{1}{m} \sum_{i=1}^{m}(x_i -
    \mu_{\\beta})^2 \qquad &//\ mini-batch\ variance \\
    \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{
    \sigma_{\beta}^{2} + \epsilon}} \qquad &//\ normalize \\
    y_i &\gets \gamma \hat{x_i} + \beta \qquad &//\ scale\ and\ shift

    moving\_mean = moving\_mean * momentum + mini-batch\_mean * (1. - momentum) \\
    moving\_var = moving\_var * momentum + mini-batch\_var * (1. - momentum)


moving_mean is global mean and moving_var is global variance.

When use_global_stats = True, the :math:`\\mu_{\\beta}`
and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
They are global (or running) statistics. (It usually got from the
pre-trained model.)
The training and testing (or inference) have the same behavior:

..  math::

    \hat{x_i} &\gets \frac{x_i - \mu_\beta} {\sqrt{
    \sigma_{\beta}^{2} + \epsilon}}  \\
    y_i &\gets \gamma \hat{x_i} + \beta

Note:
    if build_strategy.sync_batch_norm=True, the batch_norm in network will use
    sync_batch_norm automatically.
    `is_test = True` can only be used in test program and inference program, `is_test` CANNOT be set to True in train program, if you want to use global status from pre_train model in train program, please set `use_global_stats = True`.

Args:
    input(Tensor): The rank of input Tensor can be 2, 3, 4, 5. The data type
        is float16 or float32 or float64.
    act(string, Default None): Activation type, linear|relu|prelu|...
    is_test (bool, Default False): A flag indicating whether it is in
        test phrase or not.
    momentum(float|Tensor, Default 0.9): The value used for the moving_mean and
        moving_var computation. This should be a float number or a 0-D Tensor with
        shape [] and data type as float32. The updated formula is:
        :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
        :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
        Default is 0.9.
    epsilon(float, Default 1e-05): A value added to the denominator for
        numerical stability. Default is 1e-5.
    param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
         of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
         will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
         If the Initializer of the param_attr is not set, the parameter is initialized
         with Xavier. Default: None.
    bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
         If it is set to None or one attribute of ParamAttr, batch_norm
         will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
         If the Initializer of the bias_attr is not set, the bias is initialized zero.
         Default: None.
    data_layout (str, optional): Specify the data format of the input, and the data format of the output
         will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
         The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
         `[batch_size, input_channels, input_height, input_width]`.
    in_place(bool, Default False): Make the input and output of batch norm reuse memory.
    name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
        Usually name is no need to set and None by default.
    moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
        is set to None, batch_norm will save global mean with a random name, otherwise, batch_norm
        will save global mean with the string.
    moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
        If it is set to None, batch_norm will save global variance with a random name, otherwise, batch_norm
        will save global variance with the string.
    do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
        average when model average is enabled.
    use_global_stats(bool, Default False): Whether to use global mean and
        variance. In inference or test mode, set use_global_stats to true
        or is_test to true, and the behavior is equivalent.
        In train mode, when setting use_global_stats True, the global mean
        and variance are also used during train period.

Returns:
    A Tensor which is the result after applying batch normalization on the input,
    has same shape and data type with input.

Examples:

    .. code-block:: python

        >>> import paddle

        >>> paddle.enable_static()
        >>> x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
        >>> hidden1 = paddle.static.nn.fc(x=x, size=200)
        >>> print(hidden1.shape)
        (3, 200)
        >>> hidden2 = paddle.static.nn.batch_norm(input=hidden1)
        >>> print(hidden2.shape)
        (3, 200)
Fz,bias_attr should not be False in batch_norm.
batch_normr   rp   r2   rr   rs   rt   ru   r   r   r   r#   zunsupported data layout:rv   rw   Tr-   ry   )r\   r"  	trainabledo_model_averager.   r/   r0   r&   momentumr   is_testr   use_global_statsN)r[   r{   )r4   r}   r~   r   r   MeanOutVarianceOut)r   r=  r   fuse_with_relur>  MomentumTensor)r5   r?  r@  r   r   ReserveSpacer9   )r8  ) r   r@   r   rI   rC   r   bfloat16r!   r/   rK   r   rL   rY   r!  r"  r   rZ   r   r|   r
   rF   Tensor_legacy_C_opsr8  r   dygraph_utils_append_activation_in_dygraphrQ   r+  r   rR   rV   )$r   r[   r=  r<  r   rY   rZ   r   in_placer\   moving_mean_namemoving_variance_name!do_model_average_for_mean_and_varr>  r]   r0   rb   r   rc   r   r   meanvariancer   r   inputs_has_MomentumTensorattrs_has_momentumattrs_batch_norm_out_r   r   reserve_spacer;   r=   r<   s$                                       r)   r8  r8  t	  s   D E! 6! 22F3	  E %6??":++K
5;;!s5;;/!37EKK8H7IIbcnboopq
 	
 f!!n& %b/K7+EFF-K ##"II11::3?	 $ E ""[t # D ""!		--66s;>	
  # 	D D&&%		--66s;>	
  ' 	H "H HL$)!"h..(,%!%" F " 	F %,2,@,@,K,K
- 
-)NAq!Q -3,@,@,K,K
- 
-)NAq!Q {{((FF G 
 	
 ::4 ; J >>4 ? N MAA$$&d B 

 vHHO 
 #F ",E (FMM2233#+ $j #'G  "/
&'   ##N33r,   c           	         [        U S/ SQS5        [        S0 [        5       D6nUS;  a  [        S5      eS/nUS:X  ar  / SQnX7;  a  [        S	U 35      eUS   S
:X  a  SOSn[	        U R
                  5      S:  d   S5       eUS:X  a  SSSU R
                  S   /nOUSU R
                  S   SS/nOAUS:X  a;  [	        U R
                  5      S:  d   S5       eS/[        U R
                  5      SS QnUR                  SS9nUR                  UR                  UUS[        R                  R                  R                  S5      S9n	UR                  U5      n
UR                  SX	S.XS.SU
0S9  U
$ )a  

prelu activation.

.. math::
    prelu(x) = max(0, x) + \alpha * min(0, x)

There are three modes for the activation:

.. code-block:: text

    all: All elements share same alpha.
    channel: Elements in same channel share same alpha.
    element: All elements do not share alpha. Each element has its own alpha.

Parameters:
    x (Tensor): The input Tensor or DenseTensor with data type float32.
    mode (str): The mode for weight sharing.
    param_attr (ParamAttr|None, optional): The parameter attribute for the learnable \
        weight (alpha), it can be create by ParamAttr. None by default. \
        For detailed information, please refer to :ref:`api_paddle_ParamAttr`.
    data_format(str, optional): Data format that specifies the layout of input.
        It may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default: "NCHW".
    name (str, optional): Name for the operation (optional, default is None). \
        For more information, please refer to :ref:`api_guide_Name`.

Returns:
    Tensor: A tensor with the same shape and data type as x.

Examples:

    .. code-block:: python

        >>> # doctest: +SKIP("This has diff in xdoctest env")
        >>> import paddle
        >>> paddle.enable_static()

        >>> x = paddle.static.data(name="x", shape=[None, 5, 10, 10], dtype="float32")
        >>> mode = 'channel'
        >>> output = paddle.static.nn.prelu(
        ...     x,mode,param_attr=paddle.ParamAttr(name='alpha'))

rl   r   prelu)allchannelelementz,mode should be one of all, channel, element.r   rX  )NCNCLr   r   NLCr   r   z\data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC' but receive Cr   r   r2   zZThe size of input shape should be equal or larger than 2 in prelu() when mode is 'channel'r#   rY  zZThe size of input shape should be equal or larger than 1 in prelu() when mode is 'element'N)input_param_nameFg      ?rz   )r4   Alpha)moder   r6   )r:   r;   r=   r<   )rV  )r   r   r@   r   rK   r/   rA   rI   rL   rY   rC   r!  r"  r   rQ   rR   )rl   r`  rY   r   r\   r]   alpha_shapetrue_data_formatr0   alphar   s              r)   rV  rV  
  s   Z Q%FP-FH-F00GHH#Ky
 .66A]D 
 !,A# 5f6177|q  	
h	
  & aAGGBK0Kaggaj!Q/K		177|q  	
h	
  -4=,-4E##"II11::4@ $ E 
3
3E
:C
'8	   Jr,   c                   T    \ rS rSr/ rS r\S 5       r\S 5       r\	S 5       r
S rSrg)	PyFuncRegistryi  c                 b   Ub  [        U5      (       d  [        S5      eXl        [        R                  " U R                  5      n[        US   5      S:X  a  US   c  US   c  S U l        O
US   U l        [        R                  " U 5      U l	         [        R                  R                  U 5        g )Nzfunc must be a Python functionr   r   r2   )callabler  _funcinspectgetfullargspecrK   _named_argsr   ,_append_python_callable_object_and_return_id_idre  _register_funcsrS   )selffuncargss      r)   __init__PyFuncRegistry.__init__   s    <x~~<==
%%djj1tAw<1aT!W_#D#AwDDDTJ	 	&&--d3r,   c                 4    U R                   U   R                  $ r%   )rn  rh  )clsidxs     r)   registered_funcPyFuncRegistry.registered_func;  s    ""3'---r,   c                 ,    [        U R                  5      $ r%   )rK   rn  )ru  s    r)   registered_func_num"PyFuncRegistry.registered_func_num?  s    3&&''r,   c                     U R                   $ r%   )rm  ro  s    r)   idPyFuncRegistry.idC  s    xxr,   c                 |   U R                   c  U R                  5       nO60 nSnU R                    H  nX   X5'   US-  nM     U R                  " XS  0 UD6n[        U[        [        45      (       d  U4n/ nU H  nUb  [        U[
        R                  5      (       a  UR                  U5        M8  [        U[        R                  5      (       d  [        R                  " U5      n[
        R                  " 5       nUR                  U[
        R                  " 5       5        UR                  U5        M     [	        U5      $ )Nr   r   )rk  rh  rF   rA   rB   r   DenseTensorrS   rM   ndarrayarraysetCPUPlace)	ro  rq  func_retkwargsrv  argreteach_rettensors	            r)   __call__PyFuncRegistry.__call__G  s   #zz|HFC''"iq ( zz4:88H(T5M22 {H H:h8H8H#I#I

8$h

3388H-%%'FJJx1JJv ! Szr,   )rh  rm  rk  N)__name__
__module____qualname____firstlineno__rn  rr  classmethodrw  rz  propertyr~  r  __static_attributes__r&   r,   r)   re  re    sM    O46 . . ( (  r,   re  c           
         [        S0 [        5       D6n[        US[        [        [
        [        S5      4S5        Uc  / nOe[        U[
        5      (       a  U/nOL[        U[        5      (       a  [        U5      nO+[        U[        [        [
        45      (       d  [        S5      e[        US[        [        [
        [        S5      4S5        Uc  / nO][        U[
        5      (       a  U/nOD[        U[        5      (       a  [        U5      nO#[        U[        5      (       a  UnO[        S5      e[        U 5      R                  nUb  [        U5      R                  OSnU H'  n	[        U	R                  5      S:X  d  M  [        S	5      e   [        5       n
Ub  Ub  [        U[
        5      (       a  U/nU Vs/ s H  oR                  PM     nnUR!                  U Vs/ s H  oR                  PM     sn5        [        U5      n[        5       n
U HG  nUR                  U;  a  [        S
UR                   S35      eU
R#                  UR                  5        MI     UR%                  SSU0SU0UU[        U
5      S.S9  U$ s  snf s  snf )a  
This is used to register customized Python OP to Paddle. The design
principe of py_func is that Tensor and numpy array can be converted to each
other easily. So you can use Python and numpy API to register a python OP.
The forward function of the registered OP is ``func`` and the backward function
of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
call ``backward_func`` at backward runtime(if ``backward_func`` is not  None).
``x`` is the input of ``func``, whose type must be Tensor; ``out`` is
the output of ``func``, whose type can be either Tensor or numpy array.
The input of the backward function ``backward_func`` is ``x``, ``out`` and
the gradient of ``out``. If ``out`` have no gradient, the relevant input of
``backward_func`` is None. If ``x`` do not have a gradient, the user should
return None in ``backward_func``.
The data type and shape of ``out`` should also be set correctly before this
API is called, and the data type and shape of the gradient of ``out`` and
``x`` will be inferred automatically.
This API can also be used to debug the neural network by setting the ``func``
as a function that only print variables.

Args:
    func (callable): The forward function of the registered OP. When the network
        is running, the forward output ``out`` will be calculated according to this
        function and the forward input ``x``. In ``func`` , it's suggested that we
        actively convert Tensor into a numpy array, so that we can use Python and
        numpy API arbitrarily. If not, some operations of numpy may not be compatible.
    x (Tensor|tuple(Tensor)|list[Tensor]): The input of the forward function ``func``.
        It can be Tensor|tuple(Tensor)|list[Tensor]. In addition, Multiple Tensor
        should be passed in the form of tuple(Tensor) or list[Tensor].
    out (T|tuple(T)|list[T]): The output of the forward function ``func``, it can be
        T|tuple(T)|list[T], where T can be either Tensor or numpy array. Since Paddle
        cannot automatically infer the shape and type of ``out``, you must create
        ``out`` in advance.
    backward_func (callable, optional): The backward function of the registered OP.
        Its default value is None, which means there is no reverse calculation. If
        it is not None, ``backward_func`` is called to calculate the gradient of
        ``x`` when the network is at backward runtime.
    skip_vars_in_backward_input (Tensor, optional): It's used to limit the input
        list of ``backward_func``, and it can be Tensor|tuple(Tensor)|list[Tensor].
        It must belong to either ``x`` or ``out``. The default  value is None, which means
        that no tensors need to be removed from ``x`` and ``out``. If it is not None,
        these tensors will not be the input of ``backward_func``. This parameter is only
        useful when ``backward_func`` is not None.

Returns:
    Tensor|tuple(Tensor)|list[Tensor], The output ``out`` of the forward function ``func``.

Examples:
    .. code-block:: python
        :name: code-example1

        >>> import paddle
        >>> import numpy as np

        >>> np.random.seed(1107)
        >>> paddle.seed(1107)

        >>> paddle.enable_static()
        >>> # Creates a forward function, Tensor can be input directly without
        >>> # being converted into numpy array.
        >>> def tanh(x):
        ...     return np.tanh(x)

        >>> # Skip x in backward function and return the gradient of x
        >>> # Tensor must be actively converted to numpy array, otherwise,
        >>> # operations such as +/- can't be used.
        >>> def tanh_grad(y, dy):
        ...     return np.array(dy) * (1 - np.square(np.array(y)))

        >>> # Creates a forward function for debugging running networks(print value)
        >>> def debug_func(x):
        ...     # print(x)
        ...     pass
        >>> def create_tmp_var(name, dtype, shape):
        ...     return paddle.static.default_main_program().current_block().create_var(
        ...         name=name, dtype=dtype, shape=shape)
        >>> def simple_net(img, label):
        ...     hidden = img
        ...     for idx in range(4):
        ...         hidden = paddle.static.nn.fc(hidden, size=200)
        ...         new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
        ...             dtype=hidden.dtype, shape=hidden.shape)
        ...         # User-defined forward and backward
        ...         hidden = paddle.static.py_func(func=tanh, x=hidden,
        ...             out=new_hidden, backward_func=tanh_grad,
        ...             skip_vars_in_backward_input=hidden)
        ...         # User-defined debug functions that print out the input Tensor
        ...         paddle.static.py_func(func=debug_func, x=hidden, out=None)
        ...     prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
        ...     ce_loss = paddle.nn.loss.CrossEntropyLoss()
        ...     return ce_loss(prediction, label)
        >>> x = paddle.static.data(name='x', shape=[1,4], dtype='float32')
        >>> y = paddle.static.data(name='y', shape=[1], dtype='int64')
        >>> res = simple_net(x, y)
        >>> exe = paddle.static.Executor(paddle.CPUPlace())
        >>> exe.run(paddle.static.default_startup_program())
        >>> input1 = np.random.random(size=[1,4]).astype('float32')
        >>> input2 = np.random.randint(1, 10, size=[1], dtype='int64')
        >>> out = exe.run(paddle.static.default_main_program(),
        ...                 feed={'x':input1, 'y':input2},
        ...                 fetch_list=[res.name])
        >>> print(out[0].shape)
        ()

    .. code-block:: python
        :name: code-example2

        >>> # This example shows how to turn Tensor into numpy array and
        >>> # use numpy API to register an Python OP
        >>> import paddle
        >>> import numpy as np

        >>> np.random.seed(1107)
        >>> paddle.seed(1107)

        >>> paddle.enable_static()
        >>> def element_wise_add(x, y):
        ...     # Tensor must be actively converted to numpy array, otherwise,
        ...     # numpy.shape can't be used.
        ...     x = np.array(x)
        ...     y = np.array(y)
        ...     if x.shape != y.shape:
        ...         raise AssertionError("the shape of inputs must be the same!")
        ...     result = np.zeros(x.shape, dtype='int32')
        ...     for i in range(len(x)):
        ...         for j in range(len(x[0])):
        ...             result[i][j] = x[i][j] + y[i][j]
        ...     return result
        >>> def create_tmp_var(name, dtype, shape):
        ...     return paddle.static.default_main_program().current_block().create_var(
        ...                 name=name, dtype=dtype, shape=shape)
        >>> def py_func_demo():
        ...     start_program = paddle.static.default_startup_program()
        ...     main_program = paddle.static.default_main_program()
        ...     # Input of the forward function
        ...     x = paddle.static.data(name='x', shape=[2, 3], dtype='int32')
        ...     y = paddle.static.data(name='y', shape=[2, 3], dtype='int32')
        ...     # Output of the forward function, name/dtype/shape must be specified
        ...     output = create_tmp_var('output','int32', [3, 1])
        ...     # Multiple Tensor should be passed in the form of tuple(Tensor) or list[Tensor]
        ...     paddle.static.py_func(func=element_wise_add, x=[x, y], out=output)
        ...     exe=paddle.static.Executor(paddle.CPUPlace())
        ...     exe.run(start_program)
        ...     # Feed numpy array to main_program
        ...     input1 = np.random.randint(1, 10, size=[2, 3], dtype='int32')
        ...     input2 = np.random.randint(1, 10, size=[2, 3], dtype='int32')
        ...     out = exe.run(main_program,
        ...                feed={'x':input1, 'y':input2},
        ...                fetch_list=[output.name])
        ...     print("{0} + {1} = {2}".format(input1, input2, out))
        >>> py_func_demo()
        >>> # [[1 5 4]   + [[3 7 7]  =  [array([[ 4, 12, 11]
        >>> #  [9 4 8]]     [2 3 9]]            [11,  7, 17]], dtype=int32)]
py_funcr4   Nz/Input must be Tensor/list(Tensor)/tuple(Tensor)r6   z0Output must be Tensor/list(Tensor)/tuple(Tensor)r#   r   z=Output shapes of py_func should be provided by users manuallyzTensor z+ is not found in forward inputs and outputs)forward_callable_idbackward_callable_idbackward_skip_varsr9   )r  )r   r@   r   rA   rB   r   r:   rF   r  re  r~  rK   r/   r   r  r\   extendaddrR   )rp  rl   r   backward_funcskip_vars_in_backward_inputr]   out_listfwd_func_idbwd_func_ideach_outr  v
fwd_in_outs                r)   r  r  e  sL   v /fh/Fq#eXtDz:IFy	Ax	 	 C	Au		GD%233IJJsED%4:>	J
{	C	"	"5	C		9	C		JKK &))K,9,E}%((2  x~~!#O    %@%L18<<+F*G'&'(affa
(848a66845_
 U,AvvZ' affX%PQ  ""166* - Qx!#.$/"&'9":
	  	 J+ )4s   8JJc                 8   [        S
0 [        5       D6n[        U SS/S5        UR                  5       nUS-   U R                  S   /nUR                  UR                  XeS9nUR                  U5      nUR                  SU /U/S.SU/0S	9  UR                  U5      $ )a
  
:api_attr: Static Graph

The row convolution is called lookahead convolution. It was
introduced in the following paper for DeepSpeech2:
http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf

The main motivation is that a bidirectional RNN, useful in DeepSpeech
like speech models, learns representation for a sequence by performing a
forward and a backward pass through the entire sequence. However, unlike
unidirectional RNNs, bidirectional RNNs are challenging to deploy in an online
and low-latency setting. The lookahead convolution incorporates information
from future subsequences in a computationally efficient manner to improve
unidirectional recurrent neural networks. The row convolution is
different from the 1D sequence convolution, and is computed as follows:

Given an input sequence :math:`X` of length :math:`t` and input dimension :math:`D`,
and a filter (:math:`W`) of size :math:`context \times D`,
the output sequence is convolved as:

.. math::

    Out_{i} = \sum_{j=i}^{i + context - 1} X_{j} \cdot W_{j-i}


In the above equation:

* :math:`Out_{i}`: The i-th row of output variable with shape [1, D].

* :math:`context`: Future context size.

* :math:`X_{j}`: The j-th row of input variable with shape [1, D].

* :math:`W_{j-i}`: The (j-i)-th row of parameters with shape [1, D].

More details about row_conv please refer to
the design document
https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 .

Args:
    input (Tensor): The input is a Tensor, the shape of Tensor input has shape
        (B x T x N), B is batch size.
    future_context_size (int): Future context size. Please note, the shape
        of convolution kernel is [future_context_size + 1, D].
    param_attr (ParamAttr): Attributes of parameters, including
        name, initializer etc.
    act (str): Non-linear activation to be applied to output Tensor.

Returns:
    Tensor: The output is a Tensor, which has same type and same shape as input.

Examples:

    .. code-block:: python

        >>> # doctest: +SKIP("This has diff in xdoctest env")
        >>> # for DenseTensor inputs
        >>> import paddle
        >>> paddle.enable_static()
        >>> x = paddle.static.data(name='x', shape=[9, 16],
        ...                     dtype='float32', lod_level=1)
        >>> out_x = paddle.static.nn.row_conv(input=x, future_context_size=2)

        >>> # for Tensor inputs
        >>> y = paddle.static.data(name='y', shape=[9, 4, 16], dtype='float32')
        >>> out_y = paddle.static.nn.row_conv(input=y, future_context_size=2)
row_convr   r!   r   r#   r;  )r4   r   r6   r4  )r  )
r   r@   r   rI   r/   rL   rY   rQ   rR   rV   )	r   future_context_sizerY   r[   r]   r0   r   r   r   s	            r)   r  r  >  s    H 0vx0FUGi[*E E'!+U[[_=L**l + L 
3
3E
:C
W7  
 ##C((r,   c           	         [        S0 [        5       D6n[        U SSS/S5        [        US[        S5        [        US[        S5        [        US[
        S5        U R                  nU R                  nSU;  d   S	5       eUS
;  a  [        SU 35      eXq   n[        R                  " U5      U-  n	UR                  [        5       U/U[        SS5      S9n
SU
l        UR                  [        5       U	/U[        SS5      S9nSUl        [        5       (       a!  [         R"                  R%                  X
XX#5      $ SU 0nXS'   XS'   UR'                  US9nUR)                  SUSU0UUUS.S9  U$ )a;
  
:api_attr: Static Graph

**Spectral Normalization Layer**

This operation calculates the spectral normalization value of weight parameters of
fc, conv1d, conv2d, conv3d layers which should be 2-D, 3-D, 4-D, 5-D
Parameters. Output tensor will be in same shape with input tensor.
Calculations are showed as follows.

Step 1:
Generate vector U in shape of [H], and V in shape of [W].
While H is the :attr:`dim` th dimension of the input weights,
and W is the product result of remaining dimensions.

Step 2:
:attr:`power_iters` should be a positive integer, do following
calculations with U and V for :attr:`power_iters` rounds. Calculations
as follows:

.. math::

    \mathbf{v} := \\frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}

    \mathbf{u} := \\frac{\mathbf{W}^{T} \mathbf{v}}{\|\mathbf{W}^{T} \mathbf{v}\|_2}

Step 3:
Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.

.. math::

    \sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}

    \mathbf{W} = \\frac{\mathbf{W}}{\sigma(\mathbf{W})}


Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .

Args:
    weight(Tensor): The input weight tensor of spectral_norm operator,
                    This can be a 2-D, 3-D, 4-D, 5-D tensor which is the
                    weights of fc, conv1d, conv2d, conv3d layer.
                    The data type is float32 or float64.
    dim(int): The index of dimension which should be permuted
              to the first before reshaping Input(Weight) to
              matrix, it should be set as 0 if Input(Weight) is
              the weight of fc layer, and should be set as 1 if
              Input(Weight) is the weight of conv layer, default 0.
    power_iters(int): number of power iterations to calculate spectral norm, default 1.
    eps(float): epsilon for numerical stability in calculating norms, it will be added to
                the denominator to avoid divide zero. Default 1e-12.
    name(str, optional): For detailed information, please refer
                         to :ref:`api_guide_Name`. Usually name is no need to set and
                         None by default.

Returns:
    Tensor: A tensor of weight parameters after spectral normalization.
              The data type and shape is same as input tensor.

Examples:
   .. code-block:: python

        >>> import paddle

        >>> paddle.enable_static()
        >>> weight = paddle.static.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
        >>> x = paddle.static.nn.spectral_norm(weight=weight, dim=1, power_iters=2)
        >>> print(x.shape)
        (2, 8, 32, 32)
spectral_normweightr!   r"   dimpower_itersepsr   z,Any dimension of input cannot be equal to 0.)r   r   zWThe input `dim` must be 0 (if weight in fc) or 1 (if weight in conv), but received dim=ry   rv   rw   Tr3  UVr   r6   )r  r  r  r9   )r  )r   r@   r   r   r   floatr0   r/   r   rM   rN   rL   r   r   r|   r   rC   _C_opsr  r   rR   )r  r  r  r  r\   r]   r0   rb   hrd   ur  r;   r   s                 r)   r  r    s   N 5FH5F9i0/ sE30{M3@sE5/2LLE ,,KKO!OO
&efiejk
 	
 	A
!A[c"3,	 	  	A AO[c"3,	 	  	A AO}}**6akOOF3K3K 
 
 u
 
-C
3
 &
   Jr,   c	                    [        5       SLd   S5       e[        S0 [        5       D6n	[        U SSS/S5        U	R	                  5       n
SU 0nU R
                  n[        S XS	 S
5      /nU(       a6  USLd   S5       eU	R                  U	R                  UU
[        S5      S9nXS'   OU(       a  [        R                  " S5        U(       a,  USLd   S5       eU	R                  U	R                  XSS9nXS'   OU(       a  [        R                  " S5        U	R                  U
SS9nU	R                  U
SS9nU	R                  U
5      nU	R                  SUUUUS.XCS.S9  U	R                  U5      $ )a&  

**Layer Normalization Layer**

The API implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_

The formula is as follows:

..  math::

    \mu & = \frac{1}{H}\sum_{i=1}^{H} x_i

    \sigma & = \sqrt{\frac{1}{H}\sum_{i=1}^{H}{(x_i - \mu)^2} + \epsilon}

    y & = f(\frac{g}{\sigma}(x - \mu) + b)

- :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
- :math:`H`: the number of hidden units in a layers
- :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
- :math:`g`: the trainable scale parameter.
- :math:`b`: the trainable bias parameter.

Args:
    input(Tensor): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
    scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
        normalization. Default: True.
    shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
        normalization. Default: True.
    begin_norm_axis(int, optional): The normalization will be performed along
        dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
        Default: 1.
    epsilon(float, optional): The small value added to the variance to prevent
        division by zero. Default: 1e-05.
    param_attr(ParamAttr, optional): The parameter attribute for the learnable
        gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
        omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
        a default :code:`ParamAttr` would be added as scale. The
        :attr:`param_attr` is initialized as 1 if it is added. Default: None.
    bias_attr(ParamAttr, optional): The parameter attribute for the learnable
        bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
        omitted. If :attr:`shift` is True and :attr:`param_attr` is None,
        a default :code:`ParamAttr` would be added as bias. The
        :attr:`bias_attr` is initialized as 0 if it is added. Default: None.
    act(str, optional): Activation to be applied to the output of layer normalization.
              Default: None.
    name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

Returns:
    Tensor: ``Tensor``  indicating the normalized result, the data type is the same as  ``input`` , and the return dimension is the same as  ``input`` .

Examples:

    .. code-block:: python

        >>> import paddle
        >>> paddle.enable_static()
        >>> x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
        >>> output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
        >>> print(output.shape)
        (8, 32, 32)
Tz;please use LayerNorm instead of layer_norm in dygraph mode!
layer_normr   r!   r"   r4   c                 
    X-  $ r%   r&   )rl   r5  s     r)   r*   layer_norm.<locals>.<lambda>q  s    qur,   Nr   Fz0param_attr should not be False when using scale.rv   rw   r}   z0param_attr is only available with scale is True.z/bias_attr should not be False when using shift.r-   r~   z/bias_attr is only available with shift is True.r{   r   )r   begin_norm_axisr9   )r  )r
   r   r@   r   rI   r/   r   rL   rY   r   warningswarnrZ   rQ   rR   rV   )r   r   shiftr  r   rY   rZ   r[   r\   r]   r0   r;   rb   rc   r   r   r   layer_norm_outs                     r)   r  r    s   R D( E( 22FwI.  E 5\F++K,k:J.KQOPK& 	
>	
& ''"" (	 ( 
  wMMLM% 	
=	
% &&!!4 ' 
 vMMKL 884 9 H <<4 = L >>uEN
$

 "F  	 ##N33r,   c           
      h   [        S0 [        5       D6n[        U SS/S5        [        US/ SQS5        U=(       a    U(       + nU(       a  USL a  USL d   eUR	                  UR
                  XSS9n	UR                  U5      n
Uc  S	OUS
:  a  UOUS
   U-   nUR                  SX	S.SU
0UUUUS.S9  U
$ )an  
:api_attr: Static Graph

The operator is used to lookup embeddings vector of ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the
input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .

The shape of output Tensor is generated by appending an emb_size dimension to the
last dimension of the input Tensor shape.

**Note:** The id in :attr:`input` must satisfy :math:`0 =< id < size[0]` ,
otherwise the program will throw an exception and exit.

.. code-block:: text

    Case 1:

    input is a Tensor. padding_idx = -1
        input.data = [[1, 3], [2, 4], [4, 127]]
        input.shape = [3, 2]
    Given size = [128, 16]
    output is a Tensor:
        out.shape = [3, 2, 16]
        out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
                    [0.345421456, 0.524563927, ..., 0.144534654]],

                    [[0.345249859, 0.124939536, ..., 0.194353745],
                    [0.945345345, 0.435394634, ..., 0.435345365]],

                    [[0.945345345, 0.435394634, ..., 0.435345365],
                    [0.0,         0.0,         ..., 0.0        ]]]  # padding data
    The input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127
    It will pad all-zero data when ids is 127.

    Case 2:

    input is a DenseTensor with 1-level LoD. padding_idx = 0
        input.lod = [[2, 3]]
        input.data = [[1], [3], [2], [4], [0]]
        input.shape = [5, 1]
    Given size = [128, 16]
    output is a DenseTensor:
        out.lod = [[2, 3]]
        out.shape = [5, 1, 16]
        out.data = [[[0.129435295, 0.244512452, ..., 0.436322452]],
                    [[0.345421456, 0.524563927, ..., 0.144534654]],
                    [[0.345249859, 0.124939536, ..., 0.194353745]],
                    [[0.945345345, 0.435394634, ..., 0.435345365]],
                    [[0.0,         0.0,         ..., 0.0        ]]]  # padding data
    It will pad all-zero data when ids is 0.


Args:
    input(Tensor): A Tensor or DenseTensor with type int64, which contains the id information.
        The value of the input id should satisfy :math:`0<= id < size[0]` .
    size(tuple|list): The shape of lookup table parameter. It should have two elements which
        indicates the size of the dictionary of embeddings and the size of each embedding vector respectively.
    is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
        affects the performance of the backwards gradient update. It is recommended to set
        True because sparse update is faster. But some optimizer does not support sparse update
        In these case, is_sparse must be False. Default: False.
    is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
        in multi-machine distributed CPU training. Default: False.
    padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
        If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
        to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
        encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
        If set None, it makes no effect to output. Default: None.
    param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
        default weight parameter property is used. In addition,
        user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
        The local word vector needs to be transformed into numpy format, and the shape of local word
        vector should be consistent with :attr:`size` .
    dtype(str): It refers to the data type of output Tensor.
        It must be float32 or float64. Default: float32.

Returns:
    Tensor: Embedding Tensor or DenseTensor mapped by input. The data type is the same as :attr:`dtype` .

Static Examples:
    .. code-block:: python

        >>> # doctest: +SKIP("This has diff in xdoctest env")
        >>> import paddle
        >>> import numpy as np
        >>> paddle.enable_static()

        >>> x = paddle.static.data(name="x", shape = [2, 4], dtype=np.int64)
        >>> output = paddle.static.nn.embedding(x, (10, 3),
        ...             param_attr=paddle.nn.initializer.Constant(value=1.0))
        >>> m_output=paddle.mean(output)
        >>> place = paddle.CPUPlace()
        >>> exe = paddle.static.Executor(place)
        >>> exe.run(paddle.static.default_startup_program())

        >>> x = np.array([[7, 2, 4, 5],[4, 3, 2, 9]], dtype=np.int64)
        >>> out, = exe.run(paddle.static.default_main_program(), feed={'x':x}, fetch_list=[output])
        >>> print(out)
        [[[1. 1. 1.]
          [1. 1. 1.]
          [1. 1. 1.]
          [1. 1. 1.]]
         [[1. 1. 1.]
          [1. 1. 1.]
          [1. 1. 1.]
          [1. 1. 1.]]]
	embeddingr   r  r0   )r   r!   r"   r    TFr-   r#   r   lookup_table_v2IdsWr6   )	is_sparseis_distributedremote_prefetchpadding_idxr9   )r  )r   r@   r   r   rL   rY   rQ   rR   )r   rW   r  r  r  rY   r0   r]   r  rd   rf   s              r)   r  r    s    l 11FUGgYD3	  8%7OD ^u%<<<d 	  	A 
3
3E
:C  	 ! 1g#  %",.&	
	  
 Jr,   c	                 H   [        S0 [        5       D6n	[        U SS/S5        [        USSS/S5        U R                  S:X  a  [        S	5      eU	R                  U	R                  U[        R                  R                  R                  US
S9n
U	R                  U5      nUc  SOUS:  a  UOUS   U-   nUS;  a  [        S5      eSnUb5  UR                  R                  S;  a  [        S5      eUR                  5       nUc  SnU	R!                  SX
S.SU0USSSUUUUS.S9  U$ )a  
:api_attr: Static Graph

The OP is used as the operator of the Embedding Lookup layer in the large-scale
sparse training of the parameter server mode, instead of using the paddle.nn.functional.embedding.

The operator is used to lookup embeddings vector of ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the input :attr:`size`
(vocab_size, emb_size) and :attr:`dtype` .

The shape of output Tensor is generated by appending an emb_size dimension to the
last dimension of the input Tensor shape.

**Note:** The id in :attr:`input` must satisfy :math:`0 =< id < size[0]` , otherwise
the program will throw an exception and exit.

.. code-block:: text

    Case 1:

    input is a Tensor. padding_idx = -1
        input.data = [[1, 3], [2, 4], [4, 127]]
        input.shape = [3, 2]
    Given size = [128, 16]
    output is a Tensor:
        out.shape = [3, 2, 16]
        out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
                    [0.345421456, 0.524563927, ..., 0.144534654]],

                    [[0.345249859, 0.124939536, ..., 0.194353745],
                    [0.945345345, 0.435394634, ..., 0.435345365]],

                    [[0.945345345, 0.435394634, ..., 0.435345365],
                    [0.0,         0.0,         ..., 0.0        ]]]  # padding data
    The input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127
    It will pad all-zero data when ids is 127.

    Case 2:

    input is a DenseTensor with 1-level LoD. padding_idx = 0
        input.lod = [[2, 3]]
        input.data = [[1], [3], [2], [4], [0]]
        input.shape = [5, 1]
    Given size = [128, 16]
    output is a DenseTensor:
        out.lod = [[2, 3]]
        out.shape = [5, 1, 16]
        out.data = [[[0.129435295, 0.244512452, ..., 0.436322452]],
                    [[0.345421456, 0.524563927, ..., 0.144534654]],
                    [[0.345249859, 0.124939536, ..., 0.194353745]],
                    [[0.945345345, 0.435394634, ..., 0.435345365]],
                    [[0.0,         0.0,         ..., 0.0        ]]]  # padding data
    It will pad all-zero data when ids is 0.

Args:
    input(Tensor): A Tensor or DenseTensor with type int64, which contains the id
        information. The value of the input id should satisfy :math:`0<= id < size[0]` .
    size(tuple|list): The shape of lookup table parameter (vocab_size, emb_size). It
        should have two elements which indicates the size of the dictionary of embeddings
        and the size of each embedding vector respectively. The initial parameter size
        is 0 in the large-scale sparse scenario, which will gradually expand with the
        training. So if vocab_size is temporarily useless, its value can be any integer.
        The emb_size is the dimensional configuration of the word embedding weight parameter.
    padding_idx(int|long|None, optional): padding_idx needs to be in the interval [-vocab_size, vocab_size).
        If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
        to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever
        lookup encounters :math:`padding\_idx` in id. And the padding data will not be updated
        while training. If set None, it makes no effect to output. Default: None.
    is_test(bool, optional): Training or prediction mode. In prediction mode (is_test=False),
        the output is not initialized and created, and it is filled with 0 and returned. Default: False.
    entry(str, optional): Entry config with parameter server whose value is ProbabilityEntry,
        CountFilterEntry or None. Default: None.
    table_class(str, optional): The type of the sparse table. The value can be CommonSparseTable
        or SSDSparseTable. The default is CommonSparseTable.
    param_attr(ParamAttr, optional): To specify the weight parameter property. Default: None, which means the
        default weight parameter property is used. In addition, user-defined or pre-trained word
        vectors can be loaded with the :attr:`param_attr` parameter. The local word vector needs
        to be transformed into numpy format, and the shape of local word vector should be consistent
        with :attr:`size` .
    dtype(str): It refers to the data type of output Tensor. It must be float32 or
        float64. Default: float32.

Returns:
    Tensor: Embedding Tensor or DenseTensor mapped by input. The data type is the same as :attr:`dtype` .

Examples:
    .. code-block:: python

        >>> # doctest: +SKIP("This has diff in xdoctest env")
        >>> import paddle

        >>> paddle.enable_static()
        >>> sparse_feature_dim = 1024
        >>> embedding_size = 64

        >>> # Only when the feature appear more than 10 times or more will be participated in the training.
        >>> entry = paddle.distributed.CountFilterEntry(10)

        >>> input = paddle.static.data(name='ins', shape=[1], dtype='int64')

        >>> emb = paddle.static.nn.sparse_embedding(
        ...     input=input,
        ...     size=[sparse_feature_dim, embedding_size],
        ...     is_test=False,
        ...     entry=entry,
        ...     param_attr=paddle.ParamAttr(name="SparseFeatFactors",
        ...     initializer=paddle.nn.initializer.Uniform()))

r   r  z'paddle.incubate.layers.sparse_embeddingr0   r!   r"   z!paddle.static.nn.sparse_embeddingr   zinput size should not be 0F)r.   r/   r:   r0   r1   r#   )CommonSparseTableSSDSparseTableMemorySparseTablezMtable_class must be in [CommonSparseTable, SSDSparseTable, MemorySparseTable]none)ProbabilityEntryCountFilterEntryShowClickEntryzentry must be instance in [paddle.distributed.ProbabilityEntry, paddle.distributed.CountFilterEntry, paddle.distributed.ShowClickEntry]lookup_tabler  r6   T)r  r  r  r  r=  entrytable_classslotr9   )sparse_embedding)r   r@   r   r   rW   r   rL   rY   r   VarDescVarTypeSELECTED_ROWSrQ   	__class__r  _to_attrrR   )r   rW   r  r=  r  r  rY   r0   r  r]   rd   rf   	entry_strs                r)   r  r  =  s   r 8vx8Fw	#L 	I+	 zzQ566\\!!// 	  	A 
3
3E
:C  	 ! 1g#    
 [
 	
 I??## ,
 

  Z  NN$	|
%&"#&	
	   Jr,   c                   T    \ rS rSrSrSS jrS rS rS rS r	\
SS	 j5       rS
 rSrg)ExponentialMovingAveragei  a  

Compute the moving average of parameters with exponential decay.
Given a parameter :math:`\\theta`, its exponential moving average (EMA)
will be

..  math::

    \text{EMA}_0 & = 0

    \text{EMA}_t & = \text{decay} * \text{EMA}_{t-1} + (1 - \text{decay}) * \theta_t

The average results calculated by **update()** method will be saved in
temporary variables which are created and maintained by the object, and can
be applied to parameters of current model by calling **apply()** method. And
the **restore()** method is used to restore the parameters.

**Bias correction**. All EMAs are initialized to :math:`0` and hence they will be
zero biased, which can be corrected by divided by a factor
:math:`(1 - \text{decay}^t)` , i.e., the actual EMAs applied to parameters
when calling **apply()** method would be

..  math::

    \widehat{\text{EMA}}_t = \frac{\text{EMA}_t}{1 - \text{decay}^t}

**Decay rate scheduling**. A large decay rate very close to 1 would result
in that the averages move very slowly. And a better strategy is to set a
relative smaller decay rate in the very beginning. The argument **thres_steps**
allows users to pass a Variable to schedule the decay rate, in this case,
the actual decay rate becomes

..  math::

    \min(\text{decay}, \frac{1 + \text{thres_steps}}{10 + \text{thres_steps}})

Usually **thres_steps** can be the global training steps.


Args:
    decay (float, optional): The exponential decay rate, usually close to 1, such as 0.999, 0.9999, ... . Default 0.999.
    thres_steps (Variable|None, optional): If not `None`, schedule the decay rate. Default None.
    name (str|None, optional): For detailed information, please refer to :ref:`api_guide_Name`. Usually name is no need to set and None by default.


Examples:

    .. code-block:: python

        >>> import numpy
        >>> import paddle
        >>> import paddle.static as static
        >>> from paddle.static import ExponentialMovingAverage

        >>> paddle.enable_static()

        >>> data = static.data(name='x', shape=[-1, 5], dtype='float32')
        >>> hidden = static.nn.fc(x=data, size=10)
        >>> cost = paddle.mean(hidden)

        >>> test_program = static.default_main_program().clone(for_test=True)
        >>> optimizer = paddle.optimizer.Adam(learning_rate=0.001)
        >>> optimizer.minimize(cost)

        >>> ema = ExponentialMovingAverage(0.999)
        >>> ema.update()

        >>> place = paddle.CPUPlace()
        >>> exe = static.Executor(place)
        >>> exe.run(static.default_startup_program())

        >>> for pass_id in range(3):
        ...     for batch_id in range(6):
        ...         feed_data = numpy.random.random(size=(10, 5)).astype('float32')
        ...         exe.run(program=static.default_main_program(),
        ...         feed={'x': feed_data},
        ...         fetch_list=[cost.name])

        ...     # usage 1
        ...     with ema.apply(exe):
        ...         feed_data = numpy.random.random(size=(10, 5)).astype('float32')
        ...         exe.run(program=test_program,
        ...             feed={'x': feed_data},
        ...             fetch_list=[hidden.name])

        ...     # usage 2
        ...     with ema.apply(exe, need_restore=False):
        ...         feed_data = numpy.random.random(size=(10, 5)).astype('float32')
        ...         exe.run(program=test_program,
        ...             feed={'x': feed_data},
        ...             fetch_list=[hidden.name])
        ...     ema.restore(exe)

Nc           	        ^	^
 [        5       (       a  [        S5      eXl        X l        Ub  UOSU l        U R                  5       U l        SU l        / U l        [        5       R                  5       R                  5        H  nUR                  (       d  M  UR                  R                  [        R                   " SR#                  U R                  UR$                  -   S/5      5      UR&                  SSS9nU R                  R)                  XE45        M     0 U l        U R                   Hp  u  pEUR                  R,                  R/                  XE/5         [1        S	5         U R3                  U5      U R*                  UR$                  '   S S S 5        S S S 5        Mr     [5        5       U l        U R6                  R                  5       n[9        U R6                  S
9   U R;                  U5      u  m	nU R                   H  u  pEUR=                  U5      nUR=                  U5      nUR=                  U R*                  UR$                     5      m
[>        R@                  " XES9  [>        RB                  RD                  RG                  US:  U	U
4S jU
4S j5      n[>        R@                  " XS9  M     S S S 5        [5        5       U l$        U RH                  R                  5       n[9        U RH                  S
9   U R                   H;  u  pEUR=                  U5      nUR=                  U5      n[>        R@                  " XTS9  M=     S S S 5        g ! , (       d  f       GN= f! , (       d  f       GM>  = f! , (       d  f       N= f! , (       d  f       g = f)Nz3In dygraph, don't support ExponentialMovingAverage. z@EMA_STEP_COUNTER@r   ema_tmpFT)r\   r0   persistabler|   moving_average)main_programr.  r   c                     > TST -
  -  $ )Nrv   r&   )	decay_powemas   r)   r*   3ExponentialMovingAverage.__init__.<locals>.<lambda>  s    C3?3r,   c                     > T $ r%   r&   )r  s   r)   r*   r    s    Cr,   )%r
   	Exception_decay_thres_steps_name_get_ema_decay
_decay_var_step_counter_name_params_tmpsr	   global_blockall_parametersr:  block
create_varr   generatejoinr\   r0   rS   	_ema_varsprogram_optimized_guardr   _create_ema_varsr   apply_programr   _get_decay_pow_clone_variablerC   assignr+  r!  condrestore_program)ro  decaythres_stepsr\   paramrf   r  global_step	param_valr  r  s            @@r)   rr  !ExponentialMovingAverage.__init__c  s   E  '!-T2
--/"6)+88:IIKE%%%kk,,$--$**uzz"99!EF  ++ %"& -  !!((%6 L ++JE##44e\B+,-1-B-B5-Iuzz* - CB , %Y""//1(:(:;%)%8%8%?"I{"//
--e4++C0++DNN5::,FGe0"MM,,11!O3	
 i6 0 <  'y$$113(<(<="//
++C0--e4c0 0 >=- -, CB <;  >=s>   M )M	MCM*0AM;
MM
M'	*
M8;
N	c           	        ^ ^ [        5       R                  5          [        R                  R	                  S/T R
                  SSSS9nT R                  bu  T R                  S-   T R                  S-   -  m[        R                  R                  R                  TT R
                  :  U4S jU 4S	 j5      n[        R                  " X!5        S S S 5        U$ ! , (       d  f       W$ = f)
Nr   r!   Tscheduled_ema_decay_rate)r/   valuer0   r  r\   rv   g      $@c                     > T $ r%   r&   )decay_ts   r)   r*   9ExponentialMovingAverage._get_ema_decay.<locals>.<lambda>  s    Gr,   c                  `   > [         R                  " T R                  /[         R                  S9$ )Nr   )rM   r  r  r!   r}  s   r)   r*   r
    s    BHHdkk]"**Er,   )
r	   _lr_schedule_guardrC   r+  create_global_varr  r  r!  r  r  )ro  	decay_var	decay_valr	  s   `  @r)   r  'ExponentialMovingAverage._get_ema_decay  s    !#66877ckk / 8 I   ,,,s2t7H7H47OP"MM,,11dkk)#E	
 i3! 9" # 98" s   B/C
C#c                     [         R                  R                  U R                  S/SSSS9n[         R                  " US5      nUR                  U R                  5      n[         R                  " X25      nXB4$ )Nr   r   r  Tr\   r/   r  r0   r  r!   )rC   r+  r  r  castr  r  pow)ro  r  r  r  decay_pow_accs        r)   r  'ExponentialMovingAverage._get_decay_pow  so    mm55((# 6 
 kk+y9))$//:	

9:))r,   c                     [         R                  R                  [        R                  " U R
                  UR                  -   S-   5      UR                  SUR                  SS9nU$ )N_emary   Tr  )	rC   r+  r  r   r  r  r\   r/   r0   )ro  r  	param_emas      r)   r  )ExponentialMovingAverage._create_ema_vars  sW    MM33%%djj5::&=&FG++++ 4 
	 r,   c           	      2   [         R                  R                  R                  U R                  S9n/ nU R
                   H  u  p4UR                  R                  R                  X4/5         [        S5         U R                  UR                     nUR                  S-   U R                  ;   a/  U R                  UR                  S-      nUR                  XV/5        O5XPR                  -  USU R                  -
  -  -   n[         R                  " XuS9  SSS5        SSS5        M     U HH  u  pV[        5       R!                  5       R#                  SSU0S	U0UR$                  UR$                  S
.S9  MJ     g! , (       d  f       Ni= f! , (       d  f       GMV  = f)zS
Update Exponential Moving Average. Should only call this method in
train program.
)counter_namer  z.masterr   r  Nr  r4   r6   )in_dtype	out_dtyper9   )rC   	optimizerlrautoincreased_step_counterr  r  r  r  r  r   r  r\   rS   r  r  r	   r  rR   r0   )ro  r  param_master_emasr  rf   r  
master_emaema_ts           r)   updateExponentialMovingAverage.update  sd   
 &&))DD00 E 
 ++JE##44e\B+, NN5::6	::	)T^^;!%

Y0F!GJ%,,i-DE%7%DOO+; E MM%: - CB ,  &7!I "//1;;Z(	* * 0 0!*	 <  &7 -, CBs%   +F7BE5F5
F?F
F	c              #      #    UR                  U R                  5         Sv   U(       a  U R                  U5        gg! U(       a  U R                  U5        f f = f7f)z
Apply moving average to parameters for evaluation.

Args:
    executor (Executor): The Executor to execute applying.
    need_restore (bool, optional): Whether to restore parameters after
        applying. Default True.
N)runr  restore)ro  executorneed_restores      r)   applyExponentialMovingAverage.apply  sI      	T''(	'X& |X& s   A= AAAc                 :    UR                  U R                  5        g)zWRestore parameters.

Args:
    executor (Executor): The Executor to execute restoring.
N)r(  r  )ro  r*  s     r)   r)   ExponentialMovingAverage.restore  s     	T))*r,   )	r  r  r  r  r  r  r  r  r  )g+?NNT)r  r  r  r  __doc__rr  r  r  r  r%  r   r,  r)  r  r&   r,   r)   r  r    s=    ]~61p(*	"H #' #' +r,   r  rk   )h㈵>NNNr0  )r2  NNNr   N)
r   r   r   NNNTNNr   )
r   r   r   NNNTNNr   )NNr   r   r   NNNTNNr   )NNr   r   r   NNNTNNr   )
r   r   r   NNNNNTN)	r   r   r   r   r   r   NNN)NNNN)NFg?r2  NNr   FNNNTF)Nr   N)NN)r   r   g-q=N)TTr   r2  NNNN)FFNNr!   )NFNr  Nr!   N);ri  r  	functoolsr   numpyrM   rC   paddle.baser   r   paddle.base.data_feederr   paddle.base.frameworkr   r   r	   r
   r   r   r   r   r   paddle.base.param_attrr   paddle.base.wrapped_decoratorr   paddle.common_ops_importr   r   r   paddle.nn.initializerr   r   paddle.utilsr   __all__r   rq   r   r   r   r   r   r  r  r0  r2  r8  rV  re  r  r  r  rw  rz  r  r  r  r  r&   r,   r)   <module>r>     s        ) /
 
 
 - G 
 3 #
  	^ ^D AEHV - -f 	m4` 
 
 E	 	x-x-~	 	^-H	 	uv 	g` 	sl  	m
 m
b AEH)Z 		&*|4~	 f fRE EP U UpQ)hBL )88 ,@@ 
 
	D4N  
V Vx 
#
	CL+ +r,   