
from __future__ import annotations

import warnings
from collections import defaultdict
from typing import TYPE_CHECKING

import paddle
from paddle import _C_ops, pir
from paddle.base.libpaddle import DataType
from paddle.pir import Value

from ..base import core, framework
from ..base.dygraph import base as imperative_base
from ..base.framework import (
    Variable,
    in_dygraph_mode,
    in_dynamic_or_pir_mode,
    in_pir_mode,
)
from .optimizer import Optimizer

if TYPE_CHECKING:
    from collections.abc import Sequence

    from typing_extensions import NotRequired

    from paddle import Tensor
    from paddle.nn.clip import GradientClipBase
    from paddle.regularizer import WeightDecayRegularizer

    from .lr import LRScheduler
    from .optimizer import _ParameterConfig

    class _AdamParameterConfig(_ParameterConfig):
        beta1: NotRequired[float | Tensor]
        beta2: NotRequired[float | Tensor]
        epsilon: NotRequired[float | Tensor]
        lazy_mode: NotRequired[bool]


__all__: list[str] = []


class Adam(Optimizer):
    r"""
    The Adam optimizer uses an optimization described at the end
    of section 2 of the `Adam paper <https://arxiv.org/abs/1412.6980>`_ ,
    and it dynamically adjusts the learning rate of each parameter using
    the 1st moment estimates and the 2nd moment estimates of the gradient.

    The parameter ``param_out`` update rule with gradient ``grad``:

    .. math::

        \begin{aligned}
            &\hspace{5mm} t = t + 1 \\
            &\hspace{5mm} moment\_1\_out = {\beta}_1 * moment\_1 + (1 - {\beta}_1) * grad \\
            &\hspace{5mm} moment\_2\_out = {\beta}_2 * moment\_2 + (1 - {\beta}_2) * grad * grad \\
            &\hspace{5mm} learning\_rate = learning\_rate * \frac{\sqrt{1 - {\beta}_2^t}}{1 - {\beta}_1^t} \\
            &\hspace{5mm}\textbf{if} \: \textit{amsgrad}: \\
            &\hspace{15mm} moment\_2\_max\_out = max(moment\_2\_out, moment\_2\_max) \\
            &\hspace{15mm} param\_out = param - learning\_rate * \frac{moment\_1\_out}{\sqrt{moment\_2\_max\_out} + \epsilon} \\
            &\hspace{5mm}\textbf{else}: \: \\
            &\hspace{15mm} param\_out = param - learning\_rate * \frac{moment\_1\_out}{\sqrt{moment\_2\_out} + \epsilon} \\
        \end{aligned}

    Related paper: `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_
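
    One step of this update rule can be restated as a short NumPy sketch
    (purely illustrative; the names below are not part of the Paddle API):

    .. code-block:: python
        :name: code-example-update-rule

        >>> import numpy as np
        >>> beta1, beta2, eps, lr, amsgrad = 0.9, 0.999, 1e-08, 0.001, False
        >>> param, m1, m2, m2_max = (np.zeros(4) for _ in range(4))
        >>> t = 0
        >>> grad = np.ones(4)  # stand-in for a real gradient
        >>> t += 1
        >>> m1 = beta1 * m1 + (1 - beta1) * grad
        >>> m2 = beta2 * m2 + (1 - beta2) * grad * grad
        >>> lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
        >>> if amsgrad:
        ...     m2_max = np.maximum(m2, m2_max)
        ...     param = param - lr_t * m1 / (np.sqrt(m2_max) + eps)
        ... else:
        ...     param = param - lr_t * m1 / (np.sqrt(m2) + eps)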

    Args:
        learning_rate (float|LRScheduler, optional): The learning rate used to update ``Parameter``.
            It can be a float value or a LRScheduler. The default value is 0.001.
        beta1 (float|Tensor, optional): The exponential decay rate for the 1st moment estimates.
            It should be a float number or a 0-D Tensor with shape [] and data type as float32.
            The default value is 0.9.
        beta2 (float|Tensor, optional): The exponential decay rate for the 2nd moment estimates.
            It should be a float number or a 0-D Tensor with shape [] and data type as float32.
            The default value is 0.999.
        epsilon (float|Tensor, optional): A small float value for numerical stability.
            It should be a float number or a 0-D Tensor with shape [] and data type as float32.
            The default value is 1e-08.
        parameters (list|tuple|None, optional): List/Tuple of ``Tensor`` to update to minimize ``loss``.
            This parameter is required in dygraph mode. You can specify different options for
            different parameter groups, such as the learning rate and weight decay, by passing
            a list of dicts instead. Note that the learning_rate in a parameter group
            represents a scale applied to the base learning_rate.
            The default value is None in static graph mode, at which time all parameters will be updated.
        weight_decay (int|float|WeightDecayRegularizer|None, optional): The strategy of regularization.
            It can be an int or float value as the coefficient of L2 regularization, or
            :ref:`api_paddle_regularizer_L1Decay`, :ref:`api_paddle_regularizer_L2Decay`.
            If a parameter has already set a regularizer using :ref:`api_paddle_ParamAttr`,
            the regularization setting here in the optimizer will be ignored for this parameter.
            Otherwise, the regularization setting here in the optimizer will take effect.
            Default None, meaning there is no regularization.
        grad_clip (GradientClipBase|None, optional): Gradient clipping strategy, an instance of
            some derived class of ``GradientClipBase`` . There are three clipping strategies
            ( :ref:`api_paddle_nn_ClipGradByGlobalNorm` , :ref:`api_paddle_nn_ClipGradByNorm` ,
            :ref:`api_paddle_nn_ClipGradByValue` ). Default None, meaning there is no gradient clipping.
        lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
            The accumulators are updated at every step, and every element of the two moving
            averages is updated in both dense mode and sparse mode. If the parameter is very
            large, the update may be very slow. Lazy mode only updates the elements that have
            gradients in the current mini-batch, so it is much faster. But this mode has
            different semantics from the original Adam algorithm and may lead to different
            results. The default value is False.
        multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is False.
        use_multi_tensor (bool, optional): Whether to use the multi-tensor strategy to update all parameters at once. Default is False.
        amsgrad (bool, optional): Whether to use the AMSGrad variant of this algorithm from the paper
            `On the Convergence of Adam and Beyond <https://openreview.net/forum?id=ryQu7f-RZ>`_. Default is False.
        name (str|None, optional): Normally there is no need for the user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.

    Examples:
        .. code-block:: python
            :name: code-example1

            >>> import paddle

            >>> linear = paddle.nn.Linear(10, 10)
            >>> inp = paddle.rand([10,10], dtype="float32")
            >>> out = linear(inp)
            >>> loss = paddle.mean(out)
            >>> adam = paddle.optimizer.Adam(
            ...     learning_rate=0.1,
            ...     parameters=linear.parameters()
            ... )
            >>> loss.backward()
            >>> adam.step()
            >>> adam.clear_grad()

        .. code-block:: python
            :name: code-example2

            >>> # Adam with beta1/beta2 as Tensor and weight_decay as float
            >>> import paddle

            >>> linear = paddle.nn.Linear(10, 10)
            >>> inp = paddle.rand([10,10], dtype="float32")
            >>> out = linear(inp)
            >>> loss = paddle.mean(out)
            >>> beta1 = paddle.to_tensor([0.9], dtype="float32")
            >>> beta2 = paddle.to_tensor([0.99], dtype="float32")
            >>> adam = paddle.optimizer.Adam(
            ...     learning_rate=0.1,
            ...     parameters=linear.parameters(),
            ...     beta1=beta1,
            ...     beta2=beta2,
            ...     weight_decay=0.01
            ... )
            >>> loss.backward()
            >>> adam.step()
            >>> adam.clear_grad()

            >>> # Note that the learning_rate of linear_2 is 0.01.
            >>> linear_1 = paddle.nn.Linear(10, 10)
            >>> linear_2 = paddle.nn.Linear(10, 10)
            >>> inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            >>> out = linear_1(inp)
            >>> out = linear_2(out)
            >>> loss = paddle.mean(out)
            >>> adam = paddle.optimizer.Adam(
            ...     learning_rate=0.1,
            ...     parameters=[{  # type: ignore
            ...         'params': linear_1.parameters()
            ...     }, {
            ...         'params': linear_2.parameters(),
            ...         'weight_decay': 0.001,
            ...         'learning_rate': 0.1,
            ...         'beta1': 0.8
            ...     }],
            ...     weight_decay=0.01,
            ...     beta1=0.9
            ... )
            >>> loss.backward()
            >>> adam.step()
            >>> adam.clear_grad()
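
        The AMSGrad variant is enabled through the ``amsgrad`` flag; a
        minimal sketch along the same lines as the examples above:

        .. code-block:: python
            :name: code-example3

            >>> import paddle

            >>> linear = paddle.nn.Linear(10, 10)
            >>> loss = paddle.mean(linear(paddle.rand([10, 10], dtype="float32")))
            >>> adam = paddle.optimizer.Adam(
            ...     learning_rate=0.1,
            ...     parameters=linear.parameters(),
            ...     amsgrad=True
            ... )
            >>> loss.backward()
            >>> adam.step()
            >>> adam.clear_grad()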

strtypemoment1moment2moment2_maxbeta1_pow_accbeta2_pow_accc                |  > Uc   eUc   eUc   eUc   e[        U[        [        45      (       d  SUs=::  a  S:  d  O  [        S5      e[        U[        [        45      (       d  SUs=::  a  S:  d  O  [        S5      e[        U[        [        45      (       d  SU::  d  [        S5      e[        TU ]  UUUUUS9  SU l        X l        X0l        X@l	        Xl
        Xl        0 U l        UUUUS.U l        Xl        U R                  (       a  U R                  5       U l        U R                  5       U l        U R                  5       U l        U(       a  U R                  5       OS U l        U R                  5       U l        U R                  5       U l        U R                  5       U l        S U R,                  S	'   Xl        g )
Nr   r   z.Invalid value of beta1, expect beta1 in [0,1).z.Invalid value of beta2, expect beta2 in [0,1).z.Invalid value of epsilon, expect epsilon >= 0.)learning_rate
parametersweight_decay	grad_clipnameadam)r   r   r    r!   FP32_DenseTensor)
isinstancer   r	   
ValueErrorsuper__init__r/   _beta1_beta2_epsilon
_lazy_mode_multi_precision_master_weights_default_dict_use_multi_tensor_create_multi_tensor_dict_param_dict_moment1_dict_moment2_dict_moment2_max_dict_beta1_pow_acc_dict_beta2_pow_acc_dict_master_weight_dict_amsgrad)selfr6   r   r   r    r7   r8   r9   r!   multi_precisionuse_multi_tensoramsgradr:   	__class__s                r*   r@   Adam.__init__   s   " (((      """%(E!233>> D  %(E!233>> D  'He#455< D  	'!% 	 	
 	# /!"	
 "2!!#==?D!%!?!?!AD!%!?!?!AD4;..0 " (,'E'E'GD$'+'E'E'GD$'+'E'E'GD$;?D$$%78  r)   c           
     Z   UR                   nU R                  U5      (       aD  [        5       (       a  [        R                  nO$[
        R                  R                  R                  nU R                  U R                  XS9  U R                  U R                  XS9  U R                  (       a  U R                  U R                  XS9  U R                  U R                  UU[        U R                   ["        [$        45      (       a  SOU R                   S/[
        R                  R                  R&                  SS9  U R                  U R(                  UU[        U R*                  ["        [$        45      (       a  SOU R*                  S/[
        R                  R                  R&                  SS9  g )N)dtype?r   cpu)r:   paramrY   
fill_valueshaper/   device+?)rY   _is_dtype_fp16_or_bf16r   r   FLOAT32r   VarDescVarTypeFP32_add_accumulator_moment1_acc_str_moment2_acc_strrQ   _moment2_acc_max_str_beta1_pow_acc_strr=   rA   r   r	   DENSE_TENSOR_beta2_pow_acc_strrB   )rR   p	acc_dtypes      r*   _add_moments_powsAdam._add_moments_pows  sY   GG	&&y11}}$,,	 LL0055	d33QHd33QH==!!$";";Q!P(( dkkHe+<== [[#%%22 	 	
 	(( dkkHe+<== [[#%%22 	 	
r)   c                   [        U[        R                  [        R                  R                  45      (       d   e[        U[
        5      (       a  U R                  U5      nU GH  nUR                  U R                  ;   a  M   U R                  (       ai  U R                  UR                  5      (       aI  U R                  U5      nU R                  U5        U R                  R                  UR                  5        M  U R                  UR                  5      (       a'  U R                  (       d  [        R                   " S5        U R                  U5        U R                  R                  UR                  5        GM     g )NzAccumulating with FP16 or BF16 in optimizer can lead to poor accuracy or slow convergence.Consider using multi_precision=True option of the Adam optimizer.)r=   r   Blockpaddler   dict_update_param_groupr:   _already_create_accumulatorrE   ra   rY   _create_master_weightro   addwarningswarn)rR   blockr7   rm   master_ps        r*   _create_accumulatorsAdam._create_accumulators8  s   %)//6::3C3C!DEEEEj$''11*=J Avv999$$)D)DQWW)M)M55a8&&x00044QVV<++AGG44--X ""1%,,008# r)   c                   [        U[        R                  [        R                  R                  45      (       d   e[        U[
        5      (       a  U R                  U5      nU R                  U R                  US   5      nU R                  U R                  US   5      nU R                  (       a  U R                  U R                  US   5      OS nU R                  U R                  US   5      nU R                  U R                  US   5      nU R                  =(       a    U R                  US   R                   5      nU(       a  U R"                  US   R$                     OS n	U R'                  U5      n
[)        5       (       Ga  [        U R*                  [,        5      (       d  U R*                  OU R*                  R/                  S5      n[        U R0                  [,        5      (       d  U R0                  OU R0                  R/                  S5      n[3        5       (       a  U R5                  S5      OS n[6        R8                  " US   US   U
UUUUUU	UUUU R:                  U R<                  SUSU R                  5      u              ng US   /US   /U
/U/U/U/U/S.nU R5                  S5      nU(       a  XS'   US   /U/U/U/U/S.nU R<                  SUU R                  S	.n[        U R*                  [,        5      (       a  U R*                  US
'   OU R*                  US'   [        U R0                  [,        5      (       a  U R0                  US'   OU R0                  US'   [        U R:                  [,        5      (       a  U R:                  US'   OU R:                  US'   U R                  (       a  U/US'   U/US'   U(       a	  XS'   U	US'   UR?                  U R@                  UUUSS9nU$ )Nr   	found_infr   i  FParamGradLearningRateMoment1Moment2Beta1PowBeta2Pow
SkipUpdateParamOut
Moment1Out
Moment2OutBeta1PowOutBeta2PowOut)r!   min_row_size_to_use_multithreadrS   rU   Beta1Tensorr   Beta2Tensorr   EpsilonTensorr    
Moment2MaxMoment2MaxOutMasterParamMasterParamOutTr/   inputsoutputsattrsstop_gradient)!r=   r   rr   rs   r   rt   ru   _get_accumulator_masterrg   rh   rQ   ri   rj   rl   rE   ra   rY   rF   r:   _create_param_lrr   rA   r   itemrB   r   _get_auxiliary_varr   adam_rC   rD   	append_opr/   )rR   r{   param_and_gradr0   r1   r2   r3   r4   find_mastermaster_weightlrrA   rB   r   _r   r   r   adam_ops                      r*   _append_optimize_opAdam._append_optimize_opQ  s   %)//6::3C3C!DEEEEnd++!55nEN..!!>!#4
 ..!!>!#4
 }} (())>!+<  	 44##^A%6
 44##^A%6
 ++ 
0K0K1##1

    !2!7!78 	
 "">2 "## "$++x88 [[%%a(  "$++x88 [[%%a(  9D''44  #),,q!q!%#Aq!Q1a*  )+,'*+!##9#9*O*OF //<I'0|$ ,A./&i&i - -G "__37#.==	E $++x00(,}%!%g$++x00(,}%!%g$--22*.--'#'==i }}(3}|$,7=((5}%,9()ooYY" & G Nr)   c           	     z   [         R                  R                  R                  R                  5       (       a  U R	                  5         g[        U R                  S   [        5      (       d  / nU R                   H  nUR                  (       a  M  UR                  5       c  M)  UR                  5       n[        5       (       a?  [        US5      (       a-  UR                  5       (       a  U R                  b  [        S5      eO>[        US5      (       a-  UR                  5       (       a  U R                  b  [        S5      eUR!                  X#45        M     U R#                  SSUSS9ng[%        U R&                  5       H  u  pV[)        S 5      nUS    HN  nUR                  (       a  M  UR                  5       c  M)  UR                  5       nUS   R!                  X#45        MP     UR+                  UR-                  5        VVs0 s H  u  pxUS:w  d  M  Xx_M     snn5        U R#                  SSUUS9  M     gs  snnf )	a  
    @imperative_base.no_grad
    @framework.non_static_only
    def step(self) -> None:
        """
        Execute the optimizer and update parameters once.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> a = paddle.rand([2,13], dtype="float32")
                >>> linear = paddle.nn.Linear(13, 5)
                >>> # This can be any optimizer supported by dygraph.
                >>> adam = paddle.optimizer.Adam(learning_rate = 0.01,
                ...                             parameters = linear.parameters())
                >>> out = linear(a)
                >>> out.backward()
                >>> adam.step()
                >>> adam.clear_grad()
        """
        if paddle.base.dygraph.base.in_to_static_mode():
            self._declarative_step()
            return

        if not isinstance(self._parameter_list[0], dict):
            params_grads = []
            for param in self._parameter_list:
                if param.stop_gradient:
                    continue
                if param._grad_ivar() is not None:
                    grad_var = param._grad_ivar()
                    if in_dygraph_mode():
                        if (
                            hasattr(grad_var, "is_selected_rows")
                            and grad_var.is_selected_rows()
                            and self.regularization is not None
                        ):
                            raise RuntimeError(
                                "Adam don't support weight_decay with sparse parameters, please set it to None."
                            )
                    else:
                        if (
                            hasattr(grad_var, "_is_sparse")
                            and grad_var._is_sparse()
                            and self.regularization is not None
                        ):
                            raise RuntimeError(
                                "Adam don't support weight_decay with sparse parameters, please set it to None."
                            )
                    params_grads.append((param, grad_var))

            optimize_ops = self._apply_optimize(
                loss=None,
                startup_program=None,
                params_grads=params_grads,
                param_group_idx=0,
            )
        else:
            # optimize parameters in groups
            for idx, param_group in enumerate(self._param_groups):
                params_grads = defaultdict(lambda: [])
                for param in param_group['params']:
                    if param.stop_gradient:
                        continue
                    if param._grad_ivar() is not None:
                        grad_var = param._grad_ivar()
                        params_grads['params'].append((param, grad_var))
                params_grads.update(
                    {k: v for k, v in param_group.items() if k != 'params'}
                )
                self._apply_optimize(
                    loss=None,
                    startup_program=None,
                    params_grads=params_grads,
                    param_group_idx=idx,
                )

    def _multi_tensor_init(self, target_block, parameters, param_group_idx):
        """
        All parameters used for optimizer calculations (such as parameters,
        master_weight, velocity_acc for momentum) are grouped into a python
        list by data type (bfloat16, float16, float32).
        This function will be overridden in the corresponding optimizer file.
        Args:
            target_block: the block in which the loss tensor is present
            parameters: list of parameter tensors for the optimizer
        """
        self._create_accumulators(target_block, parameters)
        for param in parameters:
            moment1 = self._get_accumulator_master(self._moment1_acc_str, param)
            moment2 = self._get_accumulator_master(self._moment2_acc_str, param)
            moment2_max = (
                self._get_accumulator_master(self._moment2_acc_max_str, param)
                if self._amsgrad
                else None
            )
            beta1_pow_acc = self._get_accumulator_master(
                self._beta1_pow_acc_str, param
            )
            beta2_pow_acc = self._get_accumulator_master(
                self._beta2_pow_acc_str, param
            )

            if param.dtype == paddle.float32:
                key = 'FP32_DenseTensor'
            elif self._is_dtype_fp16_or_bf16(param.dtype):
                key = 'FP16_DenseTensor'
            else:
                raise ValueError(
                    "Now multi_tensor_momentum only support fp32, fp16 or bf16 parameters and grad is DENSE_TENSOR."
                )

            self._param_dict[key][param_group_idx].append(param)
            self._moment1_dict[key][param_group_idx].append(moment1)
            self._moment2_dict[key][param_group_idx].append(moment2)
            if self._amsgrad:
                self._moment2_max_dict[key][param_group_idx].append(moment2_max)
            self._beta1_pow_acc_dict[key][param_group_idx].append(beta1_pow_acc)
            self._beta2_pow_acc_dict[key][param_group_idx].append(beta2_pow_acc)
            if key == 'FP16_DenseTensor':
                if self._multi_precision:
                    self._master_weight_dict[key][param_group_idx].append(
                        self._master_weights[param.name]
                    )
                else:
                    self._master_weight_dict[key] = None

    def _append_optimize_multi_tensor_op(
        self,
        target_block,
        parameters_and_grads,
        param_group_idx,
    ):
        """
        For Multi Tensor, append optimize merged_operator to block.
        """
        assert isinstance(target_block, (framework.Block, pir.Block))

        grad_dict = {'FP32_DenseTensor': [], 'FP16_DenseTensor': []}
        lr_dict = {'FP32_DenseTensor': [], 'FP16_DenseTensor': []}

        def _collect(key, param_and_grad):
            # Record the gradient and per-parameter learning rate in the
            # bucket that matches the parameter dtype.
            grad_dict[key].append(param_and_grad[1])
            lr_dict[key].append(self._create_param_lr(param_and_grad))

        def _dispatch(param_and_grad):
            # Sort one (param, grad) pair into the fp32 or fp16/bf16 bucket;
            # only dense gradients participate in the fused update.
            param, grad = param_and_grad[0], param_and_grad[1]
            if in_pir_mode():
                is_dense = grad.is_dense_tensor_type()
                is_fp32 = param.dtype == DataType.FLOAT32
            else:
                is_dense = grad.type == core.VarDesc.VarType.DENSE_TENSOR
                is_fp32 = param.dtype == paddle.float32
            if is_fp32 and is_dense:
                _collect('FP32_DenseTensor', param_and_grad)
            elif self._is_dtype_fp16_or_bf16(param.dtype) and is_dense:
                _collect('FP16_DenseTensor', param_and_grad)

        if isinstance(parameters_and_grads, list):
            if framework.in_dygraph_mode():
                params = [pair[0] for pair in parameters_and_grads]
                grads_types = core.eager.get_grads_types(params)
                for index, tp in enumerate(grads_types):
                    if tp == core.DataType.FLOAT32:
                        _collect(
                            'FP32_DenseTensor', parameters_and_grads[index]
                        )
                    elif tp in (
                        core.DataType.FLOAT16,
                        core.DataType.BFLOAT16,
                    ):
                        _collect(
                            'FP16_DenseTensor', parameters_and_grads[index]
                        )
            else:
                for param_and_grad in parameters_and_grads:
                    if (
                        param_and_grad[1] is not None
                        and param_and_grad[0].stop_gradient is False
                    ):
                        _dispatch(param_and_grad)
        else:
            for param_and_grad in parameters_and_grads['params']:
                if (
                    param_and_grad[1] is not None
                    and param_and_grad[0].stop_gradient is False
                ):
                    # Re-apply the per-group overrides before bucketing.
                    param_grad_dict = {}
                    param_grad_dict['params'] = param_and_grad
                    param_grad_dict.update(
                        {
                            k: v
                            for k, v in parameters_and_grads.items()
                            if k != 'params'
                        }
                    )
                    param_and_grad = self._update_param_group(param_grad_dict)
                    _dispatch(param_and_grad)

        multi_tensor_list = ['FP32_DenseTensor', 'FP16_DenseTensor']
        for key in multi_tensor_list:
            if len(self._param_dict[key][param_group_idx]) > 0:
                find_master = (
                    self._multi_precision and key == 'FP16_DenseTensor'
                )

                _beta1 = (
                    self._beta1
                    if not isinstance(self._beta1, Variable)
                    else self._beta1.item(0)
                )
                _beta2 = (
                    self._beta2
                    if not isinstance(self._beta2, Variable)
                    else self._beta2.item(0)
                )

                if in_pir_mode():
                    master_weight = self._master_weight_dict[key]
                    master_weight = (
                        master_weight[param_group_idx]
                        if master_weight is not None
                        else None
                    )
                    found_inf = self._get_auxiliary_var('found_inf')
                    if found_inf:
                        if isinstance(
                            found_inf, (core.eager.Tensor, paddle.pir.Value)
                        ):
                            self._set_auxiliary_var('found_inf', True)
                    else:
                        if isinstance(
                            found_inf, (core.eager.Tensor, paddle.pir.Value)
                        ):
                            self._set_auxiliary_var('found_inf', False)
                        _, _, _, _, _, _, _ = _C_ops.merged_adam_(
                            self._param_dict[key][param_group_idx],
                            grad_dict[key],
                            lr_dict[key],
                            self._moment1_dict[key][param_group_idx],
                            self._moment2_dict[key][param_group_idx],
                            (
                                self._moment2_max_dict[key][param_group_idx]
                                if self._amsgrad
                                else None
                            ),
                            self._beta1_pow_acc_dict[key][param_group_idx],
                            self._beta2_pow_acc_dict[key][param_group_idx],
                            master_weight,
                            _beta1,
                            _beta2,
                            self._epsilon,
                            find_master,
                            False,
                            self._amsgrad,
                        )
                elif in_dygraph_mode():
                    master_weight = self._master_weight_dict[key]
                    master_weight = (
                        master_weight[param_group_idx]
                        if master_weight is not None
                        else None
                    )
                    _, _, _, _, _, _, _ = _C_ops.merged_adam_(
                        self._param_dict[key][param_group_idx],
                        grad_dict[key],
                        lr_dict[key],
                        self._moment1_dict[key][param_group_idx],
                        self._moment2_dict[key][param_group_idx],
                        (
                            self._moment2_max_dict[key][param_group_idx]
                            if self._amsgrad
                            else None
                        ),
                        self._beta1_pow_acc_dict[key][param_group_idx],
                        self._beta2_pow_acc_dict[key][param_group_idx],
                        master_weight,
                        _beta1,
                        _beta2,
                        self._epsilon,
                        find_master,
                        False,
                        self._amsgrad,
                    )
                else:
                    inputs = {
                        "Param": self._param_dict[key][param_group_idx],
                        "Grad": grad_dict[key],
                        "LearningRate": lr_dict[key],
                        "Moment1": self._moment1_dict[key][param_group_idx],
                        "Moment2": self._moment2_dict[key][param_group_idx],
                        "Beta1Pow": self._beta1_pow_acc_dict[key][
                            param_group_idx
                        ],
                        "Beta2Pow": self._beta2_pow_acc_dict[key][
                            param_group_idx
                        ],
                    }
                    outputs = {
                        "ParamOut": self._param_dict[key][param_group_idx],
                        "Moment1Out": self._moment1_dict[key][param_group_idx],
                        "Moment2Out": self._moment2_dict[key][param_group_idx],
                        "Beta1PowOut": self._beta1_pow_acc_dict[key][
                            param_group_idx
                        ],
                        "Beta2PowOut": self._beta2_pow_acc_dict[key][
                            param_group_idx
                        ],
                    }
                    attrs = {
                        "epsilon": self._epsilon,
                        "beta1": _beta1,
                        "beta2": _beta2,
                    }
                    if self._amsgrad:
                        inputs["Moment2Max"] = self._moment2_max_dict[key][
                            param_group_idx
                        ]
                        outputs["Moment2MaxOut"] = self._moment2_max_dict[key][
                            param_group_idx
                        ]
                    if find_master:
                        inputs["MasterParam"] = self._master_weight_dict[key][
                            param_group_idx
                        ]
                        outputs["MasterParamOut"] = self._master_weight_dict[
                            key
                        ][param_group_idx]
                        attrs["multi_precision"] = find_master
                    target_block.append_op(
                        type="merged_adam",
                        inputs=inputs,
                        outputs=outputs,
                        attrs=attrs,
                        stop_gradient=True,
                    )

    def _update_param_group(self, parameters):
        self._beta1 = parameters.get('beta1', self._default_dict['beta1'])
        self._beta2 = parameters.get('beta2', self._default_dict['beta2'])
        self._epsilon = parameters.get(
            'epsilon', self._default_dict['epsilon']
        )
        self._lazy_mode = parameters.get(
            'lazy_mode', self._default_dict['lazy_mode']
        )
        parameters = parameters.get('params')
        return parameters