from __future__ import annotations

import warnings
from typing import TYPE_CHECKING

import paddle
import paddle.distributed as dist
from paddle.distributed import framework
from paddle.distributed.communication.group import (
    _get_global_group,
    _get_or_throw_group_rank,
    _warn_cur_rank_not_in_group,
)

if TYPE_CHECKING:
    from collections.abc import Sequence

    from paddle import Tensor
    from paddle.base.core import task
    from paddle.distributed.communication.group import Group


def _gather_in_dygraph(
    tensor, gather_list, dst_rank_in_group, group, sync_op, use_calc_stream
):
    nranks = group.nranks
    if group.rank == dst_rank_in_group:
        # The dst rank receives one tensor per rank; pre-allocate buffers
        # when the caller passed an empty list.
        if len(gather_list) == 0:
            gather_list += [paddle.empty_like(tensor) for _ in range(nranks)]
    else:
        # Non-dst ranks only need placeholders of the correct length.
        gather_list = [tensor for _ in range(nranks)]

    assert (
        len(gather_list) == nranks
    ), f"gather_list length {len(gather_list)} and nranks {nranks} not equal"

    task = group.process_group.gather(
        tensor, gather_list, dst_rank_in_group, sync_op, use_calc_stream
    )
    if sync_op:
        task.wait()

    return task


def gather(
    tensor: Tensor,
    gather_list: Sequence[Tensor] | None = None,
    dst: int = 0,
    group: Group | None = None,
    sync_op: bool = True,
    use_calc_stream: bool = False,
) -> task | None:
    """

    Gather tensors from all participators.

    Args:
        tensor (Tensor): The input Tensor. Its data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        gather_list (list|None): A list of Tensors to hold the gathered tensors. Every element in the list must be a Tensor whose data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16. Default value is None.
        dst (int): The destination rank id. Default value is 0.
        group (Group|None, optional): The group instance returned by new_group, or None for the global default group.
        sync_op (bool, optional): Whether this op is a sync op. The default value is True.
        use_calc_stream (bool, optional): Indicate whether the communication is done on the calculation stream. The default value is False. This
            option is designed for high performance demand; be careful turning it on unless you clearly understand its meaning.

    Returns:
        Async work handle, which can be waited on, if sync_op is set to False.
        None, if the current rank is not in the group.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> gather_list = []  # type: ignore[var-annotated]
            >>> if dist.get_rank() == 0:
            ...     data = paddle.to_tensor([1, 2, 3])
            ...     dist.stream.gather(data, gather_list, dst=0)
            ... else:
            ...     data = paddle.to_tensor([4, 5, 6])
            ...     dist.stream.gather(data, gather_list, dst=0)
            >>> print(gather_list)
            >>> # [[1, 2, 3], [4, 5, 6]] (2 GPUs, out for rank 0)
            >>> # [] (2 GPUs, out for rank 1)
    """
    assert (
        framework.in_dynamic_mode()
    ), "gather doesn't support static graph mode yet."

    if _warn_cur_rank_not_in_group(group):
        return

    if not sync_op and use_calc_stream:
        raise RuntimeError(
            "use_calc_stream can only be true in sync op behavior."
        )

    if dst != dist.get_rank():
        if gather_list is not None:
            warnings.warn(
                "Specific `gather_list` is meaningless for rank which is not dst."
            )
        gather_list = []
    else:
        assert (
            gather_list is not None
        ), "gather_list must not be none for dst rank"

    group = _get_global_group() if group is None else group
    dst_rank_in_group = _get_or_throw_group_rank(dst, group)

    return _gather_in_dygraph(
        tensor,
        gather_list,
        dst_rank_in_group,
        group,
        sync_op,
        use_calc_stream,
    )
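

# Illustrative usage sketch, not part of the Paddle module itself: a minimal
# way to exercise the `gather` defined above, assuming a multi-process job
# started with a distributed launcher (e.g. `python -m paddle.distributed.launch`).
# The tensor values and the choice of dst=0 are arbitrary illustrations.
if __name__ == "__main__":
    dist.init_parallel_env()

    rank = dist.get_rank()
    data = paddle.to_tensor([rank, rank + 1, rank + 2])

    # Only the dst rank passes a real gather_list; the other ranks pass None.
    out = [] if rank == 0 else None
    gather(data, out, dst=0, sync_op=True)

    if rank == 0:
        print([t.numpy().tolist() for t in out])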