
    ΑiY                        S SK Jr  S SKJrJr  S SKrS SKrS SKJr  S SK	J
r
  SSKJrJr  \(       a  S SKJr  S S	KJr  S S
KJr  \" S5      r  S         SS jjr S       SS jjrg)    )annotations)TYPE_CHECKINGTypeVarN)	framework)stream   )convert_object_to_tensorconvert_tensor_to_object)Tensor)task)Group_Tc                0    [         R                  " XX#5      $ )aJ  

Gather tensors from all participators and all get the result. As shown
below, one process is started with a GPU and the data of this process is represented
by its group rank. Through the all_gather operator, each GPU will have data
from all GPUs.

.. image:: https://githubraw.cdn.bcebos.com/PaddlePaddle/docs/develop/docs/api/paddle/distributed/img/allgather.png
    :width: 800
    :alt: all_gather
    :align: center

Args:
    tensor_list (list): A list of output Tensors. Every element in the list must be a Tensor whose data type
        should be float16, float32, float64, int32, int64, int8, uint8, bool, bfloat16, complex64 or complex128.
    tensor (Tensor): The Tensor to send. Its data type
        should be float16, float32, float64, int32, int64, int8, uint8, bool, bfloat16, complex64 or complex128.
    group (Group|None, optional): The group instance return by new_group or None for global default group.
    sync_op (bool, optional): Whether this op is a sync op. The default value is True.

Returns:
    None.

Examples:
    .. code-block:: python

        >>> # doctest: +REQUIRES(env: DISTRIBUTED)
        >>> import paddle
        >>> import paddle.distributed as dist

        >>> dist.init_parallel_env()
        >>> tensor_list = [] # type: ignore
        >>> if dist.get_rank() == 0:
        ...     data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
        >>> else:
        ...     data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
        >>> dist.all_gather(tensor_list, data)
        >>> print(tensor_list)
        >>> # [[[4, 5, 6], [4, 5, 6]], [[1, 2, 3], [1, 2, 3]]] (2 GPUs)
)r   
def all_gather_object(
    object_list: "list[_T]",
    obj: "_T",
    group: "Group | None" = None,
) -> None:
    """
    Gather picklable objects from all participators and all get the result. Similar to all_gather(), but python object can be passed in.

    Args:
        object_list (list): A list of output object. The datatype of every element in the list is same as the input obj.
        obj (Any): The picklable object to send.
        group (Group): The group instance return by new_group or None for global default group.

    Returns:
        None.

    Warning:
        This API only supports the dygraph mode.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> object_list = [] # type: ignore
            >>> if dist.get_rank() == 0:
            ...     obj = {"foo": [1, 2, 3]}
            ... else:
            ...     obj = {"bar": [4, 5, 6]}
            >>> dist.all_gather_object(object_list, obj)
            >>> print(object_list)
            >>> # [{'foo': [1, 2, 3]}, {'bar': [4, 5, 6]}] (2 GPUs)
    """
    assert (
        framework.in_dynamic_mode()
    ), "all_gather_object doesn't support static graph mode."

    # Serialize the object into a 1-D tensor plus its byte length.
    tensor, len_of_tensor = convert_object_to_tensor(obj)

    # Gather every rank's serialized length so each rank knows how much of
    # the padded buffer below is real payload.
    list_len_of_tensor = []
    all_gather(list_len_of_tensor, len_of_tensor, group)
    max_len_of_tensor = int(max(list_len_of_tensor).item())

    # all_gather requires identical shapes on every rank, so pad (via
    # np.resize) each rank's buffer up to the longest one before gathering.
    numpy_data = tensor.numpy()
    numpy_data = np.resize(numpy_data, [max_len_of_tensor])
    input_tensor = paddle.to_tensor(numpy_data)

    tensor_list = []
    all_gather(tensor_list, input_tensor, group)
    # Deserialize each rank's buffer, truncating the padding with the true
    # per-rank length gathered above.
    for i, tensor in enumerate(tensor_list):
        object_list.append(
            convert_tensor_to_object(tensor, list_len_of_tensor[i])
        )
# Module-level setup for paddle.distributed.communication.all_gather.
# NOTE(review): reconstructed from the compiled module's name tables; the
# __future__ import must remain the first statement of the file.
from __future__ import annotations

from typing import TYPE_CHECKING, TypeVar

import numpy as np

import paddle
from paddle import framework
from paddle.distributed.communication import stream

from .serialization_utils import (
    convert_object_to_tensor,
    convert_tensor_to_object,
)

# Typing-only imports: used in annotations, never needed at runtime.
if TYPE_CHECKING:
    from paddle import Tensor
    from paddle.base.core import task
    from paddle.distributed.communication.group import Group

# Element type of the objects gathered by all_gather_object.
_T = TypeVar("_T")