@@ -48,14 +48,15 @@ Fleet 分布式高层 API
4848 :header: "API 名称", "API 功能"
4949 :widths: 20, 50
5050
51-
51+ " :ref: `is_available <cn_api_distributed_is_available >` ", "检查分布式环境是否可用"
5252 " :ref: `init_parallel_env <cn_api_distributed_init_parallel_env >` ", "初始化并行训练环境,支持动态图模式"
5353 " :ref: `launch <cn_api_distributed_launch >` ", "启动分布式训练进程,支持集合通信及参数服务器架构"
5454 " :ref: `spawn <cn_api_distributed_spawn >` ", "启动分布式训练进程,仅支持集合通信架构"
5555 " :ref: `get_rank <cn_api_distributed_get_rank >` ", "获取当前进程的 rank 值"
5656 " :ref: `get_world_size <cn_api_distributed_get_world_size >` ", "获取当前进程数"
5757 " :ref: `new_group <cn_api_distributed_new_group >` ", "创建分布式通信组"
5858 " :ref: `destroy_process_group <cn_api_distributed_destroy_process_group >` ", "销毁分布式通信组"
59+ " :ref: `get_backend <cn_api_distributed_get_backend >` ", "获取指定分布式通信组后端的名称"
5960
6061.. _03 :
6162
@@ -89,7 +90,9 @@ Fleet 分布式高层 API
8990 " :ref: `alltoall <cn_api_distributed_alltoall >` ", "将一组 tensor 分发到每个进程并进行聚合"
9091 " :ref: `alltoall_single <cn_api_distributed_alltoall_single >` ", "将一个 tensor 分发到每个进程并进行聚合"
9192 " :ref: `broadcast <cn_api_distributed_broadcast >` ", "将一个 tensor 发送到每个进程"
93+ " :ref: `broadcast_object_list <cn_api_distributed_broadcast_object_list >` ", "将一组 object 发送到每个进程"
9294 " :ref: `scatter <cn_api_distributed_scatter >` ", "将一组 tensor 分发到每个进程"
95+ " :ref: `scatter_object_list <cn_api_distributed_scatter_object_list >` ", "将一组 object 分发到每个进程"
9396 " :ref: `reduce_scatter <cn_api_distributed_reduce_scatter >` ", "规约一组 tensor,随后将规约结果分发到每个进程"
9497 " :ref: `isend <cn_api_distributed_isend >` ", "异步发送一个 tensor 到指定进程"
9598 " :ref: `irecv <cn_api_distributed_irecv >` ", "异步接收一个来自指定进程的 tensor"
0 commit comments