-
-
Notifications
You must be signed in to change notification settings - Fork 768
Expand file tree
/
Copy pathgpu_mapping.yaml
More file actions
61 lines (49 loc) · 2.45 KB
/
gpu_mapping.yaml
File metadata and controls
61 lines (49 loc) · 2.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# Please check "GPU_MAPPING.md" to see how to define the topology
# You can define a cluster spanning multiple machines (each with multiple GPUs) by defining `gpu_mapping.yaml` as follows:
# config_cluster0:
# host_name_node0: [num_of_processes_on_GPU0, num_of_processes_on_GPU1, num_of_processes_on_GPU2, num_of_processes_on_GPU3, ..., num_of_processes_on_GPU_n]
# host_name_node1: [num_of_processes_on_GPU0, num_of_processes_on_GPU1, num_of_processes_on_GPU2, num_of_processes_on_GPU3, ..., num_of_processes_on_GPU_n]
# host_name_node_m: [num_of_processes_on_GPU0, num_of_processes_on_GPU1, num_of_processes_on_GPU2, num_of_processes_on_GPU3, ..., num_of_processes_on_GPU_n]
# 10 clients + 1 server on a single machine with 4 GPUs
# (per-GPU process counts sum to 3 + 3 + 3 + 2 = 11).
mapping_default:
  ChaoyangHe-GPU-RTX2080Tix4: [3, 3, 3, 2]
# 4 clients + 1 server on a single machine with 4 GPUs
# (2 + 1 + 1 + 1 = 5 processes).
mapping_config1_5:
  host1: [2, 1, 1, 1]
# 10 clients + 1 server on a single machine with 4 GPUs
# (3 + 3 + 3 + 2 = 11 processes).
mapping_config2_11:
  host1: [3, 3, 3, 2]
# 10 clients + 1 server on a single machine with 8 GPUs
# (2 + 2 + 2 + 1 + 1 + 1 + 1 + 1 = 11 processes).
mapping_config3_11:
  host1: [2, 2, 2, 1, 1, 1, 1, 1]
# 4 clients + 1 server on a single machine with 8 GPUs, skipping
# selected devices: a 0 entry assigns no process to that GPU
# (1 + 0 + 0 + 1 + 1 + 0 + 1 + 1 = 5 processes).
mapping_config4_5:
  host1: [1, 0, 0, 1, 1, 0, 1, 1]
# 4 clients + 1 server across 5 machines, each with 2 GPUs; only the
# second GPU (device index 1) carries a process on each machine
# (1 process x 5 hosts = 5 processes).
# NOTE(review): the original comment said 6 machines, but only 5 hosts
# are defined here — confirm whether a 6th host is missing or the
# comment (and the `_6` name suffix) is stale.
mapping_config5_6:
  host1: [0, 1]
  host2: [0, 1]
  host3: [0, 1]
  host4: [0, 1]
  host5: [0, 1]
# 4 clients + 1 server across 2 machines, each with 2 GPUs; both GPUs
# carry processes on each machine (2 + 3 = 5 processes).
# (Original comment claimed only the second GPU is used, which the
# per-GPU counts below contradict.)
mapping_config5_2:
  gpu-worker2: [1, 1]
  gpu-worker1: [2, 1]
# 10 clients + 1 server across 4 machines, each with 2 GPUs; both GPUs
# carry processes on each machine (2 + 3 + 4 + 2 = 11 processes).
# (Original comment claimed only the second GPU is used, which the
# per-GPU counts below contradict.)
mapping_config5_4:
  gpu-worker2: [1, 1]
  gpu-worker1: [2, 1]
  gpu-worker3: [3, 1]
  gpu-worker4: [1, 1]
# gRPC GPU mapping: one server process on its own node, plus two worker
# nodes that each place 1 process on GPU 0 (of 4).
mapping_FedML_gRPC:
  hostname_node_server: [1]
  hostname_node_1: [1, 0, 0, 0]
  hostname_node_2: [1, 0, 0, 0]
# torch RPC GPU mapping: 11 processes split across two 8-GPU servers,
# each server using only half of its GPUs (6 + 5 = 11 processes).
mapping_FedML_tRPC:
  lambda-server1: [0, 0, 0, 0, 2, 2, 1, 1]
  lambda-server2: [2, 1, 1, 1, 0, 0, 0, 0]
# Alternative single-server layout, kept for reference:
# mapping_FedML_tRPC:
#   lambda-server1: [0, 0, 0, 0, 3, 3, 3, 2]