# main_auc.py
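"""Entry point for federated/distributed deep AUC maximization experiments.

Each launched process (or thread) seeds its RNGs, builds a partitioned
CIFAR-10 or MNIST loader and a DeepAUC model, and runs the algorithm
selected by --algorithm_to_run (the CDMA variants, Local SGDA+, CODASCA,
CODA+, Catalyst-Scaffold-S, or Extra-Step Local SGD).
"""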
import argparse

import numpy as np
import torch

from model import DeepAUC, FcNet, LeNet
from dataloader import Cifar10Loader, FedCifar10Loader, MNISTLoader, FedMNISTLoader
from algorithm import (CDMA_ONE_wrapper, CDMA_ADA_wrapper, CDMA_NC_wrapper,
                       Local_SGDA_Plus_wrapper, CODASCA_wrapper, CODA_Plus_wrapper,
                       Catalyst_Scaffold_S_wrapper, Extra_Step_Local_SGD_wrapper,
                       CODASCA_Threading_wrapper)
from comm import Communicator

torch.backends.cudnn.deterministic = True  # make cuDNN deterministic for reproducibility
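
# Typical launch (the argument values below are illustrative, not prescriptive);
# the torch.distributed launch utility supplies --local_rank to each process:
#   python -m torch.distributed.launch --nproc_per_node=4 main_auc.py \
#       --algorithm_to_run CDMA_ONE --dataset_name Cifar10 \
#       --num_partitions 4 --num_nodes 4 --num_rounds 100 \
#       --num_local_iterations 16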
# define input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--master_addr', type=str, default='127.0.0.1')
# --local_rank is provided by the launch utility in torch.distributed.launch
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--data_dir', type=str, default='./data/')
parser.add_argument('--output_dir', type=str, default='./data/results/')
parser.add_argument('--use_gpu', type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                    default=True)
parser.add_argument('--algorithm_to_run', type=str, default='CDMA_ONE')
parser.add_argument('--dataset_name', type=str, default='Cifar10')
parser.add_argument('--num_partitions', type=int)
parser.add_argument('--num_nodes', type=int)
parser.add_argument('--num_rounds', type=int)
parser.add_argument('--num_local_iterations', type=int)
parser.add_argument('--primal_step_size', type=float, default=0.1)
parser.add_argument('--dual_step_size', type=float, default=0.1)
parser.add_argument('--step_size_exp', type=float, default=0.3333)  # step size decay exponent
parser.add_argument('--primal_alpha', type=float, default=0.5)
parser.add_argument('--dual_alpha', type=float, default=0.5)
parser.add_argument('--alpha_exp', type=float, default=0.6667)  # alpha decay exponent
parser.add_argument('--resample_flag', type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                    default=False)
parser.add_argument('--print_freq', type=int, default=10)
parser.add_argument('--train_batch_size', type=int, default=32)
parser.add_argument('--test_batch_size', type=int, default=32)
parser.add_argument('--pretrained', type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
                    default=True)
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--random_seed_id', type=int, default=1234)
parser.add_argument('--sort_by', type=str, default=None)
parser.add_argument('--similarity', type=float, default=0.1)
parser.add_argument('--max_machine_drop_ratio', type=float, default=0.0)
# the following arguments are required by CODASCA/Catalyst_Scaffold_S
parser.add_argument('--local_step_size', type=float, default=1.0)
parser.add_argument('--global_step_size', type=float, default=1.0)
parser.add_argument('--algorithm_reg_coef', type=float, default=1.0)
parser.add_argument('--T0', type=int, default=2000)
args = parser.parse_args()
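
# unpack the parsed arguments into module-level names so thread_main can read them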
MAIN_ADDR = args.master_addr
LOCAL_PROCESS_RANK = args.local_rank
DATA_DIR = args.data_dir
OUTPUT_DIR = args.output_dir
USE_GPU = args.use_gpu
algorithm_to_run = args.algorithm_to_run
num_partitions = args.num_partitions
num_nodes = args.num_nodes
num_rounds = args.num_rounds
num_local_iterations = args.num_local_iterations
print_freq = args.print_freq
primal_step_size = args.primal_step_size
dual_step_size = args.dual_step_size
step_size_exp = args.step_size_exp
primal_alpha = args.primal_alpha
dual_alpha = args.dual_alpha
alpha_exp = args.alpha_exp
train_batch_size = args.train_batch_size
test_batch_size = args.test_batch_size
num_threads = args.num_threads  # >1 oversubscribes a process with worker threads
random_seed_id = args.random_seed_id
dataset_name = args.dataset_name
sort_by = args.sort_by
similarity = args.similarity
local_step_size = args.local_step_size
global_step_size = args.global_step_size
algorithm_reg_coef = args.algorithm_reg_coef
T0 = args.T0
resample_flag = args.resample_flag
max_machine_drop_ratio = args.max_machine_drop_ratio
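
# thread_main is executed by each worker thread started by the Communicator:
# it seeds the RNGs, builds the data partition and the model, runs the
# selected algorithm, and then releases the communicator.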
def thread_main():
    communicator.acquire()
    torch.manual_seed(random_seed_id)
    torch.cuda.manual_seed(random_seed_id)
    np.random.seed(random_seed_id)
    # Step 2. prepare the dataset; all clients share the same random seed,
    # and in each round every client samples a subset of its data
    net = None
    data_loader = None
    if dataset_name == 'Cifar10':
        net = FcNet()
        # FedCifar10Loader saves space; Cifar10Loader saves time
        if algorithm_to_run == 'CODASCA' or num_nodes == num_partitions:
            data_loader = FedCifar10Loader(num_partitions, DATA_DIR,
                                           train_batch_size, test_batch_size,
                                           device=device, sort_by=sort_by,
                                           similarity=similarity)
        else:
            data_loader = Cifar10Loader(num_partitions, DATA_DIR,
                                        train_batch_size, test_batch_size,
                                        device=device, sort_by=sort_by,
                                        similarity=similarity)
    elif dataset_name == 'MNIST':
        # build the network under a fixed seed so every client starts from
        # identical weights, then restore the previous RNG state
        numpy_state = np.random.get_state()
        torch_state = torch.get_rng_state()
        if USE_GPU:
            torch_cuda_state = torch.cuda.get_rng_state()
        torch.manual_seed(1234)
        torch.cuda.manual_seed(1234)
        np.random.seed(1234)
        net = LeNet()
        np.random.set_state(numpy_state)
        torch.set_rng_state(torch_state)
        if USE_GPU:
            torch.cuda.set_rng_state(torch_cuda_state)
        if algorithm_to_run == 'CODASCA' or num_nodes == num_partitions:
            data_loader = FedMNISTLoader(num_partitions, DATA_DIR,
                                         train_batch_size, test_batch_size,
                                         device=device, sort_by=sort_by,
                                         similarity=similarity)
        else:
            data_loader = MNISTLoader(num_partitions, DATA_DIR,
                                      train_batch_size, test_batch_size,
                                      device=device, sort_by=sort_by,
                                      similarity=similarity)
    else:
        raise ValueError(
            'The "{:s}" dataset is not supported yet.'.format(dataset_name))
    # Step 3. initialize the model; DeepAUC wraps the network for AUC
    # maximization using the positive-class ratio reported by the loader
    net = net.to(device)
    model = DeepAUC(net, data_loader.get_pos_ratio(), device)
    # Step 4. run the selected algorithm
    if algorithm_to_run == 'CDMA_ONE':
        CDMA_ONE_wrapper(model, communicator, data_loader,
                         num_partitions, num_nodes, num_rounds, num_local_iterations,
                         primal_step_size, dual_step_size, train_batch_size,
                         resample_flag=resample_flag,
                         max_machine_drop_ratio=max_machine_drop_ratio,
                         device=device,
                         print_freq=print_freq, OUTPUT_DIR=OUTPUT_DIR)
    elif algorithm_to_run == 'CDMA_ADA':
        CDMA_ADA_wrapper(model, communicator, data_loader,
                         num_partitions, num_nodes, num_rounds, num_local_iterations,
                         primal_step_size, dual_step_size, step_size_exp,
                         primal_alpha, dual_alpha, alpha_exp,
                         train_batch_size,
                         resample_flag=resample_flag,
                         max_machine_drop_ratio=max_machine_drop_ratio,
                         device=device, print_freq=print_freq, OUTPUT_DIR=OUTPUT_DIR)
    elif algorithm_to_run == 'CDMA_NC':
        CDMA_NC_wrapper(model, communicator, data_loader,
                        num_partitions, num_nodes, num_rounds, num_local_iterations,
                        primal_step_size, dual_step_size, train_batch_size,
                        max_machine_drop_ratio=max_machine_drop_ratio,
                        device=device, print_freq=print_freq, OUTPUT_DIR=OUTPUT_DIR)
    elif algorithm_to_run == 'Local_SGDA_Plus':
        Local_SGDA_Plus_wrapper(model, communicator, data_loader,
                                num_partitions, num_nodes, num_rounds, num_local_iterations,
                                primal_step_size, dual_step_size, train_batch_size,
                                max_machine_drop_ratio=max_machine_drop_ratio,
                                device=device, print_freq=print_freq, OUTPUT_DIR=OUTPUT_DIR)
    elif algorithm_to_run == 'CODASCA':
        # with more than one thread per process, use the threading variant
        if num_threads != 1:
            CODASCA_Threading_wrapper(model, communicator, data_loader,
                                      num_partitions, num_nodes, num_rounds,
                                      num_local_iterations, T0, local_step_size,
                                      global_step_size, algorithm_reg_coef,
                                      train_batch_size,
                                      max_machine_drop_ratio=max_machine_drop_ratio,
                                      device=device, print_freq=print_freq,
                                      OUTPUT_DIR=OUTPUT_DIR)
        else:
            CODASCA_wrapper(model, communicator, data_loader,
                            num_partitions, num_nodes, num_rounds,
                            num_local_iterations, T0, local_step_size,
                            global_step_size, algorithm_reg_coef, train_batch_size,
                            max_machine_drop_ratio=max_machine_drop_ratio,
                            device=device, print_freq=print_freq, OUTPUT_DIR=OUTPUT_DIR)
    elif algorithm_to_run == 'CODA_Plus':
        CODA_Plus_wrapper(model, communicator, data_loader,
                          num_partitions, num_nodes, num_rounds, num_local_iterations,
                          T0, local_step_size, algorithm_reg_coef, train_batch_size,
                          max_machine_drop_ratio=max_machine_drop_ratio,
                          device=device, print_freq=print_freq, OUTPUT_DIR=OUTPUT_DIR)
    elif algorithm_to_run == 'Catalyst_Scaffold_S':
        Catalyst_Scaffold_S_wrapper(model, communicator, data_loader,
                                    num_partitions, num_nodes, num_rounds,
                                    num_local_iterations, T0, local_step_size,
                                    global_step_size, algorithm_reg_coef,
                                    train_batch_size, resample_flag=resample_flag,
                                    max_machine_drop_ratio=max_machine_drop_ratio,
                                    device=device, print_freq=print_freq,
                                    OUTPUT_DIR=OUTPUT_DIR)
    elif algorithm_to_run == 'Extra_Step_Local_SGD':
        Extra_Step_Local_SGD_wrapper(model, communicator, data_loader,
                                     num_partitions, num_nodes, num_rounds,
                                     num_local_iterations, local_step_size,
                                     train_batch_size,
                                     max_machine_drop_ratio=max_machine_drop_ratio,
                                     device=device, print_freq=print_freq,
                                     OUTPUT_DIR=OUTPUT_DIR)
    else:
        raise ValueError(
            'The value of algorithm_to_run, "{}", is invalid.'.format(algorithm_to_run))
    communicator.release()
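
# select the device and the torch.distributed backend for this process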
device = "cpu"
backend = 'gloo'
if USE_GPU is True:
if torch.cuda.is_available():
backend = 'nccl'
device = torch.device("cuda:{}".format(LOCAL_PROCESS_RANK))
else:
print("[Warning] no cuda device available, use cpu instead")
# Step 1. initialize the distributed environment and the communicator
requires_thread_coordinator = False
if algorithm_to_run == 'CODASCA' and num_threads != 1:
requires_thread_coordinator = True
communicator = Communicator(
device, MAIN_ADDR, target=thread_main, backend=backend, thread_num=num_threads, requires_thread_coordinator=requires_thread_coordinator)
communicator.threads_start()
communicator.threads_join()