-
Notifications
You must be signed in to change notification settings - Fork 134
/
config.py
513 lines (464 loc) · 27.1 KB
/
config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
import configparser
import os
import numpy as np
import torch as th
from .utils.activation import act_dict
class Config(object):
    """Hyper-parameter container for OpenHGNN models.

    Reads the .ini section named after *model* from *file_path* and exposes
    every option of that section as an attribute on the instance.  Also
    resolves the torch device from *gpu*.

    Parameters
    ----------
    file_path : str
        Path to the .ini configuration file.
    model : str
        Model name; selects which section of the config file is loaded.
    dataset : str
        Dataset name (stored as-is).
    task : str
        Task name (stored as-is).
    gpu : int
        -1 for CPU, otherwise the CUDA device index.

    Raises
    ------
    ValueError
        If a CUDA device is requested but CUDA is unavailable, or *gpu*
        is neither -1 nor a non-negative index.
    """

    # Single-character type codes used by _read():
    #   i -> getint, f -> getfloat, b -> getboolean, s -> get (raw string)
    _GETTERS = {'i': 'getint', 'f': 'getfloat', 'b': 'getboolean', 's': 'get'}

    def __init__(self, file_path, model, dataset, task, gpu):
        conf = configparser.ConfigParser()
        if gpu == -1:
            self.device = th.device('cpu')
        elif gpu >= 0:
            if not th.cuda.is_available():
                raise ValueError("cuda is not available, please set gpu to -1")
            self.device = th.device('cuda', int(gpu))
        else:
            # Previously an invalid index left self.device unset and callers
            # crashed later with AttributeError; fail fast instead.
            raise ValueError(
                "gpu must be -1 (cpu) or a non-negative device index, got {}".format(gpu))
        try:
            conf.read(file_path)
        except configparser.Error as err:
            # Best effort, as before: keep going with the defaults below,
            # but say what actually went wrong instead of a bare "failed!".
            print("Failed to parse config file {}: {}".format(file_path, err))

        # Defaults; overridden by the model section where applicable.
        self.seed = 0
        self.patience = 1
        self.max_epoch = 1
        self.task = task
        self.model = model
        self.dataset = dataset
        self.output_dir = './openhgnn/output/{}'.format(self.model)
        self.optimizer = 'Adam'

        if model == "NSHE":
            self.dim_size = {
                'emd': conf.getint("NSHE", "emd_dim"),
                'context': conf.getint("NSHE", "context_dim"),
                'project': conf.getint("NSHE", "project_dim"),
            }
            self._read(conf, "NSHE", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'beta': 'f',
                'seed': 'i', 'max_epoch': 'i', 'patience': 'i',
                'num_e_neg': 'i', 'num_ns_neg': 'i',
                # NOTE: for NSHE this flag is stored as a raw string (not a
                # bool), matching the original behaviour.
                'norm_emd_flag': 's',
                'mini_batch_flag': 'b',
            })
            np.random.seed(self.seed)
        elif model == "GTN":
            self._load_gtn_like(conf, "GTN")
        elif model == "MHNF":
            self._load_gtn_like(conf, "MHNF")
        elif model == "RSHN":
            self._read(conf, "RSHN", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f',
                'dropout': 'f', 'seed': 'i', 'hidden_dim': 'i',
                'max_epoch': 'i', 'rw_len': 'i', 'batch_size': 'i',
                'num_node_layer': 'i', 'num_edge_layer': 'i', 'patience': 'i',
                'validation': 'b', 'mini_batch_flag': 'b',
            })
        elif model == 'RGCN':
            self._read(conf, "RGCN", {
                'lr': ('f', 'learning_rate'), 'dropout': 'f', 'in_dim': 'i',
                'hidden_dim': 'i', 'n_bases': 'i', 'n_layers': 'i',
                'max_epoch': 'i', 'weight_decay': 'f', 'seed': 'i',
                'fanout': 'i', 'patience': 'i', 'batch_size': 'i',
                'validation': 'b', 'mini_batch_flag': 'b', 'use_self_loop': 'b',
            })
        elif model == 'CompGCN':
            self._read(conf, "CompGCN", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f',
                'dropout': 'f', 'in_dim': 'i', 'hidden_dim': 'i',
                'out_dim': 'i', 'n_layers': 'i', 'max_epoch': 'i', 'seed': 'i',
                'patience': 'i', 'comp_fn': 's', 'mini_batch_flag': 'b',
                'validation': 'b',
            })
        elif model == 'HetGNN':
            self._read(conf, "HetGNN", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f',
                'max_epoch': 'i', 'dim': 'i', 'batch_size': 'i',
                'window_size': 'i', 'num_workers': 'i',
                'batches_per_epoch': 'i', 'seed': 'i', 'patience': 'i',
                'rw_length': 'i', 'rw_walks': 'i', 'rwr_prob': 'f',
                'mini_batch_flag': 'b',
            })
        elif model in ('Metapath2vec', 'HERec'):
            # The two sections have an identical layout.
            self._read(conf, model, {
                'lr': ('f', 'learning_rate'), 'max_epoch': 'i', 'dim': 'i',
                'batch_size': 'i', 'window_size': 'i', 'num_workers': 'i',
                'neg_size': 'i', 'rw_length': 'i', 'rw_walks': 'i',
                'meta_path_key': 's',
            })
        elif model == 'HAN':
            self._read(conf, "HAN", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'seed': 'i',
                'dropout': 'f', 'hidden_dim': 'i', 'out_dim': 'i',
                'patience': 'i', 'max_epoch': 'i', 'mini_batch_flag': 'b',
            })
            # num_heads is a '-'-separated list, e.g. "8-8" -> [8, 8]
            self.num_heads = [int(h) for h in conf.get('HAN', 'num_heads').split('-')]
        elif model == 'NARS':
            self._read(conf, "NARS", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'seed': 'i',
                'dropout': 'f', 'hidden_dim': 'i', 'out_dim': 'i',
                'num_hops': 'i', 'max_epoch': 'i', 'mini_batch_flag': 'b',
                'R': 'i', 'cpu_preprocess': 'b', 'input_dropout': 'b',
                'ff_layer': 'i',
            })
            # BUG FIX: patience used to be read from the HAN section.  Prefer
            # the NARS section; fall back to HAN for old config files.
            if conf.has_option('NARS', 'patience'):
                self.patience = conf.getint('NARS', 'patience')
            else:
                self.patience = conf.getint('HAN', 'patience')
            self.num_heads = [int(h) for h in conf.get('NARS', 'num_heads').split('-')]
        elif model == 'MAGNN':
            self._load_magnn(conf)
        elif model == 'RHGNN':
            self._read(conf, "RHGNN", {
                'lr': ('f', 'learning_rate'), 'num_heads': 'i',
                'hidden_dim': 'i', 'relation_hidden_units': 'i',
                'drop_out': 'f', 'n_layers': 'i', 'residual': 'b',
                'batch_size': 'i', 'node_neighbors_min_num': 'i',
                'weight_decay': 'f', 'max_epoch': 'i', 'patience': 'i',
                'mini_batch_flag': 'b', 'negative_slope': 'f', 'norm': 'b',
                'dropout': 'f', 'n_heads': 'i', 'category': 's', 'out_dim': 'i',
            })
        elif model == 'HGNN_AC':
            self._read(conf, "HGNN_AC", {
                'feats_drop_rate': 'f', 'attn_vec_dim': 'i', 'feats_opt': 's',
                'loss_lambda': 'f', 'src_node_type': 'i', 'HIN': 's',
            })
            # HGNN_AC wraps a backbone HIN model; load that model's options.
            if self.HIN == "MAGNN":
                self._load_magnn(conf)
            elif self.HIN == "GTN":
                self._load_gtn_like(conf, "GTN")
                self.dropout = conf.getfloat("HGNN_AC", "dropout")
                self.num_heads = conf.getint('HGNN_AC', 'num_heads')
            elif self.HIN == "MHNF":
                self._load_gtn_like(conf, "MHNF")
                # MHNF backbone uses fixed values, not config options.
                self.dropout = 0.2
                self.num_heads = 8
        elif model == 'HGT':
            self._read(conf, "HGT", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'seed': 'i',
                'dropout': 'f', 'batch_size': 'i', 'hidden_dim': 'i',
                'out_dim': 'i', 'num_heads': 'i', 'patience': 'i',
                'max_epoch': 'i', 'num_workers': 'i', 'mini_batch_flag': 'b',
                'n_layers': 'i',
            })
        elif model == 'HeCo':
            self._read(conf, "HeCo", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'seed': 'i',
                'hidden_dim': 'i', 'patience': 'i', 'max_epoch': 'i',
                'mini_batch_flag': 'b', 'feat_drop': 'f', 'attn_drop': 'f',
                'eva_lr': 'f', 'eva_wd': 'f', 'tau': 'f', 'lam': 'f',
            })
            # sample_rate is encoded as "ntype1-k1_ntype2-k2_..."
            self.sample_rate = {}
            for pair in conf.get('HeCo', 'sample_rate').split('_'):
                parts = pair.split('-')
                self.sample_rate[parts[0]] = int(parts[1])
        elif model == 'DMGI':
            self._read(conf, "DMGI", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'sc': 'i',
                'seed': 'i', 'sup_coef': 'f', 'reg_coef': 'f', 'dropout': 'f',
                'hidden_dim': 'i', 'num_heads': 'i', 'patience': 'i',
                'max_epoch': 'i', 'isSemi': 'b', 'isBias': 'b', 'isAttn': 'b',
            })
        elif model == 'SLiCE':
            self._read(conf, 'SLiCE', {
                'data_name': 's', 'num_walks_per_node': 'i', 'beam_width': 'i',
                'max_length': 'i', 'walk_type': 's', 'batch_size': 'i',
                'outdir': 's', 'n_pred': 'i', 'max_pred': 'i', 'lr': 'f',
                'n_epochs': 'i', 'get_bert_encoder_embeddings': 'b',
                'checkpoint': 'i', 'path_option': 's', 'ft_batch_size': 'i',
                'd_model': 'i', 'ft_d_ff': 'i', 'ft_layer': 's',
                'ft_drop_rate': 'f', 'ft_input_option': 's', 'n_layers': 'i',
                'ft_lr': 'f', 'ft_n_epochs': 'i', 'ft_checkpoint': 'i',
                'pretrained_embeddings': 's',
            })
        elif model == 'HPN':
            self._read(conf, "HPN", {
                'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'seed': 'i',
                'dropout': 'f', 'hidden_dim': 'i', 'k_layer': 'i',
                'alpha': 'f', 'edge_drop': 'f', 'patience': 'i',
                'max_epoch': 'i', 'mini_batch_flag': 'b',
            })
        elif model == 'KGCN':
            self._read(conf, "KGCN", {
                'weight_decay': 'f', 'batch_size': 'i', 'in_dim': 'i',
                'out_dim': 'i', 'lr': 'f', 'n_neighbor': 'i',
                'n_relation': 'i', 'aggregate': 's', 'n_user': 'i',
                'epoch_iter': 'i',
            })
            # BUG FIX: n_item used to read the n_relation option.  Prefer a
            # real n_item option; fall back to n_relation for old configs.
            if conf.has_option("KGCN", "n_item"):
                self.n_item = conf.getint("KGCN", "n_item")
            else:
                self.n_item = conf.getint("KGCN", "n_relation")
        elif model == 'general_HGNN':
            self._read(conf, 'general_HGNN', {
                'lr': 'f', 'weight_decay': 'f', 'dropout': 'f',
                'hidden_dim': 'i', 'num_heads': 'i', 'patience': 'i',
                'max_epoch': 'i', 'mini_batch_flag': 'b', 'layers_gnn': 'i',
                'layers_pre_mp': 'i', 'layers_post_mp': 'i', 'stage_type': 's',
                'gnn_type': 's', 'subgraph_extraction': 's', 'feat': 'i',
                'has_bn': 'b', 'has_l2norm': 'b', 'macro_func': 's',
            })
            # Map the activation name onto the callable it stands for.
            self.activation = act_dict[conf.get('general_HGNN', 'activation')]
        elif model == 'homo_GNN':
            self._read(conf, 'homo_GNN', {
                'lr': 'f', 'weight_decay': 'f', 'dropout': 'f',
                'hidden_dim': 'i', 'num_heads': 'i', 'patience': 'i',
                'max_epoch': 'i', 'mini_batch_flag': 'b', 'layers_gnn': 'i',
                'layers_pre_mp': 'i', 'layers_post_mp': 'i', 'stage_type': 's',
                'gnn_type': 's', 'subgraph': 's', 'feat': 'i', 'has_bn': 'b',
                'has_l2norm': 'b',
            })
            # Map the activation name onto the callable it stands for.
            self.activation = act_dict[conf.get('homo_GNN', 'activation')]
        elif model == 'HeGAN':
            self._read(conf, 'HeGAN', {
                'lr_gen': 'f', 'lr_dis': 'f', 'sigma': 'f', 'n_sample': 'i',
                'max_epoch': 'i', 'epoch_dis': 'i', 'epoch_gen': 'i',
                'wd_dis': 'f', 'wd_gen': 'f', 'mini_batch_flag': 'b',
                'validation': 'b', 'emb_size': 'i', 'patience': 'i',
            })
        elif model == 'HDE':
            self._read(conf, 'HDE', {
                'emb_dim': 'i', 'num_neighbor': 'i', 'use_bias': 'b',
                'k_hop': 'i', 'max_epoch': 'i', 'batch_size': 'i',
                'max_dist': 'i', 'lr': 'f',
            })
        elif model == 'SimpleHGN':
            self._read(conf, "SimpleHGN", {
                'weight_decay': 'f', 'lr': 'f', 'max_epoch': 'i', 'seed': 'i',
                'patience': 'i', 'edge_dim': 'i', 'slope': 'f',
                'attn_drop_rate': 'f', 'feats_drop_rate': 'f',
                'num_heads': 'i', 'h_dim': 'i', 'n_layers': 'i', 'beta': 'f',
                'residual': 'b',
            })
            self.mini_batch_flag = False  # full-graph training only
            self.hidden_dim = self.h_dim * self.num_heads

    def _read(self, conf, section, spec):
        """Read every option in *spec* from *section* and bind it on self.

        *spec* maps attribute name -> type code ('i', 'f', 'b' or 's'),
        or -> (code, option_name) when the .ini option key differs from
        the attribute name (e.g. ``lr`` read from ``learning_rate``).
        """
        for attr, how in spec.items():
            code, option = how if isinstance(how, tuple) else (how, attr)
            getter = getattr(conf, self._GETTERS[code])
            setattr(self, attr, getter(section, option))

    def _load_gtn_like(self, conf, section):
        """Shared loader for the GTN and MHNF sections (identical layout);
        also reused for the HGNN_AC backbone variants."""
        self._read(conf, section, {
            'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'seed': 'i',
            'hidden_dim': 'i', 'out_dim': 'i', 'num_channels': 'i',
            'num_layers': 'i', 'max_epoch': 'i', 'patience': 'i',
            'identity': 'b', 'norm_emd_flag': 'b', 'adaptive_lr_flag': 'b',
            'mini_batch_flag': 'b',
        })

    def _load_magnn(self, conf):
        """Loader for the MAGNN section (also reused by HGNN_AC).

        Derives ``hidden_dim`` as ``h_dim * num_heads``.
        """
        self._read(conf, "MAGNN", {
            'lr': ('f', 'learning_rate'), 'weight_decay': 'f', 'seed': 'i',
            'dropout': 'f', 'inter_attn_feats': 'i', 'h_dim': 'i',
            'out_dim': 'i', 'num_heads': 'i', 'num_layers': 'i',
            'patience': 'i', 'max_epoch': 'i', 'mini_batch_flag': 'b',
            'encoder_type': 's',
        })
        self.hidden_dim = self.h_dim * self.num_heads

    def __repr__(self):
        return '[Config Info]\tModel: {},\tTask: {},\tDataset: {}'.format(self.model, self.task, self.dataset)