config.py
import logging
import os

from yacs.config import CfgNode as CN

# Global config object
cfg = CN()
def set_cfg(cfg):
    r'''
    This function sets the default config values.
    1) Note that for an experiment, only part of the arguments will be used;
       the remaining, unused arguments will not affect anything.
    2) We support *at most* two levels of configs, e.g., cfg.dataset.name
    '''
    # ------------------------------------------------------------------------ #
    # Basic options
    # ------------------------------------------------------------------------ #
    # Device to run on: 'cpu' or 'cuda:<id>'
    cfg.device = 'cuda:2'
    # Random seed for the data split
    cfg.seed = 42
    # Random seed for model initialization
    cfg.model_seed = 42
    # Number of splits for few-shot learning
    cfg.num_split = 5
    # Number of labels per class for few-shot learning
    cfg.num_shot = 20
    # Ratio of nodes used for validation (excluding train nodes) in few-shot learning
    cfg.val_ratio = 0.5
    # Path to the pre-trained model; only effective for downstream tasks
    cfg.pre_train_model_path = None
    # Number of times to repeat the experiment
    cfg.repeat = 5
    # Task type: node classification or graph classification
    cfg.task = 'node'  # 'node' or 'graph'

    # ------------------------------------------------------------------------ #
    # Dataset options
    # ------------------------------------------------------------------------ #
    cfg.dataset = CN()
    cfg.dataset.name = 'texas'
    # Set automatically by the code, no need to set manually
    cfg.dataset.num_nodes = -1
    # Set automatically by the code, no need to set manually
    cfg.dataset.num_classes = -1
    # Directory to load the dataset from. If the dataset is downloaded, it is stored in this root
    cfg.dataset.root = '../datasets'

    # ------------------------------------------------------------------------ #
    # Optimization options
    # ------------------------------------------------------------------------ #
    cfg.optim = CN()
    # Maximal number of epochs
    cfg.optim.epochs = 200
    # Early-stopping patience (in epochs)
    cfg.optim.patience = 200
    # Base learning rate
    cfg.optim.lr = 0.01
    # L2 regularization (weight decay)
    cfg.optim.wd = 5e-4
    # Batch size, only used in mini-batch mode
    cfg.optim.batch_size = 128
    # Batch size used for evaluation
    cfg.optim.eval_batch_size = -1

    # ------------------------------------------------------------------------ #
    # Model options
    # ------------------------------------------------------------------------ #
    cfg.model = CN()
    # Backbone model to use
    cfg.model.backbone = 'GCN'
    # Pre-training method
    cfg.model.pretrain_type = 'DGI'
    # Prompt type, e.g. 'gppt', 'gprompt', or 'none'
    cfg.model.prompt_type = 'none'
    # Hidden layer dimension
    cfg.model.hidden_dim = 128
    # Number of attention heads
    cfg.model.num_heads = 8
    # Number of layers
    cfg.model.num_layers = 3
    # Dropout rate
    cfg.model.dropout = 0.5
    # Pooling method: 'sum', 'mean', or 'max'
    cfg.model.pool = 'mean'
    # JK method: how node features across layers are combined ('last', 'sum', 'max', or 'concat')
    cfg.model.JK = 'last'
    # Additional model-specific hyperparameters (interpretation depends on the chosen backbone/prompt)
    cfg.model.alpha = 0.5
    cfg.model.r = 0
    cfg.model.lg = False
    cfg.model.adaptive_adj = False

# Populate the global config with the default values
set_cfg(cfg)
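
# --------------------------------------------------------------------------- #
# Usage sketch (an assumption, not part of the original file): a downstream
# script would typically import `cfg` from this module and override the
# defaults via the standard yacs API. The YAML path and the override values
# below are hypothetical examples.
# --------------------------------------------------------------------------- #
if __name__ == '__main__':
    # Override defaults from a YAML file whose keys mirror the two-level
    # structure above, e.g. `optim: {lr: 0.005}` (hypothetical path).
    # cfg.merge_from_file('configs/texas_gcn.yaml')

    # Override individual values with CLI-style `key value` pairs.
    cfg.merge_from_list(['optim.lr', 0.005, 'model.num_layers', 2])

    # Freeze the config to catch accidental modifications during training.
    cfg.freeze()
    print(cfg.dataset.name, cfg.optim.lr, cfg.model.num_layers)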