# 900M_eval.yml: evaluation config for TokenFormer 900M (GPT-NeoX format)
{
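# parallelism: pipeline and tensor (model) parallelism are both disabled
# (degree 1); any remaining GPUs are used for data parallelism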
"pipe_parallel_size": 1,
"model_parallel_size": 1,
"num_layers": 32,
"hidden_size": 1280,
"num_attention_heads": 16,
"qkv_slot_num": 1280,
"proj_slot_num": 1280,
"ffn_slot_num": 5120,
"seq_length": 2048,
"max_position_embeddings": 2048,
"pos_emb": "rotary",
"rotary_pct": 0.25,
"no_weight_tying": false,
"norm": "layernorm_nonparam",
"final_norm": "layernorm",
"gpt_j_residual": false,
"output_layer_parallelism": "column",
"use_bias_in_attn_linear": false,
"attention_config": [[["tokenformer"], 32]],
"norm_activation_type": "l2_norm_gelu", # "l2_norm_gelu", "gelu_l2_norm", "softmax"
# these fusions should provide some speedup but take a while to build; set to true if desired
"scaled_upper_triang_masked_softmax_fusion": false,
"bias_gelu_fusion": false,
"rope_fusion": false,
"layernorm_fusion": false,
"init_method": "normal",
"output_layer_init_method": "wang_init",
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.0006,
"betas": [0.9, 0.95],
"eps": 1.0e-8
}
},
"min_lr": 0.00006,
"zero_optimization": {
"stage": 1,
"allgather_partitions": true,
"allgather_bucket_size": 500000000,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": 500000000,
"contiguous_gradients": true,
"cpu_offload": false
},
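# assuming the 64-GPU note below: 8 micro-batch x 2 accumulation steps x 64 GPUs
# = 1024 sequences (~2.1M tokens at seq_length 2048) per optimizer step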
"train_micro_batch_size_per_gpu": 8, # 64gpu
"gradient_accumulation_steps": 2,
"data_impl": "mmap",
"num_workers": 4,
"checkpoint_activations": true,
"checkpoint_num_layers": 1,
"partition_activations": true,
"synchronize_each_layer": true,
"gradient_clipping": 1.0,
"weight_decay": 0.1,
"hidden_dropout": 0,
"attention_dropout": 0,
"fp16": {
"fp16": true,
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 12,
"hysteresis": 2,
"min_loss_scale": 1
},
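# cosine LR decay over the full 143k iterations, with a 1% (~1430-step) linear warmup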
"train_iters": 143000,
"lr_decay_iters": 143000,
"distributed_backend": "nccl",
"lr_decay_style": "cosine",
"warmup": 0.01,
"checkpoint_factor": 500,
"extra_save_iters": [0,1,2,4,8,16,32,64,128,256,512],
"eval_interval": 5000,
"eval_iters": 100,
"log_interval": 500,
"steps_per_print": 500,
"wall_clock_breakdown": true,
"keep_last_n_checkpoints": 6,
"save": "./work_dirs/900M_TokenFormer_Pile/checkpoints",
"load": "./work_dirs/900M_TokenFormer_Pile/checkpoints",
"log_dir": "./work_dirs/900M_TokenFormer_Pile/logs_eval",
"eval_ckpt": "./TokenFormer-900M/pytorch_model.bin",
"checkpoint_validation_with_forward_pass": False,
"adlr_autoresume": True,
"tokenizer-type": "HFTokenizer",
"vocab-file": "./tokenizer.json", # from GPT-NeoX 20B
"launcher": "slurm",
"deepspeed_slurm": false, # set true for training
"no_ssh_check": true,
"data_path": "./dataset/language_dataset/pile/pile_0.87_deduped_text_document",
"tensorboard_dir": "tensorboard",
"use_wandb": False,
"wandb_host": "https://api.wandb.ai",
"wandb_project": "neox"
}
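# a typical GPT-NeoX-style launch for this config (script name is an assumption;
# check the repo's README): python ./deepy.py eval.py 900M_eval.yml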