[Hackathon 6th Code Camp No.15] support neuraloperator #867

Merged 28 commits on May 27, 2024
Changes shown below are from 7 commits.
Commits (28):
273d4b2
add-neuraloperator
Yang-Changhui Apr 25, 2024
f0b900e
add-neuraloperator
Yang-Changhui Apr 25, 2024
f0760a7
Merge branch 'develop' into add_neuraloperator
Yang-Changhui Apr 26, 2024
5c59594
add-neuraloperator
Yang-Changhui Apr 29, 2024
55e5b14
add-neuralopertor
Yang-Changhui Apr 29, 2024
34009e8
Merge branch 'add_neuraloperator' of https://github.com/Yang-Changhui…
Yang-Changhui Apr 29, 2024
0a1cde8
add-neuraloperator
Yang-Changhui Apr 29, 2024
99fbd35
add-neuraloperator
Yang-Changhui Apr 29, 2024
fd8c49f
add-neuraloperator
Yang-Changhui Apr 29, 2024
92db31c
Merge branch 'develop' into add_neuraloperator
Yang-Changhui May 7, 2024
1be2a63
Merge branch 'develop' into add_neuraloperator
Yang-Changhui May 10, 2024
a8c5526
add-neuraloperator
Yang-Changhui May 10, 2024
11c50fd
add-neuraloperator
Yang-Changhui May 10, 2024
9dc7917
Merge branch 'develop' of https://github.com/PaddlePaddle/PaddleScien…
Yang-Changhui May 10, 2024
a48e8fb
Merge branch 'add_neuraloperator' of https://github.com/Yang-Changhui…
Yang-Changhui May 10, 2024
b1df8c4
add-neuraloperator
Yang-Changhui May 10, 2024
df747a9
Merge branch 'develop' into add_neuraloperator
Yang-Changhui May 10, 2024
2a4d0da
add-neuraloperator
Yang-Changhui May 13, 2024
c91d92f
Merge branch 'add_neuraloperator' of https://github.com/Yang-Changhui…
Yang-Changhui May 13, 2024
5e5ad68
Merge branch 'develop' into add_neuraloperator
Yang-Changhui May 13, 2024
750bb7d
add-neuraloperator
Yang-Changhui May 20, 2024
cb76494
Merge branch 'add_neuraloperator' of https://github.com/Yang-Changhui…
Yang-Changhui May 20, 2024
9dcd49a
Merge branch 'develop' into add_neuraloperator
Yang-Changhui May 20, 2024
560592d
add-neuraloperator
Yang-Changhui May 26, 2024
f1b197f
Merge branch 'add_neuraloperator' of https://github.com/Yang-Changhui…
Yang-Changhui May 26, 2024
60aa2c8
Merge branch 'develop' into add_neuraloperator
Yang-Changhui May 26, 2024
8d72bf7
add-neuraloperator
Yang-Changhui May 26, 2024
7b819fc
Merge branch 'add_neuraloperator' of https://github.com/Yang-Changhui…
Yang-Changhui May 26, 2024
119 changes: 119 additions & 0 deletions examples/neuraloperator/conf/sfno_swe_pretrain.yaml
@@ -0,0 +1,119 @@
hydra:
  run:
    # dynamic output directory according to running time and override name
    dir: outputs_sfno_pretrain
  job:
    name: ${mode} # name of logfile
    chdir: false # keep current working directory unchanged
    config:
      override_dirname:
        exclude_keys:
          - TRAIN.checkpoint_path
          - TRAIN.pretrained_model_path
          - EVAL.pretrained_model_path
          - mode
          - output_dir
          - log_freq
  callbacks:
    init_callback:
      _target_: ppsci.utils.callbacks.InitCallback
  sweep:
    # output directory for multirun
    dir: ${hydra.run.dir}
    subdir: ./

# general settings
mode: train # running mode: train/eval/export/infer
seed: 666
output_dir: ${hydra:run.dir}
log_freq: 20

# set train and evaluate data path
FILE_PATH: ./datasets/SWE/

# dataset settings
DATASET:
  label_keys: ["y"]
  train_resolution: "32x64"
  test_resolutions: ["32x64", "64x128"]

# model settings
MODEL:
  input_keys: ["x"]
  output_keys: ["y"]
  in_channels: 3
  out_channels: 3
  n_modes: [32, 32]
  hidden_channels: 32
  projection_channels: 64
  n_layers: 4
  use_mlp: false
  mlp:
    expansion: 0.5
    dropout: 0.0
  norm: "group_norm"
  fno_skip: "linear"
  mlp_skip: "soft-gating"
  separable: false
  preactivation: false
  factorization: null
  rank: 1.0
  joint_factorization: false
  fixed_rank_modes: null
  implementation: "factorized"
  domain_padding: null # 0.078125
  domain_padding_mode: "one-sided" # symmetric
  fft_norm: "forward"
  patching_levels: 0

# training settings
TRAIN:
  epochs: 300
  save_freq: 20
  eval_during_train: true
  eval_freq: 1
  lr_scheduler:
    epochs: ${TRAIN.epochs}
    learning_rate: 5e-3
    by_epoch: true
    type: "StepDecay"
    step_size: 60
    gamma: 0.5
    # ReduceOnPlateau only
    scheduler_patience: 5
    # CosineAnnealingLR only
    scheduler_T_max: 30
  wd: 1e-4
  batch_size: 4
  pretrained_model_path: ./pretrainmodel/sfno_pretrain.pdparams
  checkpoint_path: null

# evaluation settings
EVAL:
  pretrained_model_path: ./outputs_sfno_pretrain/checkpoints/best_model.pdparams
  compute_metric_by_batch: false
  eval_with_no_grad: true
  batch_size: 10

INFER:
  pretrained_model_path: ./outputs_sfno_pretrain/checkpoints/best_model.pdparams
  export_path: ./inference/uno/uno_darcyflow
  pdmodel_path: ${INFER.export_path}.pdmodel
  pdpiparams_path: ${INFER.export_path}.pdiparams
Collaborator review comment: pdpiparams_path --> pdiparams_path; replace all occurrences.

  device: gpu
  engine: native
  precision: fp32
  onnx_path: ${INFER.export_path}.onnx
  ir_optim: true
  min_subgraph_size: 10
  gpu_mem: 4000
  gpu_id: 0
  max_batch_size: 16
  num_cpu_threads: 4
  batch_size: 1
  data_path: ./datasets/SWE/test_SWE_32x64.npy
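For orientation, the sketch below shows how a PaddleScience example script typically consumes a Hydra config like `sfno_swe_pretrain.yaml`. It is a minimal illustration of the entrypoint pattern, not this PR's actual code; the `train` and `evaluate` stubs are hypothetical placeholders.

```python
# Minimal sketch of the usual PaddleScience Hydra entrypoint pattern.
# `train` and `evaluate` are hypothetical stubs, not this PR's functions.
import hydra
from omegaconf import DictConfig


def train(cfg: DictConfig) -> None:
    # The real example would build a ppsci Solver from cfg here.
    print("train:", cfg.TRAIN.lr_scheduler.learning_rate, cfg.TRAIN.batch_size)


def evaluate(cfg: DictConfig) -> None:
    print("eval:", cfg.EVAL.pretrained_model_path)


@hydra.main(version_base=None, config_path="./conf", config_name="sfno_swe_pretrain")
def main(cfg: DictConfig) -> None:
    # cfg mirrors the YAML above, e.g. cfg.MODEL.n_modes == [32, 32]
    if cfg.mode == "train":
        train(cfg)
    elif cfg.mode == "eval":
        evaluate(cfg)
    else:
        raise ValueError(f"mode should be train/eval, got {cfg.mode!r}")


if __name__ == "__main__":
    main()
```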
127 changes: 127 additions & 0 deletions examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml
@@ -0,0 +1,127 @@
hydra:
  run:
    # dynamic output directory according to running time and override name
    dir: outputs_tfno_pretrain
  job:
    name: ${mode} # name of logfile
    chdir: false # keep current working directory unchanged
    config:
      override_dirname:
        exclude_keys:
          - TRAIN.checkpoint_path
          - TRAIN.pretrained_model_path
          - EVAL.pretrained_model_path
          - mode
          - output_dir
          - log_freq
  callbacks:
    init_callback:
      _target_: ppsci.utils.callbacks.InitCallback
  sweep:
    # output directory for multirun
    dir: ${hydra.run.dir}
    subdir: ./

# general settings
mode: train # running mode: train/eval/export/infer
seed: 666
output_dir: ${hydra:run.dir}
log_freq: 20

# set train and evaluate data path
FILE_PATH: ./datasets/darcyflow/

# dataset settings
DATASET:
  label_keys: ["y"]
  train_resolution: 16
  test_resolutions: [16, 32]
  grid_boundaries: [[0, 1], [0, 1]]
  positional_encoding: true
  encode_input: false
  encode_output: false
  encoding: "channel-wise"
  channel_dim: 1

# model settings
MODEL:
  input_keys: ["x"]
  output_keys: ["y"]
  n_modes_height: 16
  n_modes_width: 16
  in_channels: 3
  out_channels: 1
  hidden_channels: 32
  projection_channels: 64
  n_layers: 4
  use_mlp: false
  mlp:
    expansion: 0.5
    dropout: 0.0
  norm: "group_norm"
  fno_skip: "linear"
  mlp_skip: "soft-gating"
  separable: false
  preactivation: false
  factorization: "dense"
  rank: 1.0
  joint_factorization: false
  fixed_rank_modes: null
  implementation: "factorized"
  domain_padding: null # 0.078125
  domain_padding_mode: "one-sided" # symmetric
  fft_norm: "forward"
  patching_levels: 0

# training settings
TRAIN:
  epochs: 300
  save_freq: 20
  eval_during_train: true
  eval_freq: 1
  training_loss: "h1"
  lr_scheduler:
    epochs: ${TRAIN.epochs}
    learning_rate: 5e-3
    by_epoch: true
    type: "StepDecay"
    step_size: 60
    gamma: 0.5
    # ReduceOnPlateau only
    scheduler_patience: 5
    # CosineAnnealingLR only
    scheduler_T_max: 500
  wd: 1.0e-4
  batch_size: 16
  pretrained_model_path: ./pretrainmodel/tfno_pretrain.pdparams
Collaborator review comment: just set this to null; change the other two as well.

  checkpoint_path: null

# evaluation settings
EVAL:
  pretrained_model_path: ./outputs_tfno_pretrain/checkpoints/best_model.pdparams
  compute_metric_by_batch: false
  eval_with_no_grad: true
  batch_size: 16

INFER:
  pretrained_model_path: ./outputs_tfno_pretrain/checkpoints/best_model.pdparams
  export_path: ./inference/tfno/tfno_darcyflow
  pdmodel_path: ${INFER.export_path}.pdmodel
  pdpiparams_path: ${INFER.export_path}.pdiparams
  device: gpu
  engine: native
  precision: fp32
  onnx_path: ${INFER.export_path}.onnx
  ir_optim: true
  min_subgraph_size: 10
  gpu_mem: 4000
  gpu_id: 0
  max_batch_size: 16
  num_cpu_threads: 1
  batch_size: 1
  data_path: ./datasets/darcyflow/darcy_test_16.npy
  grid_boundaries: [[0, 1], [0, 1]]
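As a sanity check on the `TRAIN.lr_scheduler` block above, the sketch below replays the schedule directly, assuming the config's `StepDecay` type maps onto `paddle.optimizer.lr.StepDecay`: the learning rate starts at 5e-3 and halves every 60 epochs, stepped once per epoch since `by_epoch` is true.

```python
# Sketch replaying the TRAIN.lr_scheduler schedule above, assuming it maps
# onto paddle.optimizer.lr.StepDecay: lr halves (gamma=0.5) every 60 epochs.
import paddle

sched = paddle.optimizer.lr.StepDecay(learning_rate=5e-3, step_size=60, gamma=0.5)
for epoch in range(300):
    if epoch % 60 == 0:
        print(f"epoch {epoch:3d}: lr = {sched.get_lr():.6g}")
    sched.step()  # by_epoch: true -> one scheduler step per epoch
# Prints 0.005, 0.0025, 0.00125, 0.000625, 0.0003125 at epochs 0/60/120/180/240.
```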
132 changes: 132 additions & 0 deletions examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml
@@ -0,0 +1,132 @@
hydra:
  run:
    # dynamic output directory according to running time and override name
    dir: outputs_uno_pretrain
  job:
    name: ${mode} # name of logfile
    chdir: false # keep current working directory unchanged
    config:
      override_dirname:
        exclude_keys:
          - TRAIN.checkpoint_path
          - TRAIN.pretrained_model_path
          - EVAL.pretrained_model_path
          - mode
          - output_dir
          - log_freq
  callbacks:
    init_callback:
      _target_: ppsci.utils.callbacks.InitCallback
  sweep:
    # output directory for multirun
    dir: ${hydra.run.dir}
    subdir: ./

# general settings
mode: train # running mode: train/eval/export/infer
seed: 666
output_dir: ${hydra:run.dir}
log_freq: 20

# set train and evaluate data path
FILE_PATH: ./datasets/darcyflow/

# dataset settings
DATASET:
  label_keys: ["y"]
  train_resolution: 16
  test_resolutions: [16, 32]
  grid_boundaries: [[0, 1], [0, 1]]
  positional_encoding: true
  encode_input: false
  encode_output: false
  encoding: "channel-wise"
  channel_dim: 1

# model settings
MODEL:
  input_keys: ["x"]
  output_keys: ["y"]
  in_channels: 3
  out_channels: 1
  hidden_channels: 64
  projection_channels: 64
  n_layers: 5
  uno_out_channels: [32, 64, 64, 64, 32]
  uno_n_modes: [[16, 16], [8, 8], [8, 8], [8, 8], [16, 16]]
  uno_scalings: [[1.0, 1.0], [0.5, 0.5], [1, 1], [2, 2], [1, 1]]
  horizontal_skips_map: null
  incremental_n_modes: null
  use_mlp: false
  mlp:
    expansion: 0.5
    dropout: 0.0
  norm: "group_norm"
  fno_skip: "linear"
  horizontal_skip: "linear"
  mlp_skip: "soft-gating"
  separable: false
  preactivation: false
  factorization: null
  rank: 1.0
  joint_factorization: false
  fixed_rank_modes: null
  implementation: "factorized"
  domain_padding: 0.2 # 0.078125
  domain_padding_mode: "one-sided" # symmetric
  fft_norm: "forward"
  patching_levels: 0

# training settings
TRAIN:
  epochs: 300
  save_freq: 20
  eval_during_train: true
  eval_freq: 1
  training_loss: "h1"
  lr_scheduler:
    epochs: ${TRAIN.epochs}
    learning_rate: 5e-3
    by_epoch: true
    type: "StepDecay"
    step_size: 60
    gamma: 0.5
    # ReduceOnPlateau only
    scheduler_patience: 5
    # CosineAnnealingLR only
    scheduler_T_max: 30
  wd: 1.0e-4
  batch_size: 16
  # /home/aistudio/darcy_flow_small.pdparams
  pretrained_model_path: ./pretrainmodel/uno_pretrain.pdparams
  checkpoint_path: null

# evaluation settings
EVAL:
  pretrained_model_path: ./outputs_tfno_pretrain/checkpoints/best_model.pdparams
  compute_metric_by_batch: false
  eval_with_no_grad: true
  batch_size: 16

INFER:
  pretrained_model_path: ./outputs_tfno_pretrain/checkpoints/best_model.pdparams
  export_path: ./inference/uno/uno_darcyflow
  pdmodel_path: ${INFER.export_path}.pdmodel
  pdpiparams_path: ${INFER.export_path}.pdiparams
  device: gpu
  engine: native
  precision: fp32
  onnx_path: ${INFER.export_path}.onnx
  ir_optim: true
  min_subgraph_size: 10
  gpu_mem: 4000
  gpu_id: 0
  max_batch_size: 16
  num_cpu_threads: 4
  batch_size: 1
  data_path: ./datasets/darcyflow/darcy_test_16.npy
  grid_boundaries: [[0, 1], [0, 1]]
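One detail shared by all three configs: `pdmodel_path` and the misspelled `pdpiparams_path` are OmegaConf interpolations on `INFER.export_path`. The sketch below shows how they resolve, using the reviewer's suggested `pdiparams_path` spelling and values from the UNO config above.

```python
# Sketch of how the ${INFER.export_path} interpolations resolve, using
# OmegaConf (the config library underneath Hydra). The key is renamed to
# pdiparams_path per the reviewer's suggestion above.
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "INFER": {
            "export_path": "./inference/uno/uno_darcyflow",
            "pdmodel_path": "${INFER.export_path}.pdmodel",
            "pdiparams_path": "${INFER.export_path}.pdiparams",
        }
    }
)
print(cfg.INFER.pdmodel_path)    # ./inference/uno/uno_darcyflow.pdmodel
print(cfg.INFER.pdiparams_path)  # ./inference/uno/uno_darcyflow.pdiparams
```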