format SciML Style #542

Merged 2 commits on Jun 26, 2022
42 changes: 42 additions & 0 deletions .github/workflows/FormatCheck.yml
@@ -0,0 +1,42 @@
name: format-check

on:
  push:
    branches:
      - 'master'
      - 'release-'
    tags: '*'
  pull_request:

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        julia-version: [1]
        julia-arch: [x86]
        os: [ubuntu-latest]
    steps:
      - uses: julia-actions/setup-julia@latest
        with:
          version: ${{ matrix.julia-version }}

      - uses: actions/checkout@v1
      - name: Install JuliaFormatter and format
        # This will use the latest version by default but you can set the version like so:
        #
        # julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter", version="0.13.0"))'
        run: |
          julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))'
          julia -e 'using JuliaFormatter; format(".", verbose=true)'
      - name: Format check
        run: |
          julia -e '
          out = Cmd(`git diff --name-only`) |> read |> String
          if out == ""
              exit(0)
          else
              @error "Some files have not been formatted !!!"
              write(stdout, out)
              exit(1)
          end'
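For contributors, a minimal local sketch of the same check (assumed usage — the explicit `JuliaFormatter.SciMLStyle()` call is an assumption; the CI job above simply runs `format(".")` and lets the repository configuration pick the style):

```julia
# Sketch: run the same formatting pass locally before pushing (not part of this PR).
using Pkg
Pkg.add("JuliaFormatter")        # CI installs the latest release the same way

using JuliaFormatter
# Passing SciMLStyle() explicitly is an assumption; with a .JuliaFormatter.toml that sets
# style = "sciml", a plain format(".") call picks it up automatically.
format(".", JuliaFormatter.SciMLStyle(); verbose = true)
```

If `git diff --name-only` prints nothing afterwards, the `Format check` step above will pass.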
42 changes: 19 additions & 23 deletions docs/make.jl
@@ -2,27 +2,23 @@ using Documenter, NeuralPDE

include("pages.jl")

makedocs(
sitename="NeuralPDE.jl",
authors="#",
clean=true,
doctest=false,
modules=[NeuralPDE],
strict=[
:doctest,
:linkcheck,
:parse_error,
:example_block,
# Other available options are
# :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block
],
format=Documenter.HTML( analytics = "UA-90474609-3",
assets=["assets/favicon.ico"],
canonical="https://neuralpde.sciml.ai/stable/"),
pages=pages
)
makedocs(sitename = "NeuralPDE.jl",
authors = "#",
clean = true,
doctest = false,
modules = [NeuralPDE],
strict = [
:doctest,
:linkcheck,
:parse_error,
:example_block,
# Other available options are
# :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block
],
format = Documenter.HTML(analytics = "UA-90474609-3",
assets = ["assets/favicon.ico"],
canonical = "https://neuralpde.sciml.ai/stable/"),
pages = pages)

deploydocs(
repo="github.com/SciML/NeuralPDE.jl.git";
push_preview=true
)
deploydocs(repo = "github.com/SciML/NeuralPDE.jl.git";
push_preview = true)
57 changes: 25 additions & 32 deletions docs/pages.jl
@@ -1,33 +1,26 @@
pages = [
"NeuralPDE.jl: Scientific Machine Learning (SciML) for Partial Differential Equations" => "index.md",
"Physics-Informed Neural Network Tutorials" => Any[
"pinn/poisson.md",
"pinn/wave.md",
"pinn/2D.md",
"pinn/system.md",
"pinn/3rd.md",
"pinn/low_level.md",
"pinn/ks.md",
"pinn/fp.md",
"pinn/parm_estim.md",
"pinn/heterogeneous.md",
"pinn/integro_diff.md",
"pinn/debugging.md",
"pinn/neural_adapter.md",
],
"Specialized Neural PDE Tutorials" => Any[
"examples/kolmogorovbackwards.md",
"examples/optimal_stopping_american.md",
],
"Specialized Neural ODE Tutorials" => Any[
"examples/ode.md",
"examples/nnrode_example.md",
],
"API Documentation" => Any[
"solvers/pinns.md",
"solvers/kolmogorovbackwards_solver.md",
"solvers/optimal_stopping.md",#TODO
"solvers/ode.md",
"solvers/nnrode.md",#TODO
]
]
"NeuralPDE.jl: Scientific Machine Learning (SciML) for Partial Differential Equations" => "index.md",
"Physics-Informed Neural Network Tutorials" => Any["pinn/poisson.md",
"pinn/wave.md",
"pinn/2D.md",
"pinn/system.md",
"pinn/3rd.md",
"pinn/low_level.md",
"pinn/ks.md",
"pinn/fp.md",
"pinn/parm_estim.md",
"pinn/heterogeneous.md",
"pinn/integro_diff.md",
"pinn/debugging.md",
"pinn/neural_adapter.md"],
"Specialized Neural PDE Tutorials" => Any["examples/kolmogorovbackwards.md",
"examples/optimal_stopping_american.md"],
"Specialized Neural ODE Tutorials" => Any["examples/ode.md",
"examples/nnrode_example.md"],
"API Documentation" => Any["solvers/ode.md",
"solvers/pinns.md",
"solvers/training_strategies.md",
"solvers/kolmogorovbackwards_solver.md",
"solvers/optimal_stopping.md",#TODO
"solvers/nnrode.md"],
]
3 changes: 1 addition & 2 deletions docs/src/examples/ode.md
@@ -64,8 +64,7 @@ Once these pieces are together, we call `solve` just like with any other `ODEProblem`
Let's turn on `verbose` so we can see the loss over time during the training process:

```@example nnode1
sol = solve(prob, NeuralPDE.NNODE(chain, opt), dt=1 / 20f0, verbose=true,
abstol=1e-10, maxiters=200)
sol = solve(prob, NeuralPDE.NNODE(chain, opt), verbose=true, abstol=1f-6, maxiters=200)
```

And that's it: the neural network solution was computed by training the neural network and
26 changes: 26 additions & 0 deletions docs/src/solvers/training_strategies.md
@@ -0,0 +1,26 @@
# Training Strategies

Training strategies are the choices for how the points are sampled for the definition
of the physics-informed loss.

## Recommendations

`QuasiRandomTraining` with its default `LatinHyperCubeSample()` is a well-rounded training
strategy that can be used in most situations. It scales well to high-dimensional
spaces and is GPU-compatible. `QuadratureTraining` can lead to faster or more robust
convergence with one of the H-Cubature or P-Cubature methods, but it is not currently
GPU-compatible. For very high-dimensional cases, `QuadratureTraining` with an adaptive
Monte Carlo quadrature method, such as `CubaVegas`, can be beneficial for difficult or
stiff problems.

`GridTraining` should only be used for testing purposes and should not be relied upon for
real training cases. `StochasticTraining` achieves a lower convergence rate than the
quasi-Monte Carlo methods, and thus `QuasiRandomTraining` should be preferred in most cases.
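
As a quick illustration (a sketch only, assuming the two-positional-argument
`PhysicsInformedNN(chain, strategy)` form and the `FastChain` network style used in the
tests elsewhere in this PR), a strategy is simply handed to the discretizer:

```julia
# Sketch: choosing a training strategy for a PINN discretization.
using NeuralPDE, DiffEqFlux, Flux

# Small network in the style of the adaptive-loss tests in this PR.
chain = FastChain(FastDense(2, 16, Flux.σ), FastDense(16, 1))

# Quasi-random sampling with 100 points per loss term (Latin hypercube sampling by default).
strategy = NeuralPDE.QuasiRandomTraining(100)

# The strategy is passed alongside the network; swap in NeuralPDE.StochasticTraining(128)
# or NeuralPDE.QuadratureTraining() to compare strategies on the same problem.
discretization = NeuralPDE.PhysicsInformedNN(chain, strategy)
```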

## API

```@docs
GridTraining
StochasticTraining
QuasiRandomTraining
QuadratureTraining
```
11 changes: 6 additions & 5 deletions lib/NeuralPDELogging/src/NeuralPDELogging.jl
@@ -5,19 +5,20 @@ using TensorBoardLogger

"""This function overrides the empty function in NeuralPDE in order to use TensorBoardLogger in that package
This is light type piracy but it should be alright since this is a subpackage of NeuralPDE"""
function NeuralPDE.logvector(logger::TBLogger, vector::AbstractVector{R}, name::AbstractString, step::Integer) where R <: Real
function NeuralPDE.logvector(logger::TBLogger, vector::AbstractVector{R},
name::AbstractString, step::Integer) where {R <: Real}
for j in 1:length(vector)
log_value(logger, "$(name)/$(j)", vector[j], step=step)
log_value(logger, "$(name)/$(j)", vector[j], step = step)
end
nothing
end

"""This function overrides the empty function in NeuralPDE in order to use TensorBoardLogger in that package.
This is light type piracy but it should be alright since this is a subpackage of NeuralPDE"""
function NeuralPDE.logscalar(logger::TBLogger, scalar::R, name::AbstractString, step::Integer) where R <: Real
log_value(logger, "$(name)", scalar, step=step)
function NeuralPDE.logscalar(logger::TBLogger, scalar::R, name::AbstractString,
step::Integer) where {R <: Real}
log_value(logger, "$(name)", scalar, step = step)
nothing
end


end
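As a hedged illustration of what these overrides enable (a sketch, not part of the diff;
the tag names and values are made up), loading the subpackage lets the NeuralPDE logging
hooks write to TensorBoard:

```julia
# Sketch: exercising the overridden logging hooks directly with a TBLogger.
using NeuralPDE, NeuralPDELogging, TensorBoardLogger

logger = TBLogger(mktempdir())    # throwaway log directory
NeuralPDE.logscalar(logger, 0.37, "outer_error/loss", 1)
NeuralPDE.logvector(logger, [1.0, 2.5, 0.8], "pde_loss_weights", 1)
# In practice the same logger is passed as PhysicsInformedNN(...; logger = logger),
# as in the adaptive-loss tests below, and these calls happen during training.
```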
79 changes: 45 additions & 34 deletions lib/NeuralPDELogging/test/adaptive_loss_log_tests.jl
@@ -7,15 +7,18 @@ using Random
#using Plots
@info "Starting Soon!"

nonadaptive_loss = NeuralPDE.NonAdaptiveLoss(pde_loss_weights=1, bc_loss_weights=1)
gradnormadaptive_loss = NeuralPDE.GradientScaleAdaptiveLoss(100, pde_loss_weights=1e3, bc_loss_weights=1)
adaptive_loss = NeuralPDE.MiniMaxAdaptiveLoss(100; pde_loss_weights=1, bc_loss_weights=1)
adaptive_losses = [nonadaptive_loss, gradnormadaptive_loss,adaptive_loss]
maxiters=800
seed=60
nonadaptive_loss = NeuralPDE.NonAdaptiveLoss(pde_loss_weights = 1, bc_loss_weights = 1)
gradnormadaptive_loss = NeuralPDE.GradientScaleAdaptiveLoss(100, pde_loss_weights = 1e3,
bc_loss_weights = 1)
adaptive_loss = NeuralPDE.MiniMaxAdaptiveLoss(100; pde_loss_weights = 1,
bc_loss_weights = 1)
adaptive_losses = [nonadaptive_loss, gradnormadaptive_loss, adaptive_loss]
maxiters = 800
seed = 60

## 2D Poisson equation
function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, haslogger; seed=60, maxiters=800)
function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, haslogger;
seed = 60, maxiters = 800)
logdir = joinpath(outdir, string(run))
if haslogger
logger = TBLogger(logdir)
@@ -24,23 +27,24 @@ end
end
Random.seed!(seed)
hid = 40
chain_ = FastChain(FastDense(2,hid,Flux.σ),FastDense(hid,hid,Flux.σ),FastDense(hid,1))
strategy_ = NeuralPDE.StochasticTraining(256)
chain_ = FastChain(FastDense(2, hid, Flux.σ), FastDense(hid, hid, Flux.σ),
FastDense(hid, 1))
strategy_ = NeuralPDE.StochasticTraining(256)
@info "adaptive reweighting test logdir: $(logdir), maxiters: $(maxiters), 2D Poisson equation, adaptive_loss: $(nameof(typeof(adaptive_loss))) "
@parameters x y
@variables u(..)
Dxx = Differential(x)^2
Dyy = Differential(y)^2

# 2D PDE
eq = Dxx(u(x,y)) + Dyy(u(x,y)) ~ -sin(pi*x)*sin(pi*y)
eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sin(pi * x) * sin(pi * y)

# Initial and boundary conditions
bcs = [u(0,y) ~ 0.0, u(1,y) ~ -sin(pi*1)*sin(pi*y),
u(x,0) ~ 0.0, u(x,1) ~ -sin(pi*x)*sin(pi*1)]
bcs = [u(0, y) ~ 0.0, u(1, y) ~ -sin(pi * 1) * sin(pi * y),
u(x, 0) ~ 0.0, u(x, 1) ~ -sin(pi * x) * sin(pi * 1)]
# Space and time domains
domains = [x ∈ Interval(0.0,1.0),
y ∈ Interval(0.0,1.0)]
domains = [x ∈ Interval(0.0, 1.0),
y ∈ Interval(0.0, 1.0)]

initθ = Float64.(DiffEqFlux.initial_params(chain_))
iteration = [0]
@@ -49,43 +53,46 @@ function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, hasl
init_params = initθ,
adaptive_loss = adaptive_loss,
logger = logger,
iteration=iteration)
iteration = iteration)


@named pde_system = PDESystem(eq,bcs,domains,[x,y],[u(x, y)])
prob = NeuralPDE.discretize(pde_system,discretization)
@named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)])
prob = NeuralPDE.discretize(pde_system, discretization)
phi = discretization.phi
sym_prob = NeuralPDE.symbolic_discretize(pde_system,discretization)

sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)

xs,ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
analytic_sol_func(x,y) = (sin(pi*x)*sin(pi*y))/(2pi^2)
u_real = reshape([analytic_sol_func(x,y) for x in xs for y in ys], (length(xs),length(ys)))
xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2)
u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys],
(length(xs), length(ys)))

callback = function (p,l)
callback = function (p, l)
iteration[1] += 1
if iteration[1] % 100 == 0
@info "Current loss is: $l, iteration is $(iteration[1])"
end
if haslogger
log_value(logger, "outer_error/loss", l, step=iteration[1])
log_value(logger, "outer_error/loss", l, step = iteration[1])
if iteration[1] % 30 == 0
u_predict = reshape([first(phi([x,y],p)) for x in xs for y in ys],(length(xs),length(ys)))
u_predict = reshape([first(phi([x, y], p)) for x in xs for y in ys],
(length(xs), length(ys)))
diff_u = abs.(u_predict .- u_real)
total_diff = sum(diff_u)
log_value(logger, "outer_error/total_diff", total_diff, step=iteration[1])
log_value(logger, "outer_error/total_diff", total_diff, step = iteration[1])
total_u = sum(abs.(u_real))
total_diff_rel = total_diff / total_u
log_value(logger, "outer_error/total_diff_rel", total_diff_rel, step=iteration[1])
log_value(logger, "outer_error/total_diff_rel", total_diff_rel,
step = iteration[1])
total_diff_sq = sum(diff_u .^ 2)
log_value(logger, "outer_error/total_diff_sq", total_diff_sq, step=iteration[1])
log_value(logger, "outer_error/total_diff_sq", total_diff_sq,
step = iteration[1])
end
end
return false
end
res = Optimization.solve(prob, ADAM(0.03); maxiters=maxiters, callback=callback)
res = Optimization.solve(prob, ADAM(0.03); maxiters = maxiters, callback = callback)

u_predict = reshape([first(phi([x,y],res.minimizer)) for x in xs for y in ys],(length(xs),length(ys)))
u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
(length(xs), length(ys)))
diff_u = abs.(u_predict .- u_real)
total_diff = sum(diff_u)
total_u = sum(abs.(u_real))
@@ -95,7 +102,7 @@ function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, hasl
#p2 = plot(xs, ys, u_predict, linetype=:contourf,title = "predict");
#p3 = plot(xs, ys, diff_u,linetype=:contourf,title = "error");
#(plot=plot(p1,p2,p3), error=total_diff, total_diff_rel=total_diff_rel)
(error=total_diff, total_diff_rel=total_diff_rel)
(error = total_diff, total_diff_rel = total_diff_rel)
end

possible_logger_dir = mktempdir()
@@ -115,8 +122,12 @@ end

@info "has logger: $(haslogger), expected log folders: $(expected_log_folders)"

test_2d_poisson_equation_adaptive_loss_run_seediters(adaptive_loss, run) = test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, possible_logger_dir, haslogger; seed=seed, maxiters=maxiters)
error_results = map(test_2d_poisson_equation_adaptive_loss_run_seediters, adaptive_losses, 1:length(adaptive_losses))
function test_2d_poisson_equation_adaptive_loss_run_seediters(adaptive_loss, run)
test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, possible_logger_dir,
haslogger; seed = seed, maxiters = maxiters)
end
error_results = map(test_2d_poisson_equation_adaptive_loss_run_seediters, adaptive_losses,
1:length(adaptive_losses))

@test length(readdir(possible_logger_dir)) == expected_log_folders
if expected_log_folders > 0