Merge pull request #542 from SciML/format
format SciML Style
ChrisRackauckas authored Jun 26, 2022
2 parents 2f29dfd + 7c3c404 commit a2afce6
Showing 31 changed files with 2,644 additions and 2,251 deletions.
42 changes: 42 additions & 0 deletions .github/workflows/FormatCheck.yml
@@ -0,0 +1,42 @@
name: format-check

on:
push:
branches:
- 'master'
- 'release-'
tags: '*'
pull_request:

jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
julia-version: [1]
julia-arch: [x86]
os: [ubuntu-latest]
steps:
- uses: julia-actions/setup-julia@latest
with:
version: ${{ matrix.julia-version }}

- uses: actions/checkout@v1
- name: Install JuliaFormatter and format
# This will use the latest version by default but you can set the version like so:
#
# julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter", version="0.13.0"))'
run: |
julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))'
julia -e 'using JuliaFormatter; format(".", verbose=true)'
- name: Format check
run: |
julia -e '
out = Cmd(`git diff --name-only`) |> read |> String
if out == ""
exit(0)
else
@error "Some files have not been formatted !!!"
write(stdout, out)
exit(1)
end'
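For contributors, the two CI steps above can be reproduced locally before pushing: format the tree, then fail if git reports modified files. A minimal sketch (assumes git is on the PATH; the workflow above calls format with JuliaFormatter's default style, while the SciMLStyle() variant shown in the comment is what the commit message suggests and is available in recent JuliaFormatter releases):

# Mirror the CI job locally: format in-place, then check `git diff`.
using JuliaFormatter

format("."; verbose = true)  # or format(".", SciMLStyle()) for the SciML style

# An empty `git diff --name-only` means the tree was already formatted.
changed = read(`git diff --name-only`, String)
isempty(changed) || @error "Some files have not been formatted:\n$changed"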
42 changes: 19 additions & 23 deletions docs/make.jl
@@ -2,27 +2,23 @@ using Documenter, NeuralPDE

include("pages.jl")

makedocs(
sitename="NeuralPDE.jl",
authors="#",
clean=true,
doctest=false,
modules=[NeuralPDE],
strict=[
:doctest,
:linkcheck,
:parse_error,
:example_block,
# Other available options are
# :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block
],
format=Documenter.HTML( analytics = "UA-90474609-3",
assets=["assets/favicon.ico"],
canonical="https://neuralpde.sciml.ai/stable/"),
pages=pages
)
makedocs(sitename = "NeuralPDE.jl",
authors = "#",
clean = true,
doctest = false,
modules = [NeuralPDE],
strict = [
:doctest,
:linkcheck,
:parse_error,
:example_block,
# Other available options are
# :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block
],
format = Documenter.HTML(analytics = "UA-90474609-3",
assets = ["assets/favicon.ico"],
canonical = "https://neuralpde.sciml.ai/stable/"),
pages = pages)

deploydocs(
repo="github.com/SciML/NeuralPDE.jl.git";
push_preview=true
)
deploydocs(repo = "github.com/SciML/NeuralPDE.jl.git";
push_preview = true)
58 changes: 25 additions & 33 deletions docs/pages.jl
@@ -1,34 +1,26 @@
pages = [
"NeuralPDE.jl: Scientific Machine Learning (SciML) for Partial Differential Equations" => "index.md",
"Physics-Informed Neural Network Tutorials" => Any[
"pinn/poisson.md",
"pinn/wave.md",
"pinn/2D.md",
"pinn/system.md",
"pinn/3rd.md",
"pinn/low_level.md",
"pinn/ks.md",
"pinn/fp.md",
"pinn/parm_estim.md",
"pinn/heterogeneous.md",
"pinn/integro_diff.md",
"pinn/debugging.md",
"pinn/neural_adapter.md",
],
"Specialized Neural PDE Tutorials" => Any[
"examples/kolmogorovbackwards.md",
"examples/optimal_stopping_american.md",
],
"Specialized Neural ODE Tutorials" => Any[
"examples/ode.md",
"examples/nnrode_example.md",
],
"API Documentation" => Any[
"solvers/ode.md",
"solvers/pinns.md",
"solvers/training_strategies.md",
"solvers/kolmogorovbackwards_solver.md",
"solvers/optimal_stopping.md",#TODO
"solvers/nnrode.md",#TODO
]
]
"NeuralPDE.jl: Scientific Machine Learning (SciML) for Partial Differential Equations" => "index.md",
"Physics-Informed Neural Network Tutorials" => Any["pinn/poisson.md",
"pinn/wave.md",
"pinn/2D.md",
"pinn/system.md",
"pinn/3rd.md",
"pinn/low_level.md",
"pinn/ks.md",
"pinn/fp.md",
"pinn/parm_estim.md",
"pinn/heterogeneous.md",
"pinn/integro_diff.md",
"pinn/debugging.md",
"pinn/neural_adapter.md"],
"Specialized Neural PDE Tutorials" => Any["examples/kolmogorovbackwards.md",
"examples/optimal_stopping_american.md"],
"Specialized Neural ODE Tutorials" => Any["examples/ode.md",
"examples/nnrode_example.md"],
"API Documentation" => Any["solvers/ode.md",
"solvers/pinns.md",
"solvers/training_strategies.md",
"solvers/kolmogorovbackwards_solver.md",
"solvers/optimal_stopping.md",#TODO
"solvers/nnrode.md"],
]
11 changes: 6 additions & 5 deletions lib/NeuralPDELogging/src/NeuralPDELogging.jl
@@ -5,19 +5,20 @@ using TensorBoardLogger

"""This function overrides the empty function in NeuralPDE in order to use TensorBoardLogger in that package
This is light type piracy but it should be alright since this is a subpackage of NeuralPDE"""
function NeuralPDE.logvector(logger::TBLogger, vector::AbstractVector{R}, name::AbstractString, step::Integer) where R <: Real
function NeuralPDE.logvector(logger::TBLogger, vector::AbstractVector{R},
name::AbstractString, step::Integer) where {R <: Real}
for j in 1:length(vector)
log_value(logger, "$(name)/$(j)", vector[j], step=step)
log_value(logger, "$(name)/$(j)", vector[j], step = step)
end
nothing
end

"""This function overrides the empty function in NeuralPDE in order to use TensorBoardLogger in that package.
This is light type piracy but it should be alright since this is a subpackage of NeuralPDE"""
function NeuralPDE.logscalar(logger::TBLogger, scalar::R, name::AbstractString, step::Integer) where R <: Real
log_value(logger, "$(name)", scalar, step=step)
function NeuralPDE.logscalar(logger::TBLogger, scalar::R, name::AbstractString,
step::Integer) where {R <: Real}
log_value(logger, "$(name)", scalar, step = step)
nothing
end


end
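The "empty function" pattern these docstrings describe is how NeuralPDE keeps logging optional: the parent package ships do-nothing fallbacks, and this subpackage adds specialized methods for the TBLogger type. A sketch of what the NeuralPDE side presumably looks like (the exact stub signatures are an assumption here, not copied from NeuralPDE):

# Hypothetical fallbacks in NeuralPDE itself: with no logger-specific
# method defined, logging calls silently do nothing, so logging only
# activates once NeuralPDELogging is loaded.
logvector(logger, vector::AbstractVector{<:Real}, name::AbstractString, step::Integer) = nothing
logscalar(logger, scalar::Real, name::AbstractString, step::Integer) = nothing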
79 changes: 45 additions & 34 deletions lib/NeuralPDELogging/test/adaptive_loss_log_tests.jl
@@ -7,15 +7,18 @@ using Random
#using Plots
@info "Starting Soon!"

nonadaptive_loss = NeuralPDE.NonAdaptiveLoss(pde_loss_weights=1, bc_loss_weights=1)
gradnormadaptive_loss = NeuralPDE.GradientScaleAdaptiveLoss(100, pde_loss_weights=1e3, bc_loss_weights=1)
adaptive_loss = NeuralPDE.MiniMaxAdaptiveLoss(100; pde_loss_weights=1, bc_loss_weights=1)
adaptive_losses = [nonadaptive_loss, gradnormadaptive_loss,adaptive_loss]
maxiters=800
seed=60
nonadaptive_loss = NeuralPDE.NonAdaptiveLoss(pde_loss_weights = 1, bc_loss_weights = 1)
gradnormadaptive_loss = NeuralPDE.GradientScaleAdaptiveLoss(100, pde_loss_weights = 1e3,
bc_loss_weights = 1)
adaptive_loss = NeuralPDE.MiniMaxAdaptiveLoss(100; pde_loss_weights = 1,
bc_loss_weights = 1)
adaptive_losses = [nonadaptive_loss, gradnormadaptive_loss, adaptive_loss]
maxiters = 800
seed = 60

## 2D Poisson equation
function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, haslogger; seed=60, maxiters=800)
function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, haslogger;
seed = 60, maxiters = 800)
logdir = joinpath(outdir, string(run))
if haslogger
logger = TBLogger(logdir)
@@ -24,23 +27,24 @@ function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, hasl
end
Random.seed!(seed)
hid = 40
chain_ = FastChain(FastDense(2,hid,Flux.σ),FastDense(hid,hid,Flux.σ),FastDense(hid,1))
strategy_ = NeuralPDE.StochasticTraining(256)
chain_ = FastChain(FastDense(2, hid, Flux.σ), FastDense(hid, hid, Flux.σ),
FastDense(hid, 1))
strategy_ = NeuralPDE.StochasticTraining(256)
@info "adaptive reweighting test logdir: $(logdir), maxiters: $(maxiters), 2D Poisson equation, adaptive_loss: $(nameof(typeof(adaptive_loss))) "
@parameters x y
@variables u(..)
Dxx = Differential(x)^2
Dyy = Differential(y)^2

# 2D PDE
eq = Dxx(u(x,y)) + Dyy(u(x,y)) ~ -sin(pi*x)*sin(pi*y)
eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sin(pi * x) * sin(pi * y)

# Initial and boundary conditions
bcs = [u(0,y) ~ 0.0, u(1,y) ~ -sin(pi*1)*sin(pi*y),
u(x,0) ~ 0.0, u(x,1) ~ -sin(pi*x)*sin(pi*1)]
bcs = [u(0, y) ~ 0.0, u(1, y) ~ -sin(pi * 1) * sin(pi * y),
u(x, 0) ~ 0.0, u(x, 1) ~ -sin(pi * x) * sin(pi * 1)]
# Space and time domains
domains = [x Interval(0.0,1.0),
y Interval(0.0,1.0)]
domains = [x Interval(0.0, 1.0),
y Interval(0.0, 1.0)]

initθ = Float64.(DiffEqFlux.initial_params(chain_))
iteration = [0]
@@ -49,43 +53,46 @@ function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, hasl
init_params = initθ,
adaptive_loss = adaptive_loss,
logger = logger,
iteration=iteration)
iteration = iteration)


@named pde_system = PDESystem(eq,bcs,domains,[x,y],[u(x, y)])
prob = NeuralPDE.discretize(pde_system,discretization)
@named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)])
prob = NeuralPDE.discretize(pde_system, discretization)
phi = discretization.phi
sym_prob = NeuralPDE.symbolic_discretize(pde_system,discretization)

sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)

xs,ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
analytic_sol_func(x,y) = (sin(pi*x)*sin(pi*y))/(2pi^2)
u_real = reshape([analytic_sol_func(x,y) for x in xs for y in ys], (length(xs),length(ys)))
xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2)
u_real = reshape([analytic_sol_func(x, y) for x in xs for y in ys],
(length(xs), length(ys)))

callback = function (p,l)
callback = function (p, l)
iteration[1] += 1
if iteration[1] % 100 == 0
@info "Current loss is: $l, iteration is $(iteration[1])"
end
if haslogger
log_value(logger, "outer_error/loss", l, step=iteration[1])
log_value(logger, "outer_error/loss", l, step = iteration[1])
if iteration[1] % 30 == 0
u_predict = reshape([first(phi([x,y],p)) for x in xs for y in ys],(length(xs),length(ys)))
u_predict = reshape([first(phi([x, y], p)) for x in xs for y in ys],
(length(xs), length(ys)))
diff_u = abs.(u_predict .- u_real)
total_diff = sum(diff_u)
log_value(logger, "outer_error/total_diff", total_diff, step=iteration[1])
log_value(logger, "outer_error/total_diff", total_diff, step = iteration[1])
total_u = sum(abs.(u_real))
total_diff_rel = total_diff / total_u
log_value(logger, "outer_error/total_diff_rel", total_diff_rel, step=iteration[1])
log_value(logger, "outer_error/total_diff_rel", total_diff_rel,
step = iteration[1])
total_diff_sq = sum(diff_u .^ 2)
log_value(logger, "outer_error/total_diff_sq", total_diff_sq, step=iteration[1])
log_value(logger, "outer_error/total_diff_sq", total_diff_sq,
step = iteration[1])
end
end
return false
end
res = Optimization.solve(prob, ADAM(0.03); maxiters=maxiters, callback=callback)
res = Optimization.solve(prob, ADAM(0.03); maxiters = maxiters, callback = callback)

u_predict = reshape([first(phi([x,y],res.minimizer)) for x in xs for y in ys],(length(xs),length(ys)))
u_predict = reshape([first(phi([x, y], res.minimizer)) for x in xs for y in ys],
(length(xs), length(ys)))
diff_u = abs.(u_predict .- u_real)
total_diff = sum(diff_u)
total_u = sum(abs.(u_real))
@@ -95,7 +102,7 @@ function test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, outdir, hasl
#p2 = plot(xs, ys, u_predict, linetype=:contourf,title = "predict");
#p3 = plot(xs, ys, diff_u,linetype=:contourf,title = "error");
#(plot=plot(p1,p2,p3), error=total_diff, total_diff_rel=total_diff_rel)
(error=total_diff, total_diff_rel=total_diff_rel)
(error = total_diff, total_diff_rel = total_diff_rel)
end

possible_logger_dir = mktempdir()
@@ -115,8 +122,12 @@

@info "has logger: $(haslogger), expected log folders: $(expected_log_folders)"

test_2d_poisson_equation_adaptive_loss_run_seediters(adaptive_loss, run) = test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, possible_logger_dir, haslogger; seed=seed, maxiters=maxiters)
error_results = map(test_2d_poisson_equation_adaptive_loss_run_seediters, adaptive_losses, 1:length(adaptive_losses))
function test_2d_poisson_equation_adaptive_loss_run_seediters(adaptive_loss, run)
test_2d_poisson_equation_adaptive_loss(adaptive_loss, run, possible_logger_dir,
haslogger; seed = seed, maxiters = maxiters)
end
error_results = map(test_2d_poisson_equation_adaptive_loss_run_seediters, adaptive_losses,
1:length(adaptive_losses))

@test length(readdir(possible_logger_dir)) == expected_log_folders
if expected_log_folders > 0
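As a sanity check on the test problem itself: the analytic solution used above, analytic_sol_func(x, y) = sin(pi*x)*sin(pi*y)/(2pi^2), does satisfy Dxx(u) + Dyy(u) ~ -sin(pi*x)*sin(pi*y), since each second derivative contributes -sin(pi*x)*sin(pi*y)/2 and the two contributions sum to the right-hand side. A standalone finite-difference spot-check (not part of the diff; the sample point and step size are arbitrary):

# Verify the analytic solution of the 2D Poisson equation at one point
# using central second differences.
u(x, y) = sin(pi * x) * sin(pi * y) / (2pi^2)
rhs(x, y) = -sin(pi * x) * sin(pi * y)

x0, y0, h = 0.3, 0.7, 1e-4
uxx = (u(x0 + h, y0) - 2u(x0, y0) + u(x0 - h, y0)) / h^2
uyy = (u(x0, y0 + h) - 2u(x0, y0) + u(x0, y0 - h)) / h^2
@assert isapprox(uxx + uyy, rhs(x0, y0); atol = 1e-6)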
31 changes: 14 additions & 17 deletions lib/NeuralPDELogging/test/runtests.jl
@@ -3,44 +3,41 @@ using SafeTestsets

const GROUP = get(ENV, "GROUP", "All")

const is_APPVEYOR = Sys.iswindows() && haskey(ENV,"APPVEYOR")
const is_APPVEYOR = Sys.iswindows() && haskey(ENV, "APPVEYOR")

const is_TRAVIS = haskey(ENV,"TRAVIS")
const is_TRAVIS = haskey(ENV, "TRAVIS")

is_CI = haskey(ENV,"CI")
is_CI = haskey(ENV, "CI")


@time begin
if GROUP == "All" || GROUP == "Logging"
@time @safetestset "AdaptiveLossLogNoImport" begin
@time begin if GROUP == "All" || GROUP == "Logging"
@time @safetestset "AdaptiveLossLogNoImport" begin
using Pkg
neuralpde_dir = dirname(abspath(joinpath(@__DIR__, "..", "..", "..")))
@info "loading neuralpde package at : $(neuralpde_dir)"
neuralpde = Pkg.PackageSpec(path = neuralpde_dir)
Pkg.develop(neuralpde)
@info "making sure that there are no logs without having imported NeuralPDELogging"
ENV["LOG_SETTING"] = "NoImport"
include("adaptive_loss_log_tests.jl")
end
@time @safetestset "AdaptiveLossLogImportNoUse" begin
include("adaptive_loss_log_tests.jl")
end
@time @safetestset "AdaptiveLossLogImportNoUse" begin
using Pkg
neuralpde_dir = dirname(abspath(joinpath(@__DIR__, "..", "..", "..")))
@info "loading neuralpde package at : $(neuralpde_dir)"
neuralpde = Pkg.PackageSpec(path = neuralpde_dir)
Pkg.develop(neuralpde)
@info "making sure that there are still no logs now that we have imported NeuralPDELogging"
ENV["LOG_SETTING"] = "ImportNoUse"
include("adaptive_loss_log_tests.jl")
end
@time @safetestset "AdaptiveLossLogImportUse" begin
include("adaptive_loss_log_tests.jl")
end
@time @safetestset "AdaptiveLossLogImportUse" begin
using Pkg
neuralpde_dir = dirname(abspath(joinpath(@__DIR__, "..", "..", "..")))
@info "loading neuralpde package at : $(neuralpde_dir)"
neuralpde = Pkg.PackageSpec(path = neuralpde_dir)
Pkg.develop(neuralpde)
ENV["LOG_SETTING"] = "ImportUse"
@info "making sure that logs are generated now if we use a logger"
include("adaptive_loss_log_tests.jl")
end
end
end
include("adaptive_loss_log_tests.jl")
end
end end
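The GROUP environment variable gates which @safetestset blocks run, so the logging tests can be targeted without running everything. A usage sketch (assumes a Julia session started in the lib/NeuralPDELogging directory with its test dependencies resolvable):

# Run only the "Logging" test group; GROUP defaults to "All".
using Pkg
ENV["GROUP"] = "Logging"
Pkg.activate(".")
Pkg.test()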
