diff --git a/docs/src/tutorials/derivative_neural_network.md b/docs/src/tutorials/derivative_neural_network.md index bd26ce50f..ac9721d1a 100644 --- a/docs/src/tutorials/derivative_neural_network.md +++ b/docs/src/tutorials/derivative_neural_network.md @@ -52,9 +52,8 @@ We approximate the derivative of the neural network with another neural network using the second numeric derivative `Dt(Dtu1(t,x))`. ```@example derivativenn -using NeuralPDE, Lux, ModelingToolkit -using Optimization, OptimizationOptimisers, OptimizationOptimJL, LineSearches -using Plots +using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimisers, + OptimizationOptimJL, LineSearches, Plots using ModelingToolkit: Interval, infimum, supremum @parameters t, x @@ -63,35 +62,40 @@ Dx = Differential(x) @variables u1(..), u2(..), u3(..) @variables Dxu1(..) Dtu1(..) Dxu2(..) Dtu2(..) -eqs_ = [Dt(Dtu1(t, x)) ~ Dx(Dxu1(t, x)) + u3(t, x) * sin(pi * x), - Dt(Dtu2(t, x)) ~ Dx(Dxu2(t, x)) + u3(t, x) * cos(pi * x), - exp(-t) ~ u1(t, x) * sin(pi * x) + u2(t, x) * cos(pi * x)] - -bcs_ = [u1(0.0, x) ~ sin(pi * x), - u2(0.0, x) ~ cos(pi * x), - Dt(u1(0, x)) ~ -sin(pi * x), - Dt(u2(0, x)) ~ -cos(pi * x), +eqs_ = [ + Dt(Dtu1(t, x)) ~ Dx(Dxu1(t, x)) + u3(t, x) * sinpi(x), + Dt(Dtu2(t, x)) ~ Dx(Dxu2(t, x)) + u3(t, x) * cospi(x), + exp(-t) ~ u1(t, x) * sinpi(x) + u2(t, x) * cospi(x) +] + +bcs_ = [ + u1(0.0, x) ~ sinpi(x), + u2(0.0, x) ~ cospi(x), + Dt(u1(0, x)) ~ -sinpi(x), + Dt(u2(0, x)) ~ -cospi(x), u1(t, 0.0) ~ 0.0, u2(t, 0.0) ~ exp(-t), u1(t, 1.0) ~ 0.0, - u2(t, 1.0) ~ -exp(-t)] + u2(t, 1.0) ~ -exp(-t) +] -der_ = [Dt(u1(t, x)) ~ Dtu1(t, x), +der_ = [ + Dt(u1(t, x)) ~ Dtu1(t, x), Dt(u2(t, x)) ~ Dtu2(t, x), Dx(u1(t, x)) ~ Dxu1(t, x), - Dx(u2(t, x)) ~ Dxu2(t, x)] + Dx(u2(t, x)) ~ Dxu2(t, x) +] bcs__ = [bcs_; der_] # Space and time domains -domains = [t ∈ Interval(0.0, 1.0), - x ∈ Interval(0.0, 1.0)] +domains = [t ∈ Interval(0.0, 1.0), x ∈ Interval(0.0, 1.0)] input_ = length(domains) n = 15 -chain = [Lux.Chain(Dense(input_, n, Lux.σ), Dense(n, n, Lux.σ), Dense(n, 1)) for _ in 1:7] +chain = [Chain(Dense(input_, n, σ), Dense(n, n, σ), Dense(n, 1)) for _ in 1:7] -training_strategy = QuadratureTraining(; batch = 200, reltol = 1e-6, abstol = 1e-6) +training_strategy = StochasticTraining(128) discretization = PhysicsInformedNN(chain, training_strategy) vars = [u1(t, x), u2(t, x), u3(t, x), Dxu1(t, x), Dtu1(t, x), Dxu2(t, x), Dtu2(t, x)] @@ -126,13 +130,13 @@ using Plots ts, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains] minimizers_ = [res.u.depvar[sym_prob.depvars[i]] for i in 1:length(chain)] -u1_real(t, x) = exp(-t) * sin(pi * x) -u2_real(t, x) = exp(-t) * cos(pi * x) +u1_real(t, x) = exp(-t) * sinpi(x) +u2_real(t, x) = exp(-t) * cospi(x) u3_real(t, x) = (1 + pi^2) * exp(-t) -Dxu1_real(t, x) = pi * exp(-t) * cos(pi * x) -Dtu1_real(t, x) = -exp(-t) * sin(pi * x) -Dxu2_real(t, x) = -pi * exp(-t) * sin(pi * x) -Dtu2_real(t, x) = -exp(-t) * cos(pi * x) +Dxu1_real(t, x) = pi * exp(-t) * cospi(x) +Dtu1_real(t, x) = -exp(-t) * sinpi(x) +Dxu2_real(t, x) = -pi * exp(-t) * sinpi(x) +Dtu2_real(t, x) = -exp(-t) * cospi(x) function analytic_sol_func_all(t, x) [u1_real(t, x), u2_real(t, x), u3_real(t, x), diff --git a/docs/src/tutorials/neural_adapter.md b/docs/src/tutorials/neural_adapter.md index bcff48fa3..40914b6fa 100644 --- a/docs/src/tutorials/neural_adapter.md +++ b/docs/src/tutorials/neural_adapter.md @@ -24,43 +24,44 @@ Dxx = Differential(x)^2 Dyy = Differential(y)^2 # 2D PDE -eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sin(pi 
* x) * sin(pi * y)
+eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sinpi(x) * sinpi(y)

# Initial and boundary conditions
-bcs = [u(0, y) ~ 0.0, u(1, y) ~ -sin(pi * 1) * sin(pi * y),
-    u(x, 0) ~ 0.0, u(x, 1) ~ -sin(pi * x) * sin(pi * 1)]
+bcs = [
+    u(0, y) ~ 0.0,
+    u(1, y) ~ -sinpi(1) * sinpi(y),
+    u(x, 0) ~ 0.0,
+    u(x, 1) ~ -sinpi(x) * sinpi(1)
+]

# Space and time domains
domains = [x ∈ Interval(0.0, 1.0), y ∈ Interval(0.0, 1.0)]
-quadrature_strategy = NeuralPDE.QuadratureTraining(reltol = 1e-3, abstol = 1e-6,
-    maxiters = 50, batch = 100)
+
+strategy = StochasticTraining(1024)

inner = 8
-af = Lux.tanh
-chain1 = Lux.Chain(Lux.Dense(2, inner, af),
-    Lux.Dense(inner, inner, af),
-    Lux.Dense(inner, 1))
+af = tanh
+chain1 = Chain(Dense(2, inner, af), Dense(inner, inner, af), Dense(inner, 1))

-discretization = NeuralPDE.PhysicsInformedNN(chain1, quadrature_strategy)
+discretization = PhysicsInformedNN(chain1, strategy)

@named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)])
-prob = NeuralPDE.discretize(pde_system, discretization)
-sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)
+prob = discretize(pde_system, discretization)
+sym_prob = symbolic_discretize(pde_system, discretization)

callback = function (p, l)
    println("Current loss is: $l")
    return false
end

-res = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
+res = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000,
+    callback)
phi = discretization.phi

inner_ = 8
-af = Lux.tanh
-chain2 = Lux.Chain(Dense(2, inner_, af),
-    Dense(inner_, inner_, af),
-    Dense(inner_, inner_, af),
+af = tanh
+chain2 = Chain(Dense(2, inner_, af), Dense(inner_, inner_, af), Dense(inner_, inner_, af),
    Dense(inner_, 1))

initp, st = Lux.setup(Random.default_rng(), chain2)
-init_params2 = Float64.(ComponentArray(initp))
+init_params2 = ComponentArray{Float64}(initp)

# the rule by which the training will take place is described here in the loss function
function loss(cord, θ)
@@ -69,15 +70,15 @@ function loss(cord, θ)
    ch2 .- phi(cord, res.u)
end

-strategy = NeuralPDE.QuadratureTraining(; reltol = 1e-6, abstol = 1e-3)
+strategy = GridTraining(0.1)

-prob_ = NeuralPDE.neural_adapter(loss, init_params2, pde_system, strategy)
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
+prob_ = neural_adapter(loss, init_params2, pde_system, strategy)
+res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback)

phi_ = PhysicsInformedNN(chain2, strategy; init_params = res_.u).phi

xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
-analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2)
+analytic_sol_func(x, y) = (sinpi(x) * sinpi(y)) / (2pi^2)

u_predict = reshape([first(phi([x, y], res.u)) for x in xs for y in ys],
    (length(xs), length(ys)))
@@ -99,7 +100,7 @@ plot(p1, p2, p3, p4, p5)

## Domain decomposition

-In this example, we first obtain a prediction of 2D Poisson equation on subdomains. We split up full domain into 10 sub problems by x, and create separate neural networks for each sub interval. If x domain ∈ [x_0, x_end] so, it is decomposed on 10 part: sub x domains = {[x_0, x_1], ... [x_i,x_i+1], ..., x_9,x_end]}.
+In this example, we first obtain a prediction of the 2D Poisson equation on subdomains. We split the full domain into 4 subproblems along x and create a separate neural network for each subinterval. If the x domain is [x_0, x_end], it is decomposed into 4 parts: sub x domains = {[x_0, x_1], ..., [x_i, x_i+1], ..., [x_3, x_end]}.
Then, using the neural_adapter method, we retrain the batch of 4 predictions into a single prediction for the full domain.

![domain_decomposition](https://user-images.githubusercontent.com/12683885/127149752-a4ecea50-2984-45d8-b0d4-d2eadecf58e7.png)

@@ -113,10 +114,14 @@ using ModelingToolkit: Interval, infimum, supremum
Dxx = Differential(x)^2
Dyy = Differential(y)^2

-eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sin(pi * x) * sin(pi * y)
+eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sinpi(x) * sinpi(y)

-bcs = [u(0, y) ~ 0.0, u(1, y) ~ -sin(pi * 1) * sin(pi * y),
-    u(x, 0) ~ 0.0, u(x, 1) ~ -sin(pi * x) * sin(pi * 1)]
+bcs = [
+    u(0, y) ~ 0.0,
+    u(1, y) ~ -sinpi(1) * sinpi(y),
+    u(x, 0) ~ 0.0,
+    u(x, 1) ~ -sinpi(x) * sinpi(1)
+]

# Space
x_0 = 0.0
@@ -125,38 +130,34 @@ x_domain = Interval(x_0, x_end)
y_domain = Interval(0.0, 1.0)
domains = [x ∈ x_domain, y ∈ y_domain]
-count_decomp = 10
+count_decomp = 4

# Neural network
-af = Lux.tanh
+af = tanh
inner = 10
-chains = [Lux.Chain(Dense(2, inner, af), Dense(inner, inner, af), Dense(inner, 1))
-          for _ in 1:count_decomp]
-init_params = map(
-    c -> Float64.(ComponentArray(Lux.setup(Random.default_rng(), c)[1])), chains)
+chain = Chain(Dense(2, inner, af), Dense(inner, inner, af), Dense(inner, 1))

xs_ = infimum(x_domain):(1 / count_decomp):supremum(x_domain)
xs_domain = [(xs_[i], xs_[i + 1]) for i in 1:(length(xs_) - 1)]
domains_map = map(xs_domain) do (xs_dom)
    x_domain_ = Interval(xs_dom...)
-    domains_ = [x ∈ x_domain_,
-        y ∈ y_domain]
+    domains_ = [x ∈ x_domain_, y ∈ y_domain]
end

-analytic_sol_func(x, y) = (sin(pi * x) * sin(pi * y)) / (2pi^2)
+analytic_sol_func(x, y) = (sinpi(x) * sinpi(y)) / (2pi^2)

function create_bcs(x_domain_, phi_bound)
    x_0, x_e = x_domain_.left, x_domain_.right
    if x_0 == 0.0
        bcs = [u(0, y) ~ 0.0,
            u(x_e, y) ~ analytic_sol_func(x_e, y),
            u(x, 0) ~ 0.0,
-            u(x, 1) ~ -sin(pi * x) * sin(pi * 1)]
+            u(x, 1) ~ -sinpi(x) * sinpi(1)]
        return bcs
    end
    bcs = [u(x_0, y) ~ phi_bound(x_0, y),
        u(x_e, y) ~ analytic_sol_func(x_e, y),
        u(x, 0) ~ 0.0,
-        u(x, 1) ~ -sin(pi * x) * sin(pi * 1)]
+        u(x, 1) ~ -sinpi(x) * sinpi(1)]
    bcs
end

@@ -174,14 +175,13 @@ for i in 1:count_decomp
    bcs_ = create_bcs(domains_[1].domain, phi_bound)
    @named pde_system_ = PDESystem(eq, bcs_, domains_, [x, y], [u(x, y)])
    push!(pde_system_map, pde_system_)
-    strategy = NeuralPDE.QuadratureTraining(; reltol = 1e-6, abstol = 1e-3)
-    discretization = NeuralPDE.PhysicsInformedNN(chains[i], strategy;
-        init_params = init_params[i])
+    strategy = StochasticTraining(1024)
+    discretization = PhysicsInformedNN(chain, strategy)

-    prob = NeuralPDE.discretize(pde_system_, discretization)
-    symprob = NeuralPDE.symbolic_discretize(pde_system_, discretization)
-    res_ = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000)
+    prob = discretize(pde_system_, discretization)
+    symprob = symbolic_discretize(pde_system_, discretization)
+    res_ = solve(prob, OptimizationOptimisers.Adam(5e-3); maxiters = 10000, callback)
    phi = discretization.phi
    push!(reses, res_)
    push!(phis, phi)
@@ -218,15 +218,12 @@ dx = 0.01
u_predict, diff_u = compose_result(dx)

inner_ = 18
-af = Lux.tanh
-chain2 = Lux.Chain(Dense(2, inner_, af),
-    Dense(inner_, inner_, af),
-    Dense(inner_, inner_, af),
-    Dense(inner_, inner_, af),
-    Dense(inner_, 1))
+af = tanh
+chain2 = Chain(Dense(2, inner_, af), Dense(inner_, inner_, af), Dense(inner_, inner_, af),
+    Dense(inner_, inner_, af), Dense(inner_, 1))

initp, st = Lux.setup(Random.default_rng(), chain2)
-init_params2 = Float64.(ComponentArray(initp))
+init_params2 = 
ComponentArray{Float64}(initp)

@named pde_system = PDESystem(eq, bcs, domains, [x, y], [u(x, y)])

@@ -243,12 +240,8 @@ callback = function (p, l)
    return false
end

-prob_ = NeuralPDE.neural_adapter(losses, init_params2, pde_system_map,
-    NeuralPDE.QuadratureTraining(; reltol = 1e-6, abstol = 1e-3))
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000)
-prob_ = NeuralPDE.neural_adapter(losses, res_.u, pde_system_map,
-    NeuralPDE.QuadratureTraining(; reltol = 1e-6, abstol = 1e-3))
-res_ = Optimization.solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000)
+prob_ = neural_adapter(losses, init_params2, pde_system_map, StochasticTraining(1024))
+res_ = solve(prob_, OptimizationOptimisers.Adam(5e-3); maxiters = 5000, callback)

phi_ = PhysicsInformedNN(chain2, strategy; init_params = res_.u).phi

diff --git a/docs/src/tutorials/ode_parameter_estimation.md b/docs/src/tutorials/ode_parameter_estimation.md
index c8243e958..784828ee7 100644
--- a/docs/src/tutorials/ode_parameter_estimation.md
+++ b/docs/src/tutorials/ode_parameter_estimation.md
@@ -7,10 +7,7 @@ with Physics-Informed Neural Networks. Now we would consider the case where we w
We start by defining the problem:

```@example param_estim_lv
-using NeuralPDE, OrdinaryDiffEq
-using Lux, Random
-using OptimizationOptimJL, LineSearches
-using Plots
+using NeuralPDE, OrdinaryDiffEq, Lux, Random, OptimizationOptimJL, LineSearches, Plots
using Test # hide

function lv(u, p, t)
@@ -42,21 +39,14 @@ Now, let's define a neural network for the PINN using [Lux.jl](https://lux.csail.mit.edu/).
rng = Random.default_rng()
Random.seed!(rng, 0)
n = 15
-chain = Lux.Chain(
-    Lux.Dense(1, n, Lux.σ),
-    Lux.Dense(n, n, Lux.σ),
-    Lux.Dense(n, n, Lux.σ),
-    Lux.Dense(n, 2)
-)
-ps, st = Lux.setup(rng, chain) |> Lux.f64
+chain = Chain(Dense(1, n, σ), Dense(n, n, σ), Dense(n, n, σ), Dense(n, 2))
+ps, st = Lux.setup(rng, chain) |> f64
```

Next, we define an additional loss term in the total loss, which measures how well the neural network's prediction fits the data.

```@example param_estim_lv
-function additional_loss(phi, θ)
-    return sum(abs2, phi(t_, θ) .- u_) / size(u_, 2)
-end
+additional_loss(phi, θ) = sum(abs2, phi(t_, θ) .- u_) / size(u_, 2)
```

Next, we define the optimizer and [`NNODE`](@ref), which is then plugged into the `solve` call.

```@example param_estim_lv
opt = LBFGS(linesearch = BackTracking())
alg = NNODE(chain, opt, ps; strategy = WeightedIntervalTraining([0.7, 0.2, 0.1], 500),
-    param_estim = true, additional_loss = additional_loss)
+    param_estim = true, additional_loss)
```

Now we have all the pieces to solve the optimization problem.

```@example param_estim_lv
sol = solve(prob, alg, verbose = true, abstol = 1e-8, maxiters = 5000, saveat = t_)
-@test sol.k.u.p≈true_p rtol=1e-2 # hide
+@test sol.k.u.p≈true_p rtol=1e-2 norm=Base.Fix1(maximum, abs) # hide
```

Let's plot the predictions from the PINN and compare them to the data.

diff --git a/docs/src/tutorials/pdesystem.md b/docs/src/tutorials/pdesystem.md
index 3fbfd55d9..e4156d9ef 100644
--- a/docs/src/tutorials/pdesystem.md
+++ b/docs/src/tutorials/pdesystem.md
@@ -28,10 +28,8 @@ Using physics-informed neural networks.
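+Before the code, note that this problem has the closed-form solution
+u(x, y) = sin(pi * x) * sin(pi * y) / (2pi^2), which satisfies the zero
+Dirichlet conditions above. A quick finite-difference residual check (a
+self-contained sketch; `u_exact`, `laplacian`, and the probe point are ad-hoc
+names, not part of the tutorial API):
+
+```julia
+u_exact(x, y) = sinpi(x) * sinpi(y) / (2pi^2)
+
+# Central-difference Laplacian; for the exact solution it should reproduce
+# the forcing term -sin(pi * x) * sin(pi * y) up to discretization error.
+h = 1e-4
+laplacian(f, x, y) = (f(x + h, y) + f(x - h, y) + f(x, y + h) + f(x, y - h) -
+                      4f(x, y)) / h^2
+
+x0, y0 = 0.3, 0.7
+@assert isapprox(laplacian(u_exact, x0, y0), -sinpi(x0) * sinpi(y0); atol = 1e-6)
+```
+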
## Copy-Pasteable Code ```@example poisson -using NeuralPDE, Lux, Optimization, OptimizationOptimJL -using LineSearches +using NeuralPDE, Lux, Optimization, OptimizationOptimJL, LineSearches, Plots using ModelingToolkit: Interval -using Plots @parameters x y @variables u(..) @@ -42,15 +40,16 @@ Dyy = Differential(y)^2 eq = Dxx(u(x, y)) + Dyy(u(x, y)) ~ -sin(pi * x) * sin(pi * y) # Boundary conditions -bcs = [u(0, y) ~ 0.0, u(1, y) ~ 0.0, - u(x, 0) ~ 0.0, u(x, 1) ~ 0.0] +bcs = [ + u(0, y) ~ 0.0, u(1, y) ~ 0.0, + u(x, 0) ~ 0.0, u(x, 1) ~ 0.0 +] # Space and time domains -domains = [x ∈ Interval(0.0, 1.0), - y ∈ Interval(0.0, 1.0)] +domains = [x ∈ Interval(0.0, 1.0), y ∈ Interval(0.0, 1.0)] # Neural network dim = 2 # number of dimensions -chain = Lux.Chain(Dense(dim, 16, Lux.σ), Dense(16, 16, Lux.σ), Dense(16, 1)) +chain = Chain(Dense(dim, 16, σ), Dense(16, 16, σ), Dense(16, 1)) # Discretization discretization = PhysicsInformedNN( @@ -66,7 +65,7 @@ callback = function (p, l) end # Optimizer -opt = OptimizationOptimJL.LBFGS(linesearch = BackTracking()) +opt = LBFGS(linesearch = BackTracking()) res = solve(prob, opt, maxiters = 1000) phi = discretization.phi @@ -116,7 +115,7 @@ Here, we define the neural network, where the input of NN equals the number of d ```@example poisson # Neural network dim = 2 # number of dimensions -chain = Lux.Chain(Dense(dim, 16, Lux.σ), Dense(16, 16, Lux.σ), Dense(16, 1)) +chain = Chain(Dense(dim, 16, σ), Dense(16, 16, σ), Dense(16, 1)) ``` Here, we build PhysicsInformedNN algorithm where `dx` is the step of discretization where diff --git a/docs/src/tutorials/systems.md b/docs/src/tutorials/systems.md index fceaa6898..321efad2a 100644 --- a/docs/src/tutorials/systems.md +++ b/docs/src/tutorials/systems.md @@ -35,7 +35,8 @@ with physics-informed neural networks. 
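+By construction, this system admits the reference solution
+u1(t, x) = exp(-t) * sinpi(x), u2(t, x) = exp(-t) * cospi(x), and
+u3(t, x) = (1 + pi^2) * exp(-t), which the end of this tutorial compares
+against. A minimal point check of the first equation (with ad-hoc helper
+names, not part of the tutorial code):
+
+```julia
+u1(t, x) = exp(-t) * sinpi(x)
+u3(t) = (1 + pi^2) * exp(-t)
+
+# For this separable ansatz, Dtt(u1) = u1 and Dxx(u1) = -pi^2 * u1, so
+# Dtt(u1) ~ Dxx(u1) + u3 * sinpi(x) reduces to an identity at any point:
+t0, x0 = 0.4, 0.3
+lhs = u1(t0, x0)
+rhs = -pi^2 * u1(t0, x0) + u3(t0) * sinpi(x0)
+@assert isapprox(lhs, rhs; rtol = 1e-12)
+```
+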
## Solution ```@example system -using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, LineSearches +using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, LineSearches, + OptimizationOptimisers using ModelingToolkit: Interval, infimum, supremum @parameters t, x @@ -45,29 +46,32 @@ Dtt = Differential(t)^2 Dx = Differential(x) Dxx = Differential(x)^2 -eqs = [Dtt(u1(t, x)) ~ Dxx(u1(t, x)) + u3(t, x) * sin(pi * x), - Dtt(u2(t, x)) ~ Dxx(u2(t, x)) + u3(t, x) * cos(pi * x), - 0.0 ~ u1(t, x) * sin(pi * x) + u2(t, x) * cos(pi * x) - exp(-t)] - -bcs = [u1(0, x) ~ sin(pi * x), - u2(0, x) ~ cos(pi * x), - Dt(u1(0, x)) ~ -sin(pi * x), - Dt(u2(0, x)) ~ -cos(pi * x), +eqs = [ + Dtt(u1(t, x)) ~ Dxx(u1(t, x)) + u3(t, x) * sinpi(x), + Dtt(u2(t, x)) ~ Dxx(u2(t, x)) + u3(t, x) * cospi(x), + 0.0 ~ u1(t, x) * sinpi(x) + u2(t, x) * cospi(x) - exp(-t) +] + +bcs = [ + u1(0, x) ~ sinpi(x), + u2(0, x) ~ cospi(x), + Dt(u1(0, x)) ~ -sinpi(x), + Dt(u2(0, x)) ~ -cospi(x), u1(t, 0) ~ 0.0, u2(t, 0) ~ exp(-t), u1(t, 1) ~ 0.0, - u2(t, 1) ~ -exp(-t)] + u2(t, 1) ~ -exp(-t) +] # Space and time domains -domains = [t ∈ Interval(0.0, 1.0), - x ∈ Interval(0.0, 1.0)] +domains = [t ∈ Interval(0.0, 1.0), x ∈ Interval(0.0, 1.0)] # Neural network input_ = length(domains) n = 15 -chain = [Lux.Chain(Dense(input_, n, Lux.σ), Dense(n, n, Lux.σ), Dense(n, 1)) for _ in 1:3] +chain = [Chain(Dense(input_, n, σ), Dense(n, n, σ), Dense(n, 1)) for _ in 1:3] -strategy = QuadratureTraining(; batch = 200, abstol = 1e-6, reltol = 1e-6) +strategy = StochasticTraining(128) discretization = PhysicsInformedNN(chain, strategy) @named pdesystem = PDESystem(eqs, bcs, domains, [t, x], [u1(t, x), u2(t, x), u3(t, x)]) @@ -84,7 +88,9 @@ callback = function (p, l) return false end -res = solve(prob, LBFGS(linesearch = BackTracking()); maxiters = 1000) +res = solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 1000, callback) +prob = remake(prob, u0 = res.u) +res = solve(prob, LBFGS(linesearch = BackTracking()); maxiters = 200, callback) phi = discretization.phi ``` @@ -95,64 +101,19 @@ interface. Here is an example using the components from `symbolic_discretize` to reproduce the `discretize` optimization: ```@example system -using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, LineSearches -import ModelingToolkit: Interval, infimum, supremum - -@parameters t, x -@variables u1(..), u2(..), u3(..) 
-Dt = Differential(t)
-Dtt = Differential(t)^2
-Dx = Differential(x)
-Dxx = Differential(x)^2
-
-eqs = [Dtt(u1(t, x)) ~ Dxx(u1(t, x)) + u3(t, x) * sin(pi * x),
-    Dtt(u2(t, x)) ~ Dxx(u2(t, x)) + u3(t, x) * cos(pi * x),
-    0.0 ~ u1(t, x) * sin(pi * x) + u2(t, x) * cos(pi * x) - exp(-t)]
-
-bcs = [u1(0, x) ~ sin(pi * x),
-    u2(0, x) ~ cos(pi * x),
-    Dt(u1(0, x)) ~ -sin(pi * x),
-    Dt(u2(0, x)) ~ -cos(pi * x),
-    u1(t, 0) ~ 0.0,
-    u2(t, 0) ~ exp(-t),
-    u1(t, 1) ~ 0.0,
-    u2(t, 1) ~ -exp(-t)]
-
-# Space and time domains
-domains = [t ∈ Interval(0.0, 1.0),
-    x ∈ Interval(0.0, 1.0)]
-
-# Neural network
-input_ = length(domains)
-n = 15
-chain = [Lux.Chain(Dense(input_, n, Lux.σ), Dense(n, n, Lux.σ), Dense(n, 1)) for _ in 1:3]
-@named pdesystem = PDESystem(eqs, bcs, domains, [t, x], [u1(t, x), u2(t, x), u3(t, x)])
-
-strategy = NeuralPDE.QuadratureTraining()
-discretization = PhysicsInformedNN(chain, strategy)
-sym_prob = NeuralPDE.symbolic_discretize(pdesystem, discretization)
-
pde_loss_functions = sym_prob.loss_functions.pde_loss_functions
bc_loss_functions = sym_prob.loss_functions.bc_loss_functions

-callback = function (p, l)
-    println("loss: ", l)
-    println("pde_losses: ", map(l_ -> l_(p.u), pde_loss_functions))
-    println("bcs_losses: ", map(l_ -> l_(p.u), bc_loss_functions))
-    return false
-end
-
loss_functions = [pde_loss_functions; bc_loss_functions]

-function loss_function(θ, p)
-    sum(map(l -> l(θ), loss_functions))
-end
+loss_function(θ, _) = sum(l -> l(θ), loss_functions)

-f_ = OptimizationFunction(loss_function, Optimization.AutoZygote())
-prob = Optimization.OptimizationProblem(f_, sym_prob.flat_init_params)
+f_ = OptimizationFunction(loss_function, AutoZygote())
+prob = OptimizationProblem(f_, sym_prob.flat_init_params)

-res = Optimization.solve(
-    prob, OptimizationOptimJL.LBFGS(linesearch = BackTracking()); maxiters = 1000)
+res = solve(prob, OptimizationOptimisers.Adam(0.01); maxiters = 1000, callback)
+prob = remake(prob, u0 = res.u)
+res = solve(prob, LBFGS(linesearch = BackTracking()); maxiters = 200, callback)
```

## Solution Representation

@@ -168,10 +129,12 @@ ts, xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
minimizers_ = [res.u.depvar[sym_prob.depvars[i]] for i in 1:3]

function analytic_sol_func(t, x)
-    [exp(-t) * sin(pi * x), exp(-t) * cos(pi * x), (1 + pi^2) * exp(-t)]
+    [exp(-t) * sinpi(x), exp(-t) * cospi(x), (1 + pi^2) * exp(-t)]
end
+
u_real = [[analytic_sol_func(t, x)[i] for t in ts for x in xs] for i in 1:3]
u_predict = [[phi[i]([t, x], minimizers_[i])[1] for t in ts for x in xs] for i in 1:3]
+
diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:3]
ps = []
for i in 1:3
@@ -196,7 +159,7 @@ ps[3]

Notice here that the solution is represented in the `OptimizationSolution` with `u` as the parameters for the trained neural network. But, for the case where the neural network
is from Lux.jl, it's given as a `ComponentArray` where `res.u.depvar.x` corresponds to the result
for the neural network corresponding to the dependent variable `x`, i.e. `res.u.depvar.u1` are the trained parameters for `phi[1]` in our example. For simpler indexing, you can use
`res.u.depvar[:u1]` or `res.u.depvar[Symbol(:u,1)]` as shown here.

@@ -220,8 +183,7 @@ Dyy = Differential(y)^2
bcs = [u[1](x, 0) ~ x, u[2](x, 0) ~ 2, u[3](x, 0) ~ 3, u[4](x, 0) ~ 4]

# matrix PDE
-eqs = @. [(Dxx(u_(x, y)) + Dyy(u_(x, y))) for u_ in u] ~ -sin(pi * x) * sin(pi * y) *
-         [0 1; 0 1]
+eqs = @. 
[(Dxx(u_(x, y)) + Dyy(u_(x, y))) for u_ in u] ~ -sinpi(x) * sinpi(y) * [0 1; 0 1] size(eqs) ``` diff --git a/src/pinn_types.jl b/src/pinn_types.jl index 15b426f0f..6a7d617e9 100644 --- a/src/pinn_types.jl +++ b/src/pinn_types.jl @@ -1,3 +1,9 @@ +""" + LogOptions(log_frequency) + LogOptions(; log_frequency = 50) + +Options for logging during optimization. +""" struct LogOptions log_frequency::Int # TODO: add in an option for saving plots in the log. this is currently not done because the type of plot is dependent on the PDESystem
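+    # Usage sketch (based only on the constructors documented above; the
+    # keyword form defaults to logging every 50 optimization iterations):
+    #
+    #     LogOptions()                       # log_frequency = 50
+    #     LogOptions(; log_frequency = 200)  # log every 200 iterations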