-
-
Notifications
You must be signed in to change notification settings - Fork 210
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Updated the closures with @closure to avoid boxing #924
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -17,6 +17,7 @@ DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" | |
DomainSets = "5b8099bc-c8ec-5219-889f-1d9e522a28bf" | ||
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" | ||
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196" | ||
Glob = "c27321d9-0574-5035-807b-f59d2c89b15c" | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. what is this? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. it was for searching a keyword across all files, automating the search with a script. |
||
Integrals = "de52edbc-65ea-441a-8357-d3a637375a31" | ||
IntervalSets = "8197267c-284f-5f27-9208-e0e47529a953" | ||
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" | ||
|
@@ -38,6 +39,7 @@ RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" | |
Reexport = "189a3867-3050-52da-a836-e630ba90ab69" | ||
RuntimeGeneratedFunctions = "7e49a35a-f44a-4d26-94aa-eba1b4ca6b47" | ||
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" | ||
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. why is this needed? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It was throwing |
||
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" | ||
SymbolicIndexingInterface = "2efcf032-c050-4f8e-a9bb-153293bab1f5" | ||
SymbolicUtils = "d1185830-fcd6-423d-90d6-eec64667417b" | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -36,11 +36,11 @@ eltypeθ = eltype(init_params) | |
phi = NeuralPDE.get_phi(chain) | ||
derivative = NeuralPDE.get_numeric_derivative() | ||
|
||
u_ = (cord, θ, phi) -> sum(phi(cord, θ)) | ||
u_ = @closure (cord, θ, phi) -> sum(phi(cord, θ)) | ||
|
||
phi([1, 2], init_params) | ||
|
||
phi_ = (p) -> phi(p, init_params)[1] | ||
phi_ = @closure (p) -> phi(p, init_params)[1] | ||
dphi = Zygote.gradient(phi_, [1.0, 2.0]) | ||
|
||
dphi1 = derivative(phi, u_, [1.0, 2.0], [[0.0049215667, 0.0]], 1, init_params) | ||
|
@@ -57,7 +57,7 @@ multioutput = chain isa AbstractArray | |
strategy = NeuralPDE.GridTraining(dx) | ||
integral = NeuralPDE.get_numeric_integral(strategy, indvars, multioutput, chain, derivative) | ||
|
||
_pde_loss_function = NeuralPDE.build_loss_function(eq, indvars, depvars, phi, derivative, | ||
_pde_loss_function = @closure NeuralPDE.build_loss_function(eq, indvars, depvars, phi, derivative, | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. this isn't a closure There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I added @closure here under the assumption that it would be necessary for capturing variables, but I see that this isn't forming a closure. |
||
integral, multioutput, init_params, | ||
strategy) | ||
``` | ||
|
@@ -82,7 +82,7 @@ julia> bc_indvars = NeuralPDE.get_variables(bcs,indvars,depvars) | |
``` | ||
|
||
```julia | ||
_bc_loss_functions = [NeuralPDE.build_loss_function(bc, indvars, depvars, | ||
_bc_loss_functions = [ @closure NeuralPDE.build_loss_function(bc, indvars, depvars, | ||
phi, derivative, integral, multioutput, | ||
init_params, strategy, | ||
bc_indvars = bc_indvar) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -45,9 +45,18 @@ root(x) = f(x) - g(x) | |
|
||
# Analytic solution | ||
k = find_zero(root, (0, 1), Bisection()) # k is a root of the algebraic (transcendental) equation f(x) = g(x) | ||
θ(x, y) = (cosh(sqrt(f(k)) * x) + sinh(sqrt(f(k)) * x)) * (y + 1) # Analytical solution to Helmholtz equation | ||
w_analytic(x, y) = θ(x, y) - h(k) / f(k) | ||
u_analytic(x, y) = k * w_analytic(x, y) | ||
θ = let k = k | ||
(x, y) -> (cosh(sqrt(f(k)) * x) + sinh(sqrt(f(k)) * x)) * (y + 1) | ||
end | ||
|
||
w_analytic = let θ = θ, h_k = h(k) / f(k) # Closure for analytic function | ||
(x, y) -> θ(x, y) - h_k | ||
end | ||
|
||
u_analytic = let k = k, w_analytic = w_analytic # Closure for u_analytic | ||
(x, y) -> k * w_analytic(x, y) | ||
end | ||
|
||
|
||
# Nonlinear Steady-State Systems of Two Reaction-Diffusion Equations with 3 arbitrary function f, g, h | ||
eqs_ = [ | ||
|
@@ -105,14 +114,21 @@ res = solve(prob, BFGS(); maxiters = 100, callback) | |
phi = discretization.phi | ||
|
||
# Analysis | ||
# Analysis with closure | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. why mention this? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains] | ||
depvars = [:u, :w] | ||
minimizers_ = [res.u.depvar[depvars[i]] for i in 1:2] | ||
|
||
analytic_sol_func(x, y) = [u_analytic(x, y), w_analytic(x, y)] | ||
analytic_sol_func = let u_analytic = u_analytic, w_analytic = w_analytic # Closure for analytic function | ||
(x, y) -> [u_analytic(x, y), w_analytic(x, y)] | ||
end | ||
|
||
u_real = [[analytic_sol_func(x, y)[i] for x in xs for y in ys] for i in 1:2] | ||
u_predict = [[phi[i]([x, y], minimizers_[i])[1] for x in xs for y in ys] for i in 1:2] | ||
u_predict = let phi = phi, minimizers_ = minimizers_ # Closure for predicted values | ||
[[phi[i]([x, y], minimizers_[i])[1] for x in xs for y in ys] for i in 1:2] | ||
end | ||
diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2] | ||
|
||
ps = [] | ||
for i in 1:2 | ||
p1 = plot(xs, ys, u_real[i], linetype = :contourf, title = "u$i, analytic") | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,7 +16,7 @@ change during optimization | |
@concrete mutable struct NonAdaptiveLoss{T <: Real} <: AbstractAdaptiveLoss | ||
pde_loss_weights::Vector{T} | ||
bc_loss_weights::Vector{T} | ||
additional_loss_weights::Vector{T} | ||
additional_loss_weights::Vector{T} | ||
end | ||
|
||
function NonAdaptiveLoss{T}(; pde_loss_weights = 1.0, bc_loss_weights = 1.0, | ||
|
@@ -28,7 +28,7 @@ end | |
|
||
NonAdaptiveLoss(; kwargs...) = NonAdaptiveLoss{Float64}(; kwargs...) | ||
|
||
function generate_adaptive_loss_function(::PINNRepresentation, ::NonAdaptiveLoss, _, __) | ||
@closure function generate_adaptive_loss_function(::PINNRepresentation, ::NonAdaptiveLoss, _, __) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. wrong spot |
||
return Returns(nothing) | ||
end | ||
|
||
|
@@ -83,7 +83,7 @@ function GradientScaleAdaptiveLoss(args...; kwargs...) | |
return GradientScaleAdaptiveLoss{Float64}(args...; kwargs...) | ||
end | ||
|
||
function generate_adaptive_loss_function(pinnrep::PINNRepresentation, | ||
@closure function generate_adaptive_loss_function(pinnrep::PINNRepresentation, | ||
adaloss::GradientScaleAdaptiveLoss, pde_loss_functions, bc_loss_functions) | ||
weight_change_inertia = adaloss.weight_change_inertia | ||
iteration = pinnrep.iteration | ||
|
@@ -168,7 +168,7 @@ end | |
|
||
MiniMaxAdaptiveLoss(args...; kwargs...) = MiniMaxAdaptiveLoss{Float64}(args...; kwargs...) | ||
|
||
function generate_adaptive_loss_function(pinnrep::PINNRepresentation, | ||
@closure function generate_adaptive_loss_function(pinnrep::PINNRepresentation, | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. in the wrong spot |
||
adaloss::MiniMaxAdaptiveLoss, _, __) | ||
pde_max_optimiser_setup = Optimisers.setup( | ||
adaloss.pde_max_optimiser, adaloss.pde_loss_weights) | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.