diff --git a/src/adaptive_losses.jl b/src/adaptive_losses.jl
index b37023da7c..8270bfa3e1 100644
--- a/src/adaptive_losses.jl
+++ b/src/adaptive_losses.jl
@@ -15,6 +15,7 @@ end
 """
 ```julia
 NonAdaptiveLoss{T}(; pde_loss_weights = 1,
+                     energy_loss_weights = 1,
                      bc_loss_weights = 1,
                      additional_loss_weights = 1)
 ```
@@ -25,30 +26,33 @@ change during optimization
 mutable struct NonAdaptiveLoss{T <: Real} <: AbstractAdaptiveLoss
     pde_loss_weights::Vector{T}
+    energy_loss_weights::Vector{T}
     bc_loss_weights::Vector{T}
     additional_loss_weights::Vector{T}
     SciMLBase.@add_kwonly function NonAdaptiveLoss{T}(; pde_loss_weights = 1,
+                                                      energy_loss_weights = 1,
                                                       bc_loss_weights = 1,
                                                       additional_loss_weights = 1) where {
                                                                                           T <: Real
                                                                                           }
-        new(vectorify(pde_loss_weights, T), vectorify(bc_loss_weights, T),
+        new(vectorify(pde_loss_weights, T), vectorify(energy_loss_weights, T),
+            vectorify(bc_loss_weights, T),
             vectorify(additional_loss_weights, T))
     end
 end

 # default to Float64
-SciMLBase.@add_kwonly function NonAdaptiveLoss(; pde_loss_weights = 1, bc_loss_weights = 1,
+SciMLBase.@add_kwonly function NonAdaptiveLoss(; pde_loss_weights = 1,
+                                               energy_loss_weights = 1, bc_loss_weights = 1,
                                                additional_loss_weights = 1)
     NonAdaptiveLoss{Float64}(; pde_loss_weights = pde_loss_weights,
                              bc_loss_weights = bc_loss_weights,
+                             energy_loss_weights = energy_loss_weights,
                              additional_loss_weights = additional_loss_weights)
 end

 function generate_adaptive_loss_function(pinnrep::PINNRepresentation,
                                          adaloss::NonAdaptiveLoss,
-                                         pde_loss_functions, bc_loss_functions)
-    function null_nonadaptive_loss(θ, pde_losses, bc_losses)
+                                         pde_loss_functions, energy_loss_functions,
+                                         bc_loss_functions)
+    function null_nonadaptive_loss(θ, pde_losses, energy_losses, bc_losses)
         nothing
     end
 end
@@ -58,6 +62,7 @@ end
 """
 ```julia
 GradientScaleAdaptiveLoss(reweight_every;
                           weight_change_inertia = 0.9,
                           pde_loss_weights = 1,
+                          energy_loss_weights = 1,
                           bc_loss_weights = 1,
                           additional_loss_weights = 1)
 ```
@@ -90,37 +95,41 @@ mutable struct GradientScaleAdaptiveLoss{T <: Real} <: AbstractAdaptiveLoss
     reweight_every::Int64
     weight_change_inertia::T
     pde_loss_weights::Vector{T}
+    energy_loss_weights::Vector{T}
     bc_loss_weights::Vector{T}
     additional_loss_weights::Vector{T}
     SciMLBase.@add_kwonly function GradientScaleAdaptiveLoss{T}(reweight_every;
                                                                 weight_change_inertia = 0.9,
                                                                 pde_loss_weights = 1,
+                                                                energy_loss_weights = 1,
                                                                 bc_loss_weights = 1,
                                                                 additional_loss_weights = 1) where {
                                                                                                     T <: Real
                                                                                                     }
         new(convert(Int64, reweight_every), convert(T, weight_change_inertia),
-            vectorify(pde_loss_weights, T), vectorify(bc_loss_weights, T),
-            vectorify(additional_loss_weights, T))
+            vectorify(pde_loss_weights, T), vectorify(energy_loss_weights, T),
+            vectorify(bc_loss_weights, T), vectorify(additional_loss_weights, T))
     end
 end

 # default to Float64
 SciMLBase.@add_kwonly function GradientScaleAdaptiveLoss(reweight_every;
                                                          weight_change_inertia = 0.9,
                                                          pde_loss_weights = 1,
+                                                         energy_loss_weights = 1,
                                                          bc_loss_weights = 1,
                                                          additional_loss_weights = 1)
     GradientScaleAdaptiveLoss{Float64}(reweight_every;
                                        weight_change_inertia = weight_change_inertia,
                                        pde_loss_weights = pde_loss_weights,
+                                       energy_loss_weights = energy_loss_weights,
                                        bc_loss_weights = bc_loss_weights,
                                        additional_loss_weights = additional_loss_weights)
 end

 function generate_adaptive_loss_function(pinnrep::PINNRepresentation,
                                          adaloss::GradientScaleAdaptiveLoss,
-                                         pde_loss_functions, bc_loss_functions)
+                                         pde_loss_functions, energy_loss_functions,
+                                         bc_loss_functions)
     weight_change_inertia = adaloss.weight_change_inertia
     iteration = pinnrep.iteration
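+    # note: this scheme only rescales bc_loss_weights; pde_loss_weights and
+    # energy_loss_weights keep the values they were initialized with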
     adaloss_T = eltype(adaloss.pde_loss_weights)
@@ -128,23 +137,24 @@ function generate_adaptive_loss_function(pinnrep::PINNRepresentation,
-    function run_loss_gradients_adaptive_loss(θ, pde_losses, bc_losses)
+    function run_loss_gradients_adaptive_loss(θ, pde_losses, energy_losses, bc_losses)
         if iteration[1] % adaloss.reweight_every == 0
             # the paper assumes a single pde loss function, so here we grab the maximum of the maximums of each pde loss function
-            pde_grads_maxes = [maximum(abs.(Zygote.gradient(pde_loss_function, θ)[1]))
-                               for pde_loss_function in pde_loss_functions]
-            pde_grads_max = maximum(pde_grads_maxes)
+            # energy loss functions are treated the same as pde loss functions
+            pde_energy_grads_maxes = [maximum(abs.(Zygote.gradient(pde_loss_function, θ)[1]))
+                                      for pde_loss_function in vcat(pde_loss_functions,
+                                                                    energy_loss_functions)]
+            pde_energy_grads_max = maximum(pde_energy_grads_maxes)
             bc_grads_mean = [mean(abs.(Zygote.gradient(bc_loss_function, θ)[1]))
                              for bc_loss_function in bc_loss_functions]

             nonzero_divisor_eps = adaloss_T isa Float64 ? Float64(1e-11) :
                                   convert(adaloss_T, 1e-7)
-            bc_loss_weights_proposed = pde_grads_max ./
+            bc_loss_weights_proposed = pde_energy_grads_max ./
                                        (bc_grads_mean .+ nonzero_divisor_eps)
             adaloss.bc_loss_weights .= weight_change_inertia .*
                                        adaloss.bc_loss_weights .+
                                        (1 .- weight_change_inertia) .*
                                        bc_loss_weights_proposed
-            logscalar(pinnrep.logger, pde_grads_max, "adaptive_loss/pde_grad_max",
+            logscalar(pinnrep.logger, pde_energy_grads_max,
+                      "adaptive_loss/pde_energy_grad_max",
                       iteration[1])
-            logvector(pinnrep.logger, pde_grads_maxes, "adaptive_loss/pde_grad_maxes",
+            logvector(pinnrep.logger, pde_energy_grads_maxes,
+                      "adaptive_loss/pde_energy_grad_maxes",
                       iteration[1])
             logvector(pinnrep.logger, bc_grads_mean, "adaptive_loss/bc_grad_mean",
                       iteration[1])
@@ -160,8 +170,10 @@ end
 ```julia
 function MiniMaxAdaptiveLoss(reweight_every;
                              pde_max_optimiser = Flux.ADAM(1e-4),
+                             energy_max_optimiser = Flux.ADAM(1e-4),
                              bc_max_optimiser = Flux.ADAM(0.5),
                              pde_loss_weights = 1,
+                             energy_loss_weights = 1,
                              bc_loss_weights = 1,
                              additional_loss_weights = 1)
 ```
@@ -191,65 +203,81 @@ https://arxiv.org/abs/2009.04544
 """
 mutable struct MiniMaxAdaptiveLoss{T <: Real,
                                    PDE_OPT <: Flux.Optimise.AbstractOptimiser,
+                                   ENERGY_OPT <: Flux.Optimise.AbstractOptimiser,
                                    BC_OPT <: Flux.Optimise.AbstractOptimiser} <:
                AbstractAdaptiveLoss
     reweight_every::Int64
     pde_max_optimiser::PDE_OPT
+    energy_max_optimiser::ENERGY_OPT
     bc_max_optimiser::BC_OPT
     pde_loss_weights::Vector{T}
+    energy_loss_weights::Vector{T}
     bc_loss_weights::Vector{T}
     additional_loss_weights::Vector{T}
     SciMLBase.@add_kwonly function MiniMaxAdaptiveLoss{T,
-                                                       PDE_OPT, BC_OPT}(reweight_every;
+                                                       PDE_OPT, ENERGY_OPT,
+                                                       BC_OPT}(reweight_every;
                                                                pde_max_optimiser = Flux.ADAM(1e-4),
+                                                               energy_max_optimiser = Flux.ADAM(1e-4),
                                                                bc_max_optimiser = Flux.ADAM(0.5),
                                                                pde_loss_weights = 1,
+                                                               energy_loss_weights = 1,
                                                                bc_loss_weights = 1,
                                                                additional_loss_weights = 1) where {
                                                                                                    T <: Real,
                                                                                                    PDE_OPT <: Flux.Optimise.AbstractOptimiser,
+                                                                                                   ENERGY_OPT <:
+                                                                                                   Flux.Optimise.AbstractOptimiser,
                                                                                                    BC_OPT <: Flux.Optimise.AbstractOptimiser
                                                                                                    }
-        new(convert(Int64, reweight_every), convert(PDE_OPT, pde_max_optimiser),
+        new(convert(Int64, reweight_every), convert(PDE_OPT, pde_max_optimiser),
+            convert(ENERGY_OPT, energy_max_optimiser),
             convert(BC_OPT, bc_max_optimiser),
-            vectorify(pde_loss_weights, T), vectorify(bc_loss_weights, T),
-            vectorify(additional_loss_weights, T))
+            vectorify(pde_loss_weights, T), vectorify(energy_loss_weights, T),
+            vectorify(bc_loss_weights, T), vectorify(additional_loss_weights, T))
     end
 end

 # default to Float64, ADAM, ADAM
 SciMLBase.@add_kwonly function MiniMaxAdaptiveLoss(reweight_every;
                                                    pde_max_optimiser = Flux.ADAM(1e-4),
+                                                   energy_max_optimiser = Flux.ADAM(1e-4),
                                                    bc_max_optimiser = Flux.ADAM(0.5),
                                                    pde_loss_weights = 1,
+                                                   energy_loss_weights = 1,
                                                    bc_loss_weights = 1,
                                                    additional_loss_weights = 1)
-    MiniMaxAdaptiveLoss{Float64, typeof(pde_max_optimiser),
-                        typeof(bc_max_optimiser)}(reweight_every;
+    MiniMaxAdaptiveLoss{Float64, typeof(pde_max_optimiser),
+                        typeof(energy_max_optimiser),
+                        typeof(bc_max_optimiser)}(reweight_every;
                                                   pde_max_optimiser = pde_max_optimiser,
+                                                  energy_max_optimiser = energy_max_optimiser,
                                                   bc_max_optimiser = bc_max_optimiser,
                                                   pde_loss_weights = pde_loss_weights,
+                                                  energy_loss_weights = energy_loss_weights,
                                                   bc_loss_weights = bc_loss_weights,
                                                   additional_loss_weights = additional_loss_weights)
 end

 function generate_adaptive_loss_function(pinnrep::PINNRepresentation,
                                          adaloss::MiniMaxAdaptiveLoss,
-                                         pde_loss_functions, bc_loss_functions)
+                                         pde_loss_functions, energy_loss_functions,
+                                         bc_loss_functions)
     pde_max_optimiser = adaloss.pde_max_optimiser
+    energy_max_optimiser = adaloss.energy_max_optimiser
     bc_max_optimiser = adaloss.bc_max_optimiser
     iteration = pinnrep.iteration
-    function run_minimax_adaptive_loss(θ, pde_losses, bc_losses)
+    function run_minimax_adaptive_loss(θ, pde_losses, energy_losses, bc_losses)
         if iteration[1] % adaloss.reweight_every == 0
             Flux.Optimise.update!(pde_max_optimiser, adaloss.pde_loss_weights,
                                   -pde_losses)
+            Flux.Optimise.update!(energy_max_optimiser, adaloss.energy_loss_weights,
+                                  -energy_losses)
             Flux.Optimise.update!(bc_max_optimiser, adaloss.bc_loss_weights, -bc_losses)
             logvector(pinnrep.logger, adaloss.pde_loss_weights,
                       "adaptive_loss/pde_loss_weights", iteration[1])
+            logvector(pinnrep.logger, adaloss.energy_loss_weights,
+                      "adaptive_loss/energy_loss_weights", iteration[1])
             logvector(pinnrep.logger, adaloss.bc_loss_weights,
                       "adaptive_loss/bc_loss_weights",
                       iteration[1])
diff --git a/src/discretize.jl b/src/discretize.jl
index 4308a79b4e..19fa444ad5 100644
--- a/src/discretize.jl
+++ b/src/discretize.jl
@@ -204,16 +204,16 @@ strategy.
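+Training sets are returned for the PDE equations, the energies, and the boundary
+conditions, in that order.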
""" function generate_training_sets end -function generate_training_sets(domains, dx, eqs, bcs, eltypeθ, _indvars::Array, +function generate_training_sets(domains, dx, eqs, energies, bcs, eltypeθ, _indvars::Array, _depvars::Array) depvars, indvars, dict_indvars, dict_depvars, dict_depvar_input = get_vars(_indvars, _depvars) - return generate_training_sets(domains, dx, eqs, bcs, eltypeθ, dict_indvars, + return generate_training_sets(domains, dx, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars) end # Generate training set in the domain and on the boundary -function generate_training_sets(domains, dx, eqs, bcs, eltypeθ, dict_indvars::Dict, +function generate_training_sets(domains, dx, eqs, energies, bcs, eltypeθ, dict_indvars::Dict, dict_depvars::Dict) if dx isa Array dxs = dx @@ -249,19 +249,26 @@ function generate_training_sets(domains, dx, eqs, bcs, eltypeθ, dict_indvars::D hcat(vec(map(points -> collect(points), Iterators.product(span...)))...)) end - pde_vars = get_variables(eqs, dict_indvars, dict_depvars) - pde_args = get_argument(eqs, dict_indvars, dict_depvars) + function get_eqs_train_sets(eqs) + eqs_vars = get_variables(eqs, dict_indvars, dict_depvars) + eqs_args = get_argument(eqs, dict_indvars, dict_depvars) - pde_train_set = adapt(eltypeθ, - hcat(vec(map(points -> collect(points), - Iterators.product(bc_data...)))...)) + eqs_train_set = adapt(eltypeθ, + hcat(vec(map(points -> collect(points), + Iterators.product(bc_data...)))...)) - pde_train_sets = map(pde_args) do bt - span = map(b -> get(dict_var_span_, b, b), bt) - _set = adapt(eltypeθ, - hcat(vec(map(points -> collect(points), Iterators.product(span...)))...)) + eqs_train_sets = map(eqs_args) do bt + span = map(b -> get(dict_var_span_, b, b), bt) + _set = adapt(eltypeθ, + hcat(vec(map(points -> collect(points), Iterators.product(span...)))...)) + end + return eqs_train_sets end - [pde_train_sets, bcs_train_sets] + + pde_train_sets = get_eqs_train_sets(eqs) + energy_train_sets = get_eqs_train_sets(energies) + + [pde_train_sets, energy_train_sets, bcs_train_sets] end """ @@ -274,35 +281,38 @@ training strategy: StochasticTraining, QuasiRandomTraining, QuadratureTraining. 
""" function get_bounds end -function get_bounds(domains, eqs, bcs, eltypeθ, _indvars::Array, _depvars::Array, strategy) +function get_bounds(domains, eqs, energies, bcs, eltypeθ, _indvars::Array, _depvars::Array, strategy) depvars, indvars, dict_indvars, dict_depvars, dict_depvar_input = get_vars(_indvars, _depvars) - return get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, strategy) + return get_bounds(domains, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars, strategy) end -function get_bounds(domains, eqs, bcs, eltypeθ, _indvars::Array, _depvars::Array, +function get_bounds(domains, eqs, energies, bcs, eltypeθ, _indvars::Array, _depvars::Array, strategy::QuadratureTraining) depvars, indvars, dict_indvars, dict_depvars, dict_depvar_input = get_vars(_indvars, _depvars) - return get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, strategy) + return get_bounds(domains, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars, strategy) end -function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, +function get_bounds(domains, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars, strategy::QuadratureTraining) dict_lower_bound = Dict([Symbol(d.variables) => infimum(d.domain) for d in domains]) dict_upper_bound = Dict([Symbol(d.variables) => supremum(d.domain) for d in domains]) - pde_args = get_argument(eqs, dict_indvars, dict_depvars) - - pde_lower_bounds = map(pde_args) do pd - span = map(p -> get(dict_lower_bound, p, p), pd) - map(s -> adapt(eltypeθ, s) + cbrt(eps(eltypeθ)), span) - end - pde_upper_bounds = map(pde_args) do pd - span = map(p -> get(dict_upper_bound, p, p), pd) - map(s -> adapt(eltypeθ, s) - cbrt(eps(eltypeθ)), span) + function get_eqs_bounds(eqs) + eqs_args = get_argument(eqs, dict_indvars, dict_depvars) + eqs_lower_bounds = map(eqs_args) do pd + span = map(p -> get(dict_lower_bound, p, p), pd) + map(s -> adapt(eltypeθ, s) + cbrt(eps(eltypeθ)), span) + end + eqs_upper_bounds = map(eqs_args) do pd + span = map(p -> get(dict_upper_bound, p, p), pd) + map(s -> adapt(eltypeθ, s) - cbrt(eps(eltypeθ)), span) + end + return [eqs_lower_bounds, eqs_upper_bounds] end - pde_bounds = [pde_lower_bounds, pde_upper_bounds] + pde_bounds = get_eqs_bounds(eqs) + energy_bounds = get_eqs_bounds(energies) bound_vars = get_variables(bcs, dict_indvars, dict_depvars) @@ -314,10 +324,10 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, end bcs_bounds = [bcs_lower_bounds, bcs_upper_bounds] - [pde_bounds, bcs_bounds] + [pde_bounds, energy_bounds, bcs_bounds] end -function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, strategy) +function get_bounds(domains, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars, strategy) dx = 1 / strategy.points dict_span = Dict([Symbol(d.variables) => [ infimum(d.domain) + dx, @@ -325,20 +335,19 @@ function get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, str ] for d in domains]) # pde_bounds = [[infimum(d.domain),supremum(d.domain)] for d in domains] - pde_args = get_argument(eqs, dict_indvars, dict_depvars) - pde_bounds = map(pde_args) do pde_arg - bds = mapreduce(s -> get(dict_span, s, fill(s, 2)), hcat, pde_arg) - bds = eltypeθ.(bds) - bds[1, :], bds[2, :] - end - - bound_args = get_argument(bcs, dict_indvars, dict_depvars) - bcs_bounds = map(bound_args) do bound_arg - bds = mapreduce(s -> get(dict_span, s, fill(s, 2)), hcat, bound_arg) - bds = eltypeθ.(bds) - bds[1, :], bds[2, :] + function get_eqs_bounds(eqs) + eqs_args = 
get_argument(eqs, dict_indvars, dict_depvars) + eqs_bounds = map(eqs_args) do eqs_arg + bds = mapreduce(s -> get(dict_span, s, fill(s, 2)), hcat, eqs_arg) + bds = eltypeθ.(bds) + bds[1, :], bds[2, :] + end + return eqs_bounds end - return pde_bounds, bcs_bounds + pde_bounds = get_eqs_bounds(eqs) + energy_bounds = get_eqs_bounds(energies) + bcs_bounds = get_eqs_bounds(bcs) + return pde_bounds, energy_bounds, bcs_bounds end function get_numeric_integral(pinnrep::PINNRepresentation) @@ -403,6 +412,7 @@ For more information, see `discretize` and `PINNRepresentation`. function SciMLBase.symbolic_discretize(pde_system::PDESystem, discretization::PhysicsInformedNN) eqs = pde_system.eqs + energies = pde_system.energies bcs = pde_system.bcs chain = discretization.chain @@ -514,62 +524,73 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem, eqs = [eqs] end - pde_indvars = if strategy isa QuadratureTraining - get_argument(eqs, dict_indvars, dict_depvars) - else - get_variables(eqs, dict_indvars, dict_depvars) + if !(energies isa Array) + energies = [energies] end - bc_indvars = if strategy isa QuadratureTraining - get_argument(bcs, dict_indvars, dict_depvars) - else - get_variables(bcs, dict_indvars, dict_depvars) + function get_eqs_indvars(eqs) + eqs_indvars = if strategy isa QuadratureTraining + get_argument(eqs, dict_indvars, dict_depvars) + else + get_variables(eqs, dict_indvars, dict_depvars) + end + return eqs_indvars end + pde_indvars = get_eqs_indvars(eqs) + bc_indvars = get_eqs_indvars(bcs) + energy_indvars = get_eqs_indvars(energies) pde_integration_vars = get_integration_variables(eqs, dict_indvars, dict_depvars) bc_integration_vars = get_integration_variables(bcs, dict_indvars, dict_depvars) + energy_integration_vars = get_integration_variables(energies, dict_indvars, dict_depvars) - pinnrep = PINNRepresentation(eqs, bcs, domains, eq_params, defaults, default_p, + pinnrep = PINNRepresentation(eqs, energies, bcs, domains, eq_params, defaults, default_p, param_estim, additional_loss, adaloss, depvars, indvars, dict_indvars, dict_depvars, dict_depvar_input, logger, multioutput, iteration, init_params, flat_init_params, phi, derivative, - strategy, pde_indvars, bc_indvars, pde_integration_vars, - bc_integration_vars, nothing, nothing, nothing, nothing) + strategy, pde_indvars, energy_indvars, bc_indvars, pde_integration_vars, + energy_integration_vars, bc_integration_vars, nothing, nothing, nothing, nothing, nothing) integral = get_numeric_integral(pinnrep) - symbolic_pde_loss_functions = [build_symbolic_loss_function(pinnrep, eq; - bc_indvars = pde_indvar) - for (eq, pde_indvar) in zip(eqs, pde_indvars, - pde_integration_vars)] - - symbolic_bc_loss_functions = [build_symbolic_loss_function(pinnrep, bc; - bc_indvars = bc_indvar) - for (bc, bc_indvar) in zip(bcs, bc_indvars, - bc_integration_vars)] + function build_symbolic_loss_functions(eqs, eqs_indvars, eqs_integration_vars) + symbolic_eqs_loss_functions = [build_symbolic_loss_function(pinnrep, eq; + bc_indvars = eqs_indvar) + for (eq, eqs_indvar) in zip(eqs, eqs_indvars, + eqs_integration_vars)] + return symbolic_eqs_loss_functions + end + symbolic_pde_loss_functions = build_symbolic_loss_functions(eqs, pde_indvars, pde_integration_vars) + symbolic_energy_loss_functions = build_symbolic_loss_functions(energies, energy_indvars, energy_integration_vars) + symbolic_bc_loss_functions = build_symbolic_loss_functions(bcs, bc_indvars, bc_integration_vars) pinnrep.integral = integral pinnrep.symbolic_pde_loss_functions = 
symbolic_pde_loss_functions + pinnrep.symbolic_energy_loss_functions = symbolic_energy_loss_functions pinnrep.symbolic_bc_loss_functions = symbolic_bc_loss_functions - datafree_pde_loss_functions = [build_loss_function(pinnrep, eq, pde_indvar) - for (eq, pde_indvar, integration_indvar) in zip(eqs, - pde_indvars, - pde_integration_vars)] + function build_datafree_eqs_loss_functions(eqs, eqs_indvars, eqs_integration_vars) + return [build_loss_function(pinnrep, eq, eq_indvar) + for (eq, eq_indvar, integration_indvar) in zip(eqs, + eqs_indvars, + eqs_integration_vars)] + end - datafree_bc_loss_functions = [build_loss_function(pinnrep, bc, bc_indvar) - for (bc, bc_indvar, integration_indvar) in zip(bcs, - bc_indvars, - bc_integration_vars)] + datafree_pde_loss_functions = build_datafree_eqs_loss_functions(eqs, pde_indvars, pde_integration_vars) + datafree_energy_loss_functions = build_datafree_eqs_loss_functions(energies, energy_indvars, energy_integration_vars) + datafree_bc_loss_functions = build_datafree_eqs_loss_functions(bcs, bc_indvars, bc_integration_vars) - pde_loss_functions, bc_loss_functions = merge_strategy_with_loss_function(pinnrep, + pde_loss_functions, energy_loss_functions, bc_loss_functions = merge_strategy_with_loss_function(pinnrep, strategy, datafree_pde_loss_functions, - datafree_bc_loss_functions) + datafree_energy_loss_functions, + datafree_bc_loss_functions, + ) # setup for all adaptive losses num_pde_losses = length(pde_loss_functions) + num_energy_losses = length(energy_loss_functions) num_bc_losses = length(bc_loss_functions) # assume one single additional loss function if there is one. this means that the user needs to lump all their functions into a single one, num_additional_loss = additional_loss isa Nothing ? 0 : 1 @@ -578,19 +599,24 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem, # this will error if the user has provided a number of initial weights that is more than 1 and doesn't match the number of loss functions adaloss.pde_loss_weights = ones(adaloss_T, num_pde_losses) .* adaloss.pde_loss_weights + adaloss.energy_loss_weights = ones(adaloss_T, num_energy_losses) .* adaloss.energy_loss_weights adaloss.bc_loss_weights = ones(adaloss_T, num_bc_losses) .* adaloss.bc_loss_weights adaloss.additional_loss_weights = ones(adaloss_T, num_additional_loss) .* adaloss.additional_loss_weights reweight_losses_func = generate_adaptive_loss_function(pinnrep, adaloss, pde_loss_functions, - bc_loss_functions) + energy_loss_functions, + bc_loss_functions + ) function full_loss_function(θ, p) # the aggregation happens on cpu even if the losses are gpu, probably fine since it's only a few of them - pde_losses = [pde_loss_function(θ) for pde_loss_function in pde_loss_functions] - bc_losses = [bc_loss_function(θ) for bc_loss_function in bc_loss_functions] + # we need to type annotate the empty vector for autodiff to succeed in the case of empty equations/energies/boundary conditions. + pde_losses = num_pde_losses == 0 ? adaloss_T[] : [pde_loss_function(θ) for pde_loss_function in pde_loss_functions] + energy_losses = num_energy_losses == 0 ? adaloss_T[] : [energy_loss_function(θ) for energy_loss_function in energy_loss_functions] + bc_losses = num_bc_losses == 0 ? 
adaloss_T[] : [bc_loss_function(θ) for bc_loss_function in bc_loss_functions]

         # this is kind of a hack, and means that whenever the outer function is evaluated the increment goes up, even if it's not being optimized
         # that's why we prefer the user to maintain the increment in the outer loop callback during optimization
@@ -599,14 +625,16 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem,
         end
         ChainRulesCore.@ignore_derivatives begin reweight_losses_func(θ, pde_losses,
-                                                                      bc_losses) end
+                                                                      energy_losses, bc_losses) end

         weighted_pde_losses = adaloss.pde_loss_weights .* pde_losses
+        weighted_energy_losses = adaloss.energy_loss_weights .* energy_losses
         weighted_bc_losses = adaloss.bc_loss_weights .* bc_losses

         sum_weighted_pde_losses = sum(weighted_pde_losses)
+        sum_weighted_energy_losses = sum(weighted_energy_losses)
         sum_weighted_bc_losses = sum(weighted_bc_losses)
-        weighted_loss_before_additional = sum_weighted_pde_losses + sum_weighted_bc_losses
+        weighted_loss_before_additional = sum_weighted_pde_losses +
+                                          sum_weighted_energy_losses + sum_weighted_bc_losses

         full_weighted_loss = if additional_loss isa Nothing
             weighted_loss_before_additional
@@ -633,10 +661,14 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem,
         ChainRulesCore.@ignore_derivatives begin if iteration[1] % log_frequency == 0
             logvector(pinnrep.logger, pde_losses, "unweighted_loss/pde_losses",
                       iteration[1])
+            logvector(pinnrep.logger, energy_losses, "unweighted_loss/energy_losses",
+                      iteration[1])
             logvector(pinnrep.logger, bc_losses, "unweighted_loss/bc_losses",
                       iteration[1])
             logvector(pinnrep.logger, weighted_pde_losses,
                       "weighted_loss/weighted_pde_losses",
                       iteration[1])
+            logvector(pinnrep.logger, weighted_energy_losses,
+                      "weighted_loss/weighted_energy_losses",
+                      iteration[1])
             logvector(pinnrep.logger, weighted_bc_losses,
                       "weighted_loss/weighted_bc_losses",
                       iteration[1])
@@ -648,6 +680,8 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem,
                       "weighted_loss/sum_weighted_pde_losses", iteration[1])
             logscalar(pinnrep.logger, sum_weighted_bc_losses,
                       "weighted_loss/sum_weighted_bc_losses", iteration[1])
+            logscalar(pinnrep.logger, sum_weighted_energy_losses,
+                      "weighted_loss/sum_weighted_energy_losses", iteration[1])
             logscalar(pinnrep.logger, full_weighted_loss,
                       "weighted_loss/full_weighted_loss",
                       iteration[1])
@@ -657,15 +691,20 @@ function SciMLBase.symbolic_discretize(pde_system::PDESystem,
             logvector(pinnrep.logger, adaloss.bc_loss_weights,
                       "adaptive_loss/bc_loss_weights",
                       iteration[1])
+            logvector(pinnrep.logger, adaloss.energy_loss_weights,
+                      "adaptive_loss/energy_loss_weights",
+                      iteration[1])
         end

         return full_weighted_loss
     end

-    pinnrep.loss_functions = PINNLossFunctions(bc_loss_functions, pde_loss_functions,
+    pinnrep.loss_functions = PINNLossFunctions(bc_loss_functions, pde_loss_functions,
+                                               energy_loss_functions,
                                                full_loss_function, additional_loss,
                                                datafree_pde_loss_functions,
-                                               datafree_bc_loss_functions)
+                                               datafree_energy_loss_functions,
+                                               datafree_bc_loss_functions)

     return pinnrep
 end
diff --git a/src/pinn_types.jl b/src/pinn_types.jl
index 3c74022b38..68bc2ee749 100644
--- a/src/pinn_types.jl
+++ b/src/pinn_types.jl
@@ -166,6 +166,10 @@ mutable struct PINNRepresentation
     """
     eqs::Any
     """
+    The energy functions
+    """
+    energies::Any
+    """
     The boundary condition equations
     """
     bcs::Any
@@ -267,6 +271,10 @@ mutable struct PINNRepresentation
     """
     ???
     """
+    energy_indvars::Any
+    """
+    ???
+    """
     bc_indvars::Any
     """
     ???
     """
@@ -275,6 +283,10 @@ mutable struct PINNRepresentation
     """
     ???
""" + energy_integration_vars::Any + """ + ??? + """ bc_integration_vars::Any """ ??? @@ -285,6 +297,10 @@ mutable struct PINNRepresentation """ symbolic_pde_loss_functions::Any """ + The energy loss functions as represented in Julia AST + """ + symbolic_energy_loss_functions::Any + """ The boundary condition loss functions as represented in Julia AST """ symbolic_bc_loss_functions::Any @@ -313,6 +329,10 @@ struct PINNLossFunctions """ pde_loss_functions::Any """ + The energy loss functions + """ + energy_loss_functions::Any + """ The full loss function, combining the PDE and boundary condition loss functions. This is the loss function that is used by the optimizer. """ @@ -326,6 +346,10 @@ struct PINNLossFunctions """ datafree_pde_loss_functions::Any """ + The pre-data version of the energy loss function + """ + datafree_energy_loss_functions::Any + """ The pre-data version of the BC loss function """ datafree_bc_loss_functions::Any diff --git a/src/symbolic_utilities.jl b/src/symbolic_utilities.jl index 9161f3c365..b14c264e5f 100644 --- a/src/symbolic_utilities.jl +++ b/src/symbolic_utilities.jl @@ -303,7 +303,8 @@ Example: [(derivative(phi1, u1, [x, y], [[ε,0]], 1, θ1) + 4 * derivative(phi2, u, [x, y], [[0,ε]], 1, θ2)) - 0, (derivative(phi2, u2, [x, y], [[ε,0]], 1, θ2) + 9 * derivative(phi1, u, [x, y], [[0,ε]], 1, θ1)) - 0] """ -function parse_equation(pinnrep::PINNRepresentation, eq) +# Parse an equation +function parse_equation(pinnrep::PINNRepresentation, eq::Equation) eq_lhs = isequal(expand_derivatives(eq.lhs), 0) ? eq.lhs : expand_derivatives(eq.lhs) eq_rhs = isequal(expand_derivatives(eq.rhs), 0) ? eq.rhs : expand_derivatives(eq.rhs) left_expr = transform_expression(pinnrep, toexpr(eq_lhs)) @@ -313,6 +314,11 @@ function parse_equation(pinnrep::PINNRepresentation, eq) loss_func = :($left_expr .- $right_expr) end +# Parse an energy +function parse_equation(pinnrep::PINNRepresentation, eq) + loss_func = _dot_(transform_expression(pinnrep, toexpr(eq))) +end + function get_indvars_ex(bc_indvars) # , dict_this_eq_indvars) i_ = 1 indvars_ex = map(bc_indvars) do u diff --git a/src/training_strategies.jl b/src/training_strategies.jl index 8af7358753..fe75377525 100644 --- a/src/training_strategies.jl +++ b/src/training_strategies.jl @@ -19,28 +19,32 @@ end function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, strategy::GridTraining, datafree_pde_loss_function, - datafree_bc_loss_function) - @unpack domains, eqs, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep + datafree_energy_loss_function, + datafree_bc_loss_function + ) + @unpack domains, eqs, energies, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep dx = strategy.dx eltypeθ = eltype(pinnrep.flat_init_params) - train_sets = generate_training_sets(domains, dx, eqs, bcs, eltypeθ, + train_sets = generate_training_sets(domains, dx, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars) # the points in the domain and on the boundary - pde_train_sets, bcs_train_sets = train_sets - pde_train_sets = adapt.(parameterless_type(ComponentArrays.getdata(flat_init_params)), - pde_train_sets) - bcs_train_sets = adapt.(parameterless_type(ComponentArrays.getdata(flat_init_params)), - bcs_train_sets) - pde_loss_functions = [get_loss_function(_loss, _set, eltypeθ, strategy) - for (_loss, _set) in zip(datafree_pde_loss_function, - pde_train_sets)] - - bc_loss_functions = [get_loss_function(_loss, _set, eltypeθ, strategy) - for (_loss, _set) in zip(datafree_bc_loss_function, bcs_train_sets)] - - 
pde_loss_functions, bc_loss_functions + pde_train_sets, energy_train_sets, bcs_train_sets = train_sets + + all_loss_functions = + map([(pde_train_sets, datafree_pde_loss_function), + (energy_train_sets, datafree_energy_loss_function), + (bcs_train_sets, datafree_bc_loss_function)]) do (train_sets, datafree_loss_function) + + train_sets = adapt.(parameterless_type(ComponentArrays.getdata(flat_init_params)), + train_sets) + loss_functions = [get_loss_function(_loss, _set, eltypeθ, strategy) + for (_loss, _set) in zip(datafree_loss_function, + train_sets)] + return loss_functions + end + return Tuple(all_loss_functions) end function get_loss_function(loss_function, train_set, eltypeθ, strategy::GridTraining; @@ -79,22 +83,25 @@ end function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, strategy::StochasticTraining, datafree_pde_loss_function, + datafree_energy_loss_function, datafree_bc_loss_function) - @unpack domains, eqs, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep + @unpack domains, eqs, energies, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep eltypeθ = eltype(pinnrep.flat_init_params) - bounds = get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, + bounds = get_bounds(domains, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars, strategy) - pde_bounds, bcs_bounds = bounds + pde_bounds, energy_bounds, bcs_bounds = bounds - pde_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy) - for (_loss, bound) in zip(datafree_pde_loss_function, pde_bounds)] + all_loss_functions = map([(datafree_pde_loss_function, pde_bounds), + (datafree_energy_loss_function, energy_bounds), + (datafree_bc_loss_function, bcs_bounds)]) do (datafree_loss_function, bounds) - bc_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy) - for (_loss, bound) in zip(datafree_bc_loss_function, bcs_bounds)] + return [get_loss_function(_loss, bound, eltypeθ, strategy) + for (_loss, bound) in zip(datafree_loss_function, bounds)] + end - pde_loss_functions, bc_loss_functions + return Tuple(all_loss_functions) end function get_loss_function(loss_function, bound, eltypeθ, strategy::StochasticTraining; @@ -161,17 +168,20 @@ end function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, strategy::QuasiRandomTraining, datafree_pde_loss_function, + datafree_energy_loss_function, datafree_bc_loss_function) - @unpack domains, eqs, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep + @unpack domains, eqs, energies, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep eltypeθ = eltype(pinnrep.flat_init_params) - bounds = get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars, + bounds = get_bounds(domains, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars, strategy) - pde_bounds, bcs_bounds = bounds + pde_bounds, energy_bounds, bcs_bounds = bounds pde_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy) for (_loss, bound) in zip(datafree_pde_loss_function, pde_bounds)] + energy_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy) + for (_loss, bound) in zip(datafree_energy_loss_function, energy_bounds)] strategy_ = QuasiRandomTraining(strategy.bcs_points; sampling_alg = strategy.sampling_alg, @@ -180,7 +190,8 @@ function merge_strategy_with_loss_function(pinnrep::PINNRepresentation, bc_loss_functions = [get_loss_function(_loss, bound, eltypeθ, strategy_) for (_loss, bound) in zip(datafree_bc_loss_function, bcs_bounds)] - pde_loss_functions, bc_loss_functions + + 
pde_loss_functions, energy_loss_functions, bc_loss_functions
 end

 function get_loss_function(loss_function, bound, eltypeθ, strategy::QuasiRandomTraining;
@@ -254,22 +265,24 @@ end
 function merge_strategy_with_loss_function(pinnrep::PINNRepresentation,
                                            strategy::QuadratureTraining,
                                            datafree_pde_loss_function,
+                                           datafree_energy_loss_function,
                                            datafree_bc_loss_function)
-    @unpack domains, eqs, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep
+    @unpack domains, eqs, energies, bcs, dict_indvars, dict_depvars, flat_init_params = pinnrep
     eltypeθ = eltype(pinnrep.flat_init_params)

-    bounds = get_bounds(domains, eqs, bcs, eltypeθ, dict_indvars, dict_depvars,
+    bounds = get_bounds(domains, eqs, energies, bcs, eltypeθ, dict_indvars, dict_depvars,
                         strategy)
-    pde_bounds, bcs_bounds = bounds
-
-    lbs, ubs = pde_bounds
-    pde_loss_functions = [get_loss_function(_loss, lb, ub, eltypeθ, strategy)
-                          for (_loss, lb, ub) in zip(datafree_pde_loss_function, lbs, ubs)]
-    lbs, ubs = bcs_bounds
-    bc_loss_functions = [get_loss_function(_loss, lb, ub, eltypeθ, strategy)
-                         for (_loss, lb, ub) in zip(datafree_bc_loss_function, lbs, ubs)]
+    pde_bounds, energy_bounds, bcs_bounds = bounds
+
+    all_loss_functions = map([(datafree_pde_loss_function, pde_bounds),
+                              (datafree_energy_loss_function, energy_bounds),
+                              (datafree_bc_loss_function, bcs_bounds)]) do (datafree_loss_function, bounds)
+        lbs, ubs = bounds
+        return [get_loss_function(_loss, lb, ub, eltypeθ, strategy)
+                for (_loss, lb, ub) in zip(datafree_loss_function, lbs, ubs)]
+    end

-    pde_loss_functions, bc_loss_functions
+    return Tuple(all_loss_functions)
 end

 function get_loss_function(loss_function, lb, ub, eltypeθ, strategy::QuadratureTraining;
diff --git a/test/energy_loss_tests.jl b/test/energy_loss_tests.jl
new file mode 100644
index 0000000000..5e630a01c1
--- /dev/null
+++ b/test/energy_loss_tests.jl
@@ -0,0 +1,171 @@
+using NeuralPDE: DomainSets
+using Random
+using Test
+using ComponentArrays
+using Optimization
+using OptimizationOptimisers
+using NeuralPDE
+using LinearAlgebra
+using Lux
+import ModelingToolkit: Interval
+
+@parameters x0 x1 x2 x3
+@variables ρ01(..) ρ02(..) ρ03(..) ρ12(..) ρ13(..) ρ23(..)
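+# the six ρij(x0, x1, x2, x3) are used as the components of a 2-form ρ on the
+# 4-torus, ordered as ρ = (ρ01, ρ02, ρ03, ρ12, ρ13, ρ23); d₂ below returns the
+# four components of dρ (summing the six signed index permutations, hence the
+# factors of 2)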
+
+# the 4-torus
+domain = [
+    x0 ∈ Interval(0.0, 1.0),
+    x1 ∈ Interval(0.0, 1.0),
+    x2 ∈ Interval(0.0, 1.0),
+    x3 ∈ Interval(0.0, 1.0),
+]
+
+∂₀ = Differential(x0)
+∂₁ = Differential(x1)
+∂₂ = Differential(x2)
+∂₃ = Differential(x3)
+
+d₂(ρ) = [
+    # the comments list the signed permutations of the indices
+
+    #(0,1,2) + (2,0,1) + (1,2,0) - (1,0,2) - (2,1,0) - (0,2,1)
+    2 * ∂₀(ρ[4]) - 2 * ∂₁(ρ[2]) + 2 * ∂₂(ρ[1]),
+    #(0,1,3) + (3,0,1) + (1,3,0) - (1,0,3) - (0,3,1) - (3,1,0)
+    2 * ∂₀(ρ[5]) - 2 * ∂₁(ρ[3]) + 2 * ∂₃(ρ[1]),
+    #(0,2,3) + (3,0,2) + (2,3,0) - (2,0,3) - (0,3,2) - (3,2,0)
+    2 * ∂₀(ρ[6]) - 2 * ∂₂(ρ[3]) + 2 * ∂₃(ρ[2]),
+    #(1,2,3) + (3,1,2) + (2,3,1) - (2,1,3) - (1,3,2) - (3,2,1)
+    2 * ∂₁(ρ[6]) - 2 * ∂₂(ρ[5]) + 2 * ∂₃(ρ[4]),
+]
+
+u(ρ) = ρ[1] * ρ[6] - ρ[2] * ρ[5] + ρ[3] * ρ[4]
+
+K₁(ρ) = 2(ρ[1] + ρ[6]) / u(ρ)
+K₂(ρ) = 2(ρ[2] - ρ[5]) / u(ρ)
+K₃(ρ) = 2(ρ[3] + ρ[4]) / u(ρ)
+
+K(ρ) = [
+    K₁(ρ),
+    K₂(ρ),
+    K₃(ρ),
+]
+
+# energy
+fₑ(ρ) = (K(ρ)[1]^2 + K(ρ)[2]^2 + K(ρ)[3]^2) * u(ρ)
+
+energies =
+    let ρ = [ρ01(x0, x1, x2, x3), ρ02(x0, x1, x2, x3), ρ03(x0, x1, x2, x3),
+             ρ12(x0, x1, x2, x3), ρ13(x0, x1, x2, x3), ρ23(x0, x1, x2, x3)]
+        [fₑ(ρ)]
+    end
+
+# periodic boundary conditions for the 4-torus
+bcs = [
+    ρ01(0.0, x1, x2, x3) ~ ρ01(1.0, x1, x2, x3),
+    ρ01(x0, 0.0, x2, x3) ~ ρ01(x0, 1.0, x2, x3),
+    ρ01(x0, x1, 0.0, x3) ~ ρ01(x0, x1, 1.0, x3),
+    ρ01(x0, x1, x2, 0.0) ~ ρ01(x0, x1, x2, 1.0),
+    ρ02(0.0, x1, x2, x3) ~ ρ02(1.0, x1, x2, x3),
+    ρ02(x0, 0.0, x2, x3) ~ ρ02(x0, 1.0, x2, x3),
+    ρ02(x0, x1, 0.0, x3) ~ ρ02(x0, x1, 1.0, x3),
+    ρ02(x0, x1, x2, 0.0) ~ ρ02(x0, x1, x2, 1.0),
+    ρ03(0.0, x1, x2, x3) ~ ρ03(1.0, x1, x2, x3),
+    ρ03(x0, 0.0, x2, x3) ~ ρ03(x0, 1.0, x2, x3),
+    ρ03(x0, x1, 0.0, x3) ~ ρ03(x0, x1, 1.0, x3),
+    ρ03(x0, x1, x2, 0.0) ~ ρ03(x0, x1, x2, 1.0),
+    ρ12(0.0, x1, x2, x3) ~ ρ12(1.0, x1, x2, x3),
+    ρ12(x0, 0.0, x2, x3) ~ ρ12(x0, 1.0, x2, x3),
+    ρ12(x0, x1, 0.0, x3) ~ ρ12(x0, x1, 1.0, x3),
+    ρ12(x0, x1, x2, 0.0) ~ ρ12(x0, x1, x2, 1.0),
+    ρ13(0.0, x1, x2, x3) ~ ρ13(1.0, x1, x2, x3),
+    ρ13(x0, 0.0, x2, x3) ~ ρ13(x0, 1.0, x2, x3),
+    ρ13(x0, x1, 0.0, x3) ~ ρ13(x0, x1, 1.0, x3),
+    ρ13(x0, x1, x2, 0.0) ~ ρ13(x0, x1, x2, 1.0),
+    ρ23(0.0, x1, x2, x3) ~ ρ23(1.0, x1, x2, x3),
+    ρ23(x0, 0.0, x2, x3) ~ ρ23(x0, 1.0, x2, x3),
+    ρ23(x0, x1, 0.0, x3) ~ ρ23(x0, x1, 1.0, x3),
+    ρ23(x0, x1, x2, 0.0) ~ ρ23(x0, x1, x2, 1.0),
+]
+
+# equations for dρ = 0
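+# (each of the four components of d₂(ρ) must vanish, giving four residual equations)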
+eqClosed(ρ) = d₂(ρ)[:] .~ 0
+
+eqs =
+    let ρ = [ρ01(x0, x1, x2, x3), ρ02(x0, x1, x2, x3), ρ03(x0, x1, x2, x3),
+             ρ12(x0, x1, x2, x3), ρ13(x0, x1, x2, x3), ρ23(x0, x1, x2, x3)]
+        vcat(
+            eqClosed(ρ),
+        )
+    end
+
+
+input_ = length(domain)
+n = 16
+
+ixToSym = Dict(
+    1 => :ρ01,
+    2 => :ρ02,
+    3 => :ρ03,
+    4 => :ρ12,
+    5 => :ρ13,
+    6 => :ρ23
+)
+
+chains = NamedTuple((ixToSym[ix], Lux.Chain(Dense(input_, n, Lux.σ), Dense(n, n, Lux.σ), Dense(n, 1))) for ix in 1:6)
+chains0 = collect(chains)
+
+function test_donaldson_energy_loss_no_logs(ϵ, sym_prob, prob)
+    # pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
+    # bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions
+    # energy_inner_loss_functions = sym_prob.loss_functions.energy_loss_functions
+
+    ps = map(c -> Lux.setup(Random.default_rng(), c)[1], chains) |> ComponentArray .|> Float64
+    prob1 = remake(prob; u0 = ComponentVector(depvar = ps))
+
+    callback(ϵ::Float64) = function (p, l)
+        # println("loss: ", l)
+        # println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
+        # println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
+        # println("energy losses: ", map(l_ -> l_(p), energy_inner_loss_functions))
+        return l < ϵ
+    end
+    _sol = Optimization.solve(prob1, Adam(0.01); callback = callback(ϵ), maxiters = 1)
+    return true
+end
+
+
+@named pdesystem = PDESystem(eqs, [], bcs, domain, [x0, x1, x2, x3],
+                             [ρ01(x0, x1, x2, x3), ρ02(x0, x1, x2, x3), ρ03(x0, x1, x2, x3),
+                              ρ12(x0, x1, x2, x3), ρ13(x0, x1, x2, x3), ρ23(x0, x1, x2, x3)])
+discretization = PhysicsInformedNN(chains0, QuasiRandomTraining(1000))
+sym_prob = symbolic_discretize(pdesystem, discretization)
+prob = discretize(pdesystem, discretization)
+@info "testing energy loss functions: quasi random training: solver runs with only a PDE system."
+@test test_donaldson_energy_loss_no_logs(0.5, sym_prob, prob)
+
+
+@named pdesystem1 = PDESystem([], energies, bcs, domain, [x0, x1, x2, x3],
+                              [ρ01(x0, x1, x2, x3), ρ02(x0, x1, x2, x3), ρ03(x0, x1, x2, x3),
+                               ρ12(x0, x1, x2, x3), ρ13(x0, x1, x2, x3), ρ23(x0, x1, x2, x3)])
+sym_prob = symbolic_discretize(pdesystem1, discretization)
+prob = discretize(pdesystem1, discretization)
+@info "testing energy loss functions: quasi random training: solver runs with only energies."
+@test test_donaldson_energy_loss_no_logs(0.5, sym_prob, prob)
+
+@named pdesystem2 = PDESystem(eqs, energies, bcs, domain, [x0, x1, x2, x3],
+                              [ρ01(x0, x1, x2, x3), ρ02(x0, x1, x2, x3), ρ03(x0, x1, x2, x3),
+                               ρ12(x0, x1, x2, x3), ρ13(x0, x1, x2, x3), ρ23(x0, x1, x2, x3)])
+discretization = PhysicsInformedNN(chains0, StochasticTraining(1000))
+sym_prob = symbolic_discretize(pdesystem2, discretization)
+prob = discretize(pdesystem2, discretization)
+@info "testing energy loss functions: stochastic training: solver runs with energies and PDE system."
+@test test_donaldson_energy_loss_no_logs(0.5, sym_prob, prob)
+
+discretization = PhysicsInformedNN(chains0, GridTraining(0.1))
+sym_prob = symbolic_discretize(pdesystem2, discretization)
+prob = discretize(pdesystem2, discretization)
+@info "testing energy loss functions: grid training: solver runs with energies and PDE system."
+@test test_donaldson_energy_loss_no_logs(0.5, sym_prob, prob)
+
+discretization = PhysicsInformedNN(chains0, QuadratureTraining())
+sym_prob = symbolic_discretize(pdesystem1, discretization)
+prob = discretize(pdesystem1, discretization)
+@info "testing energy loss functions: quadrature training: solver runs with only energies."
+@test test_donaldson_energy_loss_no_logs(0.5, sym_prob, prob) diff --git a/test/runtests.jl b/test/runtests.jl index afe7186a28..5a12c1ec41 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -30,6 +30,7 @@ end if GROUP == "All" || GROUP == "NNPDE2" @time @safetestset "Additional Loss" begin include("additional_loss_tests.jl") end + @time @safetestset "Energy Loss" begin include("energy_loss_tests.jl") end @time @safetestset "Direction Function Approximation" begin include("direct_function_tests.jl") end end if GROUP == "All" || GROUP == "NeuralAdapter"
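
For reviewers, a minimal end-to-end sketch of how the new `energies` slot is meant to be used, mirroring the tests above. The six-argument `PDESystem(eqs, energies, bcs, domain, ivs, dvs)` constructor and the `energy_loss_weights` keyword are the extensions introduced by this diff; the toy problem, network size, weights, and iteration count are illustrative assumptions, not part of the change:

```julia
using NeuralPDE, Lux, Optimization, OptimizationOptimisers
import ModelingToolkit: Interval

@parameters x
@variables u(..)
Dx = Differential(x)

eqs = [Dx(Dx(u(x))) ~ 0.0]          # ordinary PDE residual (an Equation)
energies = [Dx(u(x))^2]             # energy term: a plain expression, handled by
                                    # the non-Equation parse_equation method above
bcs = [u(0.0) ~ 0.0, u(1.0) ~ 1.0]
domain = [x ∈ Interval(0.0, 1.0)]

# energy terms get their own weights, alongside the pde and bc weights
adaloss = NonAdaptiveLoss(; pde_loss_weights = 1, energy_loss_weights = 1e-2,
                          bc_loss_weights = 10)
chain = Lux.Chain(Dense(1, 16, Lux.σ), Dense(16, 1))
discretization = PhysicsInformedNN(chain, GridTraining(0.05); adaptive_loss = adaloss)

@named sys = PDESystem(eqs, energies, bcs, domain, [x], [u(x)])
prob = discretize(sys, discretization)
sol = Optimization.solve(prob, Adam(0.01); maxiters = 200)
```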