Compatibility with newer versions of NeuralPDE, Lux, ModelingToolkit #33

Merged · 12 commits · Dec 13, 2024

12 changes: 6 additions & 6 deletions Project.toml
@@ -4,31 +4,31 @@ authors = ["Nicholas Klugman <[email protected]>"]
version = "0.1.0"

[deps]
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
EvalMetrics = "251d5f9e-10c1-4699-ba24-e0ad168fa3e4"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
ModelingToolkit = "961ee093-0014-501f-94e3-6117800e7a78"
NeuralPDE = "315f7962-48a3-4962-8226-d0f33b1235f0"
Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462"

[compat]
ForwardDiff = "0.10"
JuMP = "1"
Lux = "0.5"
ModelingToolkit = "8, 9"
Lux = "1"
ModelingToolkit = "9.51"
NLopt = "1"
NeuralPDE = "5.10"
NeuralPDE = "5.17"
Optimization = "3, 4"
SciMLBase = "2"
julia = "1.10"

[extras]
Boltz = "4544d5e4-abc5-4dea-817f-29e4c205d9c8"
CSDP = "0a46da34-8e4b-519e-b418-48813639ff34"
DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa"
ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd"
NeuralPDE = "315f7962-48a3-4962-8226-d0f33b1235f0"
@@ -40,4 +40,4 @@ SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
test = ["SafeTestsets", "Test", "Lux", "Optimization", "OptimizationOptimJL", "OptimizationOptimisers", "NLopt", "Random", "NeuralPDE", "DifferentialEquations", "CSDP", "Boltz"]
test = ["SafeTestsets", "Test", "Lux", "Optimization", "OptimizationOptimJL", "OptimizationOptimisers", "NLopt", "Random", "NeuralPDE", "CSDP", "Boltz", "ComponentArrays"]
13 changes: 7 additions & 6 deletions docs/src/demos/policy_search.md
@@ -13,7 +13,8 @@ We'll jointly train a neural controller ``\tau = u \left( \theta, \frac{d\theta}
## Copy-Pastable Code

```julia
using NeuralPDE, Lux, Boltz, ModelingToolkit, NeuralLyapunov
using NeuralPDE, Lux, ModelingToolkit, NeuralLyapunov
import Boltz.Layers: PeriodicEmbedding
import Optimization, OptimizationOptimisers, OptimizationOptimJL
using Random

@@ -54,8 +55,8 @@ dim_hidden = 20
dim_phi = 3
dim_u = 1
dim_output = dim_phi + dim_u
chain = [Lux.Chain(
Boltz.Layers.PeriodicEmbedding([1], [2π]),
chain = [Chain(
PeriodicEmbedding([1], [2π]),
Dense(3, dim_hidden, tanh),
Dense(dim_hidden, dim_hidden, tanh),
Dense(dim_hidden, 1)
@@ -115,7 +116,7 @@ net = discretization.phi
_θ = res.u.depvar

(open_loop_pendulum_dynamics, _), state_order, p_order = ModelingToolkit.generate_control_function(
driven_pendulum; simplify = true)
driven_pendulum; simplify = true, split = false)
p = [defaults[param] for param in p_order]

V_func, V̇_func = get_numerical_lyapunov_function(
@@ -189,7 +190,7 @@ dim_hidden = 20
dim_phi = 3
dim_u = 1
dim_output = dim_phi + dim_u
chain = [Lux.Chain(
chain = [Chain(
PeriodicEmbedding([1], [2π]),
Dense(3, dim_hidden, tanh),
Dense(dim_hidden, dim_hidden, tanh),
@@ -280,7 +281,7 @@ _θ = res.u.depvar
We can use the result of the optimization problem to build the Lyapunov candidate as a Julia function, as well as extract our controller, using the [`get_policy`](@ref) function.

```@example policy_search
(open_loop_pendulum_dynamics, _), state_order, p_order = ModelingToolkit.generate_control_function(driven_pendulum; simplify = true)
(open_loop_pendulum_dynamics, _), state_order, p_order = ModelingToolkit.generate_control_function(driven_pendulum; simplify = true, split = false)
p = [defaults[param] for param in p_order]

V_func, V̇_func = get_numerical_lyapunov_function(
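A note on the `split = false` keyword added to both `generate_control_function` calls in this demo: with ModelingToolkit 9's default `split = true`, the generated dynamics expect their parameters packed into an `MTKParameters` object, while `split = false` lets them accept the plain vector `p` assembled from `defaults`. A minimal sketch of the resulting calling convention, reusing the demo's names (the state values and zero torque below are placeholders):

```julia
(open_loop_pendulum_dynamics, _), state_order, p_order = ModelingToolkit.generate_control_function(
    driven_pendulum; simplify = true, split = false)
p = [defaults[param] for param in p_order]

# The generated open-loop dynamics are called as f(x, u, p, t),
# with x ordered according to state_order and u the input torque.
open_loop_pendulum_dynamics([0.1, 0.0], [0.0], p, 0.0)
```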
2 changes: 1 addition & 1 deletion src/NeuralLyapunov.jl
@@ -6,7 +6,7 @@ using LinearAlgebra
using ModelingToolkit
import SciMLBase
using NeuralPDE
import DifferentialEquations: Tsit5
import OrdinaryDiffEq: Tsit5
import EvalMetrics: ConfusionMatrix

include("conditions_specification.jl")
17 changes: 10 additions & 7 deletions src/NeuralLyapunovPDESystem.jl
@@ -175,7 +175,8 @@ function NeuralLyapunovPDESystem(
else
(f, _), x, params = ModelingToolkit.generate_control_function(
dynamics;
simplify = true
simplify = true,
split = false
)
(f, x, params, true)
end
@@ -255,7 +256,7 @@ function _NeuralLyapunovPDESystem(
end

################ Define equations and boundary conditions #################
eqs = []
eqs = Equation[]

if check_nonnegativity(minimization_condition)
cond = get_minimization_condition(minimization_condition)
@@ -267,10 +268,12 @@
push!(eqs, cond(V, V̇, state, fixed_point) ~ 0.0)
end

bcs = []
bcs = Equation[]

if check_minimal_fixed_point(minimization_condition)
push!(bcs, V(fixed_point) ~ 0.0)
_V = V(fixed_point)
_V = _V isa AbstractVector ? _V[] : _V
push!(bcs, _V ~ 0.0)
end

if policy_search
@@ -282,16 +285,16 @@
end

# NeuralPDE requires an equation and a boundary condition, even if they are
# trivial like 0.0 == 0.0, so we remove those trivial equations if they showed up
# trivial like φ(0.0) == φ(0.0), so we remove those trivial equations if they showed up
# naturally alongside other equations and add them in if we have no other equations
eqs = filter(eq -> eq != (0.0 ~ 0.0), eqs)
bcs = filter(eq -> eq != (0.0 ~ 0.0), bcs)

if isempty(eqs)
push!(eqs, 0.0 ~ 0.0)
push!(eqs, φ(fixed_point)[1] ~ φ(fixed_point)[1])
end
if isempty(bcs)
push!(bcs, 0.0 ~ 0.0)
push!(bcs, φ(fixed_point)[1] ~ φ(fixed_point)[1])
end

########################### Construct PDESystem ###########################
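The switch from `[]` to `Equation[]` gives the PDE system concretely typed equation vectors rather than `Vector{Any}`; that motivation is inferred from the diff rather than stated in it. A small standalone sketch of the difference:

```julia
using ModelingToolkit

@variables x y

eqs = Equation[]      # concretely typed, as in the updated code
push!(eqs, x ~ y)
eltype(eqs)           # Equation

eqs_any = []          # the old pattern
push!(eqs_any, x ~ y)
eltype(eqs_any)       # Any, even though every element is an Equation
```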
56 changes: 50 additions & 6 deletions src/benchmark_harness.jl
@@ -13,6 +13,13 @@ fixed point. Return a confusion matrix for the neural Lyapunov classifier using
of the simulated trajectories as ground truth. Additionally return the time it took for the
optimization to run.

To use multiple solvers, supply a vector of optimizers in `opt`. The first optimizer runs to
completion, then the problem is remade with its result as the initial guess and handed to the
second optimizer, and so on (a usage sketch follows this file's diff). Supplying a vector of
`Pair`s in `optimization_args` applies the same arguments to every optimization pass, while
supplying a vector of such vectors allows different arguments for each pass.

# Positional Arguments
- `dynamics`: the dynamical system being analyzed, represented as an `ODESystem` or the
function `f` such that `ẋ = f(x[, u], p, t)`; either way, the ODE should not depend on
@@ -56,8 +63,8 @@ optimization to run.
!(dynamics isa ODEFunction)`; when `dynamics isa ODEFunction`, `policy_search` should
not be supplied (as it must be false); when `dynamics isa ODESystem`, value inferred by
the presence of unbound inputs.
- `optimization_args`: arguments to be passed into the optimization solver. For more
information, see the
- `optimization_args`: arguments to be passed into the optimization solver, as a vector of
`Pair`s. For more information, see the
[Optimization.jl docs](https://docs.sciml.ai/Optimization/stable/API/solve/).
- `simulation_time`: simulation end time for checking if trajectory from a point reaches
equilibrium
@@ -107,7 +114,7 @@ function benchmark(
f, params = if isempty(ModelingToolkit.unbound_inputs(dynamics))
ODEFunction(dynamics), parameters(dynamics)
else
(_f, _), _, _p = ModelingToolkit.generate_control_function(dynamics, simplify = true)
(_f, _), _, _p = ModelingToolkit.generate_control_function(
dynamics,
simplify = true,
split = false
)
_f, _p
end

@@ -221,8 +232,8 @@ function _benchmark(
pde_system,
chain,
strategy,
opt;
optimization_args = optimization_args
opt,
optimization_args
)
solve_time = t.time
θ, phi = t.value
@@ -278,7 +289,7 @@ function _benchmark(
end
end

function benchmark_solve(pde_system, chain, strategy, opt; optimization_args)
function benchmark_solve(pde_system, chain, strategy, opt, optimization_args)
# Construct OptimizationProblem
discretization = PhysicsInformedNN(chain, strategy)
prob = discretize(pde_system, discretization)
Expand All @@ -290,6 +301,39 @@ function benchmark_solve(pde_system, chain, strategy, opt; optimization_args)
return res.u.depvar, discretization.phi
end

function benchmark_solve(pde_system, chain, strategy, opt::AbstractVector, optimization_args::AbstractVector{<:AbstractVector})
# Construct OptimizationProblem
discretization = PhysicsInformedNN(chain, strategy)
prob = discretize(pde_system, discretization)

# Solve OptimizationProblem
res = Ref{Any}()
for i in eachindex(opt)
_res = solve(prob, opt[i]; optimization_args[i]...)
prob = remake(prob, u0 = _res.u)
res[] = _res
end

# Return parameters θ and network phi
return res[].u.depvar, discretization.phi
end

function benchmark_solve(pde_system, chain, strategy, opt::AbstractVector, optimization_args)
# Construct OptimizationProblem
discretization = PhysicsInformedNN(chain, strategy)
prob = discretize(pde_system, discretization)

# Solve OptimizationProblem
# A bare `for` loop evaluates to `nothing`, so keep the latest result in a Ref,
# mirroring the vector-of-vectors method above.
res = Ref{Any}()
for i in eachindex(opt)
_res = solve(prob, opt[i]; optimization_args...)
prob = Optimization.remake(prob, u0 = _res.u)
res[] = _res
end

# Return parameters θ and network phi
return res[].u.depvar, discretization.phi
end

function eval_Lyapunov(lb, ub, n, V, V̇)
Δ = @. (ub - lb) / n

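A sketch of the multi-solver convention described in the docstring above; the optimizer choices and iteration counts are hypothetical, and how `opt` and `optimization_args` reach `benchmark_solve` follows `benchmark`'s signature, which this diff does not show:

```julia
using OptimizationOptimisers: Adam
using OptimizationOptimJL: BFGS

# Two-stage optimization: Adam to get close, then BFGS to polish the result.
opt = [Adam(0.01), BFGS()]

# A single vector of Pairs applies the same solve arguments to every pass...
optimization_args = [:maxiters => 300]

# ...while a vector of vectors allows per-pass arguments
# (many Adam iterations, then a short BFGS refinement).
optimization_args = [[:maxiters => 300], [:maxiters => 50]]
```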
23 changes: 16 additions & 7 deletions src/decrease_conditions.jl
@@ -68,9 +68,15 @@ end

function get_decrease_condition(cond::LyapunovDecreaseCondition)
if cond.check_decrease
return (V, dVdt, x, fixed_point) -> cond.rectifier(
cond.rate_metric(V(x), dVdt(x)) + cond.strength(x, fixed_point)
)
return function (V, dVdt, x, fixed_point)
_V = V(x)
_V = _V isa AbstractVector ? _V[] : _V
_V̇ = dVdt(x)
_V̇ = _V̇ isa AbstractVector ? _V̇[] : _V̇
return cond.rectifier(
cond.rate_metric(_V, _V̇) + cond.strength(x, fixed_point)
)
end
else
return nothing
end
@@ -97,26 +103,29 @@ function StabilityISL(; rectifier = (t) -> max(zero(t), t))
end

"""
AsymptoticStability(; C, rectifier)
AsymptoticStability(; C, strength, rectifier)

Construct a [`LyapunovDecreaseCondition`](@ref) corresponding to asymptotic stability.

The decrease condition for asymptotic stability is ``V̇(x) < 0``, which is here represented
as ``V̇(x) ≤ - C \\lVert x - x_0 \\rVert^2`` for some ``C > 0``. ``C`` defaults to `1e-6`.
as ``V̇(x) ≤ - \\texttt{strength}(x, x_0)``, where `strength` is positive definite around
``x_0``. By default, ``\\texttt{strength}(x, x_0) = C \\lVert x - x_0 \\rVert^2`` for the
supplied ``C > 0``. ``C`` defaults to `1e-6`.

The inequality is represented by
``\\texttt{rectifier}(V̇(x) + C \\lVert x - x_0 \\rVert^2) = 0``.
``\\texttt{rectifier}(V̇(x) + \\texttt{strength}(x, x_0)) = 0``.
The default value `rectifier = (t) -> max(zero(t), t)` exactly represents the inequality,
but differentiable approximations of this function may be employed.
"""
function AsymptoticStability(;
C::Real = 1e-6,
strength = (x, x0) -> C * (x - x0) ⋅ (x - x0),
rectifier = (t) -> max(zero(t), t)
)::LyapunovDecreaseCondition
return LyapunovDecreaseCondition(
true,
(V, dVdt) -> dVdt,
(x, x0) -> C * (x - x0) ⋅ (x - x0),
strength,
rectifier
)
end
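The widened `AsymptoticStability` constructor can be exercised roughly as follows (a sketch; the quartic `strength` is an illustration of the new keyword, not something taken from the PR):

```julia
using NeuralLyapunov

# Default margin: V̇(x) ≤ -1e-6 ‖x - x₀‖²
cond_default = AsymptoticStability()

# Tighter quadratic margin via the existing C keyword
cond_tight = AsymptoticStability(C = 1e-3)

# Hypothetical custom margin: V̇(x) ≤ -0.1 ‖x - x₀‖⁴
cond_quartic = AsymptoticStability(strength = (x, x0) -> 0.1 * sum(abs2, x .- x0)^2)
```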
10 changes: 7 additions & 3 deletions src/decrease_conditions_RoA_aware.jl
@@ -103,10 +103,14 @@ end
function get_decrease_condition(cond::RoAAwareDecreaseCondition)
if cond.check_decrease
return function (V, dVdt, x, fixed_point)
cond.sigmoid(cond.ρ - V(x)) * cond.rectifier(
cond.rate_metric(V(x), dVdt(x)) + cond.strength(x, fixed_point)
_V = V(x)
_V = _V isa AbstractVector ? _V[] : _V
_V̇ = dVdt(x)
_V̇ = _V̇ isa AbstractVector ? _V̇[] : _V̇
return cond.sigmoid(cond.ρ - _V) * cond.rectifier(
cond.rate_metric(_V, _V̇) + cond.strength(x, fixed_point)
) +
cond.sigmoid(V(x) - cond.ρ) * cond.out_of_RoA_penalty(V(x), dVdt(x), x, fixed_point,
cond.sigmoid(_V - cond.ρ) * cond.out_of_RoA_penalty(_V, _V̇, x, fixed_point,
cond.ρ)
end
else
6 changes: 5 additions & 1 deletion src/minimization_conditions.jl
@@ -62,7 +62,11 @@ end

function get_minimization_condition(cond::LyapunovMinimizationCondition)
if cond.check_nonnegativity
return (V, x, fixed_point) -> cond.rectifier(cond.strength(x, fixed_point) - V(x))
return function (V, x, fixed_point)
_V = V(x)
_V = _V isa AbstractVector ? _V[] : _V
return cond.rectifier(cond.strength(x, fixed_point) - _V)
end
else
return nothing
end
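The `_V isa AbstractVector ? _V[] : _V` idiom now used in all three condition files guards against the Lyapunov candidate returning a 1-element vector (as a neural-network evaluation typically does) instead of a scalar. A standalone illustration with toy, hypothetical `V` functions:

```julia
V_vector(x) = [sum(abs2, x)]   # like a network output: a 1-element Vector
V_scalar(x) = sum(abs2, x)     # a plain scalar

unwrap(v) = v isa AbstractVector ? v[] : v   # v[] extracts the single element

unwrap(V_vector([1.0, 2.0])) == unwrap(V_scalar([1.0, 2.0])) == 5.0   # true
```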