
[API] Expose the options for iterative refinements and quasi-Newton (#287)

* [API] Expose the options for iterative refinements and quasi-Newton

* fix logger macro in initialization

* fixup! Merge remote-tracking branch 'origin/master' into fp/options

---------

Co-authored-by: Sungho Shin <[email protected]>
frapac and sshin23 authored Jan 12, 2024
1 parent 7942ad5 commit b35bc21
Showing 4 changed files with 52 additions and 34 deletions.
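
As an illustration of the new user-facing surface, a minimal sketch (not from the commit itself): load_options now forwards leftover keyword arguments to the iterator options, and the IPM options gain a quasi_newton_options field, so both sets of options become reachable from the solver constructor. The sketch assumes ADNLPModels is available for a throwaway test problem; option names are taken from the diff below.

    using MadNLP, ADNLPModels

    # Throwaway problem; any AbstractNLPModel works here.
    nlp = ADNLPModel(x -> (x[1] - 1.0)^2 + (x[2] - 2.0)^2, [0.0, 0.0])

    # Richardson iterative-refinement options, routed through the leftover kwargs.
    solver = MadNLPSolver(nlp; richardson_max_iter = 5, richardson_tol = 1e-12)

    # Quasi-Newton options, grouped in the new QuasiNewtonOptions struct.
    solver_qn = MadNLPSolver(
        nlp;
        hessian_approximation = MadNLP.CompactLBFGS,
        quasi_newton_options = MadNLP.QuasiNewtonOptions(max_history = 20),
    )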
src/IPM/IPM.jl: 37 changes (20 additions, 17 deletions)
@@ -104,28 +104,31 @@ end

 function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}

-    opt, opt_linear_solver, logger = load_options(nlp; kwargs...)
-    @assert is_supported(opt.linear_solver, T)
+    options = load_options(nlp; kwargs...)
+
+    ipm_opt = options.interior_point
+    logger = options.logger
+    @assert is_supported(ipm_opt.linear_solver, T)

     cnt = MadNLPCounters(start_time=time())
     cb = create_callback(
-        opt.callback,
+        ipm_opt.callback,
         nlp;
-        fixed_variable_treatment=opt.fixed_variable_treatment,
-        equality_treatment=opt.equality_treatment,
+        fixed_variable_treatment=ipm_opt.fixed_variable_treatment,
+        equality_treatment=ipm_opt.equality_treatment,
     )

     # generic options
-    opt.disable_garbage_collector &&
+    ipm_opt.disable_garbage_collector &&
         (GC.enable(false); @warn(logger,"Julia garbage collector is temporarily disabled"))
-    set_blas_num_threads(opt.blas_num_threads; permanent=true)
+    set_blas_num_threads(ipm_opt.blas_num_threads; permanent=true)
     @trace(logger,"Initializing variables.")

     ind_cons = get_index_constraints(
         get_lvar(nlp), get_uvar(nlp),
         get_lcon(nlp), get_ucon(nlp);
-        fixed_variable_treatment=opt.fixed_variable_treatment,
-        equality_treatment=opt.equality_treatment
+        fixed_variable_treatment=ipm_opt.fixed_variable_treatment,
+        equality_treatment=ipm_opt.equality_treatment
     )

     ind_lb = ind_cons.ind_lb
@@ -140,16 +143,16 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}

@trace(logger,"Initializing KKT system.")
kkt = create_kkt_system(
opt.kkt_system,
ipm_opt.kkt_system,
cb,
ind_cons,
opt.linear_solver;
hessian_approximation=opt.hessian_approximation,
opt_linear_solver=opt_linear_solver,
ipm_opt.linear_solver;
hessian_approximation=ipm_opt.hessian_approximation,
opt_linear_solver=options.linear_solver,
)

@trace(logger,"Initializing iterative solver.")
iterator = opt.iterator(kkt; cnt = cnt, logger = logger)
iterator = ipm_opt.iterator(kkt; cnt = cnt, logger = logger, opt = options.iterative_refinement)

x = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
xl = PrimalVector(VT, nx, ns, ind_lb, ind_ub)
@@ -184,10 +187,10 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}
     dx_lr = view(d.xp, ind_cons.ind_lb) # TODO
     dx_ur = view(d.xp, ind_cons.ind_ub) # TODO

-    inertia_correction_method = if opt.inertia_correction_method == InertiaAuto
+    inertia_correction_method = if ipm_opt.inertia_correction_method == InertiaAuto
         is_inertia(kkt.linear_solver)::Bool ? InertiaBased : InertiaFree
     else
-        opt.inertia_correction_method
+        ipm_opt.inertia_correction_method
     end

     inertia_corrector = build_inertia_corrector(
@@ -200,7 +203,7 @@ function MadNLPSolver(nlp::AbstractNLPModel{T,VT}; kwargs...) where {T, VT}

     return MadNLPSolver(
         nlp, cb, kkt,
-        opt, cnt, logger,
+        ipm_opt, cnt, options.logger,
         n, m, nlb, nub,
         x, y, zl, zu, xl, xu,
         zero(T), f, c,
src/LinearSolvers/backsolve.jl: 14 changes (8 additions, 6 deletions)
@@ -1,7 +1,7 @@
-@kwdef struct RichardsonOptions
-    max_iter::Int = 10
-    tol::Float64 = 1e-10
-    acceptable_tol::Float64 = 1e-5
+@kwdef mutable struct RichardsonOptions <: AbstractOptions
+    richardson_max_iter::Int = 10
+    richardson_tol::Float64 = 1e-10
+    richardson_acceptable_tol::Float64 = 1e-5
 end

 struct RichardsonIterator{T, KKT <: AbstractKKTSystem{T}} <: AbstractIterator{T}
@@ -22,6 +22,8 @@ function RichardsonIterator(
     )
 end

+default_options(::Type{RichardsonIterator}) = RichardsonOptions()
+
 function solve_refine!(
     x::VT,
     iterator::R,
@@ -66,7 +68,7 @@ function solve_refine!(
         @debug(iterator.logger, @sprintf("%4i %6.2e", iter, residual_ratio))
         iter += 1

-        if (iter >= iterator.opt.max_iter) || (residual_ratio < iterator.opt.tol)
+        if (iter >= iterator.opt.richardson_max_iter) || (residual_ratio < iterator.opt.richardson_tol)
             break
         end
     end
@@ -79,6 +81,6 @@ function solve_refine!(
         ),
     )

-    return residual_ratio < iterator.opt.acceptable_tol
+    return residual_ratio < iterator.opt.richardson_acceptable_tol
 end
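
A small sketch of the renamed struct on its own (names as in the hunk above; the MadNLP prefix is used since these are internals): @kwdef gives a keyword constructor, the struct is now mutable, and default_options supplies the defaults picked up during option loading.

    opts = MadNLP.RichardsonOptions(richardson_max_iter = 20)    # other fields keep their defaults
    opts.richardson_tol = 1e-12                                   # mutable, so fields can be adjusted in place
    MadNLP.default_options(MadNLP.RichardsonIterator)             # returns a fresh RichardsonOptions()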

src/options.jl: 13 changes (11 additions, 2 deletions)
@@ -47,6 +47,7 @@ end
     hessian_constant::Bool = false
     kkt_system::Type = SparseKKTSystem
     hessian_approximation::Type = ExactHessian
+    quasi_newton_options::QuasiNewtonOptions = QuasiNewtonOptions()
     callback::Type = SparseCallback

     # initialization options
@@ -163,7 +164,10 @@ function load_options(nlp; options...)
     check_option_sanity(opt_ipm)
     # Initiate linear-solver options
     opt_linear_solver = default_options(opt_ipm.linear_solver)
-    remaining_options = set_options!(opt_linear_solver, linear_solver_options)
+    iterator_options = set_options!(opt_linear_solver, linear_solver_options)
+    # Initiate iterator options
+    opt_iterator = default_options(opt_ipm.iterator)
+    remaining_options = set_options!(opt_iterator, iterator_options)

     # Initiate logger
     logger = MadNLPLogger(
@@ -177,6 +181,11 @@ function load_options(nlp; options...)
     if !isempty(remaining_options)
         print_ignored_options(logger, remaining_options)
     end
-    return opt_ipm, opt_linear_solver, logger
+    return (
+        interior_point=opt_ipm,
+        linear_solver=opt_linear_solver,
+        iterative_refinement=opt_iterator,
+        logger=logger,
+    )
 end
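
A sketch of the new return shape (internal API; nlp stands for any AbstractNLPModel, as in the sketch near the top): load_options now returns a NamedTuple rather than a 3-tuple, so callers such as MadNLPSolver pick the pieces by name.

    options = MadNLP.load_options(nlp; richardson_max_iter = 5)
    options.interior_point                             # IPM options
    options.linear_solver                              # options of the selected linear solver
    options.iterative_refinement.richardson_max_iter   # 5, routed through set_options!
    options.logger                                      # MadNLPLogger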

src/quasi_newton.jl: 22 changes (13 additions, 9 deletions)
@@ -35,6 +35,11 @@ curvature(::Val{SCALAR2}, sk, yk) = dot(yk, yk) / dot(sk, yk)
 curvature(::Val{SCALAR3}, sk, yk) = 0.5 * (curvature(Val(SCALAR1), sk, yk) + curvature(Val(SCALAR2), sk, yk))
 curvature(::Val{SCALAR4}, sk, yk) = sqrt(curvature(Val(SCALAR1), sk, yk) * curvature(Val(SCALAR2), sk, yk))

+@kwdef mutable struct QuasiNewtonOptions <: AbstractOptions
+    init_strategy::BFGSInitStrategy = SCALAR1
+    max_history::Int = 6
+end
+

 """
     BFGS{T, VT} <: AbstractQuasiNewton{T, VT}
@@ -61,10 +66,10 @@ function create_quasi_newton(
     ::Type{BFGS},
     cb::AbstractCallback{T,VT},
     n;
-    init_strategy = SCALAR1
+    options=QuasiNewtonOptions(),
 ) where {T,VT}
     BFGS(
-        init_strategy,
+        options.init_strategy,
         VT(undef, n),
         VT(undef, n),
         VT(undef, n),
@@ -100,10 +105,10 @@ function create_quasi_newton(
     ::Type{DampedBFGS},
     cb::AbstractCallback{T,VT},
     n;
-    init_strategy = SCALAR1
+    options=QuasiNewtonOptions(),
 ) where {T,VT}
     return DampedBFGS(
-        init_strategy,
+        options.init_strategy,
         VT(undef, n),
         VT(undef, n),
         VT(undef, n),
@@ -178,17 +183,16 @@ function create_quasi_newton(
     ::Type{CompactLBFGS},
     cb::AbstractCallback{T,VT},
     n;
-    max_mem=6,
-    init_strategy = SCALAR1
+    options=QuasiNewtonOptions(),
 ) where {T, VT}
     return CompactLBFGS(
-        init_strategy,
+        options.init_strategy,
         fill!(create_array(cb, n), zero(T)),
         fill!(create_array(cb, n), zero(T)),
         fill!(create_array(cb, n), zero(T)),
         fill!(create_array(cb, n), zero(T)),
         fill!(create_array(cb, n), zero(T)),
-        max_mem,
+        options.max_history,
         0,
         fill!(create_array(cb, n, 0), zero(T)),
         fill!(create_array(cb, n, 0), zero(T)),
@@ -319,4 +323,4 @@ end


 struct ExactHessian{T, VT} <: AbstractHessian{T, VT} end
-create_quasi_newton(::Type{ExactHessian}, cb::AbstractCallback{T,VT}, n) where {T,VT} = ExactHessian{T, VT}()
+create_quasi_newton(::Type{ExactHessian}, cb::AbstractCallback{T,VT}, n; options...) where {T,VT} = ExactHessian{T, VT}()
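
At the quasi-Newton level the per-method keywords (init_strategy, max_mem) are folded into the single options struct; a sketch, with cb and n standing in for an existing callback and problem dimension:

    qn_opts = MadNLP.QuasiNewtonOptions(init_strategy = MadNLP.SCALAR2, max_history = 20)
    lbfgs = MadNLP.create_quasi_newton(MadNLP.CompactLBFGS, cb, n; options = qn_opts)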
