From 03edc78cc6084f1823ff761898d7251f401c32ea Mon Sep 17 00:00:00 2001 From: Christopher Rackauckas Date: Thu, 22 Aug 2024 08:46:59 -0400 Subject: [PATCH] WIP: Make more libraries into an extension? I think we should be considering some kind of split to LinearSolveCore vs LinearSolve, a la OrdinaryDiffEq. The default algorithm requires RecursiveFactorization and MKL, but we could definitely make a version that doesn't have these deps. But at least sparse stuff can go into an extension since those always dispatch on the existence of sparse matrices. This PR isn't quite complete yet, more needs to be removed with sparse, but it shows the right direction. --- Project.toml | 6 +- ext/LinearSolveKLUExt.jl | 66 +++++++++ ext/LinearSolveSparseArraysExt.jl | 178 ++++++++++++++++++++++++ src/LinearSolve.jl | 86 +++++------- src/default.jl | 29 ---- src/extension_algs.jl | 95 +++++++++++++ src/factorization.jl | 216 ------------------------------ src/factorization_sparse.jl | 35 ----- 8 files changed, 377 insertions(+), 334 deletions(-) create mode 100644 ext/LinearSolveKLUExt.jl create mode 100644 ext/LinearSolveSparseArraysExt.jl delete mode 100644 src/factorization_sparse.jl diff --git a/Project.toml b/Project.toml index b10fd29ca..2ee6763cf 100644 --- a/Project.toml +++ b/Project.toml @@ -12,7 +12,6 @@ EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56" FastLapackInterface = "29a986be-02c6-4525-aec4-84b980013641" GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" -KLU = "ef3ab10e-7fda-4108-b977-705223b18434" Krylov = "ba0b0d4f-ebba-5204-a429-3ac8c609bfb7" LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02" Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" @@ -26,7 +25,6 @@ Reexport = "189a3867-3050-52da-a836-e630ba90ab69" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" SciMLOperators = "c0aeaf25-5076-4817-a8d5-81caf7dfa961" Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" -SparseArrays = 
"2f01184e-e22b-5df5-ae63-d93ebab69eaf" Sparspak = "e56a9233-b9d6-4f03-8d0f-1825330902ac" StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" UnPack = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" @@ -40,11 +38,13 @@ EnzymeCore = "f151be2c-9106-41f4-ab19-57ee4f262869" FastAlmostBandedMatrices = "9d29842c-ecb8-4973-b1e9-a27b1157504e" HYPRE = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" +KLU = "ef3ab10e-7fda-4108-b977-705223b18434" KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c" KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" Metal = "dde4c033-4e86-420c-a63e-0dd931031962" Pardiso = "46dd5b70-b6fb-5a00-ae2d-e8fea33afaf2" RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" [extensions] LinearSolveBandedMatricesExt = "BandedMatrices" @@ -55,11 +55,13 @@ LinearSolveEnzymeExt = "EnzymeCore" LinearSolveFastAlmostBandedMatricesExt = "FastAlmostBandedMatrices" LinearSolveHYPREExt = "HYPRE" LinearSolveIterativeSolversExt = "IterativeSolvers" +LinearSolveKLUExt = "KLU" LinearSolveKernelAbstractionsExt = "KernelAbstractions" LinearSolveKrylovKitExt = "KrylovKit" LinearSolveMetalExt = "Metal" LinearSolvePardisoExt = "Pardiso" LinearSolveRecursiveArrayToolsExt = "RecursiveArrayTools" +LinearSolveSparseArraysExt = "SparseArrays" [compat] AllocCheck = "0.1" diff --git a/ext/LinearSolveKLUExt.jl b/ext/LinearSolveKLUExt.jl new file mode 100644 index 000000000..5d04277e6 --- /dev/null +++ b/ext/LinearSolveKLUExt.jl @@ -0,0 +1,66 @@ +module LinearSolveKLUExt + +using LinearSolve, LinearSolve.LinearAlgebra +using KLU, KLU.SparseArrays + +const PREALLOCATED_KLU = KLU.KLUFactorization(SparseMatrixCSC(0, 0, [1], Int[], + Float64[])) + +function init_cacheval(alg::KLUFactorization, + A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + nothing +end + +function init_cacheval(alg::KLUFactorization, 
A::SparseMatrixCSC{Float64, Int}, b, u, Pl, + Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + PREALLOCATED_KLU +end + +function init_cacheval(alg::KLUFactorization, A::AbstractSparseArray, b, u, Pl, Pr, + maxiters::Int, abstol, + reltol, + verbose::Bool, assumptions::OperatorAssumptions) + A = convert(AbstractMatrix, A) + return KLU.KLUFactorization(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A))) +end + +# TODO: guard this against errors +function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) + A = cache.A + A = convert(AbstractMatrix, A) + if cache.isfresh + cacheval = @get_cacheval(cache, :KLUFactorization) + if alg.reuse_symbolic + if alg.check_pattern && pattern_changed(cacheval, A) + fact = KLU.klu( + SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A)), + check = false) + else + fact = KLU.klu!(cacheval, nonzeros(A), check = false) + end + else + # New fact each time since the sparsity pattern can change + # and thus it needs to reallocate + fact = KLU.klu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A))) + end + cache.cacheval = fact + cache.isfresh = false + end + F = @get_cacheval(cache, :KLUFactorization) + if F.common.status == KLU.KLU_OK + y = ldiv!(cache.u, F, cache.b) + SciMLBase.build_linear_solution(alg, y, nothing, cache) + else + SciMLBase.build_linear_solution( + alg, cache.u, nothing, cache; retcode = ReturnCode.Infeasible) + end +end + +end diff --git a/ext/LinearSolveSparseArraysExt.jl b/ext/LinearSolveSparseArraysExt.jl new file mode 100644 index 000000000..931831eb7 --- /dev/null +++ b/ext/LinearSolveSparseArraysExt.jl @@ -0,0 +1,178 @@ +module LinearSolveSparseArraysExt + +using LinearSolve +import LinearSolve: SciMLBase, LinearAlgebra, PrecompileTools, init_cacheval +using LinearSolve: DefaultLinearSolver, DefaultAlgorithmChoice +using SparseArrays +using SparseArrays: AbstractSparseMatrixCSC, nonzeros, rowvals, 
getcolptr + +# Specialize QR for the non-square case +# Missing ldiv! definitions: https://github.com/JuliaSparse/SparseArrays.jl/issues/242 +function LinearSolve._ldiv!(x::Vector, + A::Union{SparseArrays.QR, LinearAlgebra.QRCompactWY, + SparseArrays.SPQR.QRSparse, + SparseArrays.CHOLMOD.Factor}, b::Vector) +x .= A \ b +end + +function LinearSolve._ldiv!(x::AbstractVector, + A::Union{SparseArrays.QR, LinearAlgebra.QRCompactWY, + SparseArrays.SPQR.QRSparse, + SparseArrays.CHOLMOD.Factor}, b::AbstractVector) +x .= A \ b +end + +# Ambiguity removal +function LinearSolve._ldiv!(::SVector, + A::Union{SparseArrays.CHOLMOD.Factor, LinearAlgebra.QR, + LinearAlgebra.QRCompactWY, SparseArrays.SPQR.QRSparse}, + b::AbstractVector) +(A \ b) +end +function LinearSolve._ldiv!(::SVector, + A::Union{SparseArrays.CHOLMOD.Factor, LinearAlgebra.QR, + LinearAlgebra.QRCompactWY, SparseArrays.SPQR.QRSparse}, + b::SVector) +(A \ b) +end + +function LinearSolve.pattern_changed(fact, A::SparseArrays.SparseMatrixCSC) + !(SparseArrays.decrement(SparseArrays.getcolptr(A)) == + fact.colptr && SparseArrays.decrement(SparseArrays.getrowval(A)) == + fact.rowval) +end + +const PREALLOCATED_UMFPACK = SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(0, 0, [1], + Int[], Float64[])) + +function init_cacheval(alg::UMFPACKFactorization, + A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + nothing +end + +function init_cacheval(alg::UMFPACKFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + PREALLOCATED_UMFPACK +end + +function init_cacheval(alg::UMFPACKFactorization, A::AbstractSparseArray, b, u, Pl, Pr, + maxiters::Int, abstol, + reltol, + verbose::Bool, assumptions::OperatorAssumptions) + A = convert(AbstractMatrix, A) + zerobased = SparseArrays.getcolptr(A)[1] == 0 + return SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(size(A)..., getcolptr(A), + 
rowvals(A), nonzeros(A))) +end + +function SciMLBase.solve!(cache::LinearCache, alg::UMFPACKFactorization; kwargs...) + A = cache.A + A = convert(AbstractMatrix, A) + if cache.isfresh + cacheval = @get_cacheval(cache, :UMFPACKFactorization) + if alg.reuse_symbolic + # Caches the symbolic factorization: https://github.com/JuliaLang/julia/pull/33738 + if alg.check_pattern && pattern_changed(cacheval, A) + fact = lu( + SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A)), + check = false) + else + fact = lu!(cacheval, + SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), + nonzeros(A)), check = false) + end + else + fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)), + check = false) + end + cache.cacheval = fact + cache.isfresh = false + end + + F = @get_cacheval(cache, :UMFPACKFactorization) + if F.status == SparseArrays.UMFPACK.UMFPACK_OK + y = ldiv!(cache.u, F, cache.b) + SciMLBase.build_linear_solution(alg, y, nothing, cache) + else + SciMLBase.build_linear_solution( + alg, cache.u, nothing, cache; retcode = ReturnCode.Infeasible) + end +end + +const PREALLOCATED_CHOLMOD = cholesky(SparseMatrixCSC(0, 0, [1], Int[], Float64[])) + +function init_cacheval(alg::CHOLMODFactorization, + A, b, u, Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) + nothing +end + +function init_cacheval(alg::CHOLMODFactorization, + A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, + Pl, Pr, + maxiters::Int, abstol, reltol, + verbose::Bool, assumptions::OperatorAssumptions) where {T <: + Union{Float32, Float64}} + PREALLOCATED_CHOLMOD +end + +function SciMLBase.solve!(cache::LinearCache, alg::CHOLMODFactorization; kwargs...) 
+ A = cache.A + A = convert(AbstractMatrix, A) + + if cache.isfresh + cacheval = @get_cacheval(cache, :CHOLMODFactorization) + fact = cholesky(A; check = false) + if !LinearAlgebra.issuccess(fact) + ldlt!(fact, A; check = false) + end + cache.cacheval = fact + cache.isfresh = false + end + + cache.u .= @get_cacheval(cache, :CHOLMODFactorization) \ cache.b + SciMLBase.build_linear_solution(alg, cache.u, nothing, cache) +end + +function LinearSolve.defaultalg( + A::Symmetric{<:Number, <:SparseMatrixCSC}, b, ::OperatorAssumptions{Bool}) + DefaultLinearSolver(DefaultAlgorithmChoice.CHOLMODFactorization) +end + +function LinearSolve.defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b, + assump::OperatorAssumptions{Bool}) where {Tv, Ti} + if assump.issq + DefaultLinearSolver(DefaultAlgorithmChoice.SparspakFactorization) + else + error("Generic number sparse factorization for non-square is not currently handled") + end +end + +function LinearSolve.defaultalg(A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, + assump::OperatorAssumptions{Bool}) where {Ti} + if assump.issq + if length(b) <= 10_000 && length(nonzeros(A)) / length(A) < 2e-4 + DefaultLinearSolver(DefaultAlgorithmChoice.KLUFactorization) + else + DefaultLinearSolver(DefaultAlgorithmChoice.UMFPACKFactorization) + end + else + DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) + end +end + +PrecompileTools.@compile_workload begin + A = sprand(4, 4, 0.3) + I + b = rand(4) + prob = LinearProblem(A, b) + sol = solve(prob, KLUFactorization()) + sol = solve(prob, UMFPACKFactorization()) +end + +end diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index 4e6c9e25d..985eb83a3 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -5,42 +5,40 @@ if isdefined(Base, :Experimental) && end import PrecompileTools - using ArrayInterface - using RecursiveFactorization - using Base: cache_dependencies, Bool - using LinearAlgebra - using SparseArrays - using SparseArrays: AbstractSparseMatrixCSC, 
nonzeros, rowvals, getcolptr - using LazyArrays: @~, BroadcastArray - using SciMLBase: AbstractLinearAlgorithm - using SciMLOperators - using SciMLOperators: AbstractSciMLOperator, IdentityOperator - using Setfield - using UnPack - using KLU - using Sparspak - using FastLapackInterface - using DocStringExtensions - using EnumX - using Markdown - using ChainRulesCore - import InteractiveUtils - - import StaticArraysCore: StaticArray, SVector, MVector, SMatrix, MMatrix - - using LinearAlgebra: BlasInt, LU - using LinearAlgebra.LAPACK: require_one_based_indexing, - chkfinite, chkstride1, - @blasfunc, chkargsok - - import GPUArraysCore - import Preferences - import ConcreteStructs: @concrete - - # wrap - import Krylov - using SciMLBase - import Preferences +using ArrayInterface +using RecursiveFactorization +using Base: cache_dependencies, Bool +using LinearAlgebra +using LazyArrays: @~, BroadcastArray +using SciMLBase: AbstractLinearAlgorithm +using SciMLOperators +using SciMLOperators: AbstractSciMLOperator, IdentityOperator +using Setfield +using UnPack +using KLU +using Sparspak +using FastLapackInterface +using DocStringExtensions +using EnumX +using Markdown +using ChainRulesCore +import InteractiveUtils + +import StaticArraysCore: StaticArray, SVector, MVector, SMatrix, MMatrix + +using LinearAlgebra: BlasInt, LU +using LinearAlgebra.LAPACK: require_one_based_indexing, + chkfinite, chkstride1, + @blasfunc, chkargsok + +import GPUArraysCore +import Preferences +import ConcreteStructs: @concrete + +# wrap +import Krylov +using SciMLBase +import Preferences const CRC = ChainRulesCore @@ -93,8 +91,6 @@ function _fast_sym_givens! 
end # Code -const INCLUDE_SPARSE = Preferences.@load_preference("include_sparse", Base.USE_GPL_LIBS) - EnumX.@enumx DefaultAlgorithmChoice begin LUFactorization QRFactorization @@ -170,10 +166,6 @@ end end end -@static if INCLUDE_SPARSE - include("factorization_sparse.jl") -end - # Solver Specific Traits ## Needs Square Matrix """ @@ -215,16 +207,6 @@ PrecompileTools.@compile_workload begin sol = solve(prob, KrylovJL_GMRES()) end -@static if INCLUDE_SPARSE - PrecompileTools.@compile_workload begin - A = sprand(4, 4, 0.3) + I - b = rand(4) - prob = LinearProblem(A, b) - sol = solve(prob, KLUFactorization()) - sol = solve(prob, UMFPACKFactorization()) - end -end - PrecompileTools.@compile_workload begin A = sprand(4, 4, 0.3) + I b = rand(4) diff --git a/src/default.jl b/src/default.jl index 4621edcff..eaa2b00e8 100644 --- a/src/default.jl +++ b/src/default.jl @@ -92,35 +92,6 @@ function defaultalg(A::Symmetric{<:Number, <:Array}, b, ::OperatorAssumptions{Bo DefaultLinearSolver(DefaultAlgorithmChoice.BunchKaufmanFactorization) end -function defaultalg( - A::Symmetric{<:Number, <:SparseMatrixCSC}, b, ::OperatorAssumptions{Bool}) - DefaultLinearSolver(DefaultAlgorithmChoice.CHOLMODFactorization) -end - -function defaultalg(A::AbstractSparseMatrixCSC{Tv, Ti}, b, - assump::OperatorAssumptions{Bool}) where {Tv, Ti} - if assump.issq - DefaultLinearSolver(DefaultAlgorithmChoice.SparspakFactorization) - else - error("Generic number sparse factorization for non-square is not currently handled") - end -end - -@static if INCLUDE_SPARSE - function defaultalg(A::AbstractSparseMatrixCSC{<:Union{Float64, ComplexF64}, Ti}, b, - assump::OperatorAssumptions{Bool}) where {Ti} - if assump.issq - if length(b) <= 10_000 && length(nonzeros(A)) / length(A) < 2e-4 - DefaultLinearSolver(DefaultAlgorithmChoice.KLUFactorization) - else - DefaultLinearSolver(DefaultAlgorithmChoice.UMFPACKFactorization) - end - else - DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) - end - end -end - 
function defaultalg(A::GPUArraysCore.AnyGPUArray, b, assump::OperatorAssumptions{Bool}) if assump.condition === OperatorCondition.IllConditioned || !assump.issq DefaultLinearSolver(DefaultAlgorithmChoice.QRFactorization) diff --git a/src/extension_algs.jl b/src/extension_algs.jl index 7534d2fa1..ae8011cd3 100644 --- a/src/extension_algs.jl +++ b/src/extension_algs.jl @@ -389,3 +389,98 @@ A wrapper over Apple's Metal GPU library. Direct calls to Metal in a way that pr to avoid allocations and automatically offloads to the GPU. """ struct MetalLUFactorization <: AbstractFactorization end + +""" +`UMFPACKFactorization(;reuse_symbolic=true, check_pattern=true)` + +A fast sparse multithreaded LU-factorization which specializes on sparsity +patterns with “more structure”. + +!!! note + + By default, the SparseArrays.jl are implemented for efficiency by caching the + symbolic factorization. I.e., if `set_A` is used, it is expected that the new + `A` has the same sparsity pattern as the previous `A`. If this algorithm is to + be used in a context where that assumption does not hold, set `reuse_symbolic=false`. + +!!! note + + Using this solver requires adding the package SparseArrays.jl, i.e. `using SparseArrays` +""" +Base.@kwdef struct UMFPACKFactorization <: AbstractSparseFactorization + reuse_symbolic::Bool = true + check_pattern::Bool = true # Check factorization re-use + + function UMFPACKFactorization() + ext = Base.get_extension(@__MODULE__, :LinearSolveSparseArraysExt) + if ext === nothing + error("UMFPACKFactorization requires that SparseArrays is loaded, i.e. `using SparseArrays`") + else + return new{}() + end + end +end + +""" +`KLUFactorization(;reuse_symbolic=true, check_pattern=true)` + +A fast sparse LU-factorization which specializes on sparsity patterns with “less structure”. + +!!! note + + By default, the SparseArrays.jl are implemented for efficiency by caching the + symbolic factorization. 
I.e., if `set_A` is used, it is expected that the new + `A` has the same sparsity pattern as the previous `A`. If this algorithm is to + be used in a context where that assumption does not hold, set `reuse_symbolic=false`. + +!!! note + + Using this solver requires adding the package SparseArrays.jl, i.e. `using SparseArrays` +""" +Base.@kwdef struct KLUFactorization <: AbstractSparseFactorization + reuse_symbolic::Bool = true + check_pattern::Bool = true + + function KLUFactorization() + ext = Base.get_extension(@__MODULE__, :LinearSolveKLUExt) + if ext === nothing + error("KLUFactorization requires that KLU is loaded, i.e. `using KLU`") + else + return new{}() + end + end +end + +## CHOLMODFactorization + +""" +`CHOLMODFactorization(; shift = 0.0, perm = nothing)` + +A wrapper of CHOLMOD's polyalgorithm, mixing Cholesky factorization and ldlt. +Tries cholesky for performance and retries ldlt if conditioning causes Cholesky +to fail. + +Only supports sparse matrices. + +## Keyword Arguments + + - shift: the shift argument in CHOLMOD. + - perm: the perm argument in CHOLMOD + +!!! note + + Using this solver requires adding the package SparseArrays.jl, i.e. `using SparseArrays` +""" +Base.@kwdef struct CHOLMODFactorization{T} <: AbstractSparseFactorization + shift::Float64 = 0.0 + perm::T = nothing + + function CHOLMODFactorization() + ext = Base.get_extension(@__MODULE__, :LinearSolveSparseArraysExt) + if ext === nothing + error("CHOLMODFactorization requires that SparseArrays is loaded, i.e. `using SparseArrays`") + else + return new{}() + end + end +end diff --git a/src/factorization.jl b/src/factorization.jl index 0d339f1c4..99b71b475 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -764,224 +764,8 @@ function init_cacheval(alg::GenericFactorization, do_factorization(alg, newA, b, u) end -# Ambiguity handling dispatch - ################################## Factorizations which require solve! 
overloads -""" -`UMFPACKFactorization(;reuse_symbolic=true, check_pattern=true)` - -A fast sparse multithreaded LU-factorization which specializes on sparsity -patterns with “more structure”. - -!!! note - - By default, the SparseArrays.jl are implemented for efficiency by caching the - symbolic factorization. I.e., if `set_A` is used, it is expected that the new - `A` has the same sparsity pattern as the previous `A`. If this algorithm is to - be used in a context where that assumption does not hold, set `reuse_symbolic=false`. -""" -Base.@kwdef struct UMFPACKFactorization <: AbstractSparseFactorization - reuse_symbolic::Bool = true - check_pattern::Bool = true # Check factorization re-use -end - -const PREALLOCATED_UMFPACK = SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(0, 0, [1], - Int[], Float64[])) - -function init_cacheval(alg::UMFPACKFactorization, - A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) - nothing -end - -function init_cacheval(alg::UMFPACKFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) - PREALLOCATED_UMFPACK -end - -function init_cacheval(alg::UMFPACKFactorization, A::AbstractSparseArray, b, u, Pl, Pr, - maxiters::Int, abstol, - reltol, - verbose::Bool, assumptions::OperatorAssumptions) - A = convert(AbstractMatrix, A) - zerobased = SparseArrays.getcolptr(A)[1] == 0 - return SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC(size(A)..., getcolptr(A), - rowvals(A), nonzeros(A))) -end - -function SciMLBase.solve!(cache::LinearCache, alg::UMFPACKFactorization; kwargs...) 
- A = cache.A - A = convert(AbstractMatrix, A) - if cache.isfresh - cacheval = @get_cacheval(cache, :UMFPACKFactorization) - if alg.reuse_symbolic - # Caches the symbolic factorization: https://github.com/JuliaLang/julia/pull/33738 - if alg.check_pattern && pattern_changed(cacheval, A) - fact = lu( - SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A)), - check = false) - else - fact = lu!(cacheval, - SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A)), check = false) - end - else - fact = lu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), nonzeros(A)), - check = false) - end - cache.cacheval = fact - cache.isfresh = false - end - - F = @get_cacheval(cache, :UMFPACKFactorization) - if F.status == SparseArrays.UMFPACK.UMFPACK_OK - y = ldiv!(cache.u, F, cache.b) - SciMLBase.build_linear_solution(alg, y, nothing, cache) - else - SciMLBase.build_linear_solution( - alg, cache.u, nothing, cache; retcode = ReturnCode.Infeasible) - end -end - -""" -`KLUFactorization(;reuse_symbolic=true, check_pattern=true)` - -A fast sparse LU-factorization which specializes on sparsity patterns with “less structure”. - -!!! note - - By default, the SparseArrays.jl are implemented for efficiency by caching the - symbolic factorization. I.e., if `set_A` is used, it is expected that the new - `A` has the same sparsity pattern as the previous `A`. If this algorithm is to - be used in a context where that assumption does not hold, set `reuse_symbolic=false`. 
-""" -Base.@kwdef struct KLUFactorization <: AbstractSparseFactorization - reuse_symbolic::Bool = true - check_pattern::Bool = true -end - -const PREALLOCATED_KLU = KLU.KLUFactorization(SparseMatrixCSC(0, 0, [1], Int[], - Float64[])) - -function init_cacheval(alg::KLUFactorization, - A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) - nothing -end - -function init_cacheval(alg::KLUFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, - Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) - PREALLOCATED_KLU -end - -function init_cacheval(alg::KLUFactorization, A::AbstractSparseArray, b, u, Pl, Pr, - maxiters::Int, abstol, - reltol, - verbose::Bool, assumptions::OperatorAssumptions) - A = convert(AbstractMatrix, A) - return KLU.KLUFactorization(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A))) -end - -# TODO: guard this against errors -function SciMLBase.solve!(cache::LinearCache, alg::KLUFactorization; kwargs...) 
- A = cache.A - A = convert(AbstractMatrix, A) - if cache.isfresh - cacheval = @get_cacheval(cache, :KLUFactorization) - if alg.reuse_symbolic - if alg.check_pattern && pattern_changed(cacheval, A) - fact = KLU.klu( - SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A)), - check = false) - else - fact = KLU.klu!(cacheval, nonzeros(A), check = false) - end - else - # New fact each time since the sparsity pattern can change - # and thus it needs to reallocate - fact = KLU.klu(SparseMatrixCSC(size(A)..., getcolptr(A), rowvals(A), - nonzeros(A))) - end - cache.cacheval = fact - cache.isfresh = false - end - F = @get_cacheval(cache, :KLUFactorization) - if F.common.status == KLU.KLU_OK - y = ldiv!(cache.u, F, cache.b) - SciMLBase.build_linear_solution(alg, y, nothing, cache) - else - SciMLBase.build_linear_solution( - alg, cache.u, nothing, cache; retcode = ReturnCode.Infeasible) - end -end - -## CHOLMODFactorization - -""" -`CHOLMODFactorization(; shift = 0.0, perm = nothing)` - -A wrapper of CHOLMOD's polyalgorithm, mixing Cholesky factorization and ldlt. -Tries cholesky for performance and retries ldlt if conditioning causes Cholesky -to fail. - -Only supports sparse matrices. - -## Keyword Arguments - - - shift: the shift argument in CHOLMOD. 
- - perm: the perm argument in CHOLMOD -""" -Base.@kwdef struct CHOLMODFactorization{T} <: AbstractSparseFactorization - shift::Float64 = 0.0 - perm::T = nothing -end - -const PREALLOCATED_CHOLMOD = cholesky(SparseMatrixCSC(0, 0, [1], Int[], Float64[])) - -function init_cacheval(alg::CHOLMODFactorization, - A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) - nothing -end - -function init_cacheval(alg::CHOLMODFactorization, - A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, - Pl, Pr, - maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: - Union{Float32, Float64}} - PREALLOCATED_CHOLMOD -end - -function SciMLBase.solve!(cache::LinearCache, alg::CHOLMODFactorization; kwargs...) - A = cache.A - A = convert(AbstractMatrix, A) - - if cache.isfresh - cacheval = @get_cacheval(cache, :CHOLMODFactorization) - fact = cholesky(A; check = false) - if !LinearAlgebra.issuccess(fact) - ldlt!(fact, A; check = false) - end - cache.cacheval = fact - cache.isfresh = false - end - - cache.u .= @get_cacheval(cache, :CHOLMODFactorization) \ cache.b - SciMLBase.build_linear_solution(alg, cache.u, nothing, cache) -end - ## RFLUFactorization """ diff --git a/src/factorization_sparse.jl b/src/factorization_sparse.jl deleted file mode 100644 index 94b83eec5..000000000 --- a/src/factorization_sparse.jl +++ /dev/null @@ -1,35 +0,0 @@ -# Specialize QR for the non-square case -# Missing ldiv! 
definitions: https://github.com/JuliaSparse/SparseArrays.jl/issues/242 -function _ldiv!(x::Vector, - A::Union{SparseArrays.QR, LinearAlgebra.QRCompactWY, - SparseArrays.SPQR.QRSparse, - SparseArrays.CHOLMOD.Factor}, b::Vector) - x .= A \ b -end - -function _ldiv!(x::AbstractVector, - A::Union{SparseArrays.QR, LinearAlgebra.QRCompactWY, - SparseArrays.SPQR.QRSparse, - SparseArrays.CHOLMOD.Factor}, b::AbstractVector) - x .= A \ b -end - -# Ambiguity removal -function _ldiv!(::SVector, - A::Union{SparseArrays.CHOLMOD.Factor, LinearAlgebra.QR, - LinearAlgebra.QRCompactWY, SparseArrays.SPQR.QRSparse}, - b::AbstractVector) - (A \ b) -end -function _ldiv!(::SVector, - A::Union{SparseArrays.CHOLMOD.Factor, LinearAlgebra.QR, - LinearAlgebra.QRCompactWY, SparseArrays.SPQR.QRSparse}, - b::SVector) - (A \ b) -end - -function pattern_changed(fact, A::SparseArrays.SparseMatrixCSC) - !(SparseArrays.decrement(SparseArrays.getcolptr(A)) == - fact.colptr && SparseArrays.decrement(SparseArrays.getrowval(A)) == - fact.rowval) -end