diff --git a/Project.toml b/Project.toml
index b828d9b..29820fe 100644
--- a/Project.toml
+++ b/Project.toml
@@ -22,6 +22,12 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
 
+[weakdeps]
+QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19"
+
+[extensions]
+CoolPDLPQuadraticModelsExt = "QuadraticModels"
+
 [compat]
 Adapt = "4.4.0"
 Atomix = "1.1.2"
@@ -32,6 +38,7 @@ KernelAbstractions = "0.9.38"
 LinearAlgebra = "1"
 MathOptInterface = "1.49.0"
 Printf = "1"
 ProgressMeter = "1.11.0"
 QPSReader = "0.2.1"
+QuadraticModels = "0.9"
 Random = "1"
diff --git a/ext/CoolPDLPQuadraticModelsExt.jl b/ext/CoolPDLPQuadraticModelsExt.jl
new file mode 100644
index 0000000..c4b4a77
--- /dev/null
+++ b/ext/CoolPDLPQuadraticModelsExt.jl
@@ -0,0 +1,21 @@
+module CoolPDLPQuadraticModelsExt
+
+import QuadraticModels: QuadraticModel
+import CoolPDLP
+
+function CoolPDLP.MILP(qm::QuadraticModel; ignore_islp = false, kwargs...)
+    ignore_islp || @assert qm.meta.islp
+
+    return CoolPDLP.MILP(;
+        c = qm.data.c,
+        lv = qm.meta.lvar,
+        uv = qm.meta.uvar,
+        A = qm.data.A,
+        lc = qm.meta.lcon,
+        uc = qm.meta.ucon,
+        name = qm.meta.name,
+        kwargs...
+    )
+end
+
+end # module
diff --git a/src/problems/milp.jl b/src/problems/milp.jl
index 657a226..607c43a 100644
--- a/src/problems/milp.jl
+++ b/src/problems/milp.jl
@@ -52,12 +52,18 @@ struct MILP{
     "file path the MILP was read from"
     path::String
 
+    _convert_or_construct(A) = try
+        convert(typeof(A), transpose(A))
+    catch
+        typeof(A).name.wrapper(transpose(A))
+    end
+
     function MILP(;
         c,
         lv,
         uv,
         A,
-        At = convert(typeof(A), transpose(A)),
+        At = _convert_or_construct(A),
         lc,
         uc,
         D1 = Diagonal(one!(similar(lc))),
diff --git a/src/utils/mat_coo.jl b/src/utils/mat_coo.jl
index fa817cd..2fc1b8f 100644
--- a/src/utils/mat_coo.jl
+++ b/src/utils/mat_coo.jl
@@ -47,6 +47,11 @@ function Adapt.adapt_structure(to, A::GPUSparseMatrixCOO)
     )
 end
+function Base.convert(::Type{GPUSparseMatrixCOO{T, Ti, V, Vi}}, At::LinearAlgebra.Transpose{<:Any, <:GPUSparseMatrixCOO{T, Ti, V, Vi}}) where {T, Ti, V, Vi}
+    A = parent(At)
+    return GPUSparseMatrixCOO(A.n, A.m, A.colval, A.rowval, A.nzval)
+end
+
 function GPUSparseMatrixCOO(A::SparseMatrixCSC{T, Ti}) where {T, Ti}
     rowval, colval, nzval = findnz(A)
     return GPUSparseMatrixCOO(A.m, A.n, rowval, colval, nzval)
 end
diff --git a/test/Project.toml b/test/Project.toml
index e23dc2b..4fc423d 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -13,6 +13,7 @@ KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 MathOptBenchmarkInstances = "f7f8d0a1-fd34-491e-a7ac-a4cf52f91fe5"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
+QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
diff --git a/test/QuadraticModels.jl b/test/QuadraticModels.jl
new file mode 100644
index 0000000..90445aa
--- /dev/null
+++ b/test/QuadraticModels.jl
@@ -0,0 +1,94 @@
+using Test, SparseArrays, Adapt
+using CoolPDLP, QuadraticModels
+using CUDA, CUDA.CUSPARSE
+using JLArrays
+using KernelAbstractions
+using GPUArraysCore: @allowscalar
+
+c = [2.0, 3.0]
+H = spzeros(2, 2)
+A = sparse([1, 1], [1, 2], [1.0, 2.0], 1, 2)
+lvar = [0.0, 0.0]
+uvar = [5.0, 5.0]
+lcon = [1.0]
+ucon = [4.0]
+
+@testset "QuadraticModel → CPU MILP" begin
+    qm = QuadraticModel(c, H; A, lvar, uvar, lcon, ucon, name = "tiny_lp")
+    milp = CoolPDLP.MILP(qm)
+
+    @test milp.c ≈ c
+    @test milp.lv ≈ lvar
+    @test milp.uv ≈ uvar
+    @test milp.lc ≈ lcon
+    @test milp.uc ≈ ucon
+    @test Matrix(milp.A) ≈ Matrix(A)
+    @test milp.name == "tiny_lp"
+end
+
+@testset "QuadraticModel → device MILP" begin
+    A_dev = adapt(JLBackend(), GPUSparseMatrixCOO(A))
+    H_dev = adapt(JLBackend(), GPUSparseMatrixCOO(H))
+    c_dev = jl(c)
+    lv_dev = jl(lvar)
+    uv_dev = jl(uvar)
+    lc_dev = jl(lcon)
+    uc_dev = jl(ucon)
+
+    # we need @allowscalar since initializing NLPModelMeta uses findall
+    qm = @allowscalar QuadraticModel(
+        c_dev, H_dev;
+        A = A_dev, lvar = lv_dev, uvar = uv_dev, lcon = lc_dev, ucon = uc_dev,
+        name = "tiny_lp",
+    )
+    milp = CoolPDLP.MILP(qm)
+
+    @test milp.c isa JLVector{Float64}
+    @test milp.lv isa JLVector{Float64}
+    @test milp.lc isa JLVector{Float64}
+    @test milp.A isa GPUSparseMatrixCOO{Float64, Int, JLVector{Float64}, JLVector{Int}}
+    @test milp.At isa GPUSparseMatrixCOO{Float64, Int, JLVector{Float64}, JLVector{Int}}
+    @test get_backend(milp.A) isa JLBackend
+
+    @test Array(milp.c) ≈ c
+    @test Array(milp.lv) ≈ lvar
+    @test Array(milp.uv) ≈ uvar
+    @test Array(milp.lc) ≈ lcon
+    @test Array(milp.uc) ≈ ucon
+    @test milp.name == "tiny_lp"
+end
+
+if CUDA.functional()
+    @testset "QuadraticModel → CUDA MILP" begin
+        A_cu = CuSparseMatrixCSR(A)
+        H_cu = CuSparseMatrixCSR(H)
+        c_cu = CuVector(c)
+        lv_cu = CuVector(lvar)
+        uv_cu = CuVector(uvar)
+        lc_cu = CuVector(lcon)
+        uc_cu = CuVector(ucon)
+
+        # we need @allowscalar since initializing NLPModelMeta uses findall
+        qm = @allowscalar QuadraticModel(
+            c_cu, H_cu;
+            A = A_cu, lvar = lv_cu, uvar = uv_cu, lcon = lc_cu, ucon = uc_cu,
+            name = "tiny_lp",
+        )
+        milp = CoolPDLP.MILP(qm)
+
+        @test milp.c isa CuVector{Float64}
+        @test milp.lv isa CuVector{Float64}
+        @test milp.lc isa CuVector{Float64}
+        @test milp.A isa CuSparseMatrixCSR
+        @test milp.At isa CuSparseMatrixCSR
+
+        @test Array(milp.c) ≈ c
+        @test Array(milp.lv) ≈ lvar
+        @test Array(milp.uv) ≈ uvar
+        @test Array(milp.lc) ≈ lcon
+        @test Array(milp.uc) ≈ ucon
+        @test milp.name == "tiny_lp"
+    end
+else
+    @info "Skipping CUDA QuadraticModels test" CUDA.functional()
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index 7d199a8..d6346bb 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -25,4 +25,7 @@ using Test
             include("moi.jl")
         end
     end
+    @testset "QuadraticModels Wrapper" begin
+        include("QuadraticModels.jl")
+    end
 end