Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,12 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"

[weakdeps]
QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19"

[extensions]
CoolPDLPQuadraticModelsExt = "QuadraticModels"

[compat]
Adapt = "4.4.0"
Atomix = "1.1.2"
Expand All @@ -32,6 +38,7 @@ KernelAbstractions = "0.9.38"
LinearAlgebra = "1"
MathOptInterface = "1.49.0"
Printf = "1"
ProgressMeter = "1.11.0"
QPSReader = "0.2.1"
QuadraticModels = "0.9"
Random = "1"
Expand Down
21 changes: 21 additions & 0 deletions ext/CoolPDLPQuadraticModelsExt.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
module CoolPDLPQuadraticModelsExt

import QuadraticModels: QuadraticModel
import CoolPDLP

"""
    CoolPDLP.MILP(qm::QuadraticModel; ignore_islp = false, kwargs...)

Convert a `QuadraticModel` into a `CoolPDLP.MILP`, forwarding the objective
vector `c`, the variable bounds, the constraint matrix `A`, the constraint
bounds, and the model name. Any additional `kwargs` are passed through to the
`CoolPDLP.MILP` keyword constructor.

Only linear programs are supported; throws `ArgumentError` when
`qm.meta.islp` is `false`. Pass `ignore_islp = true` to skip that check.
"""
function CoolPDLP.MILP(qm::QuadraticModel; ignore_islp = false, kwargs...)
    # Validate with a real exception rather than `@assert`: assertions are for
    # internal invariants and may be disabled at higher optimization levels.
    ignore_islp || qm.meta.islp ||
        throw(ArgumentError("`qm` is not a linear program; pass `ignore_islp = true` to convert anyway"))

    return CoolPDLP.MILP(;
        c = qm.data.c,
        lv = qm.meta.lvar,
        uv = qm.meta.uvar,
        A = qm.data.A,
        lc = qm.meta.lcon,
        uc = qm.meta.ucon,
        name = qm.meta.name,
        kwargs...
    )
end

end # module
8 changes: 7 additions & 1 deletion src/problems/milp.jl
Original file line number Diff line number Diff line change
Expand Up @@ -52,12 +52,18 @@ struct MILP{
"file path the MILP was read from"
path::String

# Materialize `transpose(A)` as a concrete matrix, preferably of the same type
# as `A`, so the pre-computed transpose `At` can be stored alongside `A`.
# NOTE(review): the try/catch may be type-unstable and `typeof(A).name.wrapper`
# reads a private field; handling this via dispatch (or allowing `At` to have a
# different type than `A`) would be preferable — see PR discussion.
function _convert_or_construct(A)
    lazy_transpose = transpose(A)
    try
        # Preferred path: an exact conversion from the lazy transpose exists.
        return convert(typeof(A), lazy_transpose)
    catch
        # Fallback: rebuild through the unparameterized wrapper constructor,
        # e.g. `SparseMatrixCSC(transpose(A))`.
        return typeof(A).name.wrapper(lazy_transpose)
    end
end

function MILP(;
c,
lv,
uv,
A,
At = convert(typeof(A), transpose(A)),
At = _convert_or_construct(A),
lc,
uc,
D1 = Diagonal(one!(similar(lc))),
Expand Down
5 changes: 5 additions & 0 deletions src/utils/mat_coo.jl
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,11 @@ function Adapt.adapt_structure(to, A::GPUSparseMatrixCOO)
)
end

# Materialize a lazy transpose of a COO matrix into a plain COO matrix by
# swapping the dimensions and the row/column coordinate vectors. The value and
# index buffers of the parent are reused (no copy is made).
function Base.convert(::Type{GPUSparseMatrixCOO{T, Ti, V, Vi}}, At::LinearAlgebra.Transpose{<:Any, <:GPUSparseMatrixCOO{T, Ti, V, Vi}}) where {T, Ti, V, Vi}
    M = parent(At)
    return GPUSparseMatrixCOO(M.n, M.m, M.colval, M.rowval, M.nzval)
end

function GPUSparseMatrixCOO(A::SparseMatrixCSC{T, Ti}) where {T, Ti}
rowval, colval, nzval = findnz(A)
return GPUSparseMatrixCOO(A.m, A.n, rowval, colval, nzval)
Expand Down
1 change: 1 addition & 0 deletions test/Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MathOptBenchmarkInstances = "f7f8d0a1-fd34-491e-a7ac-a4cf52f91fe5"
MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
QuadraticModels = "f468eda6-eac5-11e8-05a5-ff9e497bcd19"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
Expand Down
94 changes: 94 additions & 0 deletions test/QuadraticModels.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
using Test, SparseArrays, Adapt
using CoolPDLP, QuadraticModels
using CUDA, CUDA.CUSPARSE
using JLArrays
using KernelAbstractions
using GPUArraysCore: @allowscalar

# Tiny LP fixture shared by every testset below:
#   min 2x₁ + 3x₂   s.t.   1 ≤ x₁ + 2x₂ ≤ 4,   0 ≤ x ≤ 5
c = [2.0, 3.0]
H = spzeros(2, 2)                             # no quadratic term → pure LP
A = sparse([1, 1], [1, 2], [1.0, 2.0], 1, 2)  # single constraint row [1.0 2.0]
lvar = zeros(2)
uvar = fill(5.0, 2)
lcon = [1.0]
ucon = [4.0]

@testset "QuadraticModel → CPU MILP" begin
    # Wrap the fixture in a QuadraticModel and convert via the extension.
    model = QuadraticModel(c, H; A, lvar, uvar, lcon, ucon, name = "tiny_lp")
    prob = CoolPDLP.MILP(model)

    # All problem data must round-trip unchanged.
    @test prob.name == "tiny_lp"
    @test prob.c ≈ c
    @test prob.lv ≈ lvar
    @test prob.uv ≈ uvar
    @test prob.lc ≈ lcon
    @test prob.uc ≈ ucon
    @test Matrix(prob.A) ≈ Matrix(A)
end

@testset "QuadraticModel → device MILP" begin
    # Move the fixture onto the JLArrays mock-GPU backend.
    A_d = adapt(JLBackend(), GPUSparseMatrixCOO(A))
    H_d = adapt(JLBackend(), GPUSparseMatrixCOO(H))
    c_d, lv_d, uv_d = jl(c), jl(lvar), jl(uvar)
    lc_d, uc_d = jl(lcon), jl(ucon)

    # we need @allowscalar since initializing NLPModelMeta uses findall
    qm = @allowscalar QuadraticModel(
        c_d, H_d;
        A = A_d, lvar = lv_d, uvar = uv_d, lcon = lc_d, ucon = uc_d,
        name = "tiny_lp",
    )
    milp = CoolPDLP.MILP(qm)

    # The conversion must keep all data on the device backend.
    @test milp.c isa JLVector{Float64}
    @test milp.lv isa JLVector{Float64}
    @test milp.lc isa JLVector{Float64}
    @test milp.A isa GPUSparseMatrixCOO{Float64, Int, JLVector{Float64}, JLVector{Int}}
    @test milp.At isa GPUSparseMatrixCOO{Float64, Int, JLVector{Float64}, JLVector{Int}}
    @test get_backend(milp.A) isa JLBackend

    # Values transferred back to the host must match the fixture.
    @test Array(milp.c) ≈ c
    @test Array(milp.lv) ≈ lvar
    @test Array(milp.uv) ≈ uvar
    @test Array(milp.lc) ≈ lcon
    @test Array(milp.uc) ≈ ucon
    @test milp.name == "tiny_lp"
end

if CUDA.functional()
    @testset "QuadraticModel → CUDA MILP" begin
        # Same fixture, now on an actual CUDA device (CSR storage).
        A_gpu = CuSparseMatrixCSR(A)
        H_gpu = CuSparseMatrixCSR(H)
        c_gpu = CuVector(c)
        lv_gpu, uv_gpu = CuVector(lvar), CuVector(uvar)
        lc_gpu, uc_gpu = CuVector(lcon), CuVector(ucon)

        # we need @allowscalar since initializing NLPModelMeta uses findall
        qm = @allowscalar QuadraticModel(
            c_gpu, H_gpu;
            A = A_gpu, lvar = lv_gpu, uvar = uv_gpu, lcon = lc_gpu, ucon = uc_gpu,
            name = "tiny_lp",
        )
        milp = CoolPDLP.MILP(qm)

        # Conversion must keep vectors and matrices on the CUDA device.
        @test milp.c isa CuVector{Float64}
        @test milp.lv isa CuVector{Float64}
        @test milp.lc isa CuVector{Float64}
        @test milp.A isa CuSparseMatrixCSR
        @test milp.At isa CuSparseMatrixCSR

        # Values copied back to the host must match the fixture.
        @test Array(milp.c) ≈ c
        @test Array(milp.lv) ≈ lvar
        @test Array(milp.uv) ≈ uvar
        @test Array(milp.lc) ≈ lcon
        @test Array(milp.uc) ≈ ucon
        @test milp.name == "tiny_lp"
    end
else
    @info "Skipping CUDA QuadraticModels test" CUDA.functional()
end
3 changes: 3 additions & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -25,4 +25,7 @@ using Test
include("moi.jl")
end
end
@testset "QuadraticModels Wrapper" begin
    # Exercises the CoolPDLPQuadraticModelsExt package extension
    # (QuadraticModel → MILP conversion on CPU, JLArrays, and CUDA).
    include("QuadraticModels.jl")
end
end