diff --git a/src/HWconstrained.jl b/src/HWconstrained.jl
index 3b6faa4..a1b5013 100644
--- a/src/HWconstrained.jl
+++ b/src/HWconstrained.jl
@@ -5,24 +5,51 @@ greet() = print("Hello World!")
 
     using JuMP, NLopt, DataFrames, Ipopt
     using LinearAlgebra
 
-    export data, table_NLopt, table_JuMP
+    export data, table_NLopt, table_JuMP, max_JuMP, max_NLopt
 
     function data(a=0.5)
-
-
-
-
-
-
-
-
-
+        na = 3
+        p = ones(3)
+        e = [2.0, 0.0, 0.0]
+        z = zeros(16,3)
+        z_2 = [0.72, 0.92, 1.12, 1.32]
+        z_3 = [0.86, 0.96, 1.06, 1.16]
+        z[:,1] .= 1.0
+        for i=1:4
+            z[1+(4*(i-1)):4+(4*(i-1)), 2] .= z_2[i]
+        end
+        for j = 1:4
+            z[j,3] = z_3[j]
+            z[j+4,3] = z_3[j]
+            z[j+4*2,3] = z_3[j]
+            z[j+4*3,3] = z_3[j]
+        end
+        ns = length(z_2)
+        nss = length(z_2) * length(z_3)
+        pi = 1/nss
+        nc = 1
+
+        # na  number of assets
+        # ns  number of states per risky asset
+        # nss total number of states (combinations of the two risky payoffs)
+        # nc  number of constraints
+
+        return Dict("a"=>a,"na"=>na,"nc"=>nc,"ns"=>ns,"nss"=>nss,"e"=>e,"p"=>p,"z"=>z,"pi"=>pi)
     end
 
+dat = data()
 
     function max_JuMP(a=0.5)
+        m = Model(with_optimizer(Ipopt.Optimizer))
+        @variable(m, omega[1:3])
+        @variable(m, c >= 0.0)
+        @NLconstraint(m, c + sum(data()["p"][i] * (omega[i] - data()["e"][i]) for i in 1:3) == 0.0)
+        @NLobjective(m, Max,
+            -exp(-a*c) + data()["pi"] * sum(-exp(-a *
+            sum(data()["z"][j,i] * omega[i] for i in 1:3)) for j in 1:data()["nss"]))
+        JuMP.optimize!(m)
         return Dict("obj"=>objective_value(m),"c"=>value(c),"omegas"=>[value(omega[i]) for i in 1:length(omega)])
     end
 
@@ -39,76 +66,75 @@ greet() = print("Hello World!")
         return d
     end
 
-
-
-
-
-
-
+    ## Does not work
     function obj(x::Vector,grad::Vector,data::Dict)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        a = data["a"]
+
+        if length(grad) > 0
+            # grad wrt c
+            grad[4] = (-a) * exp(-a * x[4])
+
+            # grad with respect to omega 1
+            grad[1] = (-a) * data["pi"] * sum(data["z"][i,1] * exp(-a *
+                (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"])
+
+            # grad wrt omega 2
+            grad[2] = (-a) * data["pi"] * sum(data["z"][i,2] * exp(-a *
+                (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"])
+
+            # grad wrt omega 3
+            grad[3] = (-a) * data["pi"] * sum(data["z"][i,3] * exp(-a *
+                (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"])
+
+        end
+        # test = (-1) * (-exp(-a*x[4]) + data["pi"] * sum(-exp(-a *
+        #     (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"]))
+        # println(grad)
+
+
+        return (-1) * (-exp(-a*x[4]) + data["pi"] * sum(-exp(-a *
+            (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"]))
     end
 
+
+
     function constr(x::Vector,grad::Vector,data::Dict)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if length(grad) > 0
+            grad[4] = 1
+            grad[1] = data["p"][1]
+            grad[2] = data["p"][2]
+            grad[3] = data["p"][3]
+
+        end
+        return x[4] + sum(data["p"][i] * (x[i] - data["e"][i]) for i in 1:3)
+
     end
 
+
     function max_NLopt(a=0.5)
-
-
-
-
-
-
-
-
+        count = 0 # keep track of # function evaluations
+        opt = Opt(:LD_MMA, 4)
+        lower_bounds!(opt, [-Inf, -Inf, -Inf, 0.])
+        xtol_rel!(opt, 1e-7)
+        grad = zeros(4)
+        d = data(a)
+        min_objective!(opt,(x,grad) -> obj(x,grad,d))
+        inequality_constraint!(opt, (x,grad) -> constr(x,grad,d))
+        ftol_rel!(opt,1e-9)
+        vector_init = vcat(d["e"], 0)
+        NLopt.optimize(opt, vector_init)
     end
 
     function table_NLopt()
         d = DataFrame(a=[0.5;1.0;5.0],c = zeros(3),omega1=zeros(3),omega2=zeros(3),omega3=zeros(3),fval=zeros(3))
         for i in 1:nrow(d)
             xx = max_NLopt(d[i,:a])
-            for j in 2:ncol(d)-1
-                d[i,j] = xx[2][j-1]
+            for j in 1:ncol(d)-3
+                d[i,j+2] = xx[2][j]
             end
-            d[i,end] = xx[1]
+            d[i,2] = xx[2][end]
+            d[i,end] = -xx[1]
         end
         return d
     end
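
Note on the "## Does not work" marker above obj: before debugging the NLopt call, the hand-coded gradient can be checked against central finite differences. This is a minimal sketch, not part of the diff; the evaluation point x0 and step h are arbitrary choices for illustration.

    using HWconstrained
    d  = HWconstrained.data()
    x0 = [0.5, 0.5, 0.5, 1.0]            # [omega1, omega2, omega3, c], arbitrary point
    g  = zeros(4)
    HWconstrained.obj(x0, g, d)          # writes the analytic gradient into g
    h  = 1e-6
    fd = zeros(4)
    for k in 1:4
        step = zeros(4); step[k] = h
        fd[k] = (HWconstrained.obj(x0 .+ step, Float64[], d) -
                 HWconstrained.obj(x0 .- step, Float64[], d)) / (2h)
    end
    maximum(abs.(g .- fd))               # should be close to zero if the gradient is correct
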
diff --git a/src/HWconstrained_working.jl b/src/HWconstrained_working.jl
new file mode 100644
index 0000000..0319fd0
--- /dev/null
+++ b/src/HWconstrained_working.jl
@@ -0,0 +1,145 @@
+module HWconstrained
+
+greet() = print("Hello World!")
+
+    using JuMP, NLopt, DataFrames, Ipopt
+    using LinearAlgebra
+
+    export data, table_NLopt, table_JuMP, max_JuMP, max_NLopt
+
+    function data(a=0.5)
+        na = 3
+        p = ones(3)
+        e = [2.0, 0.0, 0.0]
+        z = zeros(16,3)
+        z_2 = [0.72, 0.92, 1.12, 1.32]
+        z_3 = [0.86, 0.96, 1.06, 1.16]
+        z[:,1] .= 1.0
+        for i=1:4
+            z[1+(4*(i-1)):4+(4*(i-1)), 2] .= z_2[i]
+        end
+        for j = 1:4
+            z[j,3] = z_3[j]
+            z[j+4,3] = z_3[j]
+            z[j+4*2,3] = z_3[j]
+            z[j+4*3,3] = z_3[j]
+        end
+        ns = length(z_2)
+        nss = length(z_2) * length(z_3)
+        pi = 1/nss
+        nc = 1
+
+        # na  number of assets
+        # ns  number of states per risky asset
+        # nss total number of states (combinations of the two risky payoffs)
+        # nc  number of constraints
+
+
+        return Dict("a"=>a,"na"=>na,"nc"=>nc,"ns"=>ns,"nss"=>nss,"e"=>e,"p"=>p,"z"=>z,"pi"=>pi)
+    end
+
+dat = data()
+
+    function max_JuMP(a=0.5)
+
+        m = Model(with_optimizer(Ipopt.Optimizer))
+        @variable(m, omega[1:3])
+        @variable(m, c >= 0.0)
+        @NLconstraint(m, c + sum(data()["p"][i] * (omega[i] - data()["e"][i]) for i in 1:3) == 0.0)
+        @NLobjective(m, Max,
+            -exp(-a*c) + data()["pi"] * sum(-exp(-a *
+            sum(data()["z"][j,i] * omega[i] for i in 1:3)) for j in 1:data()["nss"]))
+        JuMP.optimize!(m)
+        return Dict("obj"=>objective_value(m),"c"=>value(c),"omegas"=>[value(omega[i]) for i in 1:length(omega)])
+    end
+
+    function table_JuMP()
+        d = DataFrame(a=[0.5;1.0;5.0],c = zeros(3),omega1=zeros(3),omega2=zeros(3),omega3=zeros(3),fval=zeros(3))
+        for i in 1:nrow(d)
+            xx = max_JuMP(d[i,:a])
+            d[i,:c] = xx["c"]
+            d[i,:omega1] = xx["omegas"][1]
+            d[i,:omega2] = xx["omegas"][2]
+            d[i,:omega3] = xx["omegas"][3]
+            d[i,:fval] = xx["obj"]
+        end
+        return d
+    end
+
+
+    ## Does not work
+    function obj(x::Vector,grad::Vector,data::Dict)
+        a = data["a"]
+
+        if length(grad) > 0
+            # grad wrt c
+            grad[4] = (-a) * exp(-a * x[4])
+
+            # grad with respect to omega 1
+            grad[1] = (-a) * data["pi"] * sum(data["z"][i,1] * exp(-a *
+                (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"])
+
+            # grad wrt omega 2
+            grad[2] = (-a) * data["pi"] * sum(data["z"][i,2] * exp(-a *
+                (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"])
+
+            # grad wrt omega 3
+            grad[3] = (-a) * data["pi"] * sum(data["z"][i,3] * exp(-a *
+                (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"])
+
+        end
+        # test = (-1) * (-exp(-a*x[4]) + data["pi"] * sum(-exp(-a *
+        #     (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"]))
+        # println(grad)
+
+
+        return (-1) * (-exp(-a*x[4]) + data["pi"] * sum(-exp(-a *
+            (data["z"][i,1] * x[1] + data["z"][i,2] * x[2] + data["z"][i,3] * x[3])) for i in 1:data["nss"]))
+    end
+
+
+
+    function constr(x::Vector,grad::Vector,data::Dict)
+        if length(grad) > 0
+            grad[4] = 1
+            grad[1] = data["p"][1]
+            grad[2] = data["p"][2]
+            grad[3] = data["p"][3]
+
+        end
+        return x[4] + sum(data["p"][i] * (x[i] - data["e"][i]) for i in 1:3)
+
+    end
+
+
+    function max_NLopt(a=0.5)
+        count = 0 # keep track of # function evaluations
+        opt = Opt(:LD_MMA, 4)
+        lower_bounds!(opt, [-Inf, -Inf, -Inf, 0.])
+        xtol_rel!(opt, 1e-7)
+        grad = zeros(4)
+        println(length(grad))
+        d = data(a)
+        min_objective!(opt,(x,grad) -> obj(x,grad,d))
+        inequality_constraint!(opt, (x,grad) -> constr(x,grad,d))
+        ftol_rel!(opt,1e-9)
+        vector_init = vcat(d["e"], 0)
+        NLopt.optimize(opt, vector_init)
+    end
+
+    function table_NLopt()
+        d = DataFrame(a=[0.5;1.0;5.0],c = zeros(3),omega1=zeros(3),omega2=zeros(3),omega3=zeros(3),fval=zeros(3))
+        for i in 1:nrow(d)
+            xx = max_NLopt(d[i,:a])
+            for j in 1:ncol(d)-3
+                d[i,j+2] = xx[2][j]
+            end
+            d[i,2] = xx[2][end]
+            d[i,end] = -xx[1]
+        end
+        return d
+    end
+
+
+
+end # module
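
A quick sanity check on max_NLopt (illustration only, not part of the test suite): NLopt.optimize returns a (minf, minx, return_code) tuple, and the budget constraint can be evaluated at the candidate point with constr. The value a = 0.5 below is an arbitrary choice.

    using HWconstrained
    d = HWconstrained.data(0.5)
    fval, xopt, ret = HWconstrained.max_NLopt(0.5)
    println("return code: ", ret)
    println("maximized utility: ", -fval)    # obj returns the negated utility
    println("budget slack (should be <= 0): ", HWconstrained.constr(xopt, Float64[], d))
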
diff --git a/test/runtests.jl b/test/runtests.jl
index 1793ffc..6c1e0bd 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -5,22 +5,57 @@ using Test
 
 @testset "testing components" begin
 
     @testset "tests gradient of objective function" begin
-
-
-
-
-
-
+
+        truth = HWconstrained.DataFrame(a=[0.5;1.0;5.0],
+            c = [1.008;1.004;1.0008],
+            omega1=[-1.41237;-0.20618;0.758763],
+            omega2=[0.801455;0.400728;0.0801455],
+            omega3=[1.60291;0.801455;0.160291],
+            fval=[-1.20821;-0.732819;-0.013422])
+        x = zeros(3,4)
+        x[:,1] = truth[:omega1]
+        x[:,2] = truth[:omega2]
+        x[:,3] = truth[:omega3]
+        x[:,4] = truth[:c]
+        grad = rand(size(x,2))
+        testvec = deepcopy(grad)
+        d = data()
+        # Extract gradient
+        grad! = (x,grad) -> HWconstrained.obj(x,grad,d)
+
+        r = grad!(x[1,:],grad)
+        # Cannot test that the gradient is zero here, since the optimum is constrained
+        @test grad != testvec
+
    end
 
    @testset "tests gradient of constraint function" begin
-
-
-
-
-
-
+        truth = HWconstrained.DataFrame(a=[0.5;1.0;5.0],
+            c = [1.008;1.004;1.0008],
+            omega1=[-1.41237;-0.20618;0.758763],
+            omega2=[0.801455;0.400728;0.0801455],
+            omega3=[1.60291;0.801455;0.160291],
+            fval=[-1.20821;-0.732819;-0.013422])
+        x = zeros(3,4)
+        x[:,1] = truth[:omega1]
+        x[:,2] = truth[:omega2]
+        x[:,3] = truth[:omega3]
+        x[:,4] = truth[:c]
+        grad = rand(size(x,2))
+        testvec = deepcopy(grad)
+        d = data()
+        # Extract gradient
+        gradconstr! = (x,grad) -> HWconstrained.constr(x,grad,d)
+
+        r = gradconstr!(x[1,:],grad)
+        # Cannot test whether the gradient is zero, since this is the constraint function
+        @test grad != testvec
+
+
+
+
+
    end
 
end
 
@@ -34,7 +69,7 @@ using Test
     fval=[-1.20821;-0.732819;-0.013422])
 
     @testset "checking result of NLopt maximization" begin
-
+        tol2 = 1e-1
         t1 = table_NLopt()
         for c in names(truth)
             @test all(maximum.(abs.(t1[c].-truth[c])) .< tol2)
 
@@ -43,6 +78,7 @@ using Test
 
     @testset "checking result of NLopt maximization" begin
+        tol2 = 1e-1
         t1 = table_JuMP()
         for c in names(truth)
             @test all(maximum.(abs.(t1[c].-truth[c])) .< tol2)
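
A minimal usage sketch (assuming the package is instantiated or dev'ed locally) that exercises the two table builders the tests above compare against truth:

    using HWconstrained
    t_nlopt = table_NLopt()   # NLopt (LD_MMA) solutions for a = 0.5, 1.0, 5.0
    t_jump  = table_JuMP()    # JuMP/Ipopt solutions for the same values of a
    println(t_nlopt)
    println(t_jump)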