1 change: 0 additions & 1 deletion .JuliaFormatter.toml
@@ -1,2 +1 @@
# See https://domluna.github.io/JuliaFormatter.jl/stable/ for a list of options
style = "blue"
3 changes: 3 additions & 0 deletions .gitignore
@@ -4,4 +4,7 @@
/Manifest*.toml
/docs/Manifest*.toml
/docs/build/
tensorboard_logs
.vscode
Manifest.toml
examples
16 changes: 16 additions & 0 deletions Project.toml
@@ -4,8 +4,24 @@ authors = ["Members of JuliaDecisionFocusedLearning and contributors"]
version = "0.0.1"

[deps]
DecisionFocusedLearningBenchmarks = "2fbe496a-299b-4c81-bab5-c44dfc55cf20"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
InferOpt = "4846b161-c94e-4150-8dac-c7ae193c601f"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
ProgressMeter = "92933f4c-e287-5a05-a399-4b506db050ca"
Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
UnicodePlots = "b8865327-cd53-5732-bb35-84acbb429228"
ValueHistories = "98cad3c8-aec3-5f06-8e41-884608649ab7"

[compat]
DecisionFocusedLearningBenchmarks = "0.3.0"
Flux = "0.16.5"
InferOpt = "0.7.1"
MLUtils = "0.4.8"
ProgressMeter = "1.11.0"
Statistics = "1.11.1"
UnicodePlots = "3.8.1"
ValueHistories = "0.5.4"
julia = "1.11"

[extras]
1 change: 1 addition & 0 deletions docs/Project.toml
@@ -1,3 +1,4 @@
[deps]
DecisionFocusedLearningAlgorithms = "46d52364-bc3b-4fac-a992-eb1d3ef2de15"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"
13 changes: 12 additions & 1 deletion docs/make.jl
@@ -8,6 +8,17 @@ DocMeta.setdocmeta!(
recursive=true,
)

tutorial_dir = joinpath(@__DIR__, "src", "tutorials")
# NOTE: the definitions of tutorial_files, md_dir, and md_tutorial_files are not
# shown in this diff; the ones below are assumed so the script is self-contained.
tutorial_files = filter(endswith(".jl"), readdir(tutorial_dir))
md_dir = tutorial_dir
md_tutorial_files = [joinpath("tutorials", replace(f, ".jl" => ".md")) for f in tutorial_files]

include_tutorial = true

if include_tutorial
    # Turn each Literate.jl tutorial script into a Documenter-ready markdown page.
    for file in tutorial_files
        filepath = joinpath(tutorial_dir, file)
        Literate.markdown(filepath, md_dir; documenter=true, execute=false)
    end
end

makedocs(;
modules=[DecisionFocusedLearningAlgorithms],
authors="Members of JuliaDecisionFocusedLearning and contributors",
@@ -17,7 +28,7 @@ makedocs(;
edit_link="main",
assets=String[],
),
pages=["Home" => "index.md"],
pages=["Home" => "index.md", "Tutorials" => include_tutorial ? md_tutorial_files : []],
)

deploydocs(;
47 changes: 47 additions & 0 deletions docs/src/tutorials/tutorial.jl
@@ -0,0 +1,47 @@
# Tutorial
using DecisionFocusedLearningAlgorithms
using DecisionFocusedLearningBenchmarks
using MLUtils: splitobs
using Plots

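# Set up the benchmark, generate a dataset, and split it into train/validation/test sets.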
b = ArgmaxBenchmark()
dataset = generate_dataset(b, 100)
train_instances, validation_instances, test_instances = splitobs(
    dataset; at=(0.3, 0.3, 0.4)
)

model = generate_statistical_model(b; seed=0)
maximizer = generate_maximizer(b)

compute_gap(b, test_instances, model, maximizer)

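# Metric callbacks: each callback takes (model, maximizer, epoch) and returns a value
# recorded in the training log (accessed as log[:gap] below); entries can be nested.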
metrics_callbacks = (;
    :time => (model, maximizer, epoch) -> (epoch_time = time()),
    :gap => (;
        :val =>
            (model, maximizer, epoch) ->
                (gap = compute_gap(b, validation_instances, model, maximizer)),
        :test =>
            (model, maximizer, epoch) ->
                (gap = compute_gap(b, test_instances, model, maximizer)),
    ),
)

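# Train a copy of the model with fyl_train_model! (Fenchel-Young loss), recording the metrics defined above.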
fyl_model = deepcopy(model)
log = fyl_train_model!(
    fyl_model,
    maximizer,
    train_instances,
    validation_instances;
    epochs=100,
    metrics_callbacks,
)

log[:gap]
plot(
    [log[:gap].val, log[:gap].test];
    labels=["Val Gap" "Test Gap"],
    xlabel="Epoch",
    ylabel="Gap",
)
plot(log[:validation_loss])
11 changes: 11 additions & 0 deletions scripts/Project.toml
@@ -0,0 +1,11 @@
[deps]
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
DecisionFocusedLearningAlgorithms = "46d52364-bc3b-4fac-a992-eb1d3ef2de15"
DecisionFocusedLearningBenchmarks = "2fbe496a-299b-4c81-bab5-c44dfc55cf20"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
TensorBoardLogger = "899adc3e-224a-11e9-021f-63837185c80f"
ValueHistories = "98cad3c8-aec3-5f06-8e41-884608649ab7"
101 changes: 101 additions & 0 deletions scripts/main.jl
@@ -0,0 +1,101 @@
using DecisionFocusedLearningAlgorithms
using DecisionFocusedLearningBenchmarks
using MLUtils
using Statistics
using Plots

# NOTE: intended metric signature: metric(prediction, data_sample)

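# Quick end-to-end check on the toy ArgmaxBenchmark.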
b = ArgmaxBenchmark()
initial_model = generate_statistical_model(b)
maximizer = generate_maximizer(b)
dataset = generate_dataset(b, 100)
train_dataset, val_dataset, _ = splitobs(dataset; at=(0.3, 0.3, 0.4))
res, model = fyl_train_model(
    initial_model, maximizer, train_dataset, val_dataset; epochs=100
)

res = fyl_train_model(StochasticVehicleSchedulingBenchmark(); epochs=100)
plot(res.validation_loss; label="Validation Loss")
plot!(res.training_loss; label="Training Loss")

baty_train_model(DynamicVehicleSchedulingBenchmark(; two_dimensional_features=false))
DAgger_train_model(DynamicVehicleSchedulingBenchmark(; two_dimensional_features=false))

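# Policy wrapper: apply the statistical model to the observation, then call the
# (global) maximizer on the predicted scores to get a decision.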
struct KleopatraPolicy{M}
    model::M
end

function (m::KleopatraPolicy)(env)
    x, instance = observe(env)
    θ = m.model(x)
    return maximizer(θ; instance)
end

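# Dynamic vehicle scheduling experiment: imitation of anticipative solutions (FYL) vs DAgger.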
b = DynamicVehicleSchedulingBenchmark(; two_dimensional_features=false)
dataset = generate_dataset(b, 100)
train_instances, validation_instances, test_instances = splitobs(
    dataset; at=(0.3, 0.3, 0.4)
)
train_environments = generate_environments(b, train_instances; seed=0)
validation_environments = generate_environments(b, validation_instances)
test_environments = generate_environments(b, test_instances)

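# Build imitation datasets from the anticipative solutions of each training/validation environment.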
train_dataset = vcat(map(train_environments) do env
    v, y = generate_anticipative_solution(b, env; reset_env=true)
    return y
end...)

val_dataset = vcat(map(validation_environments) do env
    v, y = generate_anticipative_solution(b, env; reset_env=true)
    return y
end...)

model = generate_statistical_model(b; seed=0)
maximizer = generate_maximizer(b)
anticipative_policy = (env; reset_env) -> generate_anticipative_solution(b, env; reset_env)

fyl_model = deepcopy(model)
fyl_policy = Policy("fyl", "", KleopatraPolicy(fyl_model))

metrics_callbacks = (;
    obj=(model, maximizer, epoch) ->
        mean(evaluate_policy!(fyl_policy, test_environments, 1)[1])
)

fyl_loss = fyl_train_model!(
    fyl_model, maximizer, train_dataset, val_dataset; epochs=100, metrics_callbacks
)

dagger_model = deepcopy(model)
dagger_policy = Policy("dagger", "", KleopatraPolicy(dagger_model))
metrics_callbacks = (;
    obj=(model, maximizer, epoch) ->
        mean(evaluate_policy!(dagger_policy, test_environments, 1)[1])
)
dagger_loss = DAgger_train_model!(
    dagger_model,
    maximizer,
    train_environments,
    validation_environments,
    anticipative_policy;
    iterations=10,
    fyl_epochs=10,
    metrics_callbacks,
)

plot(
    0:100,
    [fyl_loss.obj[1:end], dagger_loss.obj[1:end]];
    labels=["FYL" "DAgger"],
    xlabel="Epoch",
    ylabel="Test Average Reward (1 scenario)",
)

# Evaluate both trained policies on the test environments with 100 scenarios each.
v_fyl, _ = evaluate_policy!(fyl_policy, test_environments, 100)
v_dagger, _ = evaluate_policy!(dagger_policy, test_environments, 100)
mean(v_fyl)
mean(v_dagger)

anticipative_policy(test_environments[1]; reset_env=true)
111 changes: 111 additions & 0 deletions scripts/main3.jl
@@ -0,0 +1,111 @@
using JLD2
using Flux
using DecisionFocusedLearningBenchmarks
const DVSP = DecisionFocusedLearningBenchmarks.DynamicVehicleScheduling
using ValueHistories
using Plots

b = DynamicVehicleSchedulingBenchmark(; max_requests_per_epoch=50)

logs = JLD2.load(joinpath(@__DIR__, "logs.jld2"))
model = logs["model"]
history = logs["history"]

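# Extract the logged curves from the saved ValueHistories object.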
epochs, train_losses = get(history, :training_loss)
epochs, val_losses = get(history, :validation_loss)
epochs, train_obj = get(history, :train_obj)
epochs, val_obj = get(history, :val_obj)

slice = 1:25  # use 1:length(epochs) for the full history
loss_fig = plot(
    epochs[slice], train_losses[slice]; label="Train Loss", xlabel="Epoch", ylabel="Loss"
)
plot!(loss_fig, epochs[slice], val_losses[slice]; label="Val Loss")

cost_fig = plot(
    epochs[slice], -train_obj[slice]; label="Train cost", xlabel="Epoch", ylabel="Cost"
)
plot!(cost_fig, epochs[slice], -val_obj[slice]; label="Val cost")

data = JLD2.load(joinpath(@__DIR__, "saved_data.jld2"))
instances = data["instances"]
dataset = data["dataset"]

extrema(dataset[1].info.static_instance.duration)

nb_instances = length(dataset)
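# Overwrite the stored durations with the raw instance durations, rescaled by 1/1000.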
for instance_id in 1:nb_instances
    dataset[instance_id].info.static_instance.duration .=
        instances[instance_id].duration ./ 1000
end

extrema(dataset[1].info.static_instance.duration)

dataset[1].info
old_instance = dataset[1].info
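# Rebuild the instance with the same parameters as the saved one, but force max_requests_per_epoch = 50.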
(;
    epoch_duration,
    last_epoch,
    max_requests_per_epoch,
    Δ_dispatch,
    static_instance,
    two_dimensional_features,
) = old_instance
instance = DVSP.Instance(
    static_instance;
    epoch_duration,
    two_dimensional_features,
    Δ_dispatch,
    max_requests_per_epoch=50,
)

environments = generate_environments(b, [DataSample(; info=instance)])
env = first(environments)

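# Baseline policies provided by the benchmark: lazy and greedy dispatch.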
policies = generate_policies(b)
lazy = policies[1]
greedy = policies[2]

greedy_cost, greedy_data = evaluate_policy!(greedy, first(environments))
lazy_cost, lazy_data = evaluate_policy!(lazy, first(environments))
anticipative_cost, anticipative_data = generate_anticipative_solution(
    b, first(environments); reset_env=true
)
greedy_cost
lazy_cost
anticipative_cost

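# Decision-focused policy: predict scores with the trained model, solve with the
# maximizer, and decode the resulting bit-matrix into routes.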
struct DFLPolicy{F,M}
    model::F
    maximizer::M
end

function (p::DFLPolicy)(env)
    x, state = observe(env)
    θ = p.model(x)
    y = p.maximizer(θ; instance=state)
    return DVSP.decode_bitmatrix_to_routes(y)
end

maximizer = generate_maximizer(b)
policy = Policy("", "", DFLPolicy(model, maximizer))

dfl_cost, dfl_data = evaluate_policy!(policy, first(environments))

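# Export the route data of each policy to JSON for external visualization.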
using JSON3
open("greedy.json", "w") do f
JSON3.pretty(f, JSON3.write(DVSP.build_plot_data(greedy_data)))
println(f)
end
open("lazy.json", "w") do f
JSON3.pretty(f, JSON3.write(DVSP.build_plot_data(lazy_data)))
println(f)
end
open("dfl.json", "w") do f
JSON3.pretty(f, JSON3.write(DVSP.build_plot_data(dfl_data)))
println(f)
end
open("anticipative.json", "w") do f
JSON3.pretty(f, JSON3.write(DVSP.build_plot_data(anticipative_data)))
println(f)
end