Commit dabe58f

Authored by andrewrosemberg (Andrew David Werner Rosemberg)
ConvexHull check for an OPF problem (#16)
* update test
* update code
* update
* update split
* update nodes
* update split
* update split
* update split
* update split
* test split
* update
* update
* add comments
* fix training
* update training
* update
* update files
* fix typos
* fix typo
* update controls
* test change parameters
* update testing
* fix error
* update code
* update visu
* update train
* update
* testing script
* update
* update code
* add tolerance kwarg
* update vis

---------

Co-authored-by: Andrew David Werner Rosemberg <[email protected]>
1 parent 5b7259e commit dabe58f

16 files changed (+787, -135 lines)

.gitignore

Lines changed: 8 additions & 1 deletion
@@ -12,9 +12,16 @@ Manifest.toml
 *.out
 *.sbatch
 examples/unitcommitment/app/*
+*/app/*
 *.edu
-examples/unitcommitment/wandb/*
+*.yaml
 *.png
 *.jls
 *.jlso
 *.jld2
+*.txt
+*.json
+*.log
+*.wandb
+*latest-run
+*.html

README.md

Lines changed: 1 addition & 1 deletion
@@ -134,7 +134,7 @@ Flux.train!(loss, Flux.params(model), [(input_features, output_variables)], opti
 predictions = model(input_features)
 ```

-## Comming Soon
+## Coming Soon

 Future features:
 - ML objectives that penalize infeasible predictions;

examples/powermodels/HuggingFaceDatasets.jl

Lines changed: 1 addition & 1 deletion
@@ -31,7 +31,7 @@ cache_dir="./examples/powermodels/data/"
 organization = "L2O"
 dataset = "pglib_opf_solves"
 case_name = "pglib_opf_case300_ieee"
-formulation = "DCPPowerModel"
+formulation = "SOCWRConicPowerModel" # ACPPowerModel SOCWRConicPowerModel DCPPowerModel
 io_type = "input"
 download_dataset(organization, dataset, case_name, io_type; cache_dir=cache_dir)

examples/powermodels/data_split.jl

Lines changed: 95 additions & 0 deletions
####################################################
############## PowerModels Data Split ##############
####################################################
import Pkg
Pkg.activate(dirname(dirname(@__DIR__)))

using Distributed
using Random

##############
# Load Packages everywhere
##############

@everywhere import Pkg
@everywhere Pkg.activate(dirname(dirname(@__DIR__)))
@everywhere Pkg.instantiate()
@everywhere using DataFrames
@everywhere using L2O
@everywhere using Gurobi
@everywhere using Arrow
@everywhere using CSV # needed for the CSVFile branch below
@everywhere using MLUtils

##############
# Parameters
##############
case_name = "pglib_opf_case300_ieee" # pglib_opf_case300_ieee # pglib_opf_case5_pjm
filetype = ArrowFile # ArrowFile # CSVFile
path_dataset = joinpath(dirname(@__FILE__), "data")
case_file_path = joinpath(path_dataset, case_name)
case_file_path_input = joinpath(case_file_path, "input")

mkpath(joinpath(case_file_path_input, "train"))
mkpath(joinpath(case_file_path_input, "test"))

##############
# Load Data
##############
iter_files_in = readdir(joinpath(case_file_path_input))
iter_files_in = filter(x -> occursin(string(filetype), x), iter_files_in)
file_ins = [
    joinpath(case_file_path_input, file) for file in iter_files_in if occursin("input", file)
]
batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins]

# Load input data tables
if filetype === ArrowFile
    input_table_train = Arrow.Table(file_ins)
else
    input_table_train = CSV.read(file_ins, DataFrame) # read all input files; the split happens below
end

# Convert to dataframes
input_data = DataFrame(input_table_train)

##############
# Split Data
##############
Random.seed!(123)
train_idx, test_idx = splitobs(1:size(input_data, 1), at=0.7, shuffle=true)

train_table = input_data[train_idx, :]
test_table = input_data[test_idx, :]

batch_size = 10

num_batches = ceil(Int, length(test_idx) / batch_size)

##############
# Check Convex-Hull
##############

@info "Computing if test points are in the convex hull of the training set" batch_size num_batches

inhull = Array{Bool}(undef, length(test_idx))
# NOTE: in-place writes to `inhull` are visible here only because no worker
# processes are added; with `addprocs` this would need a SharedArray or pmap.
@sync @distributed for i in 1:num_batches
    idx_range = (i-1)*batch_size+1:min(i*batch_size, length(test_idx))
    batch = test_table[idx_range, :]
    inhull[idx_range] = inconvexhull(Matrix(train_table[!, Not(:id)]), Matrix(batch[!, Not(:id)]), Gurobi.Optimizer)
    @info "Batch $i of $num_batches done"
end

test_table.in_train_convex_hull = inhull

##############
# Save Files
##############

# Save the training and test sets
if filetype === ArrowFile
    Arrow.write(joinpath(case_file_path_input, "train", case_name * "_train_input" * ".arrow"), train_table)
    Arrow.write(joinpath(case_file_path_input, "test", case_name * "_test_input" * ".arrow"), test_table)
else
    CSV.write(joinpath(case_file_path_input, "train", case_name * "_train_input" * ".csv"), train_table)
    CSV.write(joinpath(case_file_path_input, "test", case_name * "_test_input" * ".csv"), test_table)
end
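For context on the `inconvexhull` call above: a point lies in the convex hull of the training rows exactly when some convex combination of those rows reproduces it, which is a small LP feasibility problem per test point. The sketch below is a minimal, hypothetical reimplementation of that test in JuMP, not the L2O.jl source; the function name, the `tol` keyword (the commit message mentions adding a tolerance kwarg), and the use of HiGHS in place of Gurobi are assumptions for illustration.

# Minimal sketch (not the L2O.jl implementation): membership of y in conv(rows of X)
# via an LP feasibility problem, with a small tolerance on the equality constraints.
using JuMP, HiGHS  # the script above uses Gurobi.Optimizer; HiGHS is a free stand-in

function in_convex_hull_sketch(X::AbstractMatrix, y::AbstractVector, optimizer; tol=1e-6)
    n = size(X, 1)                                        # number of training points
    model = Model(optimizer)
    set_silent(model)
    @variable(model, λ[1:n] >= 0)                         # convex-combination weights
    @constraint(model, sum(λ) == 1)                       # weights sum to one
    @constraint(model, y .- tol .<= X' * λ .<= y .+ tol)  # combination matches y
    optimize!(model)
    return termination_status(model) == MOI.OPTIMAL       # feasible => y is in the hull
end

# Usage on toy data: the unit square around the origin contains (0.1, 0.2) but not (2.0, 0.0).
X = [1.0 1.0; 1.0 -1.0; -1.0 -1.0; -1.0 1.0]
@show in_convex_hull_sketch(X, [0.1, 0.2], HiGHS.Optimizer)  # true
@show in_convex_hull_sketch(X, [2.0, 0.0], HiGHS.Optimizer)  # false

Solving one LP per test point is why the script batches the test set and distributes batches across processes.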

examples/powermodels/flux_forecaster_script.jl

Lines changed: 0 additions & 97 deletions
This file was deleted.

examples/powermodels/jls2jld2.jl

Lines changed: 84 additions & 0 deletions
using Arrow
using CSV
using MLJFlux
using MLUtils
using Flux
using MLJ
using CUDA
using DataFrames
using PowerModels
using L2O
using Random
using JLD2
using Wandb, Dates, Logging

include(joinpath(dirname(dirname(@__FILE__)), "training_utils.jl")) # include("../training_utils.jl")

##############
# Parameters
##############
case_name = ARGS[1] # case_name="pglib_opf_case300_ieee" # pglib_opf_case5_pjm
network_formulation = ARGS[2] # network_formulation=ACPPowerModel SOCWRConicPowerModel DCPPowerModel
icnn = parse(Bool, ARGS[3]) # icnn=true # false
filetype = ArrowFile # ArrowFile # CSVFile
layers = [512, 256, 64] # [256, 64, 32]
path_dataset = joinpath(dirname(@__FILE__), "data")
case_file_path = joinpath(path_dataset, case_name)
case_file_path_output = joinpath(case_file_path, "output", string(network_formulation))
case_file_path_input = joinpath(case_file_path, "input", "train")
save_file = if icnn
    "$(case_name)_$(network_formulation)_$(replace(string(layers), ", " => "_"))_icnn"
else
    "$(case_name)_$(network_formulation)_$(replace(string(layers), ", " => "_"))_dnn"
end

##############
# Load Data
##############

iter_files_in = readdir(joinpath(case_file_path_input))
iter_files_in = filter(x -> occursin(string(filetype), x), iter_files_in)
file_ins = [
    joinpath(case_file_path_input, file) for file in iter_files_in if occursin("input", file)
]
iter_files_out = readdir(joinpath(case_file_path_output))
iter_files_out = filter(x -> occursin(string(filetype), x), iter_files_out)
file_outs = [
    joinpath(case_file_path_output, file) for file in iter_files_out if occursin("output", file)
]
# batch_ids = [split(split(file, "_")[end], ".")[1] for file in file_ins]

# Load input and output data tables
if filetype === ArrowFile
    input_table_train = Arrow.Table(file_ins)
    output_table_train = Arrow.Table(file_outs)
else
    input_table_train = CSV.read(file_ins, DataFrame)   # read all input files
    output_table_train = CSV.read(file_outs, DataFrame) # read all output files
end

# Convert to dataframes
input_data = DataFrame(input_table_train)
output_data = DataFrame(output_table_train)

# Filter out rows with near-zero operational_cost (indicative of numerical issues)
output_data = output_data[output_data.operational_cost .> 10, :]

# Match inputs to outputs on the sample id
train_table = innerjoin(input_data, output_data[!, [:id, :operational_cost]]; on=:id)

input_features = names(train_table[!, Not([:id, :operational_cost])])

##############
# Load JLS Model
##############
num = 3
model_dir = joinpath(dirname(@__FILE__), "models")
mach = machine(joinpath(model_dir, save_file * "$num.jls"))

##############
# Save JLD2 Model
##############
model = mach.fitresult[1]
model_state = Flux.state(model)
jldsave(joinpath(model_dir, save_file * ".jld2"), model_state=model_state, input_features=input_features, layers=mach.model.builder.hidden_sizes)
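The script saves `Flux.state(model)` together with the metadata needed to rebuild the network (`input_features` and the hidden layer sizes). As a sanity check on the round trip, a hypothetical reload could look like the sketch below; the MLP layout (Dense hidden layers with a scalar output head) and the `relu` activation are assumptions for illustration, since the true architecture comes from `mach.model.builder`.

# Hypothetical round-trip check for the JLD2 artifact written above,
# continuing from the script's `model_dir` and `save_file` variables.
# Assumes the builder produced a plain MLP: Dense hidden layers, scalar output.
using Flux, JLD2

saved = JLD2.load(joinpath(model_dir, save_file * ".jld2"))
hidden = saved["layers"]                        # e.g. [512, 256, 64]
n_in = length(saved["input_features"])

sizes = [n_in; hidden]                          # layer widths, input first
model = Chain(
    [Dense(sizes[i] => sizes[i+1], relu) for i in 1:length(hidden)]...,  # assumed activation
    Dense(hidden[end] => 1),                    # scalar operational-cost head
)
Flux.loadmodel!(model, saved["model_state"])    # restore the trained weights in place

`Flux.loadmodel!` errors if the rebuilt structure does not match the saved state, so a successful load doubles as a check that the assumed architecture is the one that was trained.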
