Skip to content

Commit 3440692

Browse files
andrewrosemberg (Andrew David Werner Rosemberg)
authored
Update generation code
* change generation * ignore config * update * update code * update * update code * update fix test * update tests * update code * update samplers * update tests * update code * adjust record for infeasible * create loader * fix test * update tests * update script * update code * update script * update script * update code * fix script * update script * update parallel code * fix bug infeasible * update script * update code * fix code * fix tests * update generation * update script * update code --------- Co-authored-by: Andrew David Werner Rosemberg <[email protected]> Co-authored-by: Andrew David Werner Rosemberg <[email protected]> Co-authored-by: Andrew David Werner Rosemberg <[email protected]> Co-authored-by: Andrew David Werner Rosemberg <[email protected]>
1 parent 194db3e commit 3440692

21 files changed

+1721
-156
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -25,3 +25,4 @@ examples/unitcommitment/app/*
2525
*.wandb
2626
*latest-run
2727
*.html
28+
*.config.toml

Project.toml

Lines changed: 3 additions & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -6,13 +6,13 @@ version = "1.2.0-DEV"
66
[deps]
77
Arrow = "69666777-d1a9-59fb-9406-91d4454c9d45"
88
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
9-
Dualization = "191a621a-6537-11e9-281d-650236a99e60"
9+
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
10+
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
1011
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
1112
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
1213
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
1314
MLJFlux = "094fc8d1-fd35-5302-93ea-dabda2abf845"
1415
NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
15-
Nonconvex = "01bcebdf-4d21-426d-b5c4-6132c1619978"
1616
Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2"
1717
ParametricOptInterface = "0ce4ce61-57bf-432b-a095-efac525d185e"
1818
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
@@ -23,25 +23,21 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
2323
[compat]
2424
Arrow = "2"
2525
CSV = "0.10"
26-
Dualization = "0.5"
2726
JuMP = "1"
2827
ParametricOptInterface = "0.7"
2928
Zygote = "^0.6.68"
3029
julia = "1.6"
3130

3231
[extras]
33-
AbstractGPs = "99985d1d-32ba-4be9-9821-2ec096f28918"
3432
CUDA_Runtime_jll = "76a88914-d11a-5bdc-97e0-2f5a05c973a2"
3533
Clarabel = "61c947e1-3e6d-4ee4-985a-eec8c727bd6e"
36-
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
3734
DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
3835
HiGHS = "87dc4568-4c63-4d18-b0c0-bb2238e4078b"
3936
Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
4037
MLJ = "add582a8-e3ab-11e8-2d5e-e98b27df1bc7"
41-
NonconvexNLopt = "b43a31b8-ff9b-442d-8e31-c163daa8ab75"
4238
PGLib = "07a8691f-3d11-4330-951b-3c50f98338be"
4339
PowerModels = "c36e90e8-916a-50a6-bd94-075b64ef4655"
4440
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
4541

4642
[targets]
47-
test = ["Test", "DelimitedFiles", "PGLib", "HiGHS", "PowerModels", "DataFrames", "Clarabel", "Ipopt", "NonconvexNLopt", "MLJ"]
43+
test = ["Test", "DelimitedFiles", "PGLib", "HiGHS", "PowerModels", "Clarabel", "Ipopt", "MLJ"]
Lines changed: 10 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -0,0 +1,10 @@
1+
using L2O
2+
3+
general_sampler(
4+
"examples/powermodels/data/6468_rte/6468_rte_SOCWRConicPowerModel_POI_load.mof.json";
5+
samplers=[
6+
(original_parameters) -> scaled_distribution_sampler(original_parameters, 10000),
7+
(original_parameters) -> line_sampler(original_parameters, 1.01:0.01:1.25),
8+
(original_parameters) -> box_sampler(original_parameters, 300),
9+
],
10+
)
Lines changed: 89 additions & 68 deletions
Original file line number · Diff line number · Diff line change
@@ -1,45 +1,61 @@
1-
# run with: julia ./examples/powermodels/generate_full_datasets_script.jl "./examples/powermodels/data/pglib_opf_case300_ieee/case300.config.toml"
2-
config_path = ARGS[1]
1+
# run with: julia ./generate_full_datasets_script.jl 1 1 "../powermodels/data/6468_rte.config.toml" SOCWRConicPowerModel
32

4-
using Pkg: Pkg;
5-
Pkg.activate(".");
3+
################################################################
4+
############## PowerModels Dataset Generation ##############
5+
################################################################
66

7-
using TestEnv
8-
TestEnv.activate()
7+
using Distributed
8+
using Random
9+
10+
##############
11+
# Load Functions
12+
##############
13+
14+
@everywhere import Pkg
15+
16+
@everywhere Pkg.activate(dirname(dirname(@__DIR__)))
17+
18+
@everywhere Pkg.instantiate()
919

1020
########## SCRIPT REQUIRED PACKAGES ##########
1121

12-
using L2O
13-
using Arrow
14-
using Test
15-
using UUIDs
16-
using PowerModels
17-
import JuMP.MOI as MOI
18-
import ParametricOptInterface as POI
19-
using TOML
22+
@everywhere using L2O
23+
@everywhere using Arrow
24+
@everywhere using Test
25+
@everywhere using UUIDs
26+
@everywhere using PowerModels
27+
@everywhere import JuMP.MOI as MOI
28+
@everywhere import ParametricOptInterface as POI
29+
@everywhere using TOML
30+
31+
## SOLVER PACKAGES ##
32+
33+
# using Clarabel
34+
@everywhere using Gurobi
2035

2136
PowerModels.silence()
2237

23-
## SOLVER PACKAGES ##
38+
##############
39+
# Parameters
40+
##############
2441

25-
using Clarabel
26-
using Gurobi
27-
using NonconvexNLopt
42+
config_path = ARGS[3]
2843

2944
########## POI SOLVER ##########
3045

31-
cached =
32-
() -> MOI.Bridges.full_bridge_optimizer(
33-
MOI.Utilities.CachingOptimizer(
34-
MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
35-
Clarabel.Optimizer(),
36-
),
37-
Float64,
38-
)
46+
# cached =
47+
# () -> MOI.Bridges.full_bridge_optimizer(
48+
# MOI.Utilities.CachingOptimizer(
49+
# MOI.Utilities.UniversalFallback(MOI.Utilities.Model{Float64}()),
50+
# Gurobi.Optimizer(),
51+
# ),
52+
# Float64,
53+
# )
3954

40-
POI_cached_optimizer() = POI.Optimizer(cached())
55+
POI_cached_optimizer() = Gurobi.Optimizer() # POI.Optimizer(cached())
4156

4257
########## PARAMETERS ##########
58+
@info "Loading configuration file: $config_path"
4359

4460
config = TOML.parsefile(config_path)
4561
path = config["export_dir"]
@@ -52,7 +68,11 @@ filetype = ArrowFile # ArrowFile # CSVFile
5268
case_name = config["case_name"]
5369
case_file_path = joinpath(path, case_name)
5470
mkpath(case_file_path)
55-
network_formulation = eval(Symbol(ARGS[2])) # SOCWRConicPowerModel # DCPPowerModel
71+
network_formulation = eval(Symbol(ARGS[4])) # SOCWRConicPowerModel # DCPPowerModel
72+
73+
##############
74+
# Solve and store solutions
75+
##############
5676

5777
########## SAMPLER DATASET GENERATION ##########
5878

@@ -68,8 +88,8 @@ if haskey(config, "sampler")
6888
filetype=filetype,
6989
network_formulation=network_formulation,
7090
optimizer=POI_cached_optimizer,
71-
internal_load_sampler=(_o, n) -> load_sampler(
72-
_o, n; max_multiplier=1.25, min_multiplier=0.8, step_multiplier=0.01
91+
internal_load_sampler=(_o, n, idx, num_inputs) -> load_sampler(
92+
_o, n, idx, num_inputs; max_multiplier=1.25, min_multiplier=0.8, step_multiplier=0.01
7393
),
7494
)
7595
global success_solves += _success_solves
@@ -111,41 +131,42 @@ if haskey(config, "line_search")
111131
end
112132

113133
########## WORST CASE DUAL DATASET GENERATION ##########
114-
if haskey(config, "worst_case_dual")
115-
num_p = config["worst_case_dual"]["num_samples"]
116-
function optimizer_factory()
117-
IPO_OPT = Gurobi.Optimizer()
118-
# IPO_OPT = MadNLP.Optimizer(print_level=MadNLP.INFO, max_iter=100)
119-
# IPO = MOI.Bridges.Constraint.SOCtoNonConvexQuad{Float64}(IPO_OPT)
120-
# MIP = QuadraticToBinary.Optimizer{Float64}(IPO)
121-
return () -> IPO_OPT
122-
end
123-
124-
success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset(
125-
case_file_path,
126-
case_name;
127-
num_p=num_p,
128-
filetype=filetype,
129-
network_formulation=network_formulation,
130-
optimizer_factory=optimizer_factory,
131-
hook=(model) -> set_optimizer_attribute(model, "NonConvex", 2),
132-
)
133-
134-
@info "Success solves Worst Case: $(success_solves) of $(num_p)"
135-
end
136-
137-
########## WORST CASE NONCONVEX DATASET GENERATION ##########
138-
if haskey(config, "worst_case_nonconvex")
139-
num_p = config["worst_case_nonconvex"]["num_samples"]
140-
141-
success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset_Nonconvex(
142-
case_file_path,
143-
case_name;
144-
num_p=num_p,
145-
filetype=filetype,
146-
network_formulation=network_formulation,
147-
optimizer=POI_cached_optimizer,
148-
)
149-
150-
@info "Success solves Worst Case: $(success_solves * 100) of $(num_p)"
151-
end
134+
# if haskey(config, "worst_case_dual")
135+
# num_p = config["worst_case_dual"]["num_samples"]
136+
# function optimizer_factory()
137+
# IPO_OPT = Gurobi.Optimizer()
138+
# # IPO_OPT = MadNLP.Optimizer(print_level=MadNLP.INFO, max_iter=100)
139+
# # IPO = MOI.Bridges.Constraint.SOCtoNonConvexQuad{Float64}(IPO_OPT)
140+
# # MIP = QuadraticToBinary.Optimizer{Float64}(IPO)
141+
# return () -> IPO_OPT
142+
# end
143+
144+
# success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset(
145+
# case_file_path,
146+
# case_name;
147+
# num_p=num_p,
148+
# filetype=filetype,
149+
# network_formulation=network_formulation,
150+
# optimizer_factory=optimizer_factory,
151+
# hook=(model) -> set_optimizer_attribute(model, "NonConvex", 2),
152+
# )
153+
154+
# @info "Success solves Worst Case: $(success_solves) of $(num_p)"
155+
# end
156+
157+
# ########## WORST CASE NONCONVEX DATASET GENERATION ##########
158+
# if haskey(config, "worst_case_nonconvex")
159+
# @everywhere using NonconvexNLopt
160+
# num_p = config["worst_case_nonconvex"]["num_samples"]
161+
162+
# success_solves, number_variables, number_loads, batch_id = generate_worst_case_dataset_Nonconvex(
163+
# case_file_path,
164+
# case_name;
165+
# num_p=num_p,
166+
# filetype=filetype,
167+
# network_formulation=network_formulation,
168+
# optimizer=POI_cached_optimizer,
169+
# )
170+
171+
# @info "Success solves Worst Case: $(success_solves * 100) of $(num_p)"
172+
# end

examples/powermodels/pglib_datagen.jl

Lines changed: 30 additions & 28 deletions
Original file line number · Diff line number · Diff line change
@@ -40,33 +40,33 @@ function load_sampler(
4040
return load_samples
4141
end
4242

43-
"""
44-
line_sampler(original_parameter::T, num_p::Int, parameter_index::F, num_inputs::F, line_index::F; step_multiplier::T=0.1)
45-
46-
line_sampler is a function to help generate a dataset for varying parameter values. It has two modes:
47-
- If line_index is not outside the parameter index range:
48-
Return an incremental vector for the parameter at parameter_index and an unchanged parameter for the rest;
49-
- If line_index is outside the parameter index range:
50-
Return an incremental vector for all parameters.
51-
"""
52-
function line_sampler(
53-
original_parameter::T,
54-
num_p::F,
55-
parameter_index::F,
56-
num_inputs::F,
57-
line_index::F;
58-
step_multiplier::T=1.01,
59-
) where {T<:Real,F<:Integer}
60-
# parameter sampling
61-
num_parameters = floor(Int, num_inputs / 2)
62-
if (parameter_index == line_index) ||
63-
(parameter_index - num_parameters == line_index) ||
64-
(line_index == num_inputs + 1)
65-
return [original_parameter * step_multiplier^(j) for j in 1:num_p]
66-
else
67-
return ones(num_p) * original_parameter
68-
end
69-
end
43+
# """
44+
# line_sampler(original_parameter::T, num_p::Int, parameter_index::F, num_inputs::F, line_index::F; step_multiplier::T=0.1)
45+
46+
# line_sampler is a function to help generate a dataset for varying parameter values. It has two modes:
47+
# - If line_index is not outside the parameter index range:
48+
# Return an incremental vector for the parameter at parameter_index and an unchanged parameter for the rest;
49+
# - If line_index is outside the parameter index range:
50+
# Return an incremental vector for all parameters.
51+
# """
52+
# function line_sampler(
53+
# original_parameter::T,
54+
# num_p::F,
55+
# parameter_index::F,
56+
# num_inputs::F,
57+
# line_index::F;
58+
# step_multiplier::T=1.01,
59+
# ) where {T<:Real,F<:Integer}
60+
# # parameter sampling
61+
# num_parameters = floor(Int, num_inputs / 2)
62+
# if (parameter_index == line_index) ||
63+
# (parameter_index - num_parameters == line_index) ||
64+
# (line_index == num_inputs + 1)
65+
# return [original_parameter * step_multiplier^(j) for j in 1:num_p]
66+
# else
67+
# return ones(num_p) * original_parameter
68+
# end
69+
# end
7070

7171
"""
7272
load_parameter_factory(model, indices; load_set=nothing)
@@ -77,7 +77,9 @@ function load_parameter_factory(model, indices; load_set=nothing)
7777
if isnothing(load_set)
7878
return @variable(model, _p[i=indices])
7979
end
80-
return @variable(model, _p[i=indices] in load_set)
80+
num_loads = floor(Int,length(indices) / 2)
81+
pd_index = indices[1:num_loads]
82+
return [@variable(model, pd[i=pd_index] in load_set[i]).data; @variable(model, qd[i=pd_index] in load_set[i+num_loads]).data]
8183
end
8284

8385
"""

examples/powermodels/visualize.jl

Lines changed: 23 additions & 8 deletions
Original file line number · Diff line number · Diff line change
@@ -9,8 +9,8 @@ cossim(x,y) = dot(x,y) / (norm(x)*norm(y))
99
##############
1010
# Parameters
1111
##############
12-
network_formulation = "ACPPowerModel"
13-
case_name = "pglib_opf_case300_ieee"
12+
network_formulation = "ACPPowerModel" # ACPPowerModel "DCPPowerModel" # "SOCWRConicPowerModel"
13+
case_name = "6468_rte" # pglib_opf_case300_ieee # 6468_rte
1414
path_dataset = joinpath(dirname(@__FILE__), "data")
1515
case_file_path = joinpath(path_dataset, case_name)
1616
case_file_path_train = joinpath(case_file_path, "input", "train")
@@ -45,6 +45,8 @@ output_table = Arrow.Table(file_outs)
4545
input_data_train = DataFrame(input_table_train)
4646
input_data_test = DataFrame(input_table_test)
4747
output_data = DataFrame(output_table)
48+
output_data.operational_cost = output_data.objective
49+
output_data = output_data[output_data.objective .> 10, :]
4850
input_data = vcat(input_data_train, input_data_test[!, Not(:in_train_convex_hull)])
4951

5052
##############
@@ -59,13 +61,13 @@ file_outs_soc = [
5961
]
6062
output_table_soc = Arrow.Table(file_outs_soc)
6163
output_data_soc = DataFrame(output_table_soc)
62-
output_data_soc.operational_cost_soc = output_data_soc.operational_cost
63-
output_data_soc = output_data_soc[output_data_soc.operational_cost .> 10, :]
64+
output_data_soc.operational_cost_soc = output_data_soc.objective
65+
output_data_soc = output_data_soc[output_data_soc.objective .> 10, :]
6466

6567
# compare SOC and AC operational_cost by id
6668
ac_soc = innerjoin(output_data[!, [:id, :operational_cost]], output_data_soc[!, [:id, :operational_cost_soc]], on=:id, makeunique=true)
6769

68-
ac_soc.error = abs.(ac_soc.operational_cost .- ac_soc.operational_cost_soc) ./ ac_soc.operational_cost * 100
70+
ac_soc.error = (ac_soc.operational_cost .- ac_soc.operational_cost_soc) ./ ac_soc.operational_cost * 100
6971
mean(ac_soc.error)
7072
maximum(ac_soc.error)
7173
ac_soc[findmax(ac_soc.error)[2], :]
@@ -90,17 +92,30 @@ function total_load_vector(input_data; is_test=false)
9092
return df
9193
end
9294

95+
function total_load_vector_annon(input_data)
96+
df = DataFrame()
97+
df.id = input_data.id
98+
num_loads = floor(Int, length(names(input_data[!, Not(:id)])) / 2)
99+
for i in 1:num_loads
100+
df[!, "load[$i]"] = zeros(size(input_data, 1))
101+
end
102+
for j in 1:size(input_data, 1), i in 1:num_loads
103+
df[j, "load[$i]"] = sqrt(input_data[j, i+1]^2 + input_data[j, i+num_loads+1]^2)
104+
end
105+
return df
106+
end
107+
93108
######### Plot Load Vectors #########
94109
load_vector_train = total_load_vector(input_data_train)
95110
load_vector_test = total_load_vector(input_data_test; is_test=true)
96111

97112
# Nominal Loads
98-
nominal_loads = Vector(load_vector_train[1, Not(:id)])
113+
nominal_loads = Vector(input_data_train[1, Not(:id)])
99114
norm_nominal_loads = norm(nominal_loads)
100115

101116
# Load divergence
102-
theta_train = [acos(cossim(nominal_loads, Vector(load_vector_train[i, Not(:id)]))) for i in 2:size(load_vector_train, 1)] * 180 / pi
103-
norm_sim_train = [norm(Vector(load_vector_train[i, Not(:id)])) / norm_nominal_loads for i in 2:size(load_vector_train, 1)]
117+
theta_train = [acos(cossim(nominal_loads, Vector(input_data_train[i, Not(:id)]))) for i in 2:10000] * 180 / pi
118+
norm_sim_train = [norm(Vector(input_data_train[i, Not(:id)])) / norm_nominal_loads for i in 2:10000]
104119

105120
theta_test = [acos(cossim(nominal_loads, Vector(load_vector_test[i, Not([:id, :in_hull])]))) for i in 1:size(load_vector_test, 1)] * 180 / pi
106121
norm_sim_test = [norm(Vector(load_vector_test[i, Not([:id, :in_hull])])) / norm_nominal_loads for i in 1:size(load_vector_test, 1)]

0 commit comments

Comments (0)