Skip to content

Commit fd1dad5

Browse files
committed
Trying lower memory
1 parent 56318ea commit fd1dad5

File tree

1 file changed: +9 additions, −6 deletions

experiments/Synth/scripts/synth_biodivine.jl

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,10 @@
 #
 #SBATCH --job-name="Synth"
 #SBATCH --partition=compute
-#SBATCH --time=72:00:00
-#SBATCH --ntasks 256
+#SBATCH --time=00:30:00
+#SBATCH --ntasks 16
 #SBATCH --cpus-per-task=1
-#SBATCH --mem-per-cpu=8G
+#SBATCH --mem-per-cpu=2G
 #SBATCH --account=research-eemcs-st

1111
using Distributed
@@ -23,6 +23,7 @@ end
 
 @everywhere using ProgressMeter, DataFrames, HerbSearch, GraphDynamicalSystems, Random
 using MetaGraphsNext: labels
+using Statistics: quantile
 
 traj_df = collect_results(datadir("sims", "biodivine_split"))
 path2id = path -> parse_savename(path)[end-1]["id"]
@@ -35,9 +36,11 @@ model_df.vertex = collect.(labels.(model_df.metagraph_model))
 # add a copy so that after flattening we have all of the vertices of a model in each row of df
 model_df.vertices = model_df.vertex
 
-# Filter only smaller models
-# per_vertex_df = flatten(model_df[length.(model_df.vertices).<15, :], :vertex)
-per_vertex_df = flatten(model_df, :vertex)
+# Filter out the largest 5% of models
+# They are likely Booleanized multivalue models—have to check
+n_verts_per_model = length.(model_df.vertices)
+per_vertex_df =
+    flatten(model_df[n_verts_per_model.<=quantile(n_verts_per_model, 0.95), :], :vertex)
 
 grammars_df = model_df[!, [:ID, :vertices]]
 grammars_df.dnf_grammar = build_dnf_grammar.(grammars_df.vertices)

0 commit comments

Comments
 (0)