import numpy as np
import scipy.sparse as spa
from time import perf_counter_ns
+from concurrent.futures import ThreadPoolExecutor
+
+"""
+There are two interfaces for solving a QP with the dense backend: a) create a qp object, pass the problem data (matrices, vectors) to the qp.init method (which allocates memory and preconditions the problem) and then call qp.solve, or b) use the solve function directly on the problem data (which does everything in one go).
+
+Currently, only interface (a) is parallelized (using OpenMP, via `solve_in_parallel`). The memory allocation + preconditioning is therefore done serially when building the batch of qps that is then passed to `solve_in_parallel`. The solve function (b) is not parallelized internally, but it can easily be parallelized in Python using a ThreadPoolExecutor.
+
+Here we compare the timings of the two approaches. We generate a batch of QP problems, solve them in parallel using `solve_in_parallel` (adding the time needed to build the batch of qps to the parallel solve time), and compare against solving each problem with the solve function in a ThreadPoolExecutor.
+"""
+
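+# Number of OpenMP threads available to proxsuite; also reused below as the ThreadPoolExecutor worker count.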
+num_threads = proxsuite.proxqp.omp_get_max_threads()


def generate_mixed_qp(n, n_eq, n_in, seed=1):
@@ -23,45 +34,109 @@ def generate_mixed_qp(n, n_eq, n_in, seed=1):
    u = A @ v
    l = -1.0e20 * np.ones(m)

-    return P.toarray(), q, A[:n_eq, :], u[:n_eq], A[n_in:, :], u[n_in:], l[n_in:]
+    return P.toarray(), q, A[:n_eq, :], u[:n_eq], A[n_in:, :], l[n_in:], u[n_in:]


-n = 500
-n_eq = 200
-n_in = 200
+problem_specs = [
+    # (n, n_eq, n_in),
+    (50, 20, 20),
+    (100, 40, 40),
+    (200, 80, 80),
+    (500, 200, 200),
+    (1000, 200, 200),
+]

num_qps = 128

-# qps = []
-timings = {}
-qps = proxsuite.proxqp.dense.VectorQP()
-
-tic = perf_counter_ns()
-for j in range(num_qps):
-    qp = proxsuite.proxqp.dense.QP(n, n_eq, n_in)
-    H, g, A, b, C, u, l = generate_mixed_qp(n, n_eq, n_in, seed=j)
-    qp.init(H, g, A, b, C, l, u)
-    qp.settings.eps_abs = 1e-9
-    qp.settings.verbose = False
-    qp.settings.initial_guess = proxsuite.proxqp.InitialGuess.NO_INITIAL_GUESS
-    qps.append(qp)
-timings["problem_data"] = (perf_counter_ns() - tic) * 1e-6
-
-tic = perf_counter_ns()
-for qp in qps:
-    qp.solve()
-timings["solve_serial"] = (perf_counter_ns() - tic) * 1e-6
+for n, n_eq, n_in in problem_specs:

-num_threads = proxsuite.proxqp.omp_get_max_threads()
-for j in range(1, num_threads):
+    print(f"\nProblem specs: {n=} {n_eq=} {n_in=}. Generating {num_qps} such problems.")
+    problems = [generate_mixed_qp(n, n_eq, n_in, seed=j) for j in range(num_qps)]
+    print(
+        f"Generated problems. Solving {num_qps} problems with proxsuite.proxqp.omp_get_max_threads()={num_threads} threads."
+    )
+
+    timings = {}
+
+    # Create a vector of QP objects. This is not efficient: memory is allocated when each
+    # qp object is created and again when it is appended to the vector, which copies the object.
+    qps_vector = proxsuite.proxqp.dense.VectorQP()
    tic = perf_counter_ns()
-    proxsuite.proxqp.dense.solve_in_parallel(j, qps)
-    timings[f"solve_parallel_{j}_threads"] = (perf_counter_ns() - tic) * 1e-6
+    print("\nSetting up vector of qps")
+    for H, g, A, b, C, l, u in problems:
+        qp = proxsuite.proxqp.dense.QP(n, n_eq, n_in)
+        qp.init(H, g, A, b, C, l, u)
+        qp.settings.eps_abs = 1e-9
+        qp.settings.verbose = False
+        qp.settings.initial_guess = proxsuite.proxqp.InitialGuess.NO_INITIAL_GUESS
+        qps_vector.append(qp)
+    timings["setup_vector_of_qps"] = (perf_counter_ns() - tic) * 1e-6

+    # Use BatchQP, which can initialize the qp objects in place and is more efficient.
+    qps_batch = proxsuite.proxqp.dense.BatchQP()
+    tic = perf_counter_ns()
+    print("Setting up batch of qps")
+    for H, g, A, b, C, l, u in problems:
+        qp = qps_batch.init_qp_in_place(n, n_eq, n_in)
+        qp.init(H, g, A, b, C, l, u)
+        qp.settings.eps_abs = 1e-9
+        qp.settings.verbose = False
+        qp.settings.initial_guess = proxsuite.proxqp.InitialGuess.NO_INITIAL_GUESS
+    timings["setup_batch_of_qps"] = (perf_counter_ns() - tic) * 1e-6

-tic = perf_counter_ns()
-proxsuite.proxqp.dense.solve_in_parallel(qps=qps)
-timings[f"solve_parallel_heuristics_threads"] = (perf_counter_ns() - tic) * 1e-6
+    print("Solving batch of qps using solve_in_parallel with default thread config")
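+    # No explicit num_threads here: solve_in_parallel chooses the thread count itself
+    # (hence the 'heuristics' label in the timing key).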
+    tic = perf_counter_ns()
+    proxsuite.proxqp.dense.solve_in_parallel(qps=qps_batch)
+    timings["solve_in_parallel_heuristics_threads"] = (perf_counter_ns() - tic) * 1e-6
+
+    print("Solving vector of qps serially")
+    tic = perf_counter_ns()
+    for qp in qps_vector:
+        qp.solve()
+    timings["qp_solve_serial"] = (perf_counter_ns() - tic) * 1e-6
+
+    print("Solving batch of qps using solve_in_parallel with various thread configs")
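+    # range(1, num_threads, 2) sweeps thread counts 1, 3, 5, ... below num_threads.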
+    for j in range(1, num_threads, 2):
+        tic = perf_counter_ns()
+        proxsuite.proxqp.dense.solve_in_parallel(qps=qps_batch, num_threads=j)
+        timings[f"solve_in_parallel_{j}_threads"] = (perf_counter_ns() - tic) * 1e-6
+
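+    # Helper for interface (b): pass the raw problem data directly to the dense solve
+    # function. solve_no_gil is expected to release the GIL while solving, which is what
+    # allows the ThreadPoolExecutor below to run the solves concurrently.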
+    def solve_problem_with_dense_backend(
+        problem,
+    ):
+        H, g, A, b, C, l, u = problem
+        return proxsuite.proxqp.dense.solve_no_gil(
+            H,
+            g,
+            A,
+            b,
+            C,
+            l,
+            u,
+            initial_guess=proxsuite.proxqp.InitialGuess.NO_INITIAL_GUESS,
+            eps_abs=1e-9,
+        )
+
+    # Add timings for solve_in_parallel that also include the setup time of the batch of qps.
+    for k, v in list(timings.items()):
+        if "solve_in_parallel" in k:
+            k_init = k + "_and_setup_batch_of_qps"
+            timings[k_init] = timings["setup_batch_of_qps"] + v
+
+    print("Solving each problem serially with solve function.")
+    # Note: here we just pass the problem data to the solve function. This does not require running the init method separately.
+    tic = perf_counter_ns()
+    for problem in problems:
+        solve_problem_with_dense_backend(problem)
+    timings["solve_fun_serial"] = (perf_counter_ns() - tic) * 1e-6
+
+    print(
+        "Solving each problem in parallel (with a ThreadPoolExecutor) with solve function."
+    )
+    tic = perf_counter_ns()
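+    # list() forces executor.map to finish all solves before the timer is stopped.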
+    with ThreadPoolExecutor(max_workers=num_threads) as executor:
+        results = list(executor.map(solve_problem_with_dense_backend, problems))
+    timings["solve_fun_parallel"] = (perf_counter_ns() - tic) * 1e-6

-for k, v in timings.items():
-    print(f"{k}: {v} ms")
+    print("\nTimings:")
+    for k, v in timings.items():
+        print(f"{k}: {v:.3f} ms")