@@ -11,6 +11,7 @@ using LinearAlgebra
11
11
using Printf
12
12
using Dates
13
13
using Base64
14
+ using ProgressMeter
14
15
15
16
# Hard dependency to ensure RFLUFactorization and other solvers are available
16
17
using RecursiveFactorization
@@ -24,7 +25,7 @@ using Metal
24
25
using GitHub
25
26
using Plots
26
27
27
- export autotune_setup, share_results
28
+ export autotune_setup, share_results, AutotuneResults
28
29
29
30
include (" algorithms.jl" )
30
31
include (" gpu_detection.jl" )
@@ -33,9 +34,93 @@ include("plotting.jl")
33
34
include (" telemetry.jl" )
34
35
include (" preferences.jl" )
35
36
37
"""
    AutotuneResults

Container for the output of `autotune_setup`.

Fields:
- `results_df`: benchmark results table (one row per algorithm/size/eltype run).
- `sysinfo`: dictionary describing the benchmark environment (CPU, OS, …).
- `plots`: per-element-type plots, or `nothing` when plotting was disabled.
"""
struct AutotuneResults
    results_df::DataFrame
    sysinfo::Dict
    plots::Union{Nothing, Dict}
end
43
+
44
# Multi-line summary printer for AutotuneResults: environment details, the
# best-performing algorithms, tested element types and sizes, plus a pointer
# to the community results issue.
function Base.show(io::IO, results::AutotuneResults)
    bar = "="^60
    println(io, bar)
    println(io, "LinearSolve.jl Autotune Results")
    println(io, bar)

    # Environment the benchmark ran on; "Unknown" when a key is absent.
    println(io, "\n📊 System Information:")
    for (label, key) in (("CPU", "cpu_name"), ("OS", "os"),
        ("Julia", "julia_version"), ("Threads", "num_threads"))
        println(io, "  • ", label, ": ", get(results.sysinfo, key, "Unknown"))
    end

    # Rank algorithms by mean GFLOPs over the runs that succeeded.
    successful_results = filter(row -> row.success, results.results_df)
    if nrow(successful_results) > 0
        println(io, "\n🏆 Top Performing Algorithms:")
        summary = combine(groupby(successful_results, :algorithm),
            :gflops => mean => :avg_gflops,
            :gflops => maximum => :max_gflops,
            nrow => :num_tests)
        sort!(summary, :avg_gflops, rev = true)

        # Print at most the five fastest algorithms.
        for (rank, row) in enumerate(eachrow(first(summary, 5)))
            println(io, "  ", rank, ". ", row.algorithm, ": ",
                @sprintf("%.2f GFLOPs avg", row.avg_gflops))
        end
    end

    # Element types covered by the benchmark runs.
    eltypes = unique(results.results_df.eltype)
    println(io, "\n🔬 Element Types Tested: ", join(eltypes, ", "))

    # Range of square-matrix sizes that were benchmarked.
    sizes = unique(results.results_df.size)
    println(io, "📏 Matrix Sizes: ", minimum(sizes), "×", minimum(sizes),
        " to ", maximum(sizes), "×", maximum(sizes))

    # Tell the user how to contribute their numbers upstream.
    println(io, "\n" * bar)
    println(io, "💡 To share your results with the community, run:")
    println(io, "   share_results(results)")
    println(io, "\n📈 See community results at:")
    println(io, "   https://github.com/SciML/LinearSolve.jl/issues/669")
    println(io, bar)
end
91
+
92
"""
    Plots.plot(results::AutotuneResults; kwargs...)

Compose the per-element-type benchmark plots stored in `results` into a
single figure, choosing a grid layout based on how many plots exist.
Keyword arguments are forwarded to `Plots.plot`. Returns `nothing` (with a
warning) when `results.plots` is empty or was never generated.
"""
function Plots.plot(results::AutotuneResults; kwargs...)
    if results.plots === nothing || isempty(results.plots)
        @warn "No plots available in results. Run autotune_setup with make_plot=true"
        return nothing
    end

    # Gather the individual plots (one per element type). collect(values(...))
    # replaces the previous untyped `[]` + push! loop, so the vector gets a
    # concrete element type.
    plot_list = collect(values(results.plots))
    n_plots = length(plot_list)

    # Pick a layout that scales with the number of subplots.
    if n_plots == 1
        # Bug fix: kwargs were previously dropped in the single-plot case;
        # forward them so e.g. `plot(results; title = "...")` takes effect.
        return plot(plot_list[1]; kwargs...)
    elseif n_plots == 2
        return plot(plot_list...; layout = (1, 2), size = (1200, 500), kwargs...)
    elseif n_plots <= 4
        return plot(plot_list...; layout = (2, 2), size = (1200, 900), kwargs...)
    else
        # Near-square grid for five or more plots.
        ncols = ceil(Int, sqrt(n_plots))
        nrows = ceil(Int, n_plots / ncols)
        return plot(plot_list...; layout = (nrows, ncols),
            size = (400 * ncols, 400 * nrows), kwargs...)
    end
end
120
+
36
121
"""
37
122
autotune_setup(;
38
- sizes = [:small, :medium],
123
+ sizes = [:small, :medium, :large ],
39
124
make_plot::Bool = true,
40
125
set_preferences::Bool = true,
41
126
samples::Int = 5,
@@ -52,7 +137,7 @@ Run a comprehensive benchmark of all available LU factorization methods and opti
52
137
53
138
# Arguments
54
139
55
- - `sizes = [:small, :medium]`: Size categories to test. Options: :small (5-20), :medium (20-100 ), :large (100 -1000), :big (10000-100000)
140
+ - `sizes = [:small, :medium, :large]`: Size categories to test. Options: :small (5-20), :medium (20-300), :large (300-1000), :big (10000-100000)
56
141
- `make_plot::Bool = true`: Generate performance plots for each element type
57
142
- `set_preferences::Bool = true`: Update LinearSolve preferences with optimal algorithms
58
143
- `samples::Int = 5`: Number of benchmark samples per algorithm/size
@@ -62,31 +147,29 @@ Run a comprehensive benchmark of all available LU factorization methods and opti
62
147
63
148
# Returns
64
149
65
- - `DataFrame`: Detailed benchmark results with performance data for all element types
66
- - `Dict`: System information about the benchmark environment
67
- - `Dict` or `Plot`: Performance visualizations by element type (if `make_plot=true`)
150
+ - `AutotuneResults`: Object containing benchmark results, system info, and plots
68
151
69
152
# Examples
70
153
71
154
```julia
72
155
using LinearSolve
73
156
using LinearSolveAutotune
74
157
75
- # Basic autotune with small and medium sizes
76
- results, sysinfo, plots = autotune_setup()
158
+ # Basic autotune with default sizes
159
+ results = autotune_setup()
77
160
78
161
# Test all size ranges
79
- results, sysinfo, plots = autotune_setup(sizes = [:small, :medium, :large, :big])
162
+ results = autotune_setup(sizes = [:small, :medium, :large, :big])
80
163
81
164
# Large matrices only
82
- results, sysinfo, plots = autotune_setup(sizes = [:large, :big], samples = 10, seconds = 1.0)
165
+ results = autotune_setup(sizes = [:large, :big], samples = 10, seconds = 1.0)
83
166
84
167
# After running autotune, share results (requires gh CLI or GitHub token)
85
- share_results(results, sysinfo, plots )
168
+ share_results(results)
86
169
```
87
170
"""
88
171
function autotune_setup (;
89
- sizes = [:small , :medium ],
172
+ sizes = [:small , :medium , :large ],
90
173
make_plot:: Bool = true ,
91
174
set_preferences:: Bool = true ,
92
175
samples:: Int = 5 ,
@@ -175,18 +258,12 @@ function autotune_setup(;
175
258
176
259
sysinfo = get_detailed_system_info ()
177
260
178
- @info " To share your results with the community, run: share_results(results_df, sysinfo, plots_dict)"
179
-
180
- # Return results and plots
181
- if make_plot && plots_dict != = nothing && ! isempty (plots_dict)
182
- return results_df, sysinfo, plots_dict
183
- else
184
- return results_df, sysinfo, nothing
185
- end
261
+ # Return AutotuneResults object
262
+ return AutotuneResults (results_df, sysinfo, plots_dict)
186
263
end
187
264
188
265
"""
189
- share_results(results_df::DataFrame, sysinfo::Dict, plots_dict=nothing )
266
+ share_results(results::AutotuneResults )
190
267
191
268
Share your benchmark results with the LinearSolve.jl community to help improve
192
269
automatic algorithm selection across different hardware configurations.
@@ -211,28 +288,27 @@ your results as a comment to the community benchmark collection issue.
211
288
6. Run this function
212
289
213
290
# Arguments
214
- - `results_df`: Benchmark results DataFrame from autotune_setup
215
- - `sysinfo`: System information Dict from autotune_setup
216
- - `plots_dict`: Optional plots dictionary from autotune_setup
291
+ - `results`: AutotuneResults object from autotune_setup
217
292
218
293
# Examples
219
294
```julia
220
295
# Run benchmarks
221
- results, sysinfo, plots = autotune_setup()
296
+ results = autotune_setup()
222
297
223
298
# Share results with the community
224
- share_results(results, sysinfo, plots )
299
+ share_results(results)
225
300
```
226
301
"""
227
- function share_results (results_df :: DataFrame , sysinfo :: Dict , plots_dict = nothing )
302
+ function share_results (results :: AutotuneResults )
228
303
@info " 📤 Preparing to share benchmark results with the community..."
229
304
230
- # Get system info if not provided
231
- system_info = if haskey (sysinfo, " os" )
232
- sysinfo
233
- else
234
- get_system_info ()
235
- end
305
+ # Extract from AutotuneResults
306
+ results_df = results. results_df
307
+ sysinfo = results. sysinfo
308
+ plots_dict = results. plots
309
+
310
+ # Get system info
311
+ system_info = sysinfo
236
312
237
313
# Categorize results
238
314
categories = categorize_results (results_df)
0 commit comments