Skip to content

Commit cb3ac54

Browse files
Fix ProgressMeter usage and delay plot generation
- Fix ProgressMeter.update! to use 'desc' parameter instead of 'description'
- Remove make_plot parameter from autotune_setup
- Move plot generation from autotune_setup to plot(results) method
- Remove plot uploading from GitHub sharing (plots not shared anymore)
- Simplify AutotuneResults struct to only contain results_df and sysinfo
- Update documentation to reflect on-demand plot generation

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 62b9932 commit cb3ac54

File tree

4 files changed

+17
-87
lines changed

4 files changed

+17
-87
lines changed

lib/LinearSolveAutotune/README.md

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ share_results(results)
2626
- **Multi-size Testing**: Flexible size categories from small to very large matrices
2727
- **Element Type Support**: Tests with Float32, Float64, ComplexF32, ComplexF64
2828
- **GPU Support**: Automatically detects and benchmarks GPU algorithms if available
29-
- **Performance Visualization**: Creates plots showing algorithm performance
29+
- **Performance Visualization**: Generate plots on demand with `plot(results)`
3030
- **Community Sharing**: Optional telemetry to help improve algorithm selection
3131

3232
## Size Categories
@@ -135,7 +135,6 @@ If you prefer using a token:
135135
```julia
136136
autotune_setup(;
137137
sizes = [:small, :medium, :large],
138-
make_plot = true,
139138
set_preferences = true,
140139
samples = 5,
141140
seconds = 0.5,
@@ -146,15 +145,14 @@ autotune_setup(;
146145

147146
**Parameters:**
148147
- `sizes`: Vector of size categories to test
149-
- `make_plot`: Generate performance plots
150148
- `set_preferences`: Update LinearSolve preferences
151149
- `samples`: Number of benchmark samples per test
152150
- `seconds`: Maximum time per benchmark
153151
- `eltypes`: Element types to benchmark
154152
- `skip_missing_algs`: Continue if algorithms are missing
155153

156154
**Returns:**
157-
- `results`: AutotuneResults object containing benchmark data, system info, and plots
155+
- `results`: AutotuneResults object containing benchmark data and system info
158156

159157
### `share_results`
160158

lib/LinearSolveAutotune/src/LinearSolveAutotune.jl

Lines changed: 10 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,6 @@ include("preferences.jl")
3838
struct AutotuneResults
3939
results_df::DataFrame
4040
sysinfo::Dict
41-
plots::Union{Nothing, Dict}
4241
end
4342

4443
# Display method for AutotuneResults
@@ -91,14 +90,17 @@ end
9190

9291
# Plot method for AutotuneResults
9392
function Plots.plot(results::AutotuneResults; kwargs...)
94-
if results.plots === nothing || isempty(results.plots)
95-
@warn "No plots available in results. Run autotune_setup with make_plot=true"
93+
# Generate plots from the results data
94+
plots_dict = create_benchmark_plots(results.results_df)
95+
96+
if plots_dict === nothing || isempty(plots_dict)
97+
@warn "No data available for plotting"
9698
return nothing
9799
end
98100

99101
# Create a composite plot from all element type plots
100102
plot_list = []
101-
for (eltype_name, p) in results.plots
103+
for (eltype_name, p) in plots_dict
102104
push!(plot_list, p)
103105
end
104106

@@ -121,7 +123,6 @@ end
121123
"""
122124
autotune_setup(;
123125
sizes = [:small, :medium, :large],
124-
make_plot::Bool = true,
125126
set_preferences::Bool = true,
126127
samples::Int = 5,
127128
seconds::Float64 = 0.5,
@@ -138,7 +139,6 @@ Run a comprehensive benchmark of all available LU factorization methods and opti
138139
# Arguments
139140
140141
- `sizes = [:small, :medium, :large]`: Size categories to test. Options: :small (5-20), :medium (20-300), :large (300-1000), :big (10000-100000)
141-
- `make_plot::Bool = true`: Generate performance plots for each element type
142142
- `set_preferences::Bool = true`: Update LinearSolve preferences with optimal algorithms
143143
- `samples::Int = 5`: Number of benchmark samples per algorithm/size
144144
- `seconds::Float64 = 0.5`: Maximum time per benchmark
@@ -170,14 +170,13 @@ share_results(results)
170170
"""
171171
function autotune_setup(;
172172
sizes = [:small, :medium, :large],
173-
make_plot::Bool = true,
174173
set_preferences::Bool = true,
175174
samples::Int = 5,
176175
seconds::Float64 = 0.5,
177176
eltypes = (Float32, Float64, ComplexF32, ComplexF64),
178177
skip_missing_algs::Bool = false)
179178
@info "Starting LinearSolve.jl autotune setup..."
180-
@info "Configuration: sizes=$sizes, make_plot=$make_plot, set_preferences=$set_preferences"
179+
@info "Configuration: sizes=$sizes, set_preferences=$set_preferences"
181180
@info "Element types to benchmark: $(join(eltypes, ", "))"
182181

183182
# Get system information
@@ -243,23 +242,12 @@ function autotune_setup(;
243242
set_algorithm_preferences(categories)
244243
end
245244

246-
# Create plots if requested
247-
plots_dict = nothing
248-
plot_files = nothing
249-
if make_plot
250-
@info "Creating performance plots..."
251-
plots_dict = create_benchmark_plots(results_df)
252-
if !isempty(plots_dict)
253-
plot_files = save_benchmark_plots(plots_dict)
254-
end
255-
end
256-
257245
@info "Autotune setup completed!"
258246

259247
sysinfo = get_detailed_system_info()
260248

261249
# Return AutotuneResults object
262-
return AutotuneResults(results_df, sysinfo, plots_dict)
250+
return AutotuneResults(results_df, sysinfo)
263251
end
264252

265253
"""
@@ -305,7 +293,6 @@ function share_results(results::AutotuneResults)
305293
# Extract from AutotuneResults
306294
results_df = results.results_df
307295
sysinfo = results.sysinfo
308-
plots_dict = results.plots
309296

310297
# Get system info
311298
system_info = sysinfo
@@ -342,14 +329,8 @@ function share_results(results::AutotuneResults)
342329
# Format results
343330
markdown_content = format_results_for_github(results_df, system_info, categories)
344331

345-
# Process plots if available
346-
plot_files = nothing
347-
if plots_dict !== nothing && !isempty(plots_dict)
348-
plot_files = save_benchmark_plots(plots_dict)
349-
end
350-
351-
# Upload to GitHub
352-
upload_to_github(markdown_content, plot_files, github_auth, results_df, system_info, categories)
332+
# Upload to GitHub (without plots)
333+
upload_to_github(markdown_content, nothing, github_auth, results_df, system_info, categories)
353334

354335
@info "✅ Thank you for contributing to the LinearSolve.jl community!"
355336
end

lib/LinearSolveAutotune/src/benchmarking.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ function benchmark_algorithms(matrix_sizes, algorithms, alg_names, eltypes;
120120
for (alg, name) in zip(compatible_algs, compatible_names)
121121
# Update progress description
122122
ProgressMeter.update!(progress,
123-
description="Benchmarking $name on $(n)×$(n) $eltype matrix: ")
123+
desc="Benchmarking $name on $(n)×$(n) $eltype matrix: ")
124124

125125
gflops = 0.0
126126
success = true

lib/LinearSolveAutotune/src/telemetry.jl

Lines changed: 4 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -245,12 +245,13 @@ function format_detailed_results_markdown(df::DataFrame)
245245
end
246246

247247
"""
248-
upload_to_github(content::String, plot_files::Union{Nothing, Tuple, Dict}, auth_info::Tuple,
248+
upload_to_github(content::String, plot_files, auth_info::Tuple,
249249
results_df::DataFrame, system_info::Dict, categories::Dict)
250250
251251
Create a GitHub issue with benchmark results for community data collection.
252+
Note: plot_files parameter is kept for compatibility but not used.
252253
"""
253-
function upload_to_github(content::String, plot_files::Union{Nothing, Tuple, Dict}, auth_info::Tuple,
254+
function upload_to_github(content::String, plot_files, auth_info::Tuple,
254255
results_df::DataFrame, system_info::Dict, categories::Dict)
255256

256257
auth_method, auth_data = auth_info
@@ -272,61 +273,14 @@ function upload_to_github(content::String, plot_files::Union{Nothing, Tuple, Dic
272273
target_repo = "SciML/LinearSolve.jl"
273274
issue_number = 669 # The existing issue for collecting autotune results
274275

275-
# First, upload plots to a gist if available
276-
gist_url = nothing
277-
raw_urls = Dict{String, String}()
278-
plot_links = ""
279-
280-
if plot_files !== nothing
281-
@info "📊 Uploading plots to GitHub Gist..."
282-
283-
# Get element type for labeling
284-
eltype_str = if !isempty(results_df)
285-
unique_eltypes = unique(results_df.eltype)
286-
join(unique_eltypes, ", ")
287-
else
288-
"Mixed"
289-
end
290-
291-
if auth_method == :gh_cli
292-
gist_url, raw_urls = upload_plots_to_gist_gh(plot_files, eltype_str)
293-
elseif auth_method == :token
294-
gist_url, raw_urls = upload_plots_to_gist(plot_files, auth_data, eltype_str)
295-
end
296-
297-
if gist_url !== nothing
298-
# Add plot links section to the content
299-
plot_links = """
300-
301-
### 📊 Benchmark Plots
302-
303-
View all plots in the gist: [Benchmark Plots Gist]($gist_url)
304-
305-
"""
306-
307-
# Embed PNG images directly in the markdown if we have raw URLs
308-
for (name, url) in raw_urls
309-
if endswith(name, ".png")
310-
plot_links *= """
311-
#### $name
312-
![$(name)]($url)
313-
314-
"""
315-
end
316-
end
317-
318-
plot_links *= "---\n"
319-
end
320-
end
321-
322276
# Construct comment body
323277
cpu_name = get(system_info, "cpu_name", "unknown")
324278
os_name = get(system_info, "os", "unknown")
325279
timestamp = Dates.format(Dates.now(), "yyyy-mm-dd HH:MM")
326280

327281
comment_body = """
328282
## Benchmark Results: $cpu_name on $os_name ($timestamp)
329-
$plot_links
283+
330284
$content
331285
332286
---
@@ -350,9 +304,6 @@ function upload_to_github(content::String, plot_files::Union{Nothing, Tuple, Dic
350304

351305
if issue_url !== nothing
352306
@info "✅ Successfully added benchmark results to issue: $issue_url"
353-
if gist_url !== nothing
354-
@info "📊 Plots available at: $gist_url"
355-
end
356307
@info "🔗 Your benchmark data has been shared with the LinearSolve.jl community!"
357308
@info "💡 View all community benchmark data: https://github.com/SciML/LinearSolve.jl/issues/669"
358309
else

0 commit comments

Comments
 (0)