Skip to content

Commit ffc46a7

Browse files
committed
first draft for stdlib_sparse paper
1 parent efb26ca commit ffc46a7

File tree

3 files changed

+258
-0
lines changed

3 files changed

+258
-0
lines changed

.github/workflows/draft-pdf.yml

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
# GitHub Actions workflow: compile a draft PDF of the JOSS paper on every push.
name: Draft PDF
on: [push]

jobs:
  paper:
    runs-on: ubuntu-latest
    name: Paper Draft
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Build draft PDF
        uses: openjournals/openjournals-draft-action@master
        with:
          journal: joss
          # This should be the path to the paper within your repo.
          paper-path: joss/paper_sparse.md
      - name: Upload
        uses: actions/upload-artifact@v4
        with:
          name: paper_sparse
          # This is the output path where Pandoc will write the compiled
          # PDF. Note, this should be the same directory as the input
          # paper.md
          path: joss/paper_sparse.pdf

joss/paper_sparse.bib

Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,90 @@
1+
@book{saad2003iterative,
2+
title={Iterative methods for sparse linear systems},
3+
author={Saad, Yousef},
4+
year={2003},
5+
publisher={SIAM}
6+
}
7+
@article{MUMPS:1,
8+
title = {A Fully Asynchronous Multifrontal Solver Using Distributed Dynamic Scheduling},
9+
author = {P.R. Amestoy and I. S. Duff and J. Koster and J.-Y. L'Excellent},
10+
journal = {SIAM Journal on Matrix Analysis and Applications},
11+
volume = {23},
12+
number = {1},
13+
year = {2001},
14+
pages = {15-41}
15+
}
16+
@article{MUMPS:2,
17+
title = {{Performance and Scalability of the Block Low-Rank Multifrontal
18+
Factorization on Multicore Architectures}},
19+
author = {P.R. Amestoy and A. Buttari and J.-Y. L'Excellent and T. Mary},
20+
journal = {ACM Transactions on Mathematical Software},
21+
volume = 45,
22+
  number = {1},
23+
pages = {2:1--2:26},
24+
year={2019},
25+
}
26+
27+
@Misc{petsc-web-page,
28+
author = {Satish Balay and Shrirang Abhyankar and Mark~F. Adams and Jed Brown and Peter Brune
29+
and Kris Buschelman and Lisandro Dalcin and Victor Eijkhout and William~D. Gropp
30+
and Dinesh Kaushik and Matthew~G. Knepley
31+
and Lois Curfman McInnes and Karl Rupp and Barry~F. Smith
32+
and Stefano Zampini and Hong Zhang},
33+
title = {{PETS}c {W}eb page},
34+
url = {http://www.mcs.anl.gov/petsc},
35+
howpublished = {\url{http://www.mcs.anl.gov/petsc}},
36+
year = {2015}
37+
}
38+
39+
@techreport{bell2008efficient,
40+
title={Efficient sparse matrix-vector multiplication on CUDA},
41+
author={Bell, Nathan and Garland, Michael},
42+
year={2008},
43+
institution={Nvidia Technical Report NVR-2008-004, Nvidia Corporation}
44+
}
45+
@book{magoules2017calcul,
46+
title={Calcul scientifique parall{\`e}le-2e {\'e}d.: Cours, exemples avec openMP et MPI, exercices corrig{\'e}s},
47+
author={Magoul{\`e}s, Fr{\'e}d{\'e}ric and Roux, Fran{\c{c}}ois-Xavier},
48+
year={2017},
49+
publisher={Dunod}
50+
}
51+
@article{anzt2014implementing,
52+
title={Implementing a Sparse Matrix Vector Product for the SELL-C/SELL-C-$\sigma$ formats on NVIDIA GPUs},
53+
author={Anzt, Hartwig and Tomov, Stanimire and Dongarra, Jack},
54+
journal={University of Tennessee, Tech. Rep. ut-eecs-14-727},
55+
year={2014}
56+
}
57+
@software{fsparse2024,
58+
author = {José R. Alves Z. and
59+
Samuele Giuli},
60+
title = {FSPARSE: Fortran Sparse Gallery library},
61+
month = may,
62+
year = 2024,
63+
publisher = {Zenodo},
64+
version = {v0.1.1},
65+
doi = {10.5281/zenodo.11175000},
66+
url = {https://doi.org/10.5281/zenodo.11175000},
67+
}
68+
@ARTICLE{2020SciPy-NMeth,
69+
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
70+
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
71+
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
72+
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
73+
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
74+
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
75+
Kern, Robert and Larson, Eric and Carey, C J and
76+
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
77+
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
78+
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
79+
Harris, Charles R. and Archibald, Anne M. and
80+
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
81+
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
82+
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
83+
Computing in Python}},
84+
journal = {Nature Methods},
85+
year = {2020},
86+
volume = {17},
87+
pages = {261--272},
88+
adsurl = {https://rdcu.be/b08Wh},
89+
doi = {10.1038/s41592-019-0686-2},
90+
}

joss/paper_sparse.md

Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
---
2+
title: 'stdlib sparse: A high level Fortran sparse matrix library'
3+
tags:
4+
- Fortran
5+
- linear algebra
6+
- sparse matrix
7+
authors:
8+
- name: José R. Alves Z.
9+
corresponding: true
10+
orcid: 0000-0001-9448-0145
11+
affiliation: 1
12+
- name: Ivan Pribec
13+
affiliation: 2
14+
- name: Jeremie Vandenplas
15+
affiliation: 3
16+
orcid: 0000-0002-2554-072X
17+
- name: Federico Perini
18+
affiliation: 4
19+
affiliations:
20+
- name: Transvalor S.A., France
21+
index: 1
22+
 - name: Leibniz Supercomputing Centre, Germany
23+
index: 2
24+
- name: Wageningen University, The Netherlands
25+
index: 3
26+
- name: Wisconsin Engine Research Consultants, USA
27+
index: 4
28+
date: 17 September 2025
29+
bibliography: paper_sparse.bib
30+
---
31+
32+
# Summary
33+
34+
Sparse matrices are a core building block in scientific computing, particularly in fields such as computational physics, engineering, and graph analysis. Despite Fortran’s long tradition in numerical computing, its ecosystem lacks a canonical, modern, high-level library for sparse data structures.
35+
36+
We present stdlib_sparse, a sparse matrix library implemented in modern Fortran as part of the (community) Fortran Standard Library (stdlib). It provides well-defined sparse storage formats, conversion routines, and core operations such as sparse matrix–vector multiplication. This library aims to improve reproducibility, interoperability, and performance across Fortran applications by offering standardized, extensible data structures.
37+
38+
# Statement of need
39+
40+
Many scientific applications require sparse linear algebra routines for efficient storage and computation with large, structured matrices. Fortran users have traditionally relied on external libraries (e.g. SPARSKIT, MUMPS, SuiteSparse) or custom implementations. This fragmentation leads to challenges in portability, maintainability, and discoverability.
41+
42+
The stdlib_sparse library addresses this gap by offering:
43+
44+
* A consistent set of sparse matrix formats (COO, CSR, CSC, ELLPACK, SELL-C).
45+
* Format conversion routines to enable interoperability.
46+
* Standardized operations such as sparse matrix–vector multiplication (SpMV).
47+
* A unified API, following modern Fortran practices, as part of the official Fortran stdlib project.
48+
49+
By integrating directly into stdlib, stdlib_sparse lowers the barrier for Fortran developers to adopt sparse methods, reduces code duplication, and promotes best practices for numerical software development.
50+
51+
# Related work
52+
53+
Several sparse libraries exist in Fortran and other languages:
54+
55+
* SPARSKIT (Fortran 77, Saad 1994) — influential, but outdated syntax and limited interoperability [@saad2003iterative].
56+
* MUMPS [@MUMPS:1] and PETSc [@petsc-web-page] — high-performance solvers written in Fortran/C with broad functionality, but heavy dependencies and steeper learning curve.
57+
* SciPy.sparse (Python) [@2020SciPy-NMeth] and Eigen (C++) — modern high-level APIs in other ecosystems, demonstrating the value of standardized interfaces.
58+
59+
Compared to these, stdlib_sparse focuses on providing a lightweight, modern Fortran interface integrated into the stdlib, emphasizing portability and extensibility rather than complete solver functionality.
60+
61+
# Design and implementation
62+
## Data structures
63+
64+
All sparse formats extend an abstract base type sparse_type, which holds metadata such as number of rows, columns, and nonzeros. Implementations include:
65+
66+
* COO: coordinate (triplet) format.
67+
* CSR: compressed sparse row (Yale) format.
68+
* CSC: compressed sparse column format.
69+
* ELLPACK: fixed number of nonzeros per row, suited for vectorization.
70+
* SELL-C: sliced ELLPACK, balancing CSR and ELLPACK trade-offs [@anzt2014implementing].
71+
72+
## Core functionality
73+
74+
* Construction: from triplet (`i,j,v`) arrays or dense matrices.
75+
* Data accessors: `add` (insert/update entries) and `at` (element access, with managed zero/NaN handling for missing entries).
76+
* Operations: sparse matrix–vector multiplication (`spmv`), with optional transpose and hermitian variants:
77+
$$ y = \alpha \, \mathrm{op}(A)\, x + \beta \, y $$
78+
* Conversions: between sparse formats and dense matrices.
79+
* Utilities: diagonal extraction, symmetry flags, duplicate entry handling.
80+
81+
## Implementation details
82+
83+
Before introducing stdlib_sparse, the core structure and API were crafted under a stand-alone project, FSPARSE [@fsparse2024]. This enabled testing and refinement of the library before integration into stdlib.
84+
85+
The module is designed with the following key features:
86+
87+
* Generic procedures support both real and complex kinds.
88+
* Memory is allocated dynamically with type-bound malloc routines.
89+
* Conversions handle duplicate entries, with options for summation or overwriting.
90+
91+
# Example usage
92+
93+
```fortran
94+
program main
    !! Example from the stdlib_sparse paper: convert a random dense
    !! matrix to COO and CSR storage, then verify that spmv for both
    !! sparse formats matches a dense matmul reference for
    !! y = alpha*A*x + beta*y.
    use stdlib_linalg_constants, only: dp
    use stdlib_sparse
    implicit none

    integer, parameter :: nrows = 4, ncols = 2
    real(dp) :: dense(nrows, ncols), x(ncols)
    real(dp) :: y_dense(nrows), y_coo(nrows), y_csr(nrows)
    real(dp) :: alpha, beta
    type(COO_dp_type) :: coo_mat
    type(CSR_dp_type) :: csr_mat

    call random_number(dense)

    ! Build the sparse representations: dense -> COO -> CSR.
    call dense2coo(dense, coo_mat)
    call coo2csr(coo_mat, csr_mat)

    ! Same starting vectors for all three products.
    x = 1._dp
    y_dense = 2._dp
    y_coo = y_dense
    y_csr = y_dense

    ! Compute y = alpha*A*x + beta*y three ways.
    alpha = 3._dp
    beta = 2._dp
    y_dense = alpha * matmul(dense, x) + beta * y_dense
    call spmv(coo_mat, x, y_coo, alpha=alpha, beta=beta)
    call spmv(csr_mat, x, y_csr, alpha=alpha, beta=beta)

    print *, 'dense :', y_dense
    print *, 'coo :', y_coo
    print *, 'csr :', y_csr

end program main
128+
```
129+
130+
# Performance and limitations
131+
132+
Sparse matrix–vector multiplication has been implemented for all formats. Preliminary tests confirm correctness and scalability to moderately large problems. However:
133+
134+
* No sparse matrix–matrix multiplication or factorizations are yet implemented.
135+
* Parallelism (multi-thread or multi-process) and GPU acceleration are not currently supported.
136+
* Interfaces are subject to change while the module remains experimental.
137+
138+
Future work will address these limitations by adding additional kernels, improving performance portability, and expanding supported formats.
139+
140+
# Acknowledgements
141+
142+
This work is part of the Fortran-lang community project. We thank the contributors to stdlib and the Fortran community for discussions, reviews, and development efforts.
143+
144+
# References

0 commit comments

Comments
 (0)