Skip to content

Commit 5e601b9

Browse files
committed
rust: folio: introduce basic support for folios
Allow Rust file systems to handle ref-counted folios. Provide the minimum needed to implement `read_folio` (part of `struct address_space_operations`) in read-only file systems and to read uncached blocks. Signed-off-by: Wedson Almeida Filho <[email protected]>
1 parent 14b32d0 commit 5e601b9

File tree

5 files changed

+302
-0
lines changed

5 files changed

+302
-0
lines changed

rust/bindings/bindings_helper.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include <linux/fs.h>
1212
#include <linux/fs_context.h>
1313
#include <linux/slab.h>
14+
#include <linux/pagemap.h>
1415
#include <linux/refcount.h>
1516
#include <linux/wait.h>
1617
#include <linux/sched.h>
@@ -27,3 +28,5 @@ const slab_flags_t BINDINGS_SLAB_ACCOUNT = SLAB_ACCOUNT;
2728
const unsigned long BINDINGS_SB_RDONLY = SB_RDONLY;
2829

2930
const loff_t BINDINGS_MAX_LFS_FILESIZE = MAX_LFS_FILESIZE;
31+
32+
const size_t BINDINGS_PAGE_SIZE = PAGE_SIZE;

rust/bindings/lib.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,3 +59,5 @@ pub const SLAB_ACCOUNT: slab_flags_t = BINDINGS_SLAB_ACCOUNT;
5959
pub const SB_RDONLY: core::ffi::c_ulong = BINDINGS_SB_RDONLY;
6060

6161
pub const MAX_LFS_FILESIZE: loff_t = BINDINGS_MAX_LFS_FILESIZE;
62+
63+
pub const PAGE_SIZE: usize = BINDINGS_PAGE_SIZE;

rust/helpers.c

Lines changed: 81 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,14 @@
2323
#include <kunit/test-bug.h>
2424
#include <linux/bug.h>
2525
#include <linux/build_bug.h>
26+
#include <linux/cacheflush.h>
2627
#include <linux/err.h>
2728
#include <linux/errname.h>
2829
#include <linux/fs.h>
30+
#include <linux/highmem.h>
31+
#include <linux/mm.h>
2932
#include <linux/mutex.h>
33+
#include <linux/pagemap.h>
3034
#include <linux/refcount.h>
3135
#include <linux/sched/signal.h>
3236
#include <linux/spinlock.h>
@@ -145,6 +149,77 @@ struct kunit *rust_helper_kunit_get_current_test(void)
145149
}
146150
EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
147151

152+
/*
 * kmap/kunmap wrappers callable from Rust.
 *
 * These helpers give bindgen a real symbol to bind to; the underlying C
 * interfaces may be macros or inline functions on some configurations.
 */
void *rust_helper_kmap(struct page *page)
{
	return kmap(page);
}
EXPORT_SYMBOL_GPL(rust_helper_kmap);

void rust_helper_kunmap(struct page *page)
{
	kunmap(page);
}
EXPORT_SYMBOL_GPL(rust_helper_kunmap);
163+
164+
/* Takes a reference on @folio (wrapper for folio_get()). */
void rust_helper_folio_get(struct folio *folio)
{
	folio_get(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_get);

/* Releases a reference on @folio (wrapper for folio_put()). */
void rust_helper_folio_put(struct folio *folio)
{
	folio_put(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_put);
175+
176+
struct page *rust_helper_folio_page(struct folio *folio, size_t n)
177+
{
178+
return folio_page(folio, n);
179+
}
180+
181+
loff_t rust_helper_folio_pos(struct folio *folio)
182+
{
183+
return folio_pos(folio);
184+
}
185+
EXPORT_SYMBOL_GPL(rust_helper_folio_pos);
186+
187+
/* Returns the size of @folio in bytes (wrapper for folio_size()). */
size_t rust_helper_folio_size(struct folio *folio)
{
	return folio_size(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_size);

/* Marks @folio as up to date (wrapper for folio_mark_uptodate()). */
void rust_helper_folio_mark_uptodate(struct folio *folio)
{
	folio_mark_uptodate(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_mark_uptodate);

/* Sets the error flag on @folio (wrapper for folio_set_error()). */
void rust_helper_folio_set_error(struct folio *folio)
{
	folio_set_error(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_set_error);

/* Flushes the data cache for the pages backing @folio. */
void rust_helper_flush_dcache_folio(struct folio *folio)
{
	flush_dcache_folio(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_flush_dcache_folio);

/*
 * Maps the page of @folio containing byte @offset; the mapping must be
 * released with rust_helper_kunmap_local() / kunmap_local().
 */
void *rust_helper_kmap_local_folio(struct folio *folio, size_t offset)
{
	return kmap_local_folio(folio, offset);
}
EXPORT_SYMBOL_GPL(rust_helper_kmap_local_folio);

/* Unmaps an address previously returned by a kmap_local_* helper. */
void rust_helper_kunmap_local(const void *vaddr)
{
	kunmap_local(vaddr);
}
EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
222+
148223
void rust_helper_i_uid_write(struct inode *inode, uid_t uid)
149224
{
150225
i_uid_write(inode, uid);
@@ -163,6 +238,12 @@ off_t rust_helper_i_size_read(const struct inode *inode)
163238
}
164239
EXPORT_SYMBOL_GPL(rust_helper_i_size_read);
165240

241+
/* Enables large-folio support on @mapping (wrapper for mapping_set_large_folios()). */
void rust_helper_mapping_set_large_folios(struct address_space *mapping)
{
	mapping_set_large_folios(mapping);
}
EXPORT_SYMBOL_GPL(rust_helper_mapping_set_large_folios);
246+
166247
/*
167248
* `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
168249
* use it in contexts where Rust expects a `usize` like slice (array) indices.

rust/kernel/folio.rs

Lines changed: 215 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,215 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
3+
//! Groups of contiguous pages, folios.
4+
//!
5+
//! C headers: [`include/linux/mm.h`](../../include/linux/mm.h)
6+
7+
use crate::error::{code::*, Result};
8+
use crate::types::{ARef, AlwaysRefCounted, Opaque, ScopeGuard};
9+
use core::{cmp::min, ptr};
10+
11+
/// Wraps the kernel's `struct folio`.
///
/// A folio is a group of contiguous pages; see the module docs. This is a
/// `#[repr(transparent)]` wrapper, so a `*mut bindings::folio` may be cast to
/// a `*mut Folio` (and vice versa).
///
/// # Invariants
///
/// Instances of this type are always ref-counted, that is, a call to `folio_get` ensures that the
/// allocation remains valid at least until the matching call to `folio_put`.
#[repr(transparent)]
pub struct Folio(pub(crate) Opaque<bindings::folio>);
19+
20+
// SAFETY: The type invariants guarantee that `Folio` is always ref-counted.
unsafe impl AlwaysRefCounted for Folio {
    fn inc_ref(&self) {
        // SAFETY: The existence of a shared reference means that the refcount is nonzero.
        unsafe { bindings::folio_get(self.0.get()) };
    }

    unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
        // SAFETY: The safety requirements guarantee that the refcount is nonzero. The cast is
        // sound because `Folio` is `#[repr(transparent)]` over `Opaque<bindings::folio>`.
        unsafe { bindings::folio_put(obj.cast().as_ptr()) }
    }
}
32+
33+
impl Folio {
    /// Tries to allocate a new folio.
    ///
    /// On success, returns a folio made up of 2^order pages.
    ///
    /// Returns `EDOM` if `order` is too large and `ENOMEM` if the allocation fails.
    pub fn try_new(order: u32) -> Result<UniqueFolio> {
        // NOTE(review): whether the valid range is `0..MAX_ORDER` (exclusive) or
        // `0..=MAX_ORDER` (inclusive) depends on the kernel version's `MAX_ORDER`
        // semantics — confirm this bound against the target tree.
        if order > bindings::MAX_ORDER {
            return Err(EDOM);
        }

        // SAFETY: We checked that `order` is within the max allowed value.
        let f = ptr::NonNull::new(unsafe { bindings::folio_alloc(bindings::GFP_KERNEL, order) })
            .ok_or(ENOMEM)?;

        // SAFETY: The folio returned by `folio_alloc` is referenced. The ownership of the
        // reference is transferred to the `ARef` instance.
        Ok(UniqueFolio(unsafe { ARef::from_raw(f.cast()) }))
    }

    /// Returns the byte position of this folio in its file.
    pub fn pos(&self) -> i64 {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_pos(self.0.get()) }
    }

    /// Returns the byte size of this folio.
    pub fn size(&self) -> usize {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_size(self.0.get()) }
    }

    /// Flushes the data cache for the pages that make up the folio.
    pub fn flush_dcache(&self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::flush_dcache_folio(self.0.get()) }
    }
}
69+
70+
/// A [`Folio`] that has a single reference to it.
pub struct UniqueFolio(pub(crate) ARef<Folio>);

impl UniqueFolio {
    /// Maps the contents of a folio page into a slice.
    ///
    /// `page_index` is the index of the page within the folio; `EDOM` is returned if it is out
    /// of bounds. The page stays mapped until the returned [`MapGuard`] is dropped.
    pub fn map_page(&self, page_index: usize) -> Result<MapGuard<'_>> {
        if page_index >= self.0.size() / bindings::PAGE_SIZE {
            return Err(EDOM);
        }

        // SAFETY: We just checked that the index is within bounds of the folio.
        let page = unsafe { bindings::folio_page(self.0 .0.get(), page_index) };

        // SAFETY: `page` is valid because it was returned by `folio_page` above.
        let ptr = unsafe { bindings::kmap(page) };

        // SAFETY: We just mapped `ptr`, so it's valid for read for `PAGE_SIZE` bytes.
        let data = unsafe { core::slice::from_raw_parts(ptr.cast::<u8>(), bindings::PAGE_SIZE) };

        Ok(MapGuard { data, page })
    }
}
92+
93+
/// A mapped [`UniqueFolio`].
///
/// Dereferences to the mapped page's bytes and unmaps the page (`kunmap`) on drop.
pub struct MapGuard<'a> {
    // The mapped bytes; the lifetime ties the slice to the folio it came from.
    data: &'a [u8],
    // The page mapped by `kmap`, needed for the matching `kunmap` in `drop`.
    page: *mut bindings::page,
}

impl core::ops::Deref for MapGuard<'_> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.data
    }
}

impl Drop for MapGuard<'_> {
    fn drop(&mut self) {
        // SAFETY: A `MapGuard` instance is only created when `kmap` succeeds, so it's ok to unmap
        // it when the guard is dropped.
        unsafe { bindings::kunmap(self.page) };
    }
}
114+
115+
/// A locked [`Folio`].
///
/// Unlocks the folio (`folio_unlock`) when dropped; dereferences to [`Folio`].
pub struct LockedFolio<'a>(&'a Folio);

impl LockedFolio<'_> {
    /// Creates a new locked folio from a raw pointer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that the folio is valid and locked. Additionally, that the
    /// responsibility of unlocking is transferred to the new instance of [`LockedFolio`]. Lastly,
    /// that the returned [`LockedFolio`] doesn't outlive the refcount that keeps it alive.
    #[allow(dead_code)]
    pub(crate) unsafe fn from_raw(folio: *const bindings::folio) -> Self {
        let ptr = folio.cast();
        // SAFETY: The safety requirements ensure that `folio` (from which `ptr` is derived) is
        // valid and will remain valid while the `LockedFolio` instance lives.
        Self(unsafe { &*ptr })
    }

    /// Marks the folio as being up to date.
    pub fn mark_uptodate(&mut self) {
        // SAFETY: The folio is valid because the existence of a reference implies a non-zero
        // refcount.
        unsafe { bindings::folio_mark_uptodate(self.0 .0.get()) }
    }

    /// Sets the error flag on the folio.
    pub fn set_error(&mut self) {
        // SAFETY: The folio is valid because the existence of a reference implies a non-zero
        // refcount.
        unsafe { bindings::folio_set_error(self.0 .0.get()) }
    }

    /// Calls `cb` once per page covered by the byte range `[offset, offset + len)`, passing it a
    /// mutable slice of the mapped bytes of that page.
    ///
    /// Returns `EDOM` if `offset + len` overflows and `EINVAL` if the range exceeds the folio
    /// size; stops early if `cb` returns an error.
    fn for_each_page(
        &mut self,
        offset: usize,
        len: usize,
        mut cb: impl FnMut(&mut [u8]) -> Result,
    ) -> Result {
        let mut remaining = len;
        let mut next_offset = offset;

        // Check that we don't overflow the folio.
        let end = offset.checked_add(len).ok_or(EDOM)?;
        if end > self.size() {
            return Err(EINVAL);
        }

        while remaining > 0 {
            // `PAGE_SIZE` is a power of two, so masking computes the offset within the page.
            let page_offset = next_offset & (bindings::PAGE_SIZE - 1);
            let usable = min(remaining, bindings::PAGE_SIZE - page_offset);
            // SAFETY: The folio is valid because the shared reference implies a non-zero refcount;
            // `next_offset` is also guaranteed to be less than the folio size.
            let ptr = unsafe { bindings::kmap_local_folio(self.0 .0.get(), next_offset) };

            // SAFETY: `ptr` was just returned by the `kmap_local_folio` above. The guard unmaps
            // the page again at the end of each iteration, even if `cb` returns an error.
            let _guard = ScopeGuard::new(|| unsafe { bindings::kunmap_local(ptr) });

            // SAFETY: `kmap_local_folio` maps whole page so we know it's mapped for at least
            // `usable` bytes.
            let s = unsafe { core::slice::from_raw_parts_mut(ptr.cast::<u8>(), usable) };
            cb(s)?;

            next_offset += usable;
            remaining -= usable;
        }

        Ok(())
    }

    /// Writes the given slice into the folio.
    ///
    /// Returns `EINVAL` if `[offset, offset + data.len())` exceeds the folio size.
    pub fn write(&mut self, offset: usize, data: &[u8]) -> Result {
        let mut remaining = data;

        self.for_each_page(offset, data.len(), |s| {
            s.copy_from_slice(&remaining[..s.len()]);
            remaining = &remaining[s.len()..];
            Ok(())
        })
    }

    /// Writes zeroes into the folio.
    ///
    /// Returns `EINVAL` if `[offset, offset + len)` exceeds the folio size.
    pub fn zero_out(&mut self, offset: usize, len: usize) -> Result {
        self.for_each_page(offset, len, |s| {
            s.fill(0);
            Ok(())
        })
    }
}
202+
203+
impl core::ops::Deref for LockedFolio<'_> {
    type Target = Folio;
    fn deref(&self) -> &Self::Target {
        self.0
    }
}

impl Drop for LockedFolio<'_> {
    fn drop(&mut self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        // Unlocking here discharges the responsibility taken on in `from_raw`.
        unsafe { bindings::folio_unlock(self.0 .0.get()) }
    }
}

rust/kernel/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ extern crate self as kernel;
3434
mod allocator;
3535
mod build_assert;
3636
pub mod error;
37+
pub mod folio;
3738
pub mod fs;
3839
pub mod init;
3940
pub mod ioctl;

0 commit comments

Comments
 (0)