Commit e7b5d23

Authored and committed by Thomas Hellström

drm/ttm: Provide a shmem backup implementation
Provide a standalone shmem backup implementation. Given the ttm_backup
interface, this could later be extended to provide backup implementations
other than shmem, with one use-case being GPU swapout to a user-provided fd.

v5:
- Fix a UAF. (kernel test robot, Dan Carpenter)

v6:
- Rename ttm_backup_shmem_copy_page() function argument (Matthew Brost)
- Add some missing documentation

v8:
- Use folio_file_page to get to the page we want to write back
  instead of using the first page of the folio.

v13:
- Remove the base class abstraction (Christian König)
- Include ttm_backup_bytes_avail().

v14:
- Fix kerneldoc for ttm_backup_bytes_avail() (0-day)
- Work around casting of __randomize_layout struct pointer (0-day)

v15:
- Return negative error code from ttm_backup_backup_page() (Christian König)
- Doc fixes. (Christian König)

Cc: Christian König <[email protected]>
Cc: Somalapuram Amaranath <[email protected]>
Cc: Matthew Brost <[email protected]>
Cc: <[email protected]>
Signed-off-by: Thomas Hellström <[email protected]>
Reviewed-by: Matthew Brost <[email protected]>
Reviewed-by: Christian König <[email protected]>
Link: https://lore.kernel.org/intel-xe/[email protected]
1 parent 3b87886 commit e7b5d23
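
For orientation, the new interface amounts to create/backup/copy-back/drop/fini calls. The snippet below is an illustrative sketch only, not part of the commit: the function name example_backup_and_restore(), the 1 GiB size and the GFP_KERNEL values are assumptions made for the example.

#include <drm/ttm/ttm_backup.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

/* Illustrative only: back a single page up to shmem and restore it. */
static int example_backup_and_restore(struct page *page)
{
        struct ttm_backup *backup;
        s64 handle;
        int ret;

        backup = ttm_backup_shmem_create(SZ_1G);        /* example size */
        if (IS_ERR(backup))
                return PTR_ERR(backup);

        /* Copy the page contents into shmem; idx 0 for a single page. */
        handle = ttm_backup_backup_page(backup, page, false, 0,
                                        GFP_KERNEL, GFP_KERNEL);
        if (handle < 0) {
                ret = handle;
                goto out_fini;
        }

        /* ... the page contents may now be dropped and restored later ... */

        /* Copy the backed-up contents back and release the backup space. */
        ret = ttm_backup_copy_page(backup, page, handle, true);
        ttm_backup_drop(backup, handle);

out_fini:
        ttm_backup_fini(backup);
        return ret;
}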

File tree

3 files changed: +282 −1 lines changed

drivers/gpu/drm/ttm/Makefile

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 
 ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
 	ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
-	ttm_device.o ttm_sys_manager.o
+	ttm_device.o ttm_sys_manager.o ttm_backup.o
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o

drivers/gpu/drm/ttm/ttm_backup.c

Lines changed: 207 additions & 0 deletions
@@ -0,0 +1,207 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/ttm/ttm_backup.h>
#include <linux/page-flags.h>
#include <linux/swap.h>

/*
 * Casting from randomized struct file * to struct ttm_backup * is fine since
 * struct ttm_backup is never defined nor dereferenced.
 */
static struct file *ttm_backup_to_file(struct ttm_backup *backup)
{
        return (void *)backup;
}

static struct ttm_backup *ttm_file_to_backup(struct file *file)
{
        return (void *)file;
}

/*
 * Need to map shmem indices to handle since a handle value
 * of 0 means error, following the swp_entry_t convention.
 */
static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
{
        return (unsigned long)idx + 1;
}

static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
{
        return handle - 1;
}

/**
 * ttm_backup_drop() - release memory associated with a handle
 * @backup: The struct backup pointer used to obtain the handle
 * @handle: The handle obtained from the @backup_page function.
 */
void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
{
        loff_t start = ttm_backup_handle_to_shmem_idx(handle);

        start <<= PAGE_SHIFT;
        shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
                             start + PAGE_SIZE - 1);
}

/**
 * ttm_backup_copy_page() - Copy the contents of a previously backed
 * up page
 * @backup: The struct backup pointer used to back up the page.
 * @dst: The struct page to copy into.
 * @handle: The handle returned when the page was backed up.
 * @intr: Try to perform waits interruptible or at least killable.
 *
 * Return: 0 on success, negative error code on failure, notably
 * -EINTR if @intr was set to true and a signal is pending.
 */
int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
                         pgoff_t handle, bool intr)
{
        struct file *filp = ttm_backup_to_file(backup);
        struct address_space *mapping = filp->f_mapping;
        struct folio *from_folio;
        pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);

        from_folio = shmem_read_folio(mapping, idx);
        if (IS_ERR(from_folio))
                return PTR_ERR(from_folio);

        copy_highpage(dst, folio_file_page(from_folio, idx));
        folio_put(from_folio);

        return 0;
}

/**
 * ttm_backup_backup_page() - Backup a page
 * @backup: The struct backup pointer to use.
 * @page: The page to back up.
 * @writeback: Whether to perform immediate writeback of the page.
 * This may have performance implications.
 * @idx: A unique integer for each page and each struct backup.
 * This allows the backup implementation to avoid managing
 * its address space separately.
 * @page_gfp: The gfp value used when the page was allocated.
 * This is used for accounting purposes.
 * @alloc_gfp: The gfp to be used when allocating memory.
 *
 * Context: If called from reclaim context, the caller needs to
 * assert that the shrinker gfp has __GFP_FS set, to avoid
 * deadlocking on lock_page(). If @writeback is set to true and
 * called from reclaim context, the caller also needs to assert
 * that the shrinker gfp has __GFP_IO set, since without it,
 * we're not allowed to start backup IO.
 *
 * Return: A handle on success. Negative error code on failure.
 *
 * Note: This function could be extended to back up a folio and
 * implementations would then split the folio internally if needed.
 * Drawback is that the caller would then have to keep track of
 * the folio size and usage.
 */
s64
ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
                       bool writeback, pgoff_t idx, gfp_t page_gfp,
                       gfp_t alloc_gfp)
{
        struct file *filp = ttm_backup_to_file(backup);
        struct address_space *mapping = filp->f_mapping;
        unsigned long handle = 0;
        struct folio *to_folio;
        int ret;

        to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
        if (IS_ERR(to_folio))
                return PTR_ERR(to_folio);

        folio_mark_accessed(to_folio);
        folio_lock(to_folio);
        folio_mark_dirty(to_folio);
        copy_highpage(folio_file_page(to_folio, idx), page);
        handle = ttm_backup_shmem_idx_to_handle(idx);

        if (writeback && !folio_mapped(to_folio) &&
            folio_clear_dirty_for_io(to_folio)) {
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_NONE,
                        .nr_to_write = SWAP_CLUSTER_MAX,
                        .range_start = 0,
                        .range_end = LLONG_MAX,
                        .for_reclaim = 1,
                };
                folio_set_reclaim(to_folio);
                ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
                if (!folio_test_writeback(to_folio))
                        folio_clear_reclaim(to_folio);
                /*
                 * If writepage succeeds, it unlocks the folio.
                 * writepage() errors are otherwise dropped, since writepage()
                 * is only best effort here.
                 */
                if (ret)
                        folio_unlock(to_folio);
        } else {
                folio_unlock(to_folio);
        }

        folio_put(to_folio);

        return handle;
}

/**
 * ttm_backup_fini() - Free the struct backup resources after last use.
 * @backup: Pointer to the struct backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct ttm_backup *backup)
{
        fput(ttm_backup_to_file(backup));
}

/**
 * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
 * left for backup.
 *
 * This function is intended also for driver use to indicate whether a
 * backup attempt is meaningful.
 *
 * Return: An approximate size of backup space available.
 */
u64 ttm_backup_bytes_avail(void)
{
        /*
         * The idea behind backing up to shmem is that shmem objects may
         * eventually be swapped out. So no point swapping out if there
         * is no or low swap-space available. But the accuracy of this
         * number also depends on shmem actually swapping out backed-up
         * shmem objects without too much buffering.
         */
        return (u64)get_nr_swap_pages() << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);

/**
 * ttm_backup_shmem_create() - Create a shmem-based struct backup.
 * @size: The maximum size (in bytes) to back up.
 *
 * Create a backup utilizing shmem objects.
 *
 * Return: A pointer to a struct ttm_backup on success,
 * an error pointer on error.
 */
struct ttm_backup *ttm_backup_shmem_create(loff_t size)
{
        struct file *filp;

        filp = shmem_file_setup("ttm shmem backup", size, 0);

        return ttm_file_to_backup(filp);
}
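
ttm_backup_bytes_avail() is exported so drivers can decide up front whether attempting backup is meaningful at all. A hypothetical gate, not part of the commit (the helper name and check are assumptions for illustration):

/* Illustrative assumption: only attempt backup if swap can hold the pages. */
static bool example_backup_worthwhile(unsigned long nr_pages)
{
        return ttm_backup_bytes_avail() >= ((u64)nr_pages << PAGE_SHIFT);
}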

include/drm/ttm/ttm_backup.h

Lines changed: 74 additions & 0 deletions
@@ -0,0 +1,74 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _TTM_BACKUP_H_
#define _TTM_BACKUP_H_

#include <linux/mm_types.h>
#include <linux/shmem_fs.h>

struct ttm_backup;

/**
 * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
 * @handle: The handle to convert.
 *
 * Converts an opaque handle received from the
 * struct ttm_backoup_ops::backup_page() function to an (invalid)
 * struct page pointer suitable for a struct page array.
 *
 * Return: An (invalid) struct page pointer.
 */
static inline struct page *
ttm_backup_handle_to_page_ptr(unsigned long handle)
{
        return (struct page *)(handle << 1 | 1);
}

/**
 * ttm_backup_page_ptr_is_handle() - Whether a struct page pointer is a handle
 * @page: The struct page pointer to check.
 *
 * Return: true if the struct page pointer is a handle returned from
 * ttm_backup_handle_to_page_ptr(). False otherwise.
 */
static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
{
        return (unsigned long)page & 1;
}

/**
 * ttm_backup_page_ptr_to_handle() - Convert a struct page pointer to a handle
 * @page: The struct page pointer to convert
 *
 * Return: The handle that was previously used in
 * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
 * for use as argument in the struct ttm_backup_ops drop() or
 * copy_backed_up_page() functions.
 */
static inline unsigned long
ttm_backup_page_ptr_to_handle(const struct page *page)
{
        WARN_ON(!ttm_backup_page_ptr_is_handle(page));
        return (unsigned long)page >> 1;
}

void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle);

int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
                         pgoff_t handle, bool intr);

s64
ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
                       bool writeback, pgoff_t idx, gfp_t page_gfp,
                       gfp_t alloc_gfp);

void ttm_backup_fini(struct ttm_backup *backup);

u64 ttm_backup_bytes_avail(void);

struct ttm_backup *ttm_backup_shmem_create(loff_t size);

#endif
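
The inline helpers above rely on real struct page pointers always being even, so an odd value can never be mistaken for a page and a backup handle can occupy the same struct page * array slot as the page it replaced. A hypothetical round trip, not part of the commit (the page array and index are assumptions for illustration):

/* Illustrative only: stash a backup handle in a page-array slot. */
static void example_stash_handle(struct page **pages, unsigned long i,
                                 unsigned long handle)
{
        pages[i] = ttm_backup_handle_to_page_ptr(handle);
}

/* Illustrative only: recover the handle, or 0 if the slot holds a real page. */
static unsigned long example_recover_handle(struct page **pages,
                                            unsigned long i)
{
        if (!ttm_backup_page_ptr_is_handle(pages[i]))
                return 0;

        return ttm_backup_page_ptr_to_handle(pages[i]);
}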

0 commit comments