
Commit ad41251

anakryiko authored and Alexei Starovoitov committed
lib/buildid: implement sleepable build_id_parse() API

Extend freader with a flag specifying whether it's OK to cause a page fault to fetch file data that is not already physically present in memory. With this, it's now easy to wait for data if the caller is running in a sleepable (faultable) context. We utilize read_cache_folio() to bring the desired folio into the page cache, after which the rest of the logic works just the same at the folio level.

Suggested-by: Omar Sandoval <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Johannes Weiner <[email protected]>
Reviewed-by: Eduard Zingerman <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Signed-off-by: Andrii Nakryiko <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent: 45b8fc3 · commit: ad41251
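For context, here is a minimal caller sketch of the API split this commit introduces. It is not part of the commit: fetch_build_id() is a hypothetical helper, and BUILD_ID_SIZE_MAX is assumed to be the buffer-size constant from <linux/buildid.h>; only build_id_parse() and build_id_parse_nofault() are the real entry points touched in the diff below.

/* Hypothetical caller sketch (not from this commit). Assumes
 * BUILD_ID_SIZE_MAX from <linux/buildid.h> as the required buffer size.
 */
#include <linux/buildid.h>
#include <linux/mm.h>

static int fetch_build_id(struct vm_area_struct *vma, bool sleepable,
			  unsigned char build_id[BUILD_ID_SIZE_MAX], __u32 *size)
{
	if (sleepable)
		/* may block: a missing folio is read in via read_cache_folio() */
		return build_id_parse(vma, build_id, size);

	/* must not block: fails if the needed ELF pages are not resident */
	return build_id_parse_nofault(vma, build_id, size);
}

Both entry points funnel into __build_id_parse() in the diff below, with may_fault selecting between the resident-only filemap_get_folio() lookup and the blocking read_cache_folio() path.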


lib/buildid.c

Lines changed: 35 additions & 19 deletions
@@ -16,10 +16,11 @@ struct freader {
 	int err;
 	union {
 		struct {
-			struct address_space *mapping;
+			struct file *file;
 			struct folio *folio;
 			void *addr;
 			loff_t folio_off;
+			bool may_fault;
 		};
 		struct {
 			const char *data;
@@ -29,12 +30,13 @@ struct freader {
 };
 
 static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
-				   struct address_space *mapping)
+				   struct file *file, bool may_fault)
 {
 	memset(r, 0, sizeof(*r));
 	r->buf = buf;
 	r->buf_sz = buf_sz;
-	r->mapping = mapping;
+	r->file = file;
+	r->may_fault = may_fault;
 }
 
 static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz)
@@ -62,7 +64,16 @@ static int freader_get_folio(struct freader *r, loff_t file_off)
 
 	freader_put_folio(r);
 
-	r->folio = filemap_get_folio(r->mapping, file_off >> PAGE_SHIFT);
+	r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT);
+
+	/* if sleeping is allowed, wait for the page, if necessary */
+	if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) {
+		filemap_invalidate_lock_shared(r->file->f_mapping);
+		r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT,
+					    NULL, r->file);
+		filemap_invalidate_unlock_shared(r->file->f_mapping);
+	}
+
 	if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) {
 		if (!IS_ERR(r->folio))
 			folio_put(r->folio);
@@ -287,18 +298,8 @@ static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size)
 /* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */
 #define MAX_FREADER_BUF_SZ 64
 
-/*
- * Parse build ID of ELF file mapped to vma
- * @vma: vma object
- * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
- * @size: returns actual build id size in case of success
- *
- * Assumes no page fault can be taken, so if relevant portions of ELF file are
- * not already paged in, fetching of build ID fails.
- *
- * Return: 0 on success; negative error, otherwise
- */
-int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
+static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
+			    __u32 *size, bool may_fault)
 {
 	const Elf32_Ehdr *ehdr;
 	struct freader r;
@@ -309,7 +310,7 @@ int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
 	if (!vma->vm_file)
 		return -EINVAL;
 
-	freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file->f_mapping);
+	freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);
 
 	/* fetch first 18 bytes of ELF header for checks */
 	ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));
@@ -337,6 +338,22 @@ int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
 	return ret;
 }
 
+/*
+ * Parse build ID of ELF file mapped to vma
+ * @vma: vma object
+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long
+ * @size: returns actual build id size in case of success
+ *
+ * Assumes no page fault can be taken, so if relevant portions of ELF file are
+ * not already paged in, fetching of build ID fails.
+ *
+ * Return: 0 on success; negative error, otherwise
+ */
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
+{
+	return __build_id_parse(vma, build_id, size, false /* !may_fault */);
+}
+
 /*
  * Parse build ID of ELF file mapped to VMA
  * @vma: vma object
@@ -350,8 +367,7 @@ int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
  */
 int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)
 {
-	/* fallback to non-faultable version for now */
-	return build_id_parse_nofault(vma, build_id, size);
+	return __build_id_parse(vma, build_id, size, true /* may_fault */);
 }
 
 /**
