
Commit dbf4989

Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "The post-linux-next material.

  7 patches.

  Subsystems affected by this patch series (all mm): debug,
  slab-generic, migration, memcg, and kasan"

* emailed patches from Andrew Morton <[email protected]>:
  kasan: add kasan mode messages when kasan init
  mm: unexport {,un}lock_page_memcg
  mm: unexport folio_memcg_{,un}lock
  mm/migrate.c: remove MIGRATE_PFN_LOCKED
  mm: migrate: simplify the file-backed pages validation when migrating its mapping
  mm: allow only SLUB on PREEMPT_RT
  mm/page_owner.c: modify the type of argument "order" in some functions
2 parents: 6d76f6e + b873e98

14 files changed: +61 −150 lines

Documentation/vm/hmm.rst

Lines changed: 1 addition & 1 deletion
@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
    system memory page, locks the page with ``lock_page()``, and fills in the
    ``dst`` array entry with::

-     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+     dst[i] = migrate_pfn(page_to_pfn(dpage));

    Now that the driver knows that this page is being migrated, it can
    invalidate device private MMU mappings and copy device private memory
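This documentation change states the convention that every driver hunk below follows: the driver still allocates and locks its destination page before copying, but it no longer reports a lock via MIGRATE_PFN_LOCKED, because the core migration code now always takes the page lock itself during collection. A minimal sketch of the post-series alloc-and-copy step, modeled on lib/test_hmm.c further down; demo_alloc_dpage() is a hypothetical driver-specific allocator, not a kernel API:

#include <linux/migrate.h>
#include <linux/mm.h>

struct page *demo_alloc_dpage(void);	/* hypothetical destination-page allocator */

static void demo_alloc_and_copy(struct migrate_vma *args)
{
	unsigned long i;

	for (i = 0; i < args->npages; i++) {
		struct page *dpage;

		/* Skip entries the core decided not to migrate. */
		if (!(args->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = demo_alloc_dpage();
		if (!dpage)
			continue;	/* leave dst[i] empty; the core skips it */

		/* The destination page is still locked before copying... */
		lock_page(dpage);

		/* ...but only the pfn (plus WRITE, if granted) is reported. */
		args->dst[i] = migrate_pfn(page_to_pfn(dpage));
		if (args->src[i] & MIGRATE_PFN_WRITE)
			args->dst[i] |= MIGRATE_PFN_WRITE;
	}
}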

arch/arm64/mm/kasan_init.c

Lines changed: 1 addition & 1 deletion
@@ -310,7 +310,7 @@ void __init kasan_init(void)
 	kasan_init_depth();
 #if defined(CONFIG_KASAN_GENERIC)
 	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (generic)\n");
 #endif
 }

arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 2 additions & 2 deletions
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 			  gpa, 0, page_shift);

 	if (ret == U_SUCCESS)
-		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+		*mig.dst = migrate_pfn(pfn);
 	else {
 		unlock_page(dpage);
 		__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
 		}
 	}

-	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*mig.dst = migrate_pfn(page_to_pfn(dpage));
 	migrate_vma_pages(&mig);
 out_finalize:
 	migrate_vma_finalize(&mig);

drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

Lines changed: 0 additions & 2 deletions
@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
 		migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
 				      DMA_TO_DEVICE);
 		r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			 dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 		j++;
 	}

drivers/gpu/drm/nouveau/nouveau_dmem.c

Lines changed: 2 additions & 2 deletions
@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
 		goto error_dma_unmap;
 	mutex_unlock(&svmm->mutex);

-	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	args->dst[0] = migrate_pfn(page_to_pfn(dpage));
 	return 0;

 error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
 		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
 	if (src & MIGRATE_PFN_WRITE)
 		*pfn |= NVIF_VMM_PFNMAP_V0_W;
-	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	return migrate_pfn(page_to_pfn(dpage));

 out_dma_unmap:
 	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

include/linux/migrate.h

Lines changed: 0 additions & 1 deletion
@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
  */
 #define MIGRATE_PFN_VALID	(1UL << 0)
 #define MIGRATE_PFN_MIGRATE	(1UL << 1)
-#define MIGRATE_PFN_LOCKED	(1UL << 2)
 #define MIGRATE_PFN_WRITE	(1UL << 3)
 #define MIGRATE_PFN_SHIFT	6
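Note that the deletion leaves bit 2 as a hole rather than renumbering MIGRATE_PFN_WRITE, so existing src/dst encodings stay bit-compatible. For context, these flags share one unsigned long with the pfn itself; the pack/unpack helpers in this header look roughly like the following (a sketch reproduced from memory of the header of this era, not quoted from the patch):

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	/* Shift the pfn above the flag bits and mark the entry valid. */
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}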

include/linux/page_owner.h

Lines changed: 6 additions & 6 deletions
@@ -8,24 +8,24 @@
 extern struct static_key_false page_owner_inited;
 extern struct page_ext_operations page_owner_ops;

-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
 extern void __set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask);
+			unsigned short order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int nr);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 					pg_data_t *pgdat, struct zone *zone);

-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__reset_page_owner(page, order);
 }

 static inline void set_page_owner(struct page *page,
-			unsigned int order, gfp_t gfp_mask)
+			unsigned short order, gfp_t gfp_mask)
 {
 	if (static_branch_unlikely(&page_owner_inited))
 		__set_page_owner(page, order, gfp_mask);
@@ -52,15 +52,15 @@ static inline void dump_page_owner(const struct page *page)
 	__dump_page_owner(page);
 }
 #else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
 {
 }
 static inline void set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask)
 {
 }
 static inline void split_page_owner(struct page *page,
-			unsigned int order)
+			unsigned short order)
 {
 }
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
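The signature change chases an existing field: struct page_owner in mm/page_owner.c already stores the order as an unsigned short (a page order never exceeds MAX_ORDER, 11 by default, so 16 bits are ample), and these helpers merely adopt the field's type. An illustrative fragment of that struct; field ordering here is hypothetical and further members are elided:

struct page_owner {
	unsigned short order;		/* fits: order <= MAX_ORDER */
	short last_migrate_reason;	/* shares a 4-byte word with order */
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
	/* ... further members elided ... */
};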

init/Kconfig

Lines changed: 2 additions & 0 deletions
@@ -1896,6 +1896,7 @@ choice

 config SLAB
 	bool "SLAB"
+	depends on !PREEMPT_RT
 	select HAVE_HARDENED_USERCOPY_ALLOCATOR
 	help
 	  The regular slab allocator that is established and known to work
@@ -1916,6 +1917,7 @@ config SLUB
 config SLOB
 	depends on EXPERT
 	bool "SLOB (Simple Allocator)"
+	depends on !PREEMPT_RT
 	help
 	  SLOB replaces the stock allocator with a drastically simpler
 	  allocator. SLOB is generally more space efficient but
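Both options gain the same dependency because, of the three allocators, only SLUB has been adapted to PREEMPT_RT's locking model; hiding SLAB and SLOB behind depends on !PREEMPT_RT forces the choice to SLUB on RT kernels rather than permitting an unsupported combination.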

lib/test_hmm.c

Lines changed: 2 additions & 3 deletions
@@ -613,8 +613,7 @@ static void dmirror_migrate_alloc_and_copy(struct migrate_vma *args,
 		 */
 		rpage->zone_device_data = dmirror;

-		*dst = migrate_pfn(page_to_pfn(dpage)) |
-			    MIGRATE_PFN_LOCKED;
+		*dst = migrate_pfn(page_to_pfn(dpage));
 		if ((*src & MIGRATE_PFN_WRITE) ||
 		    (!spage && args->vma->vm_flags & VM_WRITE))
 			*dst |= MIGRATE_PFN_WRITE;
@@ -1137,7 +1136,7 @@ static vm_fault_t dmirror_devmem_fault_alloc_and_copy(struct migrate_vma *args,
 		lock_page(dpage);
 		xa_erase(&dmirror->pt, addr >> PAGE_SHIFT);
 		copy_highpage(dpage, spage);
-		*dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+		*dst = migrate_pfn(page_to_pfn(dpage));
 		if (*src & MIGRATE_PFN_WRITE)
 			*dst |= MIGRATE_PFN_WRITE;
 	}

mm/kasan/hw_tags.c

Lines changed: 13 additions & 1 deletion
@@ -106,6 +106,16 @@ static int __init early_kasan_flag_stacktrace(char *arg)
 }
 early_param("kasan.stacktrace", early_kasan_flag_stacktrace);

+static inline const char *kasan_mode_info(void)
+{
+	if (kasan_mode == KASAN_MODE_ASYNC)
+		return "async";
+	else if (kasan_mode == KASAN_MODE_ASYMM)
+		return "asymm";
+	else
+		return "sync";
+}
+
 /* kasan_init_hw_tags_cpu() is called for each CPU. */
 void kasan_init_hw_tags_cpu(void)
 {
@@ -177,7 +187,9 @@ void __init kasan_init_hw_tags(void)
 		break;
 	}

-	pr_info("KernelAddressSanitizer initialized\n");
+	pr_info("KernelAddressSanitizer initialized (hw-tags, mode=%s, stacktrace=%s)\n",
+		kasan_mode_info(),
+		kasan_stack_collection_enabled() ? "on" : "off");
 }

 void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
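Given the new format string, and assuming the defaults (synchronous mode, stack collection enabled), the hw-tags boot line would now read, for example:

    KernelAddressSanitizer initialized (hw-tags, mode=sync, stacktrace=on)

which, next to the "(generic)" tag added in the arch/arm64 hunk above, lets a dmesg grep identify the active KASAN mode at a glance.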
