Skip to content

Commit 22ef76e

Browse files
nixprime authored and gvisor-bot committed
mm: allow shared anonymous mappings to use huge pages
PiperOrigin-RevId: 740078127
1 parent 99749bc commit 22ef76e

File tree

2 files changed

+28
-4
lines changed

2 files changed

+28
-4
lines changed

pkg/sentry/fsimpl/tmpfs/regular_file.go

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,9 @@ type regularFile struct {
8989
// alignment padding.
9090
initiallyUnlinked bool
9191

92+
// huge is true if pages in this file may be hugepage-backed.
93+
huge bool
94+
9295
// size is the size of data.
9396
//
9497
// Protected by both dataMu and inode.mu; reading it requires holding
@@ -150,6 +153,7 @@ func NewZeroFile(ctx context.Context, creds *auth.Credentials, mount *vfs.Mount,
150153
}
151154
rf := fd.inode().impl.(*regularFile)
152155
rf.memoryUsageKind = usage.Anonymous
156+
rf.huge = true
153157
rf.size.Store(size)
154158
return &fd.vfsfd, err
155159
}
@@ -291,6 +295,7 @@ func (rf *regularFile) CopyMapping(ctx context.Context, ms memmap.MappingSpace,
291295
// Translate implements memmap.Mappable.Translate.
292296
func (rf *regularFile) Translate(ctx context.Context, required, optional memmap.MappableRange, at hostarch.AccessType) ([]memmap.Translation, error) {
293297
memCgID := pgalloc.MemoryCgroupIDFromContext(ctx)
298+
mayHuge := rf.huge && rf.inode.fs.mf.HugepagesEnabled()
294299

295300
rf.dataMu.Lock()
296301
defer rf.dataMu.Unlock()
@@ -336,6 +341,7 @@ func (rf *regularFile) Translate(ctx context.Context, required, optional memmap.
336341
pagesAlloced, cerr := rf.data.Fill(ctx, required, optional, rf.size.RacyLoad(), rf.inode.fs.mf, pgalloc.AllocOpts{
337342
Kind: rf.memoryUsageKind,
338343
MemCgID: memCgID,
344+
Huge: mayHuge,
339345
}, nil)
340346
// rf.data.Fill() may fail mid-way. We still want to account any pages that
341347
// were allocated, irrespective of an error.
@@ -461,6 +467,7 @@ func (rf *regularFile) allocateLocked(ctx context.Context, mode, newSize uint64,
461467
Kind: rf.memoryUsageKind,
462468
MemCgID: memCgID,
463469
Mode: allocMode,
470+
Huge: rf.huge && rf.inode.fs.mf.HugepagesEnabled(),
464471
}, nil /* r */)
465472
// f.data.Fill() may fail mid-way. We still want to account any pages that
466473
// were allocated, irrespective of an error.
@@ -765,6 +772,8 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
765772
pgstartaddr := hostarch.Addr(rw.off).RoundDown()
766773
pgendaddr, _ := hostarch.Addr(end).RoundUp()
767774
pgMR := memmap.MappableRange{uint64(pgstartaddr), uint64(pgendaddr)}
775+
fs := rw.file.inode.fs
776+
mayHuge := rw.file.huge && fs.mf.HugepagesEnabled()
768777

769778
var (
770779
done uint64
@@ -791,7 +800,7 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
791800
// Allocate memory for the write.
792801
gapMR := gap.Range().Intersect(pgMR)
793802
pagesToFill := gapMR.Length() / hostarch.PageSize
794-
pagesReserved := rw.file.inode.fs.accountPagesPartial(pagesToFill)
803+
pagesReserved := fs.accountPagesPartial(pagesToFill)
795804
if pagesReserved == 0 {
796805
if done == 0 {
797806
retErr = linuxerr.ENOSPC
@@ -802,7 +811,7 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
802811
}
803812
gapMR.End = gapMR.Start + (hostarch.PageSize * pagesReserved)
804813
allocMode := pgalloc.AllocateAndWritePopulate
805-
if rw.file.inode.fs.mf.IsDiskBacked() {
814+
if fs.mf.IsDiskBacked() {
806815
// Don't populate pages for disk-backed files. Benchmarking showed that
807816
// disk-backed pages are likely to be written back to disk before we
808817
// can write to them. The pages fault again on write anyways. In total,
@@ -811,14 +820,19 @@ func (rw *regularFileReadWriter) WriteFromBlocks(srcs safemem.BlockSeq) (uint64,
811820
// useless disk writebacks.
812821
allocMode = pgalloc.AllocateCallerIndirectCommit
813822
}
814-
fr, err := rw.file.inode.fs.mf.Allocate(gapMR.Length(), pgalloc.AllocOpts{
823+
fr, err := fs.mf.Allocate(gapMR.Length(), pgalloc.AllocOpts{
815824
Kind: rw.file.memoryUsageKind,
816825
MemCgID: rw.memCgID,
817826
Mode: allocMode,
827+
// TODO: If mayHuge is true and gap spans at least one aligned
828+
// hugepage, but either start or end are not hugepage-aligned,
829+
// consider allocating small pages on either end and huge pages
830+
// in the middle.
831+
Huge: mayHuge && hostarch.IsHugePageAligned(gapMR.Start) && hostarch.IsHugePageAligned(gapMR.End),
818832
})
819833
if err != nil {
820834
retErr = err
821-
rw.file.inode.fs.unaccountPages(pagesReserved)
835+
fs.unaccountPages(pagesReserved)
822836
goto exitLoop
823837
}
824838

pkg/sentry/fsutil/file_range_set.go

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,17 @@ func (s *FileRangeSet) Fill(ctx context.Context, required, optional memmap.Mappa
162162
return done, nil
163163
}
164164
}
165+
166+
// We can only pass opts.Huge if the allocation is hugepage-aligned.
167+
// TODO: If opts.Huge is true and gap spans at least one aligned
168+
// hugepage, but either start or end are not hugepage-aligned, consider
169+
// allocating small pages on either end and huge pages in the middle.
170+
wantHuge := opts.Huge
171+
if !hostarch.IsHugePageAligned(gr.Start) || !hostarch.IsHugePageAligned(gr.End) {
172+
opts.Huge = false
173+
}
165174
fr, err := mf.Allocate(gr.Length(), opts)
175+
opts.Huge = wantHuge
166176

167177
// Store anything we managed to read into the cache.
168178
if done := fr.Length(); done != 0 {

0 commit comments

Comments (0)