Skip to content

Commit 6669aa3

Browse files
runtime: randomize heap base address
During initialization, allow randomizing the heap base address by generating a random uint64 and using its bits to randomize various portions of the heap base address. We use the following method to randomize the base address: * We first generate a random heapArenaBytes aligned address that we use for generating the hints. * On the first call to mheap.grow, we then generate a random PallocChunkBytes aligned offset into the mmap'd heap region, which we use as the base for the heap region. * We then mark a random number of pages within the page allocator as allocated. Our final randomized "heap base address" becomes the first byte of the first available page returned by the page allocator. This results in an address with at least heapAddrBits-gc.PageShift-1 bits of entropy. Fixes #27583 Change-Id: Ideb4450a5ff747a132f702d563d2a516dec91a88 Reviewed-on: https://go-review.googlesource.com/c/go/+/674835 Reviewed-by: Michael Knyszek <[email protected]> LUCI-TryBot-Result: Go LUCI <[email protected]>
1 parent 26338a7 commit 6669aa3

File tree

8 files changed

+170
-3
lines changed

8 files changed

+170
-3
lines changed

src/internal/goexperiment/exp_randomizedheapbase64_off.go

Lines changed: 8 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/internal/goexperiment/exp_randomizedheapbase64_on.go

Lines changed: 8 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/internal/goexperiment/flags.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -129,4 +129,8 @@ type Flags struct {
129129

130130
// GreenTeaGC enables the Green Tea GC implementation.
131131
GreenTeaGC bool
132+
133+
// RandomizedHeapBase64 enables heap base address randomization on 64-bit
134+
// platforms.
135+
RandomizedHeapBase64 bool
132136
}

src/runtime/export_test.go

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ package runtime
99
import (
1010
"internal/abi"
1111
"internal/goarch"
12+
"internal/goexperiment"
1213
"internal/goos"
1314
"internal/runtime/atomic"
1415
"internal/runtime/gc"
@@ -417,7 +418,8 @@ func ReadMemStatsSlow() (base, slow MemStats) {
417418
slow.HeapReleased += uint64(pg) * pageSize
418419
}
419420
for _, p := range allp {
420-
pg := sys.OnesCount64(p.pcache.scav)
421+
// Only count scav bits for pages in the cache
422+
pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav)
421423
slow.HeapReleased += uint64(pg) * pageSize
422424
}
423425

@@ -1120,12 +1122,16 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
11201122

11211123
// Lock so that we can safely access the bitmap.
11221124
lock(&mheap_.lock)
1125+
1126+
heapBase := mheap_.pages.inUse.ranges[0].base.addr()
1127+
secondArenaBase := arenaBase(arenaIndex(heapBase) + 1)
11231128
chunkLoop:
11241129
for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
11251130
chunk := mheap_.pages.tryChunkOf(i)
11261131
if chunk == nil {
11271132
continue
11281133
}
1134+
cb := chunkBase(i)
11291135
for j := 0; j < pallocChunkPages/64; j++ {
11301136
// Run over each 64-bit bitmap section and ensure
11311137
// scavenged is being cleared properly on allocation.
@@ -1135,12 +1141,20 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
11351141
want := chunk.scavenged[j] &^ chunk.pallocBits[j]
11361142
got := chunk.scavenged[j]
11371143
if want != got {
1144+
// When goexperiment.RandomizedHeapBase64 is set we use a
1145+
// series of padding pages to generate randomized heap base
1146+
// address which have both the alloc and scav bits set. If
1147+
// we see this for a chunk between the address of the heap
1148+
// base, and the address of the second arena continue.
1149+
if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) {
1150+
continue
1151+
}
11381152
ok = false
11391153
if n >= len(mismatches) {
11401154
break chunkLoop
11411155
}
11421156
mismatches[n] = BitsMismatch{
1143-
Base: chunkBase(i) + uintptr(j)*64*pageSize,
1157+
Base: cb + uintptr(j)*64*pageSize,
11441158
Got: got,
11451159
Want: want,
11461160
}

src/runtime/malloc.go

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,7 @@ package runtime
102102

103103
import (
104104
"internal/goarch"
105+
"internal/goexperiment"
105106
"internal/goos"
106107
"internal/runtime/atomic"
107108
"internal/runtime/gc"
@@ -345,6 +346,14 @@ const (
345346
// metadata mappings back to the OS. That would be quite complex to do in general
346347
// as the heap is likely fragmented after a reduction in heap size.
347348
minHeapForMetadataHugePages = 1 << 30
349+
350+
// randomizeHeapBase indicates if the heap base address should be randomized.
351+
// See comment in mallocinit for how the randomization is performed.
352+
randomizeHeapBase = goexperiment.RandomizedHeapBase64 && goarch.PtrSize == 8 && !isSbrkPlatform
353+
354+
// randHeapBasePrefixMask is used to extract the top byte of the randomized
355+
// heap base address.
356+
randHeapBasePrefixMask = ^uintptr(0xff << (heapAddrBits - 8))
348357
)
349358

350359
// physPageSize is the size in bytes of the OS's physical pages.
@@ -372,6 +381,24 @@ var (
372381
physHugePageShift uint
373382
)
374383

384+
var (
385+
// heapRandSeed is a random value that is populated in mallocinit if
386+
// randomizeHeapBase is set. It is used in mallocinit, and mheap.grow, to
387+
// randomize the base heap address.
388+
heapRandSeed uintptr
389+
heapRandSeedBitsRemaining int
390+
)
391+
392+
func nextHeapRandBits(bits int) uintptr {
393+
if bits > heapRandSeedBitsRemaining {
394+
throw("not enough heapRandSeed bits remaining")
395+
}
396+
r := heapRandSeed >> (64 - bits)
397+
heapRandSeed <<= bits
398+
heapRandSeedBitsRemaining -= bits
399+
return r
400+
}
401+
375402
func mallocinit() {
376403
if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
377404
throw("bad TinySizeClass")
@@ -517,6 +544,42 @@ func mallocinit() {
517544
//
518545
// In race mode we have no choice but to just use the same hints because
519546
// the race detector requires that the heap be mapped contiguously.
547+
//
548+
// If randomizeHeapBase is set, we attempt to randomize the base address
549+
// as much as possible. We do this by generating a random uint64 via
550+
// bootstrapRand and using its bits to randomize portions of the base
551+
// address as follows:
552+
// * We first generate a random heapArenaBytes aligned address that we use for
553+
// generating the hints.
554+
// * On the first call to mheap.grow, we then generate a random PallocChunkBytes
555+
// aligned offset into the mmap'd heap region, which we use as the base for
556+
// the heap region.
557+
// * We then select a page offset in that PallocChunkBytes region to start the
558+
// heap at, and mark all the pages up to that offset as allocated.
559+
//
560+
// Our final randomized "heap base address" becomes the first byte of
561+
// the first available page returned by the page allocator. This results
562+
// in an address with at least heapAddrBits-gc.PageShift-2-(1*goarch.IsAmd64)
563+
// bits of entropy.
564+
565+
var randHeapBase uintptr
566+
var randHeapBasePrefix byte
567+
// heapAddrBits is 48 on most platforms, but we only use 47 of those
568+
// bits in order to provide a good amount of room for the heap to grow
569+
// contiguously. On amd64, there are 48 bits, but the top bit is sign
570+
// extended, so we throw away another bit, just to be safe.
571+
randHeapAddrBits := heapAddrBits - 1 - (goarch.IsAmd64 * 1)
572+
if randomizeHeapBase {
573+
// Generate a random value, and take the bottom heapAddrBits-logHeapArenaBytes
574+
// bits, using them as the top bits for randHeapBase.
575+
heapRandSeed, heapRandSeedBitsRemaining = uintptr(bootstrapRand()), 64
576+
577+
topBits := (randHeapAddrBits - logHeapArenaBytes)
578+
randHeapBase = nextHeapRandBits(topBits) << (randHeapAddrBits - topBits)
579+
randHeapBase = alignUp(randHeapBase, heapArenaBytes)
580+
randHeapBasePrefix = byte(randHeapBase >> (randHeapAddrBits - 8))
581+
}
582+
520583
for i := 0x7f; i >= 0; i-- {
521584
var p uintptr
522585
switch {
@@ -528,6 +591,9 @@ func mallocinit() {
528591
if p >= uintptrMask&0x00e000000000 {
529592
continue
530593
}
594+
case randomizeHeapBase:
595+
prefix := uintptr(randHeapBasePrefix+byte(i)) << (randHeapAddrBits - 8)
596+
p = prefix | (randHeapBase & randHeapBasePrefixMask)
531597
case GOARCH == "arm64" && GOOS == "ios":
532598
p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
533599
case GOARCH == "arm64":

src/runtime/mheap.go

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1547,6 +1547,8 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
15471547
func (h *mheap) grow(npage uintptr) (uintptr, bool) {
15481548
assertLockHeld(&h.lock)
15491549

1550+
firstGrow := h.curArena.base == 0
1551+
15501552
// We must grow the heap in whole palloc chunks.
15511553
// We call sysMap below but note that because we
15521554
// round up to pallocChunkPages which is on the order
@@ -1595,6 +1597,16 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
15951597
// Switch to the new space.
15961598
h.curArena.base = uintptr(av)
15971599
h.curArena.end = uintptr(av) + asize
1600+
1601+
if firstGrow && randomizeHeapBase {
1602+
// The top heapAddrBits-logHeapArenaBytes are randomized, we now
1603+
// want to randomize the next
1604+
// logHeapArenaBytes-log2(pallocChunkBytes) bits, making sure
1605+
// h.curArena.base is aligned to pallocChunkBytes.
1606+
bits := logHeapArenaBytes - logPallocChunkBytes
1607+
offset := nextHeapRandBits(bits)
1608+
h.curArena.base = alignDown(h.curArena.base|(offset<<logPallocChunkBytes), pallocChunkBytes)
1609+
}
15981610
}
15991611

16001612
// Recalculate nBase.
@@ -1625,6 +1637,22 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
16251637
// space ready for allocation.
16261638
h.pages.grow(v, nBase-v)
16271639
totalGrowth += nBase - v
1640+
1641+
if firstGrow && randomizeHeapBase {
1642+
// The top heapAddrBits-log2(pallocChunkBytes) bits are now randomized,
1643+
// we finally want to randomize the next
1644+
// log2(pallocChunkBytes)-log2(pageSize) bits, while maintaining
1645+
// alignment to pageSize. We do this by calculating a random number of
1646+
// pages into the current arena, and marking them as allocated. The
1647+
// address of the next available page becomes our fully randomized base
1648+
// heap address.
1649+
randOffset := nextHeapRandBits(logPallocChunkBytes)
1650+
randNumPages := alignDown(randOffset, pageSize) / pageSize
1651+
if randNumPages != 0 {
1652+
h.pages.markRandomPaddingPages(v, randNumPages)
1653+
}
1654+
}
1655+
16281656
return totalGrowth, true
16291657
}
16301658

src/runtime/mpagealloc.go

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -972,6 +972,45 @@ func (p *pageAlloc) free(base, npages uintptr) {
972972
p.update(base, npages, true, false)
973973
}
974974

975+
// markRandomPaddingPages marks the range of memory [base, base+npages*pageSize)
976+
// as both allocated and scavenged. This is used for randomizing the base heap
977+
// address. Both the alloc and scav bits are set so that the pages are not used
978+
// and so the memory accounting stats are correctly calculated.
979+
//
980+
// Similar to allocRange, it also updates the summaries to reflect the
981+
// newly-updated bitmap.
982+
//
983+
// p.mheapLock must be held.
984+
func (p *pageAlloc) markRandomPaddingPages(base uintptr, npages uintptr) {
985+
assertLockHeld(p.mheapLock)
986+
987+
limit := base + npages*pageSize - 1
988+
sc, ec := chunkIndex(base), chunkIndex(limit)
989+
si, ei := chunkPageIndex(base), chunkPageIndex(limit)
990+
if sc == ec {
991+
chunk := p.chunkOf(sc)
992+
chunk.allocRange(si, ei+1-si)
993+
p.scav.index.alloc(sc, ei+1-si)
994+
chunk.scavenged.setRange(si, ei+1-si)
995+
} else {
996+
chunk := p.chunkOf(sc)
997+
chunk.allocRange(si, pallocChunkPages-si)
998+
p.scav.index.alloc(sc, pallocChunkPages-si)
999+
chunk.scavenged.setRange(si, pallocChunkPages-si)
1000+
for c := sc + 1; c < ec; c++ {
1001+
chunk := p.chunkOf(c)
1002+
chunk.allocAll()
1003+
p.scav.index.alloc(c, pallocChunkPages)
1004+
chunk.scavenged.setAll()
1005+
}
1006+
chunk = p.chunkOf(ec)
1007+
chunk.allocRange(0, ei+1)
1008+
p.scav.index.alloc(ec, ei+1)
1009+
chunk.scavenged.setRange(0, ei+1)
1010+
}
1011+
p.update(base, npages, true, true)
1012+
}
1013+
9751014
const (
9761015
pallocSumBytes = unsafe.Sizeof(pallocSum(0))
9771016

src/runtime/proc.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -862,10 +862,10 @@ func schedinit() {
862862
ticks.init() // run as early as possible
863863
moduledataverify()
864864
stackinit()
865+
randinit() // must run before mallocinit, alginit, mcommoninit
865866
mallocinit()
866867
godebug := getGodebugEarly()
867868
cpuinit(godebug) // must run before alginit
868-
randinit() // must run before alginit, mcommoninit
869869
alginit() // maps, hash, rand must not be used before this call
870870
mcommoninit(gp.m, -1)
871871
modulesinit() // provides activeModules

0 commit comments

Comments
 (0)