
Commit b56791c

runtime: validate candidate searchAddr in pageAlloc.find
Currently pageAlloc.find attempts to find a better estimate for the
first free page in the heap, even if the space it's looking for isn't
necessarily going to be the first free page in the heap (e.g. if npages
>= 2).

However, in doing so it has the potential to return a searchAddr
candidate that doesn't actually correspond to mapped memory, and this
candidate might still be adopted. As a result, pageAlloc.alloc's fast
path may look at unmapped summary memory and segfault.

This case is rare on most operating systems, since the heap is kept
fairly contiguous, so the chance that the candidate searchAddr is
unmapped is low. Even so, it is entirely possible and outside the
user's control when it happens (in fact, it's likely to happen
consistently for a given user on a given system).

Fix this problem by ensuring that the candidate always points to mapped
memory. We do this by checking mheap's arenas structure first. If the
candidate turns out not to correspond to mapped memory, we consult inUse
to round the searchAddr up to the next mapped address.

While we're here, clean up some documentation related to searchAddr.

Fixes #40191.

Change-Id: I759efec78987e4a8fde466ae45aabbaa3d9d4214
Reviewed-on: https://go-review.googlesource.com/c/go/+/242680
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
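The overall shape of the fix can be illustrated with a short, self-contained sketch. Everything here (heapRanges, validate, and the ^uintptr(0) sentinel) is a hypothetical stand-in for the runtime's mheap_.arenas, inUse, and maxOffAddr, not the actual implementation:

package main

import "fmt"

// Mapped regions of a pretend discontiguous heap, sorted and
// non-overlapping. The addresses are made up for illustration.
var heapRanges = [][2]uintptr{{0x100000, 0x200000}, {0x800000, 0x900000}}

// validate mirrors the shape of the fix: keep a candidate search address
// only if it points at mapped memory, otherwise round it up to the base
// of the next mapped region. ^uintptr(0) stands in for the runtime's
// maxOffAddr ("heap exhausted").
func validate(candidate uintptr) uintptr {
	for _, r := range heapRanges {
		if candidate < r[0] {
			return r[0] // candidate falls below this region: round up
		}
		if candidate < r[1] {
			return candidate // candidate is mapped: keep it
		}
	}
	return ^uintptr(0) // above every mapped region: no free memory left
}

func main() {
	// A candidate like the one find could produce for this bug: it
	// lies in the unmapped gap between the two regions.
	fmt.Printf("%#x\n", validate(0x300000)) // 0x800000
	fmt.Printf("%#x\n", validate(0x180000)) // 0x180000 (already mapped)
	fmt.Printf("%#x\n", validate(0xa00000)) // 0xffffffffffffffff
}

The real findMappedAddr in the diff below does the same thing, except the mapped-memory check consults the two-level arenas index and the round-up is a binary search over inUse.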
1 parent 10374e2 commit b56791c

File tree: 3 files changed, +108 -11 lines


src/runtime/mpagealloc.go (+32 -11)

@@ -233,16 +233,12 @@ type pageAlloc struct {
 
 	// The address to start an allocation search with. It must never
 	// point to any memory that is not contained in inUse, i.e.
-	// inUse.contains(searchAddr) must always be true.
+	// inUse.contains(searchAddr.addr()) must always be true. The one
+	// exception to this rule is that it may take on the value of
+	// maxOffAddr to indicate that the heap is exhausted.
 	//
-	// When added with arenaBaseOffset, we guarantee that
-	// all valid heap addresses (when also added with
-	// arenaBaseOffset) below this value are allocated and
-	// not worth searching.
-	//
-	// Note that adding in arenaBaseOffset transforms addresses
-	// to a new address space with a linear view of the full address
-	// space on architectures with segmented address spaces.
+	// We guarantee that all valid heap addresses below this value
+	// are allocated and not worth searching.
 	searchAddr offAddr
 
 	// start and end represent the chunk indices
@@ -518,6 +514,30 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
 	return uintptr(scav) * pageSize
 }
 
+// findMappedAddr returns the smallest mapped offAddr that is
+// >= addr. That is, if addr refers to mapped memory, then it is
+// returned. If addr is higher than any mapped region, then
+// it returns maxOffAddr.
+//
+// s.mheapLock must be held.
+func (s *pageAlloc) findMappedAddr(addr offAddr) offAddr {
+	// If we're not in a test, validate first by checking mheap_.arenas.
+	// This is a fast path which is only safe to use outside of testing.
+	ai := arenaIndex(addr.addr())
+	if s.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
+		vAddr, ok := s.inUse.findAddrGreaterEqual(addr.addr())
+		if ok {
+			return offAddr{vAddr}
+		} else {
+			// The candidate search address is greater than any
+			// known address, which means we definitely have no
+			// free memory left.
+			return maxOffAddr
+		}
+	}
+	return addr
+}
+
 // find searches for the first (address-ordered) contiguous free region of
 // npages in size and returns a base address for that region.
 //
@@ -526,6 +546,7 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
 //
 // find also computes and returns a candidate s.searchAddr, which may or
 // may not prune more of the address space than s.searchAddr already does.
+// This candidate is always a valid s.searchAddr.
 //
 // find represents the slow path and the full radix tree search.
 //
@@ -695,7 +716,7 @@ nextLevel:
 			// We found a sufficiently large run of free pages straddling
 			// some boundary, so compute the address and return it.
 			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
-			return addr, firstFree.base
+			return addr, s.findMappedAddr(firstFree.base)
 		}
 		if l == 0 {
 			// We're at level zero, so that means we've exhausted our search.
@@ -741,7 +762,7 @@ nextLevel:
 	// found an even narrower free window.
 	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
 	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
-	return addr, firstFree.base
+	return addr, s.findMappedAddr(firstFree.base)
 }
 
 // alloc allocates npages worth of memory from the page heap, returning the base
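As a rough illustration of findMappedAddr's fast path, here is a sketch of a two-level sparse arena lookup. The bit widths, arena size, and types are all hypothetical simplifications of mheap_.arenas (the runtime's real constants vary by platform, and its indices are validated before use):

package main

import "fmt"

// Toy mirror of a sparse two-level arena index. These constants are
// made up to keep the example small.
const (
	l1Bits        = 2
	l2Bits        = 4
	arenaSizeLog2 = 20 // pretend arenas are 1 MiB each
)

type heapArena struct{} // stand-in for the runtime's per-arena metadata

var arenas [1 << l1Bits]*[1 << l2Bits]*heapArena

// mapped mirrors the nil checks in findMappedAddr: a nil entry at either
// level means no heap memory (and no summaries) back that address.
// Bounds checks are omitted for brevity.
func mapped(addr uintptr) bool {
	ai := addr >> arenaSizeLog2
	l1 := ai >> l2Bits
	l2 := ai & (1<<l2Bits - 1)
	return arenas[l1] != nil && arenas[l1][l2] != nil
}

func main() {
	// Map in a single arena covering [0x100000, 0x200000).
	arenas[0] = new([1 << l2Bits]*heapArena)
	arenas[0][1] = &heapArena{}

	fmt.Println(mapped(0x180000)) // true: candidate accepted on the fast path
	fmt.Println(mapped(0x400000)) // false: fall back to searching inUse
}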

src/runtime/mpagealloc_test.go (+57)

@@ -612,6 +612,63 @@ func TestPageAllocAlloc(t *testing.T) {
 				BaseChunkIdx + chunkIdxBigJump: {{0, PallocChunkPages}},
 			},
 		}
+
+		// Test to check for issue #40191. Essentially, the candidate searchAddr
+		// discovered by find may not point to mapped memory, so we need to handle
+		// that explicitly.
+		//
+		// chunkIdxSmallOffset is an offset intended to be used within chunkIdxBigJump.
+		// It is far enough within chunkIdxBigJump that the summaries at the beginning
+		// of an address range the size of chunkIdxBigJump will not be mapped in.
+		const chunkIdxSmallOffset = 0x503
+		tests["DiscontiguousBadSearchAddr"] = test{
+			before: map[ChunkIdx][]BitRange{
+				// The mechanism for the bug involves three chunks, A, B, and C, which are
+				// far apart in the address space. In particular, B is chunkIdxBigJump +
+				// chunkIdxSmallOffset chunks away from A, and C is 2*chunkIdxBigJump chunks
+				// away from A. A has 1 page free, B has several (NOT at the end of B), and
+				// C is totally free.
+				// Note that B's free memory must not be at the end of B because the fast
+				// path in the page allocator will check if the searchAddr even gives us
+				// enough space to place the allocation in a chunk before accessing the
+				// summary.
+				BaseChunkIdx + chunkIdxBigJump*0: {{0, PallocChunkPages - 1}},
+				BaseChunkIdx + chunkIdxBigJump*1 + chunkIdxSmallOffset: {
+					{0, PallocChunkPages - 10},
+					{PallocChunkPages - 1, 1},
+				},
+				BaseChunkIdx + chunkIdxBigJump*2: {},
+			},
+			scav: map[ChunkIdx][]BitRange{
+				BaseChunkIdx + chunkIdxBigJump*0:                        {},
+				BaseChunkIdx + chunkIdxBigJump*1 + chunkIdxSmallOffset: {},
+				BaseChunkIdx + chunkIdxBigJump*2:                        {},
+			},
+			hits: []hit{
+				// We first allocate into A to set the page allocator's searchAddr to the
+				// end of that chunk. That is the only purpose A serves.
+				{1, PageBase(BaseChunkIdx, PallocChunkPages-1), 0},
+				// Then, we make a big allocation that doesn't fit into B, and so must be
+				// fulfilled by C.
+				//
+				// On the way to fulfilling the allocation into C, we estimate searchAddr
+				// using the summary structure, but that will give us a searchAddr of
+				// B's base address minus chunkIdxSmallOffset chunks. These chunks will
+				// not be mapped.
+				{100, PageBase(BaseChunkIdx+chunkIdxBigJump*2, 0), 0},
+				// Now we try to make a smaller allocation that can be fulfilled by B.
+				// In an older implementation of the page allocator, this will segfault,
+				// because this last allocation will first try to access the summary
+				// for B's base address minus chunkIdxSmallOffset chunks in the fast path,
+				// and this will not be mapped.
+				{9, PageBase(BaseChunkIdx+chunkIdxBigJump*1+chunkIdxSmallOffset, PallocChunkPages-10), 0},
+			},
+			after: map[ChunkIdx][]BitRange{
+				BaseChunkIdx + chunkIdxBigJump*0: {{0, PallocChunkPages}},
+				BaseChunkIdx + chunkIdxBigJump*1 + chunkIdxSmallOffset: {{0, PallocChunkPages}},
+				BaseChunkIdx + chunkIdxBigJump*2: {{0, 100}},
+			},
+		}
 	}
 	for name, v := range tests {
 		v := v

src/runtime/mranges.go (+19)

@@ -188,6 +188,25 @@ func (a *addrRanges) findSucc(addr uintptr) int {
 	return len(a.ranges)
 }
 
+// findAddrGreaterEqual returns the smallest address represented by a
+// that is >= addr. Thus, if the address is represented by a, then it
+// returns addr. The second return value indicates whether such an
+// address exists for addr in a. That is, if addr is larger than any
+// address known to a, the second return value will be false.
+func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
+	i := a.findSucc(addr)
+	if i == 0 {
+		return a.ranges[0].base.addr(), true
+	}
+	if a.ranges[i-1].contains(addr) {
+		return addr, true
+	}
+	if i < len(a.ranges) {
+		return a.ranges[i].base.addr(), true
+	}
+	return 0, false
+}
+
 // contains returns true if a covers the address addr.
 func (a *addrRanges) contains(addr uintptr) bool {
 	i := a.findSucc(addr)
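The four cases findAddrGreaterEqual distinguishes can be exercised in a standalone sketch that substitutes a plain slice and sort.Search for the runtime's addrRanges and offAddr types (all names here are illustrative only, and like the real method, the sketch assumes a non-empty range list):

package main

import (
	"fmt"
	"sort"
)

// rng is a simplified stand-in for the runtime's addrRange: the
// half-open interval [base, limit).
type rng struct{ base, limit uintptr }

// findSucc mimics addrRanges.findSucc: the index of the first range
// whose base is strictly greater than addr.
func findSucc(ranges []rng, addr uintptr) int {
	return sort.Search(len(ranges), func(i int) bool { return ranges[i].base > addr })
}

// findAddrGreaterEqual walks the same cases as the new method: below
// every range, inside a range, in a gap between ranges, or above them all.
func findAddrGreaterEqual(ranges []rng, addr uintptr) (uintptr, bool) {
	i := findSucc(ranges, addr)
	if i == 0 {
		return ranges[0].base, true // below every range: round up to the first
	}
	if addr < ranges[i-1].limit {
		return addr, true // inside a range: addr itself is represented
	}
	if i < len(ranges) {
		return ranges[i].base, true // in a gap: round up to the next range
	}
	return 0, false // above every range: no such address
}

func main() {
	ranges := []rng{{0x1000, 0x2000}, {0x4000, 0x5000}}
	for _, addr := range []uintptr{0x500, 0x1800, 0x3000, 0x6000} {
		got, ok := findAddrGreaterEqual(ranges, addr)
		fmt.Printf("%#x -> %#x %v\n", addr, got, ok)
		// 0x500 -> 0x1000 true, 0x1800 -> 0x1800 true,
		// 0x3000 -> 0x4000 true, 0x6000 -> 0 false
	}
}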
