author     Cherry Zhang <cherryyz@google.com>  2020-10-28 09:12:20 -0400
committer  Cherry Zhang <cherryyz@google.com>  2020-10-28 09:12:20 -0400
commit     a16e30d162c1c7408db7821e7b9513cefa09c6ca (patch)
tree       af752ba9ba44c547df39bb0af9bff79f610ba9d5 /src/runtime/mpagealloc.go
parent     91e4d2d57bc341dd82c98247117114c851380aef (diff)
parent     cf6cfba4d5358404dd890f6025e573a4b2156543 (diff)
[dev.link] all: merge branch 'master' into dev.link
Clean merge.
Change-Id: Ia7b2808bc649790198d34c226a61d9e569084dc5
Diffstat (limited to 'src/runtime/mpagealloc.go')
-rw-r--r--  src/runtime/mpagealloc.go  202
1 file changed, 101 insertions, 101 deletions
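Two things change in src/runtime/mpagealloc.go in this merge: every pageAlloc method switches its receiver from `s` to `p`, and the sysStat field (together with init's parameter) moves from a bare `*uint64` to a `*sysMemStat`. The sketch below illustrates the general shape of that second change, a dedicated stat type whose updates all funnel through one atomic method instead of ad-hoc atomic calls; `memStat` and `pageAllocMeta` are hypothetical names for illustration, not the runtime's actual sysMemStat implementation. The full diff follows the sketch.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// memStat is a hypothetical stand-in for a dedicated statistic type: callers
// hold a *memStat instead of a bare *uint64, so every update goes through one
// method rather than scattered atomic calls.
type memStat uint64

// add adjusts the statistic by n bytes; a negative n subtracts via two's
// complement wraparound, which is well defined for unsigned addition.
func (s *memStat) add(n int64) {
	atomic.AddUint64((*uint64)(s), uint64(n))
}

// load reads the current value.
func (s *memStat) load() uint64 {
	return atomic.LoadUint64((*uint64)(s))
}

// pageAllocMeta mirrors the pattern in the diff: the allocator stores a
// pointer to the stat it should charge metadata mappings against.
type pageAllocMeta struct {
	sysStat *memStat
}

// grow pretends to map bytes of metadata and charges it to the owning stat.
func (p *pageAllocMeta) grow(bytes int64) {
	p.sysStat.add(bytes)
}

func main() {
	var metadataMapped memStat
	pa := pageAllocMeta{sysStat: &metadataMapped}
	pa.grow(8 << 10) // pretend we mapped 8 KiB of allocator metadata
	fmt.Println(metadataMapped.load(), "bytes charged")
}
```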
```diff
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index c90a6378bd..2af1c97e0b 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -293,13 +293,13 @@ type pageAlloc struct {
 
 	// sysStat is the runtime memstat to update when new system
 	// memory is committed by the pageAlloc for allocation metadata.
-	sysStat *uint64
+	sysStat *sysMemStat
 
 	// Whether or not this struct is being used in tests.
 	test bool
 }
 
-func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
+func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat) {
 	if levelLogPages[0] > logMaxPackedValue {
 		// We can't represent 1<<levelLogPages[0] pages, the maximum number
 		// of pages we need to represent at the root level, in a summary, which
@@ -308,29 +308,29 @@ func (s *pageAlloc) init(mheapLock *mutex, sysStat *uint64) {
 		print("runtime: summary max pages = ", maxPackedValue, "\n")
 		throw("root level max pages doesn't fit in summary")
 	}
-	s.sysStat = sysStat
+	p.sysStat = sysStat
 
-	// Initialize s.inUse.
-	s.inUse.init(sysStat)
+	// Initialize p.inUse.
+	p.inUse.init(sysStat)
 
 	// System-dependent initialization.
-	s.sysInit()
+	p.sysInit()
 
 	// Start with the searchAddr in a state indicating there's no free memory.
-	s.searchAddr = maxSearchAddr
+	p.searchAddr = maxSearchAddr
 
 	// Set the mheapLock.
-	s.mheapLock = mheapLock
+	p.mheapLock = mheapLock
 
 	// Initialize scavenge tracking state.
-	s.scav.scavLWM = maxSearchAddr
+	p.scav.scavLWM = maxSearchAddr
 }
 
 // tryChunkOf returns the bitmap data for the given chunk.
 //
 // Returns nil if the chunk data has not been mapped.
-func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
-	l2 := s.chunks[ci.l1()]
+func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
+	l2 := p.chunks[ci.l1()]
 	if l2 == nil {
 		return nil
 	}
@@ -340,15 +340,15 @@ func (s *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
 // chunkOf returns the chunk at the given chunk index.
 //
 // The chunk index must be valid or this method may throw.
-func (s *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
-	return &s.chunks[ci.l1()][ci.l2()]
+func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
+	return &p.chunks[ci.l1()][ci.l2()]
 }
 
 // grow sets up the metadata for the address range [base, base+size).
-// It may allocate metadata, in which case *s.sysStat will be updated.
+// It may allocate metadata, in which case *p.sysStat will be updated.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) grow(base, size uintptr) {
+// p.mheapLock must be held.
+func (p *pageAlloc) grow(base, size uintptr) {
 	// Round up to chunks, since we can't deal with increments smaller
 	// than chunks. Also, sysGrow expects aligned values.
 	limit := alignUp(base+size, pallocChunkBytes)
@@ -356,29 +356,29 @@ func (s *pageAlloc) grow(base, size uintptr) {
 
 	// Grow the summary levels in a system-dependent manner.
 	// We just update a bunch of additional metadata here.
-	s.sysGrow(base, limit)
+	p.sysGrow(base, limit)
 
-	// Update s.start and s.end.
+	// Update p.start and p.end.
 	// If no growth happened yet, start == 0. This is generally
 	// safe since the zero page is unmapped.
-	firstGrowth := s.start == 0
+	firstGrowth := p.start == 0
 	start, end := chunkIndex(base), chunkIndex(limit)
-	if firstGrowth || start < s.start {
-		s.start = start
+	if firstGrowth || start < p.start {
+		p.start = start
 	}
-	if end > s.end {
-		s.end = end
+	if end > p.end {
+		p.end = end
 	}
 	// Note that [base, limit) will never overlap with any existing
 	// range inUse because grow only ever adds never-used memory
 	// regions to the page allocator.
-	s.inUse.add(makeAddrRange(base, limit))
+	p.inUse.add(makeAddrRange(base, limit))
 
 	// A grow operation is a lot like a free operation, so if our
-	// chunk ends up below s.searchAddr, update s.searchAddr to the
+	// chunk ends up below p.searchAddr, update p.searchAddr to the
 	// new address, just like in free.
-	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
-		s.searchAddr = b
+	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+		p.searchAddr = b
 	}
 
 	// Add entries into chunks, which is sparse, if needed. Then,
@@ -387,21 +387,21 @@ func (s *pageAlloc) grow(base, size uintptr) {
 	// Newly-grown memory is always considered scavenged.
 	// Set all the bits in the scavenged bitmaps high.
 	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
-		if s.chunks[c.l1()] == nil {
+		if p.chunks[c.l1()] == nil {
 			// Create the necessary l2 entry.
 			//
 			// Store it atomically to avoid races with readers which
 			// don't acquire the heap lock.
-			r := sysAlloc(unsafe.Sizeof(*s.chunks[0]), s.sysStat)
-			atomic.StorepNoWB(unsafe.Pointer(&s.chunks[c.l1()]), r)
+			r := sysAlloc(unsafe.Sizeof(*p.chunks[0]), p.sysStat)
+			atomic.StorepNoWB(unsafe.Pointer(&p.chunks[c.l1()]), r)
 		}
-		s.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
+		p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
 	}
 
 	// Update summaries accordingly. The grow acts like a free, so
 	// we need to ensure this newly-free memory is visible in the
 	// summaries.
-	s.update(base, size/pageSize, true, false)
+	p.update(base, size/pageSize, true, false)
 }
 
 // update updates heap metadata. It must be called each time the bitmap
@@ -411,8 +411,8 @@ func (s *pageAlloc) grow(base, size uintptr) {
 // a contiguous allocation or free between addr and addr+npages. alloc indicates
 // whether the operation performed was an allocation or a free.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
+// p.mheapLock must be held.
+func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 	// base, limit, start, and end are inclusive.
 	limit := base + npages*pageSize - 1
 	sc, ec := chunkIndex(base), chunkIndex(limit)
@@ -421,23 +421,23 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 	if sc == ec {
 		// Fast path: the allocation doesn't span more than one chunk,
 		// so update this one and if the summary didn't change, return.
-		x := s.summary[len(s.summary)-1][sc]
-		y := s.chunkOf(sc).summarize()
+		x := p.summary[len(p.summary)-1][sc]
+		y := p.chunkOf(sc).summarize()
 		if x == y {
 			return
 		}
-		s.summary[len(s.summary)-1][sc] = y
+		p.summary[len(p.summary)-1][sc] = y
 	} else if contig {
 		// Slow contiguous path: the allocation spans more than one chunk
 		// and at least one summary is guaranteed to change.
-		summary := s.summary[len(s.summary)-1]
+		summary := p.summary[len(p.summary)-1]
 
 		// Update the summary for chunk sc.
-		summary[sc] = s.chunkOf(sc).summarize()
+		summary[sc] = p.chunkOf(sc).summarize()
 
 		// Update the summaries for chunks in between, which are
 		// either totally allocated or freed.
-		whole := s.summary[len(s.summary)-1][sc+1 : ec]
+		whole := p.summary[len(p.summary)-1][sc+1 : ec]
 		if alloc {
 			// Should optimize into a memclr.
 			for i := range whole {
@@ -450,22 +450,22 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 		}
 
 		// Update the summary for chunk ec.
-		summary[ec] = s.chunkOf(ec).summarize()
+		summary[ec] = p.chunkOf(ec).summarize()
 	} else {
 		// Slow general path: the allocation spans more than one chunk
 		// and at least one summary is guaranteed to change.
 		//
 		// We can't assume a contiguous allocation happened, so walk over
 		// every chunk in the range and manually recompute the summary.
-		summary := s.summary[len(s.summary)-1]
+		summary := p.summary[len(p.summary)-1]
 		for c := sc; c <= ec; c++ {
-			summary[c] = s.chunkOf(c).summarize()
+			summary[c] = p.chunkOf(c).summarize()
 		}
 	}
 
 	// Walk up the radix tree and update the summaries appropriately.
 	changed := true
-	for l := len(s.summary) - 2; l >= 0 && changed; l-- {
+	for l := len(p.summary) - 2; l >= 0 && changed; l-- {
 		// Update summaries at level l from summaries at level l+1.
 		changed = false
 
@@ -479,12 +479,12 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 
 		// Iterate over each block, updating the corresponding summary in the less-granular level.
 		for i := lo; i < hi; i++ {
-			children := s.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
+			children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
 			sum := mergeSummaries(children, logMaxPages)
-			old := s.summary[l][i]
+			old := p.summary[l][i]
 			if old != sum {
 				changed = true
-				s.summary[l][i] = sum
+				p.summary[l][i] = sum
 			}
 		}
 	}
@@ -497,8 +497,8 @@ func (s *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
 // Returns the amount of scavenged memory in bytes present in the
 // allocated range.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
+// p.mheapLock must be held.
+func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
 	limit := base + npages*pageSize - 1
 	sc, ec := chunkIndex(base), chunkIndex(limit)
 	si, ei := chunkPageIndex(base), chunkPageIndex(limit)
@@ -506,24 +506,24 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
 	scav := uint(0)
 	if sc == ec {
 		// The range doesn't cross any chunk boundaries.
-		chunk := s.chunkOf(sc)
+		chunk := p.chunkOf(sc)
 		scav += chunk.scavenged.popcntRange(si, ei+1-si)
 		chunk.allocRange(si, ei+1-si)
 	} else {
 		// The range crosses at least one chunk boundary.
-		chunk := s.chunkOf(sc)
+		chunk := p.chunkOf(sc)
 		scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
 		chunk.allocRange(si, pallocChunkPages-si)
 		for c := sc + 1; c < ec; c++ {
-			chunk := s.chunkOf(c)
+			chunk := p.chunkOf(c)
 			scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
 			chunk.allocAll()
 		}
-		chunk = s.chunkOf(ec)
+		chunk = p.chunkOf(ec)
 		scav += chunk.scavenged.popcntRange(0, ei+1)
 		chunk.allocRange(0, ei+1)
 	}
-	s.update(base, npages, true, true)
+	p.update(base, npages, true, true)
 	return uintptr(scav) * pageSize
 }
 
@@ -532,13 +532,13 @@ func (s *pageAlloc) allocRange(base, npages uintptr) uintptr {
 // returned. If addr is higher than any mapped region, then
 // it returns maxOffAddr.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) findMappedAddr(addr offAddr) offAddr {
+// p.mheapLock must be held.
+func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
 	// If we're not in a test, validate first by checking mheap_.arenas.
 	// This is a fast path which is only safe to use outside of testing.
 	ai := arenaIndex(addr.addr())
-	if s.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
-		vAddr, ok := s.inUse.findAddrGreaterEqual(addr.addr())
+	if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
+		vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
 		if ok {
 			return offAddr{vAddr}
 		} else {
@@ -554,20 +554,20 @@ func (s *pageAlloc) findMappedAddr(addr offAddr) offAddr {
 // find searches for the first (address-ordered) contiguous free region of
 // npages in size and returns a base address for that region.
 //
-// It uses s.searchAddr to prune its search and assumes that no palloc chunks
-// below chunkIndex(s.searchAddr) contain any free memory at all.
+// It uses p.searchAddr to prune its search and assumes that no palloc chunks
+// below chunkIndex(p.searchAddr) contain any free memory at all.
 //
-// find also computes and returns a candidate s.searchAddr, which may or
-// may not prune more of the address space than s.searchAddr already does.
-// This candidate is always a valid s.searchAddr.
+// find also computes and returns a candidate p.searchAddr, which may or
+// may not prune more of the address space than p.searchAddr already does.
+// This candidate is always a valid p.searchAddr.
 //
 // find represents the slow path and the full radix tree search.
 //
 // Returns a base address of 0 on failure, in which case the candidate
 // searchAddr returned is invalid and must be ignored.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
+// p.mheapLock must be held.
+func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
 	// Search algorithm.
 	//
 	// This algorithm walks each level l of the radix tree from the root level
@@ -647,7 +647,7 @@ func (s *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
 	lastSumIdx := -1
 
 nextLevel:
-	for l := 0; l < len(s.summary); l++ {
+	for l := 0; l < len(p.summary); l++ {
 		// For the root level, entriesPerBlock is the whole level.
 		entriesPerBlock := 1 << levelBits[l]
 		logMaxPages := levelLogPages[l]
@@ -657,14 +657,14 @@ nextLevel:
 		i <<= levelBits[l]
 
 		// Slice out the block of entries we care about.
-		entries := s.summary[l][i : i+entriesPerBlock]
+		entries := p.summary[l][i : i+entriesPerBlock]
 
 		// Determine j0, the first index we should start iterating from.
 		// The searchAddr may help us eliminate iterations if we followed the
 		// searchAddr on the previous level or we're on the root leve, in which
 		// case the searchAddr should be the same as i after levelShift.
 		j0 := 0
-		if searchIdx := offAddrToLevelIndex(l, s.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
+		if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
 			j0 = searchIdx & (entriesPerBlock - 1)
 		}
 
@@ -729,7 +729,7 @@ nextLevel:
 			// We found a sufficiently large run of free pages straddling
 			// some boundary, so compute the address and return it.
 			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
-			return addr, s.findMappedAddr(firstFree.base)
+			return addr, p.findMappedAddr(firstFree.base)
 		}
 		if l == 0 {
 			// We're at level zero, so that means we've exhausted our search.
@@ -741,7 +741,7 @@ nextLevel:
 		// lied to us. In either case, dump some useful state and throw.
 		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
 		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
-		print("runtime: s.searchAddr = ", hex(s.searchAddr.addr()), ", i = ", i, "\n")
+		print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
 		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
 		for j := 0; j < len(entries); j++ {
 			sum := entries[j]
@@ -758,12 +758,12 @@ nextLevel:
 	// After iterating over all levels, i must contain a chunk index which
 	// is what the final level represents.
 	ci := chunkIdx(i)
-	j, searchIdx := s.chunkOf(ci).find(npages, 0)
+	j, searchIdx := p.chunkOf(ci).find(npages, 0)
 	if j == ^uint(0) {
 		// We couldn't find any space in this chunk despite the summaries telling
 		// us it should be there. There's likely a bug, so dump some state and throw.
-		sum := s.summary[len(s.summary)-1][i]
-		print("runtime: summary[", len(s.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
+		sum := p.summary[len(p.summary)-1][i]
+		print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
 		print("runtime: npages = ", npages, "\n")
 		throw("bad summary data")
 	}
@@ -775,7 +775,7 @@ nextLevel:
 	// found an even narrower free window.
 	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
 	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
-	return addr, s.findMappedAddr(firstFree.base)
+	return addr, p.findMappedAddr(firstFree.base)
 }
 
 // alloc allocates npages worth of memory from the page heap, returning the base
@@ -785,25 +785,25 @@ nextLevel:
 // Returns a 0 base address on failure, in which case other returned values
 // should be ignored.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
+// p.mheapLock must be held.
+func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 	// If the searchAddr refers to a region which has a higher address than
 	// any known chunk, then we know we're out of memory.
-	if chunkIndex(s.searchAddr.addr()) >= s.end {
+	if chunkIndex(p.searchAddr.addr()) >= p.end {
 		return 0, 0
 	}
 
 	// If npages has a chance of fitting in the chunk where the searchAddr is,
 	// search it directly.
 	searchAddr := minOffAddr
-	if pallocChunkPages-chunkPageIndex(s.searchAddr.addr()) >= uint(npages) {
+	if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
 		// npages is guaranteed to be no greater than pallocChunkPages here.
-		i := chunkIndex(s.searchAddr.addr())
-		if max := s.summary[len(s.summary)-1][i].max(); max >= uint(npages) {
-			j, searchIdx := s.chunkOf(i).find(npages, chunkPageIndex(s.searchAddr.addr()))
+		i := chunkIndex(p.searchAddr.addr())
+		if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
+			j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
 			if j == ^uint(0) {
 				print("runtime: max = ", max, ", npages = ", npages, "\n")
-				print("runtime: searchIdx = ", chunkPageIndex(s.searchAddr.addr()), ", s.searchAddr = ", hex(s.searchAddr.addr()), "\n")
+				print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
 				throw("bad summary data")
 			}
 			addr = chunkBase(i) + uintptr(j)*pageSize
@@ -813,7 +813,7 @@ func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 	}
 	// We failed to use a searchAddr for one reason or another, so try
 	// the slow path.
-	addr, searchAddr = s.find(npages)
+	addr, searchAddr = p.find(npages)
 	if addr == 0 {
 		if npages == 1 {
 			// We failed to find a single free page, the smallest unit
@@ -821,41 +821,41 @@ func (s *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
 			// exhausted. Otherwise, the heap still might have free
 			// space in it, just not enough contiguous space to
 			// accommodate npages.
-			s.searchAddr = maxSearchAddr
+			p.searchAddr = maxSearchAddr
 		}
 		return 0, 0
 	}
 Found:
 	// Go ahead and actually mark the bits now that we have an address.
-	scav = s.allocRange(addr, npages)
+	scav = p.allocRange(addr, npages)
 
 	// If we found a higher searchAddr, we know that all the
 	// heap memory before that searchAddr in an offset address space is
-	// allocated, so bump s.searchAddr up to the new one.
-	if s.searchAddr.lessThan(searchAddr) {
-		s.searchAddr = searchAddr
+	// allocated, so bump p.searchAddr up to the new one.
+	if p.searchAddr.lessThan(searchAddr) {
+		p.searchAddr = searchAddr
 	}
 	return addr, scav
 }
 
 // free returns npages worth of memory starting at base back to the page heap.
 //
-// s.mheapLock must be held.
-func (s *pageAlloc) free(base, npages uintptr) {
-	// If we're freeing pages below the s.searchAddr, update searchAddr.
-	if b := (offAddr{base}); b.lessThan(s.searchAddr) {
-		s.searchAddr = b
+// p.mheapLock must be held.
+func (p *pageAlloc) free(base, npages uintptr) {
+	// If we're freeing pages below the p.searchAddr, update searchAddr.
+	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
+		p.searchAddr = b
 	}
 
 	// Update the free high watermark for the scavenger.
 	limit := base + npages*pageSize - 1
-	if offLimit := (offAddr{limit}); s.scav.freeHWM.lessThan(offLimit) {
-		s.scav.freeHWM = offLimit
+	if offLimit := (offAddr{limit}); p.scav.freeHWM.lessThan(offLimit) {
+		p.scav.freeHWM = offLimit
 	}
 	if npages == 1 {
 		// Fast path: we're clearing a single bit, and we know exactly
 		// where it is, so mark it directly.
 		i := chunkIndex(base)
-		s.chunkOf(i).free1(chunkPageIndex(base))
+		p.chunkOf(i).free1(chunkPageIndex(base))
 	} else {
 		// Slow path: we're clearing more bits so we may need to iterate.
 		sc, ec := chunkIndex(base), chunkIndex(limit)
@@ -863,17 +863,17 @@ func (s *pageAlloc) free(base, npages uintptr) {
 
 		if sc == ec {
 			// The range doesn't cross any chunk boundaries.
-			s.chunkOf(sc).free(si, ei+1-si)
+			p.chunkOf(sc).free(si, ei+1-si)
 		} else {
 			// The range crosses at least one chunk boundary.
-			s.chunkOf(sc).free(si, pallocChunkPages-si)
+			p.chunkOf(sc).free(si, pallocChunkPages-si)
 			for c := sc + 1; c < ec; c++ {
-				s.chunkOf(c).freeAll()
+				p.chunkOf(c).freeAll()
 			}
-			s.chunkOf(ec).free(0, ei+1)
+			p.chunkOf(ec).free(0, ei+1)
 		}
 	}
-	s.update(base, npages, true, false)
+	p.update(base, npages, true, false)
 }
 
 const (
```
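Much of the update hunk above is the walk up the radix tree that recomputes each parent entry from its children via mergeSummaries. The sketch below shows the essence of that merge using an unpacked struct and hypothetical names (summary, merge) rather than the runtime's packed pallocSum; the interesting case is a free run that straddles the boundary between two adjacent children.

```go
package main

import "fmt"

// summary describes one radix-tree entry covering pagesPerEntry pages:
// start = free pages at the beginning of the region,
// max   = longest run of free pages anywhere in the region,
// end   = free pages at the end of the region.
type summary struct {
	start, max, end uint
}

// merge combines the summaries of consecutive, equally sized regions into the
// summary of the parent region.
func merge(children []summary, pagesPerEntry uint) summary {
	out := children[0]
	covered := pagesPerEntry // pages summarized so far
	for _, c := range children[1:] {
		// Extend out.start only while everything so far is completely free.
		if out.start == covered {
			out.start += c.start
		}
		// A run across the child boundary may beat both children's own maxima.
		if run := out.end + c.start; run > out.max {
			out.max = run
		}
		if c.max > out.max {
			out.max = c.max
		}
		// out.end keeps growing through fully free children, otherwise restarts.
		if c.end == pagesPerEntry {
			out.end += pagesPerEntry
		} else {
			out.end = c.end
		}
		covered += pagesPerEntry
	}
	return out
}

func main() {
	// Two children of 512 pages each: the first ends with 100 free pages,
	// the second starts with 200, so the parent's max run is 300.
	children := []summary{
		{start: 0, max: 120, end: 100},
		{start: 200, max: 200, end: 0},
	}
	fmt.Printf("%+v\n", merge(children, 512))
}
```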
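The searchAddr bookkeeping that alloc, find, and free keep consistent above has a simple core idea: remember an address below which no free page can exist, move it up when a search proves more of the prefix is allocated, and move it back down on free. A toy, non-runtime illustration of that hint discipline follows; bitmapAlloc and its methods are hypothetical and much simpler than the chunked, radix-tree-backed allocator in the diff.

```go
package main

import "fmt"

// bitmapAlloc is a toy first-fit page allocator: one bool per page plus a
// searchAddr-style hint. Invariant: no page with index < searchAddr is free.
type bitmapAlloc struct {
	used       []bool
	searchAddr int
}

// alloc finds the first run of n free pages at or above the hint, marks it
// allocated, and returns its start index (or -1 if none exists). Like find in
// the diff, it also tracks a candidate hint: the first free page it saw.
func (b *bitmapAlloc) alloc(n int) int {
	run, start, firstFree := 0, -1, -1
	for i := b.searchAddr; i < len(b.used); i++ {
		if b.used[i] {
			run, start = 0, -1
			continue
		}
		if firstFree < 0 {
			firstFree = i
		}
		if run == 0 {
			start = i
		}
		run++
		if run == n {
			for j := start; j < start+n; j++ {
				b.used[j] = true
			}
			// Every page scanned before firstFree was allocated, so the
			// hint can only move up; this mirrors bumping p.searchAddr.
			if firstFree > b.searchAddr {
				b.searchAddr = firstFree
			}
			return start
		}
	}
	return -1
}

// free releases [start, start+n) and, as in pageAlloc.free, pulls the hint
// back down if the freed range sits below it.
func (b *bitmapAlloc) free(start, n int) {
	for i := start; i < start+n; i++ {
		b.used[i] = false
	}
	if start < b.searchAddr {
		b.searchAddr = start
	}
}

func main() {
	b := &bitmapAlloc{used: make([]bool, 16)}
	a := b.alloc(4) // takes pages 0-3
	c := b.alloc(2) // takes pages 4-5, hint moves to 4
	fmt.Println(a, c, b.searchAddr)
	b.free(a, 4) // freeing below the hint drops it back to 0
	fmt.Println(b.searchAddr)
}
```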