author    Matthew Dempsky <mdempsky@google.com>    2022-08-04 10:12:28 -0700
committer Matthew Dempsky <mdempsky@google.com>    2022-08-04 10:12:28 -0700
commit    d558507db42d600e5ad82748bda0cb91df57b97d (patch)
tree      169457500d42144774eb68c5ab2ef70ad67aa673 /src/runtime
parent    c9f2150cfb3c1db87f6434f727c25403d985a6e4 (diff)
parent    85d87b9c7507628144db51bd1e7e80cc3afed128 (diff)
download  go-git-dev.unified.tar.gz
[dev.unified] all: merge master (85d87b9) into dev.unified
Merge List:

+ 2022-08-04 85d87b9c75 all: update vendored golang.org/x dependencies for Go 1.20 development
+ 2022-08-04 fb1bfd4d37 all: remove pre-Go 1.17 workarounds
+ 2022-08-04 44ff9bff0c runtime: clean up panic and deadlock lock ranks
+ 2022-08-04 f42dc0de74 runtime: make the lock rank DAG make more sense
+ 2022-08-04 d29a0282e9 runtime: add mayAcquire annotation for finlock
+ 2022-08-04 c5be4ed7df runtime: add missing trace lock edges
+ 2022-08-04 2b8a9a484f runtime: generate the lock ranking from a DAG description
+ 2022-08-04 ddfd639408 runtime: delete unused lock ranks
+ 2022-08-04 426ea5702b internal/dag: add a Graph type and make node order deterministic
+ 2022-08-04 d37cc9a8cd go/build, internal/dag: lift DAG parser into an internal package
+ 2022-08-04 ab0a94c6d3 cmd/dist: require Go 1.17 for building Go
+ 2022-08-04 1e3c19f3fe runtime: support riscv64 SV57 mode
+ 2022-08-03 f28fa952b5 make.bat, make.rc: show bootstrap toolchain version
+ 2022-08-03 87384801dc cmd/asm: update package doc to describe "-p" option
+ 2022-08-03 c6a2dada0d net: disable TestIPv6WriteMsgUDPAddrPortTargetAddrIPVersion [sic] on DragonflyBSD
+ 2022-08-02 29b9a328d2 runtime: trivial replacements of g in remaining files
+ 2022-08-02 c647264619 runtime: trivial replacements of g in signal_unix.go
+ 2022-08-02 399f50c9d7 runtime: tricky replacements of g in traceback.go
+ 2022-08-02 4509e951ec runtime: tricky replacements of g in proc.go
+ 2022-08-02 4400238ec8 runtime: trivial replacements of _g_ in remaining files
+ 2022-08-02 5999a28de8 runtime: trivial replacements of _g_ in os files
+ 2022-08-02 0e18cf6d09 runtime: trivial replacements of _g_ in GC files
+ 2022-08-02 4358a53a97 runtime: trivial replacements of _g_ in proc.go
+ 2022-08-02 b486518964 runtime: tricky replacements of _g_ in os3_solaris.go
+ 2022-08-02 54a0ab3f7b runtime: tricky replacements of _g_ in os3_plan9.go
+ 2022-08-02 4240ff764b runtime: tricky replacements of _g_ in signal_windows.go
+ 2022-08-02 8666d89ca8 runtime: tricky replacements of _g_ in signal_unix.go
+ 2022-08-02 74cee276fe runtime: tricky replacements of _g_ in trace.go
+ 2022-08-02 222799fde6 runtime: tricky replacements of _g_ in mgc.go
+ 2022-08-02 e9d7f54a1a runtime: tricky replacements of _g_ in proc.go
+ 2022-08-02 5e8d261918 runtime: rename _p_ to pp
+ 2022-08-02 0ad2ec6596 runtime: clean up dopanic_m
+ 2022-08-02 7e952962df runtime: clean up canpanic
+ 2022-08-02 9dbc0f3556 runtime: fix outdated g.m comment in traceback.go
+ 2022-08-02 d723df76da internal/goversion: update Version to 1.20
+ 2022-08-02 1b7e71e8ae all: disable tests that fail on Alpine
+ 2022-08-01 f2a9f3e2e0 test: improve generic type assertion test
+ 2022-08-01 27038b70f8 cmd/compile: fix wrong dict pass condition for type assertions
+ 2022-08-01 e99f53fed9 doc: move Go 1.19 release notes to x/website
+ 2022-08-01 8b13a073a1 doc: mention removal of cmd/compile's -importmap and -installsuffix flags
+ 2022-08-01 e95fd4c238 doc/go1.19: fix typo: EM_LONGARCH -> EM_LOONGARCH
+ 2022-08-01 dee3efd9f8 doc/go1.19: fix a few links that were missing trailing slashes
+ 2022-07-30 f32519e5fb runtime: fix typos
+ 2022-07-29 9a2001a8cc cmd/dist: always pass -short=true with -quick
+ 2022-07-28 5c8ec89cb5 doc/go1.19: minor adjustments and links
+ 2022-07-28 417be37048 doc/go1.19: improve the loong64 release notes
+ 2022-07-28 027855e8d8 os/exec: add GODEBUG setting to opt out of ErrDot changes

Change-Id: Idc0fbe93978c0dff7600b90a2c3ecc067fd9f5f2
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/cgocheck.go | 6
-rw-r--r--  src/runtime/chan.go | 2
-rw-r--r--  src/runtime/debug.go | 16
-rw-r--r--  src/runtime/export_debuglog_test.go | 8
-rw-r--r--  src/runtime/export_test.go | 24
-rw-r--r--  src/runtime/heapdump.go | 8
-rw-r--r--  src/runtime/lfstack_64bit.go | 12
-rw-r--r--  src/runtime/lockrank.go | 291
-rw-r--r--  src/runtime/lockrank_on.go | 3
-rw-r--r--  src/runtime/lockrank_test.go | 46
-rw-r--r--  src/runtime/malloc.go | 7
-rw-r--r--  src/runtime/mbitmap.go | 5
-rw-r--r--  src/runtime/mfinal.go | 6
-rw-r--r--  src/runtime/mgc.go | 27
-rw-r--r--  src/runtime/mgcmark.go | 12
-rw-r--r--  src/runtime/mgcpacer.go | 12
-rw-r--r--  src/runtime/mgcsweep.go | 8
-rw-r--r--  src/runtime/mklockrank.go | 360
-rw-r--r--  src/runtime/msan.go | 4
-rw-r--r--  src/runtime/mwbbuf.go | 16
-rw-r--r--  src/runtime/os2_aix.go | 32
-rw-r--r--  src/runtime/os3_plan9.go | 10
-rw-r--r--  src/runtime/os3_solaris.go | 15
-rw-r--r--  src/runtime/os_js.go | 6
-rw-r--r--  src/runtime/os_netbsd.go | 16
-rw-r--r--  src/runtime/os_openbsd.go | 8
-rw-r--r--  src/runtime/os_plan9.go | 26
-rw-r--r--  src/runtime/panic.go | 40
-rw-r--r--  src/runtime/proc.go | 1028
-rw-r--r--  src/runtime/race.go | 32
-rw-r--r--  src/runtime/rdebug.go | 6
-rw-r--r--  src/runtime/runtime.go | 1
-rw-r--r--  src/runtime/runtime1.go | 22
-rw-r--r--  src/runtime/runtime_test.go | 7
-rw-r--r--  src/runtime/signal_unix.go | 116
-rw-r--r--  src/runtime/signal_windows.go | 31
-rw-r--r--  src/runtime/testdata/testprog/vdso.go | 2
-rw-r--r--  src/runtime/trace.go | 85
-rw-r--r--  src/runtime/traceback.go | 12
39 files changed, 1333 insertions, 1035 deletions
diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index 74a2ec09bc..6b492093ea 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -32,14 +32,14 @@ func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
// If we are running on the system stack then dst might be an
// address on the stack, which is OK.
- g := getg()
- if g == g.m.g0 || g == g.m.gsignal {
+ gp := getg()
+ if gp == gp.m.g0 || gp == gp.m.gsignal {
return
}
// Allocating memory can write to various mfixalloc structs
// that look like they are non-Go memory.
- if g.m.mallocing != 0 {
+ if gp.m.mallocing != 0 {
return
}
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index 993af7063b..ca516ad9e8 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -780,7 +780,7 @@ func (q *waitq) dequeue() *sudog {
} else {
y.prev = nil
q.first = y
- sgp.next = nil // mark as removed (see dequeueSudog)
+ sgp.next = nil // mark as removed (see dequeueSudoG)
}
// if a goroutine was put on this queue because of a
diff --git a/src/runtime/debug.go b/src/runtime/debug.go
index 0ab23e0eb7..669c36f0d5 100644
--- a/src/runtime/debug.go
+++ b/src/runtime/debug.go
@@ -85,13 +85,13 @@ func debug_modinfo() string {
//go:linkname mayMoreStackPreempt
func mayMoreStackPreempt() {
// Don't do anything on the g0 or gsignal stack.
- g := getg()
- if g == g.m.g0 || g == g.m.gsignal {
+ gp := getg()
+ if gp == gp.m.g0 || gp == gp.m.gsignal {
return
}
// Force a preemption, unless the stack is already poisoned.
- if g.stackguard0 < stackPoisonMin {
- g.stackguard0 = stackPreempt
+ if gp.stackguard0 < stackPoisonMin {
+ gp.stackguard0 = stackPreempt
}
}
@@ -104,12 +104,12 @@ func mayMoreStackPreempt() {
//go:linkname mayMoreStackMove
func mayMoreStackMove() {
// Don't do anything on the g0 or gsignal stack.
- g := getg()
- if g == g.m.g0 || g == g.m.gsignal {
+ gp := getg()
+ if gp == gp.m.g0 || gp == gp.m.gsignal {
return
}
// Force stack movement, unless the stack is already poisoned.
- if g.stackguard0 < stackPoisonMin {
- g.stackguard0 = stackForceMove
+ if gp.stackguard0 < stackPoisonMin {
+ gp.stackguard0 = stackForceMove
}
}
diff --git a/src/runtime/export_debuglog_test.go b/src/runtime/export_debuglog_test.go
index 1a9074e646..c9dfdcb393 100644
--- a/src/runtime/export_debuglog_test.go
+++ b/src/runtime/export_debuglog_test.go
@@ -25,11 +25,11 @@ func (l *dlogger) S(x string) *dlogger { return l.s(x) }
func (l *dlogger) PC(x uintptr) *dlogger { return l.pc(x) }
func DumpDebugLog() string {
- g := getg()
- g.writebuf = make([]byte, 0, 1<<20)
+ gp := getg()
+ gp.writebuf = make([]byte, 0, 1<<20)
printDebugLog()
- buf := g.writebuf
- g.writebuf = nil
+ buf := gp.writebuf
+ gp.writebuf = nil
return string(buf)
}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 9639946fa9..ab0537d8b2 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -84,23 +84,23 @@ func GCMask(x any) (ret []byte) {
}
func RunSchedLocalQueueTest() {
- _p_ := new(p)
- gs := make([]g, len(_p_.runq))
+ pp := new(p)
+ gs := make([]g, len(pp.runq))
Escape(gs) // Ensure gs doesn't move, since we use guintptrs
- for i := 0; i < len(_p_.runq); i++ {
- if g, _ := runqget(_p_); g != nil {
+ for i := 0; i < len(pp.runq); i++ {
+ if g, _ := runqget(pp); g != nil {
throw("runq is not empty initially")
}
for j := 0; j < i; j++ {
- runqput(_p_, &gs[i], false)
+ runqput(pp, &gs[i], false)
}
for j := 0; j < i; j++ {
- if g, _ := runqget(_p_); g != &gs[i] {
+ if g, _ := runqget(pp); g != &gs[i] {
print("bad element at iter ", i, "/", j, "\n")
throw("bad element")
}
}
- if g, _ := runqget(_p_); g != nil {
+ if g, _ := runqget(pp); g != nil {
throw("runq is not empty afterwards")
}
}
@@ -460,17 +460,17 @@ func MapBucketsPointerIsNil(m map[int]int) bool {
}
func LockOSCounts() (external, internal uint32) {
- g := getg()
- if g.m.lockedExt+g.m.lockedInt == 0 {
- if g.lockedm != 0 {
+ gp := getg()
+ if gp.m.lockedExt+gp.m.lockedInt == 0 {
+ if gp.lockedm != 0 {
panic("lockedm on non-locked goroutine")
}
} else {
- if g.lockedm == 0 {
+ if gp.lockedm == 0 {
panic("nil lockedm on locked goroutine")
}
}
- return g.m.lockedExt, g.m.lockedInt
+ return gp.m.lockedExt, gp.m.lockedInt
}
//go:noinline
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index c7f2b7a443..543efeded4 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -693,9 +693,9 @@ func mdump(m *MemStats) {
func writeheapdump_m(fd uintptr, m *MemStats) {
assertWorldStopped()
- _g_ := getg()
- casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
- _g_.waitreason = waitReasonDumpingHeap
+ gp := getg()
+ casgstatus(gp.m.curg, _Grunning, _Gwaiting)
+ gp.waitreason = waitReasonDumpingHeap
// Set dump file.
dumpfd = fd
@@ -710,7 +710,7 @@ func writeheapdump_m(fd uintptr, m *MemStats) {
tmpbuf = nil
}
- casgstatus(_g_.m.curg, _Gwaiting, _Grunning)
+ casgstatus(gp.m.curg, _Gwaiting, _Grunning)
}
// dumpint() the kind & offset of each field in an object.
diff --git a/src/runtime/lfstack_64bit.go b/src/runtime/lfstack_64bit.go
index 154130cf63..88cbd3bcc7 100644
--- a/src/runtime/lfstack_64bit.go
+++ b/src/runtime/lfstack_64bit.go
@@ -36,12 +36,21 @@ const (
// We use one bit to distinguish between the two ranges.
aixAddrBits = 57
aixCntBits = 64 - aixAddrBits + 3
+
+ // riscv64 SV57 mode gives 56 bits of userspace VA.
+ // lfstack code supports it, but broader support for SV57 mode is incomplete,
+ // and there may be other issues (see #54104).
+ riscv64AddrBits = 56
+ riscv64CntBits = 64 - riscv64AddrBits + 3
)
func lfstackPack(node *lfnode, cnt uintptr) uint64 {
if GOARCH == "ppc64" && GOOS == "aix" {
return uint64(uintptr(unsafe.Pointer(node)))<<(64-aixAddrBits) | uint64(cnt&(1<<aixCntBits-1))
}
+ if GOARCH == "riscv64" {
+ return uint64(uintptr(unsafe.Pointer(node)))<<(64-riscv64AddrBits) | uint64(cnt&(1<<riscv64CntBits-1))
+ }
return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
}
@@ -54,5 +63,8 @@ func lfstackUnpack(val uint64) *lfnode {
if GOARCH == "ppc64" && GOOS == "aix" {
return (*lfnode)(unsafe.Pointer(uintptr((val >> aixCntBits << 3) | 0xa<<56)))
}
+ if GOARCH == "riscv64" {
+ return (*lfnode)(unsafe.Pointer(uintptr(val >> riscv64CntBits << 3)))
+ }
return (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
}
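
The riscv64 case above packs a 56-bit, 8-byte-aligned node address and an 11-bit counter (64 - 56 + 3) into one uint64. Below is a minimal standalone sketch of that bit layout, using plain integers instead of unsafe pointers; the constant names mirror the diff, but none of this is runtime code.

// lfstack-style packing for riscv64 SV57: the pointer's three zero
// alignment bits are reclaimed for the counter.
package main

import "fmt"

const (
	addrBits = 56                // riscv64AddrBits in the diff
	cntBits  = 64 - addrBits + 3 // 11 counter bits
)

func pack(addr, cnt uint64) uint64 {
	// Address in the high bits, counter in the low cntBits.
	return addr<<(64-addrBits) | cnt&(1<<cntBits-1)
}

func unpack(val uint64) uint64 {
	// Drop the counter, then restore the 8-byte alignment shift.
	return val >> cntBits << 3
}

func main() {
	addr := uint64(0x0012_3456_789a_bcd8) // 8-byte aligned, fits in 56 bits
	v := pack(addr, 5)
	fmt.Printf("packed %#016x -> addr %#x, cnt %d\n", v, unpack(v), v&(1<<cntBits-1))
}

The round trip only works because the address is 8-byte aligned (low three bits zero) and fits in 56 bits, which is exactly what the SV57 comment in the diff relies on.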
diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go
index bb0b189fc7..50d35dbc57 100644
--- a/src/runtime/lockrank.go
+++ b/src/runtime/lockrank.go
@@ -1,183 +1,118 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file records the static ranks of the locks in the runtime. If a lock
-// is not given a rank, then it is assumed to be a leaf lock, which means no other
-// lock can be acquired while it is held. Therefore, leaf locks do not need to be
-// given an explicit rank. We list all of the architecture-independent leaf locks
-// for documentation purposes, but don't list any of the architecture-dependent
-// locks (which are all leaf locks). debugLock is ignored for ranking, since it is used
-// when printing out lock ranking errors.
-//
-// lockInit(l *mutex, rank int) is used to set the rank of lock before it is used.
-// If there is no clear place to initialize a lock, then the rank of a lock can be
-// specified during the lock call itself via lockWithrank(l *mutex, rank int).
-//
-// Besides the static lock ranking (which is a total ordering of the locks), we
-// also represent and enforce the actual partial order among the locks in the
-// arcs[] array below. That is, if it is possible that lock B can be acquired when
-// lock A is the previous acquired lock that is still held, then there should be
-// an entry for A in arcs[B][]. We will currently fail not only if the total order
-// (the lock ranking) is violated, but also if there is a missing entry in the
-// partial order.
+// Code generated by mklockrank.go; DO NOT EDIT.
package runtime
type lockRank int
-// Constants representing the lock rank of the architecture-independent locks in
-// the runtime. Locks with lower rank must be taken before locks with higher
-// rank.
+// Constants representing the ranks of all non-leaf runtime locks, in rank order.
+// Locks with lower rank must be taken before locks with higher rank,
+// in addition to satisfying the partial order in lockPartialOrder.
+// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
- lockRankDummy lockRank = iota
+ lockRankUnknown lockRank = iota
- // Locks held above sched
lockRankSysmon
lockRankScavenge
lockRankForcegc
+ lockRankDefer
lockRankSweepWaiters
lockRankAssistQueue
- lockRankCpuprof
lockRankSweep
-
lockRankPollDesc
+ lockRankCpuprof
lockRankSched
- lockRankDeadlock
lockRankAllg
lockRankAllp
-
- lockRankTimers // Multiple timers locked simultaneously in destroy()
+ lockRankTimers
+ lockRankNetpollInit
+ lockRankHchan
+ lockRankNotifyList
+ lockRankSudog
+ lockRankRwmutexW
+ lockRankRwmutexR
+ lockRankRoot
lockRankItab
lockRankReflectOffs
- lockRankHchan // Multiple hchans acquired in lock order in syncadjustsudogs()
+ // TRACEGLOBAL
lockRankTraceBuf
- lockRankFin
- lockRankNotifyList
lockRankTraceStrings
+ // MALLOC
+ lockRankFin
+ lockRankGcBitsArenas
+ lockRankMheapSpecial
lockRankMspanSpecial
+ lockRankSpanSetSpine
+ // MPROF
lockRankProfInsert
lockRankProfBlock
lockRankProfMemActive
lockRankProfMemFuture
- lockRankGcBitsArenas
- lockRankRoot
+ // TRACE
lockRankTrace
lockRankTraceStackTab
- lockRankNetpollInit
-
- lockRankRwmutexW
- lockRankRwmutexR
-
- lockRankSpanSetSpine
+ // STACKGROW
lockRankGscan
lockRankStackpool
lockRankStackLarge
- lockRankDefer
- lockRankSudog
-
- // Memory-related non-leaf locks
+ lockRankHchanLeaf
+ // WB
lockRankWbufSpans
lockRankMheap
- lockRankMheapSpecial
-
- // Memory-related leaf locks
lockRankGlobalAlloc
- lockRankPageAllocScav
-
- // Other leaf locks
- lockRankGFree
- // Generally, hchan must be acquired before gscan. But in one specific
- // case (in syncadjustsudogs from markroot after the g has been suspended
- // by suspendG), we allow gscan to be acquired, and then an hchan lock. To
- // allow this case, we get this lockRankHchanLeaf rank in
- // syncadjustsudogs(), rather than lockRankHchan. By using this special
- // rank, we don't allow any further locks to be acquired other than more
- // hchan locks.
- lockRankHchanLeaf
lockRankPanic
-
- // Leaf locks with no dependencies, so these constants are not actually used anywhere.
- // There are other architecture-dependent leaf locks as well.
- lockRankNewmHandoff
- lockRankDebugPtrmask
- lockRankFaketimeState
- lockRankTicks
- lockRankRaceFini
- lockRankPollCache
- lockRankDebug
+ lockRankDeadlock
)
-// lockRankLeafRank is the rank of lock that does not have a declared rank, and hence is
-// a leaf lock.
+// lockRankLeafRank is the rank of lock that does not have a declared rank,
+// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
-// lockNames gives the names associated with each of the above ranks
+// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
- lockRankDummy: "",
-
- lockRankSysmon: "sysmon",
- lockRankScavenge: "scavenge",
- lockRankForcegc: "forcegc",
- lockRankSweepWaiters: "sweepWaiters",
- lockRankAssistQueue: "assistQueue",
- lockRankCpuprof: "cpuprof",
- lockRankSweep: "sweep",
-
- lockRankPollDesc: "pollDesc",
- lockRankSched: "sched",
- lockRankDeadlock: "deadlock",
- lockRankAllg: "allg",
- lockRankAllp: "allp",
-
- lockRankTimers: "timers",
- lockRankItab: "itab",
- lockRankReflectOffs: "reflectOffs",
-
+ lockRankSysmon: "sysmon",
+ lockRankScavenge: "scavenge",
+ lockRankForcegc: "forcegc",
+ lockRankDefer: "defer",
+ lockRankSweepWaiters: "sweepWaiters",
+ lockRankAssistQueue: "assistQueue",
+ lockRankSweep: "sweep",
+ lockRankPollDesc: "pollDesc",
+ lockRankCpuprof: "cpuprof",
+ lockRankSched: "sched",
+ lockRankAllg: "allg",
+ lockRankAllp: "allp",
+ lockRankTimers: "timers",
+ lockRankNetpollInit: "netpollInit",
lockRankHchan: "hchan",
- lockRankTraceBuf: "traceBuf",
- lockRankFin: "fin",
lockRankNotifyList: "notifyList",
+ lockRankSudog: "sudog",
+ lockRankRwmutexW: "rwmutexW",
+ lockRankRwmutexR: "rwmutexR",
+ lockRankRoot: "root",
+ lockRankItab: "itab",
+ lockRankReflectOffs: "reflectOffs",
+ lockRankTraceBuf: "traceBuf",
lockRankTraceStrings: "traceStrings",
+ lockRankFin: "fin",
+ lockRankGcBitsArenas: "gcBitsArenas",
+ lockRankMheapSpecial: "mheapSpecial",
lockRankMspanSpecial: "mspanSpecial",
+ lockRankSpanSetSpine: "spanSetSpine",
lockRankProfInsert: "profInsert",
lockRankProfBlock: "profBlock",
lockRankProfMemActive: "profMemActive",
lockRankProfMemFuture: "profMemFuture",
- lockRankGcBitsArenas: "gcBitsArenas",
- lockRankRoot: "root",
lockRankTrace: "trace",
lockRankTraceStackTab: "traceStackTab",
- lockRankNetpollInit: "netpollInit",
-
- lockRankRwmutexW: "rwmutexW",
- lockRankRwmutexR: "rwmutexR",
-
- lockRankSpanSetSpine: "spanSetSpine",
- lockRankGscan: "gscan",
- lockRankStackpool: "stackpool",
- lockRankStackLarge: "stackLarge",
- lockRankDefer: "defer",
- lockRankSudog: "sudog",
-
- lockRankWbufSpans: "wbufSpans",
- lockRankMheap: "mheap",
- lockRankMheapSpecial: "mheapSpecial",
-
- lockRankGlobalAlloc: "globalAlloc.mutex",
- lockRankPageAllocScav: "pageAlloc.scav.lock",
-
- lockRankGFree: "gFree",
- lockRankHchanLeaf: "hchanLeaf",
- lockRankPanic: "panic",
-
- lockRankNewmHandoff: "newmHandoff.lock",
- lockRankDebugPtrmask: "debugPtrmask.lock",
- lockRankFaketimeState: "faketimeState.lock",
- lockRankTicks: "ticks.lock",
- lockRankRaceFini: "raceFiniLock",
- lockRankPollCache: "pollCache.lock",
- lockRankDebug: "debugLock",
+ lockRankGscan: "gscan",
+ lockRankStackpool: "stackpool",
+ lockRankStackLarge: "stackLarge",
+ lockRankHchanLeaf: "hchanLeaf",
+ lockRankWbufSpans: "wbufSpans",
+ lockRankMheap: "mheap",
+ lockRankGlobalAlloc: "globalAlloc",
+ lockRankPanic: "panic",
+ lockRankDeadlock: "deadlock",
}
func (rank lockRank) String() string {
@@ -187,74 +122,60 @@ func (rank lockRank) String() string {
if rank == lockRankLeafRank {
return "LEAF"
}
+ if rank < 0 || int(rank) >= len(lockNames) {
+ return "BAD RANK"
+ }
return lockNames[rank]
}
-// lockPartialOrder is a partial order among the various lock types, listing the
-// immediate ordering that has actually been observed in the runtime. Each entry
-// (which corresponds to a particular lock rank) specifies the list of locks
-// that can already be held immediately "above" it.
+// lockPartialOrder is the transitive closure of the lock rank graph.
+// An entry for rank X lists all of the ranks that can already be held
+// when rank X is acquired.
//
-// So, for example, the lockRankSched entry shows that all the locks preceding
-// it in rank can actually be held. The allp lock shows that only the sysmon or
-// sched lock can be held immediately above it when it is acquired.
+// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
- lockRankDummy: {},
lockRankSysmon: {},
lockRankScavenge: {lockRankSysmon},
lockRankForcegc: {lockRankSysmon},
+ lockRankDefer: {},
lockRankSweepWaiters: {},
lockRankAssistQueue: {},
- lockRankCpuprof: {},
lockRankSweep: {},
lockRankPollDesc: {},
- lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc},
- lockRankDeadlock: {lockRankDeadlock},
- lockRankAllg: {lockRankSysmon, lockRankSched},
- lockRankAllp: {lockRankSysmon, lockRankSched},
- lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankPollDesc, lockRankSched, lockRankAllp, lockRankTimers},
+ lockRankCpuprof: {},
+ lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof},
+ lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},
+ lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched},
+ lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},
+ lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers},
+ lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan},
+ lockRankNotifyList: {},
+ lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan, lockRankNotifyList},
+ lockRankRwmutexW: {},
+ lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},
+ lockRankRoot: {},
lockRankItab: {},
lockRankReflectOffs: {lockRankItab},
- lockRankHchan: {lockRankScavenge, lockRankSweep, lockRankHchan},
lockRankTraceBuf: {lockRankSysmon, lockRankScavenge},
- lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankSched, lockRankAllg, lockRankTimers, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf},
- lockRankNotifyList: {},
- lockRankTraceStrings: {lockRankTraceBuf},
- lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings, lockRankProfMemActive},
- lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankRoot: {},
- lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankHchan, lockRankTraceBuf, lockRankTraceStrings, lockRankRoot},
- lockRankTraceStackTab: {lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankSched, lockRankAllg, lockRankTimers, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankRoot, lockRankTrace},
- lockRankNetpollInit: {lockRankTimers},
-
- lockRankRwmutexW: {},
- lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW},
-
- lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankSpanSetSpine},
- lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankTraceStackTab, lockRankNetpollInit, lockRankRwmutexR, lockRankSpanSetSpine, lockRankGscan},
- lockRankStackLarge: {lockRankSysmon, lockRankAssistQueue, lockRankSched, lockRankItab, lockRankHchan, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankSpanSetSpine, lockRankGscan},
- lockRankDefer: {},
- lockRankSudog: {lockRankHchan, lockRankNotifyList},
- lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankRoot, lockRankTrace, lockRankGscan, lockRankDefer, lockRankSudog},
- lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankSweepWaiters, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankFin, lockRankNotifyList, lockRankTraceStrings, lockRankMspanSpecial, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGcBitsArenas, lockRankRoot, lockRankTrace, lockRankSpanSetSpine, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankDefer, lockRankSudog, lockRankWbufSpans},
- lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankAssistQueue, lockRankCpuprof, lockRankSweep, lockRankPollDesc, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankItab, lockRankReflectOffs, lockRankHchan, lockRankTraceBuf, lockRankNotifyList, lockRankTraceStrings},
- lockRankGlobalAlloc: {lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankSpanSetSpine, lockRankMheap, lockRankMheapSpecial},
- lockRankPageAllocScav: {lockRankMheap},
-
- lockRankGFree: {lockRankSched},
- lockRankHchanLeaf: {lockRankGscan, lockRankHchanLeaf},
- lockRankPanic: {lockRankDeadlock}, // plus any other lock held on throw.
-
- lockRankNewmHandoff: {},
- lockRankDebugPtrmask: {},
- lockRankFaketimeState: {},
- lockRankTicks: {},
- lockRankRaceFini: {},
- lockRankPollCache: {},
- lockRankDebug: {},
+ lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf},
+ lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings},
+ lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive},
+ lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin},
+ lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankTrace},
+ lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab},
+ lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankGscan},
+ lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankGscan},
+ lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankGscan, lockRankHchanLeaf},
+ lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankGscan},
+ lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans},
+ lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankGcBitsArenas, lockRankMheapSpecial, lockRankMspanSpecial, lockRankSpanSetSpine, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankTrace, lockRankTraceStackTab, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap},
+ lockRankPanic: {},
+ lockRankDeadlock: {lockRankPanic, lockRankDeadlock},
}
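
Per the comment above, the entry for rank X in lockPartialOrder lists every rank that may already be held when X is acquired, and self-cyclic ranks list themselves. The following is a standalone sketch of how such a table can be consulted; the ranks and edges are made up, and this is not the runtime's actual checker in lockrank_on.go.

package main

import "fmt"

type lockRank int

const (
	rankA lockRank = iota
	rankB
	rankC
)

// partialOrder[r] lists the ranks that may already be held when r is acquired.
var partialOrder = [][]lockRank{
	rankA: {},
	rankB: {rankA},
	rankC: {rankA, rankB, rankC}, // rankC allows self-cycles
}

// okToAcquire reports whether taking 'next' is consistent with the table
// while every rank in 'held' is already held.
func okToAcquire(held []lockRank, next lockRank) bool {
	for _, h := range held {
		ok := false
		for _, allowed := range partialOrder[next] {
			if h == allowed {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(okToAcquire([]lockRank{rankA}, rankB)) // true
	fmt.Println(okToAcquire([]lockRank{rankB}, rankA)) // false: wrong order
	fmt.Println(okToAcquire([]lockRank{rankC}, rankC)) // true: self-cycle allowed
}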
diff --git a/src/runtime/lockrank_on.go b/src/runtime/lockrank_on.go
index a170569d6e..23adad7660 100644
--- a/src/runtime/lockrank_on.go
+++ b/src/runtime/lockrank_on.go
@@ -24,6 +24,9 @@ type lockRankStruct struct {
pad int
}
+// lockInit(l *mutex, rank int) sets the rank of lock before it is used.
+// If there is no clear place to initialize a lock, then the rank of a lock can be
+// specified during the lock call itself via lockWithRank(l *mutex, rank int).
func lockInit(l *mutex, rank lockRank) {
l.rank = rank
}
diff --git a/src/runtime/lockrank_test.go b/src/runtime/lockrank_test.go
index 4b2fc0eaee..a7b1b8df7c 100644
--- a/src/runtime/lockrank_test.go
+++ b/src/runtime/lockrank_test.go
@@ -1,41 +1,29 @@
-// Copyright 2021 The Go Authors. All rights reserved.
+// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
- . "runtime"
+ "bytes"
+ "internal/testenv"
+ "os"
+ "os/exec"
"testing"
)
-// Check that the partial order in lockPartialOrder fits within the total order
-// determined by the order of the lockRank constants.
-func TestLockRankPartialOrder(t *testing.T) {
- for r, list := range LockPartialOrder {
- rank := LockRank(r)
- for _, e := range list {
- entry := LockRank(e)
- if entry > rank {
- t.Errorf("lockPartialOrder row %v entry %v is inconsistent with total lock ranking order", rank, entry)
- }
- }
+// Test that the generated code for the lock rank graph is up-to-date.
+func TestLockRankGenerated(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ want, err := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), "run", "mklockrank.go")).CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
}
-}
-
-// Verify that partial order lists are kept sorted. This is a purely cosemetic
-// check to make manual reviews simpler. It does not affect correctness, unlike
-// the above test.
-func TestLockRankPartialOrderSortedEntries(t *testing.T) {
- for r, list := range LockPartialOrder {
- rank := LockRank(r)
- var prev LockRank
- for _, e := range list {
- entry := LockRank(e)
- if entry <= prev {
- t.Errorf("Partial order for rank %v out of order: %v <= %v in %v", rank, entry, prev, list)
- }
- prev = entry
- }
+ got, err := os.ReadFile("lockrank.go")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(want, got) {
+ t.Fatalf("lockrank.go is out of date. Please run go generate.")
}
}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index eb24fdb0e8..b044e29d95 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -847,6 +847,11 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if size == 0 {
return unsafe.Pointer(&zerobase)
}
+
+ // It's possible for any malloc to trigger sweeping, which may in
+ // turn queue finalizers. Record this dynamic lock edge.
+ lockRankMayQueueFinalizer()
+
userSize := size
if asanenabled {
// Refer to ASAN runtime library, the malloc() function allocates extra memory,
@@ -1245,7 +1250,7 @@ func nextSample() uintptr {
}
if GOOS == "plan9" {
// Plan 9 doesn't support floating point in note handler.
- if g := getg(); g == g.m.gsignal {
+ if gp := getg(); gp == gp.m.gsignal {
return nextSampleNoFP()
}
}
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index a3a6590d65..fcf59b8b3c 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -2028,11 +2028,10 @@ func getgcmask(ep any) (mask []byte) {
}
// stack
- if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
+ if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
var frame stkframe
frame.sp = uintptr(p)
- _g_ := getg()
- gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
+ gentraceback(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
if frame.fn.valid() {
locals, _, _ := getStackMap(&frame, nil, false)
if locals.n == 0 {
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index f3f3a79fa5..a379cce8a3 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -75,6 +75,12 @@ var finalizer1 = [...]byte{
0<<0 | 1<<1 | 1<<2 | 1<<3 | 1<<4 | 0<<5 | 1<<6 | 1<<7,
}
+// lockRankMayQueueFinalizer records the lock ranking effects of a
+// function that may call queuefinalizer.
+func lockRankMayQueueFinalizer() {
+ lockWithRankMayAcquire(&finlock, getLockRank(&finlock))
+}
+
func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
if gcphase != _GCoff {
// Currently we assume that the finalizer queue won't
diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go
index 63e04636d7..84a7216b10 100644
--- a/src/runtime/mgc.go
+++ b/src/runtime/mgc.go
@@ -811,21 +811,21 @@ top:
// result in a deadlock as we attempt to preempt a worker that's
// trying to preempt us (e.g. for a stack scan).
casgstatus(gp, _Grunning, _Gwaiting)
- forEachP(func(_p_ *p) {
+ forEachP(func(pp *p) {
// Flush the write barrier buffer, since this may add
// work to the gcWork.
- wbBufFlush1(_p_)
+ wbBufFlush1(pp)
// Flush the gcWork, since this may create global work
// and set the flushedWork flag.
//
// TODO(austin): Break up these workbufs to
// better distribute work.
- _p_.gcw.dispose()
+ pp.gcw.dispose()
// Collect the flushedWork flag.
- if _p_.gcw.flushedWork {
+ if pp.gcw.flushedWork {
atomic.Xadd(&gcMarkDoneFlushed, 1)
- _p_.gcw.flushedWork = false
+ pp.gcw.flushedWork = false
}
})
casgstatus(gp, _Gwaiting, _Grunning)
@@ -929,11 +929,10 @@ func gcMarkTermination() {
mp := acquirem()
mp.preemptoff = "gcing"
- _g_ := getg()
- _g_.m.traceback = 2
- gp := _g_.m.curg
- casgstatus(gp, _Grunning, _Gwaiting)
- gp.waitreason = waitReasonGarbageCollection
+ mp.traceback = 2
+ curgp := mp.curg
+ casgstatus(curgp, _Grunning, _Gwaiting)
+ curgp.waitreason = waitReasonGarbageCollection
// Run gc on the g0 stack. We do this so that the g stack
// we're currently running on will no longer change. Cuts
@@ -972,8 +971,8 @@ func gcMarkTermination() {
gcSweep(work.mode)
})
- _g_.m.traceback = 0
- casgstatus(gp, _Gwaiting, _Grunning)
+ mp.traceback = 0
+ casgstatus(curgp, _Gwaiting, _Grunning)
if trace.enabled {
traceGCDone()
@@ -1075,8 +1074,8 @@ func gcMarkTermination() {
// is necessary to sweep all spans, we need to ensure all
// mcaches are flushed before we start the next GC cycle.
systemstack(func() {
- forEachP(func(_p_ *p) {
- _p_.mcache.prepareForSweep()
+ forEachP(func(pp *p) {
+ pp.mcache.prepareForSweep()
})
})
// Now that we've swept stale spans in mcaches, they don't
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index 74637072c5..551b4c447e 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -595,15 +595,15 @@ func gcAssistAlloc1(gp *g, scanWork int64) {
}
now := nanotime()
duration := now - startTime
- _p_ := gp.m.p.ptr()
- _p_.gcAssistTime += duration
+ pp := gp.m.p.ptr()
+ pp.gcAssistTime += duration
if trackLimiterEvent {
- _p_.limiterEvent.stop(limiterEventMarkAssist, now)
+ pp.limiterEvent.stop(limiterEventMarkAssist, now)
}
- if _p_.gcAssistTime > gcAssistTimeSlack {
- gcController.assistTime.Add(_p_.gcAssistTime)
+ if pp.gcAssistTime > gcAssistTimeSlack {
+ gcController.assistTime.Add(pp.gcAssistTime)
gcCPULimiter.update(now)
- _p_.gcAssistTime = 0
+ pp.gcAssistTime = 0
}
}
diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go
index 2d9fd27748..77abee73da 100644
--- a/src/runtime/mgcpacer.go
+++ b/src/runtime/mgcpacer.go
@@ -805,9 +805,9 @@ func (c *gcControllerState) enlistWorker() {
}
}
-// findRunnableGCWorker returns a background mark worker for _p_ if it
+// findRunnableGCWorker returns a background mark worker for pp if it
// should be run. This must only be called when gcBlackenEnabled != 0.
-func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64) {
+func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
if gcBlackenEnabled == 0 {
throw("gcControllerState.findRunnable: blackening not enabled")
}
@@ -823,7 +823,7 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64)
gcCPULimiter.update(now)
}
- if !gcMarkWorkAvailable(_p_) {
+ if !gcMarkWorkAvailable(pp) {
// No work to be done right now. This can happen at
// the end of the mark phase when there are still
// assists tapering off. Don't bother running a worker
@@ -864,7 +864,7 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64)
if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
// This P is now dedicated to marking until the end of
// the concurrent mark phase.
- _p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
+ pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
} else if c.fractionalUtilizationGoal == 0 {
// No need for fractional workers.
gcBgMarkWorkerPool.push(&node.node)
@@ -875,13 +875,13 @@ func (c *gcControllerState) findRunnableGCWorker(_p_ *p, now int64) (*g, int64)
//
// This should be kept in sync with pollFractionalWorkerExit.
delta := now - c.markStartTime
- if delta > 0 && float64(_p_.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
+ if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
// Nope. No need to run a fractional worker.
gcBgMarkWorkerPool.push(&node.node)
return nil, now
}
// Run a fractional worker.
- _p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
+ pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode
}
// Run the background mark worker.
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index de57f18c4f..2ac5d507dd 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -431,8 +431,8 @@ func (s *mspan) ensureSwept() {
// Caller must disable preemption.
// Otherwise when this function returns the span can become unswept again
// (if GC is triggered on another goroutine).
- _g_ := getg()
- if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
+ gp := getg()
+ if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
throw("mspan.ensureSwept: m is not locked")
}
@@ -470,8 +470,8 @@ func (s *mspan) ensureSwept() {
func (sl *sweepLocked) sweep(preserve bool) bool {
// It's critical that we enter this function with preemption disabled,
// GC must not start while we are in the middle of this function.
- _g_ := getg()
- if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
+ gp := getg()
+ if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 {
throw("mspan.sweep: m is not locked")
}
diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go
new file mode 100644
index 0000000000..0d50d60a22
--- /dev/null
+++ b/src/runtime/mklockrank.go
@@ -0,0 +1,360 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+
+// mklockrank records the static rank graph of the locks in the
+// runtime and generates the rank checking structures in lockrank.go.
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "internal/dag"
+ "io"
+ "log"
+ "os"
+ "strings"
+)
+
+// ranks describes the lock rank graph. See "go doc internal/dag" for
+// the syntax.
+//
+// "a < b" means a must be acquired before b if both are held
+// (or, if b is held, a cannot be acquired).
+//
+// "NONE < a" means no locks may be held when a is acquired.
+//
+// If a lock is not given a rank, then it is assumed to be a leaf
+// lock, which means no other lock can be acquired while it is held.
+// Therefore, leaf locks do not need to be given an explicit rank.
+//
+// Ranks in all caps are pseudo-nodes that help define order, but do
+// not actually define a rank.
+//
+// TODO: It's often hard to correlate rank names to locks. Change
+// these to be more consistent with the locks they label.
+const ranks = `
+# Sysmon
+NONE
+< sysmon
+< scavenge, forcegc;
+
+# Defer
+NONE < defer;
+
+# GC
+NONE <
+ sweepWaiters,
+ assistQueue,
+ sweep;
+
+# Scheduler, timers, netpoll
+NONE < pollDesc, cpuprof;
+assistQueue,
+ cpuprof,
+ forcegc,
+ pollDesc, # pollDesc can interact with timers, which can lock sched.
+ scavenge,
+ sweep,
+ sweepWaiters
+< sched;
+sched < allg, allp;
+allp < timers;
+timers < netpollInit;
+
+# Channels
+scavenge, sweep < hchan;
+NONE < notifyList;
+hchan, notifyList < sudog;
+
+# RWMutex
+NONE < rwmutexW;
+rwmutexW, sysmon < rwmutexR;
+
+# Semaphores
+NONE < root;
+
+# Itabs
+NONE
+< itab
+< reflectOffs;
+
+# Tracing without a P uses a global trace buffer.
+scavenge
+# Above TRACEGLOBAL can emit a trace event without a P.
+< TRACEGLOBAL
+# Below TRACEGLOBAL manages the global tracing buffer.
+# Note that traceBuf eventually chains to MALLOC, but we never get that far
+# in the situation where there's no P.
+< traceBuf;
+# Starting/stopping tracing traces strings.
+traceBuf < traceStrings;
+
+# Malloc
+allg,
+ hchan,
+ notifyList,
+ reflectOffs,
+ timers,
+ traceStrings
+# Above MALLOC are things that can allocate memory.
+< MALLOC
+# Below MALLOC is the malloc implementation.
+< fin,
+ gcBitsArenas,
+ mheapSpecial,
+ mspanSpecial,
+ spanSetSpine,
+ MPROF;
+
+# Memory profiling
+MPROF < profInsert, profBlock, profMemActive;
+profMemActive < profMemFuture;
+
+# Execution tracer events (with a P)
+hchan,
+ root,
+ sched,
+ traceStrings,
+ notifyList,
+ fin
+# Above TRACE is anything that can create a trace event
+< TRACE
+< trace
+< traceStackTab;
+
+# Stack allocation and copying
+gcBitsArenas,
+ netpollInit,
+ profBlock,
+ profInsert,
+ profMemFuture,
+ spanSetSpine,
+ traceStackTab
+# Anything that can grow the stack can acquire STACKGROW.
+# (Most higher layers imply STACKGROW, like MALLOC.)
+< STACKGROW
+# Below STACKGROW is the stack allocator/copying implementation.
+< gscan;
+gscan, rwmutexR < stackpool;
+gscan < stackLarge;
+# Generally, hchan must be acquired before gscan. But in one case,
+# where we suspend a G and then shrink its stack, syncadjustsudogs
+# can acquire hchan locks while holding gscan. To allow this case,
+# we use hchanLeaf instead of hchan.
+gscan < hchanLeaf;
+
+# Write barrier
+defer,
+ gscan,
+ mspanSpecial,
+ sudog
+# Anything that can have write barriers can acquire WB.
+# Above WB, we can have write barriers.
+< WB
+# Below WB is the write barrier implementation.
+< wbufSpans;
+
+# Span allocator
+stackLarge,
+ stackpool,
+ wbufSpans
+# Above mheap is anything that can call the span allocator.
+< mheap;
+# Below mheap is the span allocator implementation.
+mheap, mheapSpecial < globalAlloc;
+
+# panic is handled specially. It is implicitly below all other locks.
+NONE < panic;
+# deadlock is not acquired while holding panic, but it also needs to be
+# below all other locks.
+panic < deadlock;
+`
+
+// cyclicRanks lists lock ranks that allow multiple locks of the same
+// rank to be acquired simultaneously. The runtime enforces ordering
+// within these ranks using a separate mechanism.
+var cyclicRanks = map[string]bool{
+ // Multiple timers are locked simultaneously in destroy().
+ "timers": true,
+ // Multiple hchans are acquired in hchan.sortkey() order in
+ // select.
+ "hchan": true,
+ // Multiple hchanLeafs are acquired in hchan.sortkey() order in
+ // syncadjustsudogs().
+ "hchanLeaf": true,
+ // The point of the deadlock lock is to deadlock.
+ "deadlock": true,
+}
+
+func main() {
+ flagO := flag.String("o", "", "write to `file` instead of stdout")
+ flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
+ flag.Parse()
+ if flag.NArg() != 0 {
+ fmt.Fprintf(os.Stderr, "too many arguments")
+ os.Exit(2)
+ }
+
+ g, err := dag.Parse(ranks)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var out []byte
+ if *flagDot {
+ var b bytes.Buffer
+ g.TransitiveReduction()
+ // Add cyclic edges for visualization.
+ for k := range cyclicRanks {
+ g.AddEdge(k, k)
+ }
+ // Reverse the graph. It's much easier to read this as
+ // a "<" partial order than a ">" partial order. This
+ // ways, locks are acquired from the top going down
+ // and time moves forward over the edges instead of
+ // backward.
+ g.Transpose()
+ generateDot(&b, g)
+ out = b.Bytes()
+ } else {
+ var b bytes.Buffer
+ generateGo(&b, g)
+ out, err = format.Source(b.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+
+ if *flagO != "" {
+ err = os.WriteFile(*flagO, out, 0666)
+ } else {
+ _, err = os.Stdout.Write(out)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func generateGo(w io.Writer, g *dag.Graph) {
+ fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.
+
+package runtime
+
+type lockRank int
+
+`)
+
+ // Create numeric ranks.
+ topo := g.Topo()
+ for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
+ topo[i], topo[j] = topo[j], topo[i]
+ }
+ fmt.Fprintf(w, `
+// Constants representing the ranks of all non-leaf runtime locks, in rank order.
+// Locks with lower rank must be taken before locks with higher rank,
+// in addition to satisfying the partial order in lockPartialOrder.
+// A few ranks allow self-cycles, which are specified in lockPartialOrder.
+const (
+ lockRankUnknown lockRank = iota
+
+`)
+ for _, rank := range topo {
+ if isPseudo(rank) {
+ fmt.Fprintf(w, "\t// %s\n", rank)
+ } else {
+ fmt.Fprintf(w, "\t%s\n", cname(rank))
+ }
+ }
+ fmt.Fprintf(w, `)
+
+// lockRankLeafRank is the rank of lock that does not have a declared rank,
+// and hence is a leaf lock.
+const lockRankLeafRank lockRank = 1000
+`)
+
+ // Create string table.
+ fmt.Fprintf(w, `
+// lockNames gives the names associated with each of the above ranks.
+var lockNames = []string{
+`)
+ for _, rank := range topo {
+ if !isPseudo(rank) {
+ fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
+ }
+ }
+ fmt.Fprintf(w, `}
+
+func (rank lockRank) String() string {
+ if rank == 0 {
+ return "UNKNOWN"
+ }
+ if rank == lockRankLeafRank {
+ return "LEAF"
+ }
+ if rank < 0 || int(rank) >= len(lockNames) {
+ return "BAD RANK"
+ }
+ return lockNames[rank]
+}
+`)
+
+ // Create partial order structure.
+ fmt.Fprintf(w, `
+// lockPartialOrder is the transitive closure of the lock rank graph.
+// An entry for rank X lists all of the ranks that can already be held
+// when rank X is acquired.
+//
+// Lock ranks that allow self-cycles list themselves.
+var lockPartialOrder [][]lockRank = [][]lockRank{
+`)
+ for _, rank := range topo {
+ if isPseudo(rank) {
+ continue
+ }
+ list := []string{}
+ for _, before := range g.Edges(rank) {
+ if !isPseudo(before) {
+ list = append(list, cname(before))
+ }
+ }
+ if cyclicRanks[rank] {
+ list = append(list, cname(rank))
+ }
+
+ fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
+ }
+ fmt.Fprintf(w, "}\n")
+}
+
+// cname returns the Go const name for the given lock rank label.
+func cname(label string) string {
+ return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
+}
+
+func isPseudo(label string) bool {
+ return strings.ToUpper(label) == label
+}
+
+// generateDot emits a Graphviz dot representation of g to w.
+func generateDot(w io.Writer, g *dag.Graph) {
+ fmt.Fprintf(w, "digraph g {\n")
+
+ // Define all nodes.
+ for _, node := range g.Nodes {
+ fmt.Fprintf(w, "%q;\n", node)
+ }
+
+ // Create edges.
+ for _, node := range g.Nodes {
+ for _, to := range g.Edges(node) {
+ fmt.Fprintf(w, "%q -> %q;\n", node, to)
+ }
+ }
+
+ fmt.Fprintf(w, "}\n")
+}
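
As the comments above state, the generated lockPartialOrder is the transitive closure of the declared "<" edges; the -o flag writes the generated Go to a file (e.g. lockrank.go) instead of stdout, and -dot emits a Graphviz view instead. internal/dag does the closure for real and cannot be imported outside the standard library, so the toy below, over a small and deliberately incomplete subset of the graph (sysmon < scavenge; scavenge, forcegc < sched; sched < allg), only illustrates the shape of the output rows.

// Toy transitive-closure computation, independent of internal/dag.
package main

import (
	"fmt"
	"sort"
)

// before[x] lists the ranks declared directly below x, i.e. "y < x".
var before = map[string][]string{
	"scavenge": {"sysmon"},
	"sched":    {"scavenge", "forcegc"},
	"allg":     {"sched"},
}

// collect adds to seen every rank reachable below x: x's direct
// predecessors and, recursively, theirs.
func collect(x string, seen map[string]bool) {
	for _, p := range before[x] {
		if !seen[p] {
			seen[p] = true
			collect(p, seen)
		}
	}
}

func main() {
	for _, rank := range []string{"sched", "allg"} {
		seen := map[string]bool{}
		collect(rank, seen)
		preds := make([]string, 0, len(seen))
		for p := range seen {
			preds = append(preds, p)
		}
		sort.Strings(preds)
		fmt.Printf("%s: %v\n", rank, preds) // a lockPartialOrder-style row
	}
}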
diff --git a/src/runtime/msan.go b/src/runtime/msan.go
index c485216583..5e2aae1bd1 100644
--- a/src/runtime/msan.go
+++ b/src/runtime/msan.go
@@ -31,8 +31,8 @@ const msanenabled = true
//
//go:nosplit
func msanread(addr unsafe.Pointer, sz uintptr) {
- g := getg()
- if g == nil || g.m == nil || g == g.m.g0 || g == g.m.gsignal {
+ gp := getg()
+ if gp == nil || gp.m == nil || gp == gp.m.g0 || gp == gp.m.gsignal {
return
}
domsanread(addr, sz)
diff --git a/src/runtime/mwbbuf.go b/src/runtime/mwbbuf.go
index 39ce0b46a9..3b7cbf8f1f 100644
--- a/src/runtime/mwbbuf.go
+++ b/src/runtime/mwbbuf.go
@@ -212,22 +212,22 @@ func wbBufFlush(dst *uintptr, src uintptr) {
//
//go:nowritebarrierrec
//go:systemstack
-func wbBufFlush1(_p_ *p) {
+func wbBufFlush1(pp *p) {
// Get the buffered pointers.
- start := uintptr(unsafe.Pointer(&_p_.wbBuf.buf[0]))
- n := (_p_.wbBuf.next - start) / unsafe.Sizeof(_p_.wbBuf.buf[0])
- ptrs := _p_.wbBuf.buf[:n]
+ start := uintptr(unsafe.Pointer(&pp.wbBuf.buf[0]))
+ n := (pp.wbBuf.next - start) / unsafe.Sizeof(pp.wbBuf.buf[0])
+ ptrs := pp.wbBuf.buf[:n]
// Poison the buffer to make extra sure nothing is enqueued
// while we're processing the buffer.
- _p_.wbBuf.next = 0
+ pp.wbBuf.next = 0
if useCheckmark {
// Slow path for checkmark mode.
for _, ptr := range ptrs {
shade(ptr)
}
- _p_.wbBuf.reset()
+ pp.wbBuf.reset()
return
}
@@ -245,7 +245,7 @@ func wbBufFlush1(_p_ *p) {
// could track whether any un-shaded goroutine has used the
// buffer, or just track globally whether there are any
// un-shaded stacks and flush after each stack scan.
- gcw := &_p_.gcw
+ gcw := &pp.gcw
pos := 0
for _, ptr := range ptrs {
if ptr < minLegalPointer {
@@ -286,5 +286,5 @@ func wbBufFlush1(_p_ *p) {
// Enqueue the greyed objects.
gcw.putBatch(ptrs[:pos])
- _p_.wbBuf.reset()
+ pp.wbBuf.reset()
}
diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go
index 9ad1caa816..2efc56554c 100644
--- a/src/runtime/os2_aix.go
+++ b/src/runtime/os2_aix.go
@@ -388,11 +388,11 @@ func exit1(code int32)
//go:nosplit
func exit(code int32) {
- _g_ := getg()
+ gp := getg()
// Check the validity of g because there is no g during
// newosproc0.
- if _g_ != nil {
+ if gp != nil {
syscall1(&libc_exit, uintptr(code))
return
}
@@ -403,11 +403,11 @@ func write2(fd, p uintptr, n int32) int32
//go:nosplit
func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
- _g_ := getg()
+ gp := getg()
// Check the validity of g because there is no g during
// newosproc0.
- if _g_ != nil {
+ if gp != nil {
r, errno := syscall3(&libc_write, uintptr(fd), uintptr(p), uintptr(n))
if int32(r) < 0 {
return -int32(errno)
@@ -493,11 +493,11 @@ func sigaction1(sig, new, old uintptr)
//go:nosplit
func sigaction(sig uintptr, new, old *sigactiont) {
- _g_ := getg()
+ gp := getg()
// Check the validity of g because there is no g during
// runtime.libpreinit.
- if _g_ != nil {
+ if gp != nil {
r, err := syscall3(&libc_sigaction, sig, uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old)))
if int32(r) == -1 {
println("Sigaction failed for sig: ", sig, " with error:", hex(err))
@@ -645,11 +645,11 @@ func pthread_attr_init1(attr uintptr) int32
//go:nosplit
func pthread_attr_init(attr *pthread_attr) int32 {
- _g_ := getg()
+ gp := getg()
// Check the validity of g because there is no g during
// newosproc0.
- if _g_ != nil {
+ if gp != nil {
r, _ := syscall1(&libpthread_attr_init, uintptr(unsafe.Pointer(attr)))
return int32(r)
}
@@ -661,11 +661,11 @@ func pthread_attr_setdetachstate1(attr uintptr, state int32) int32
//go:nosplit
func pthread_attr_setdetachstate(attr *pthread_attr, state int32) int32 {
- _g_ := getg()
+ gp := getg()
// Check the validity of g because there is no g during
// newosproc0.
- if _g_ != nil {
+ if gp != nil {
r, _ := syscall2(&libpthread_attr_setdetachstate, uintptr(unsafe.Pointer(attr)), uintptr(state))
return int32(r)
}
@@ -689,11 +689,11 @@ func pthread_attr_setstacksize1(attr uintptr, size uint64) int32
//go:nosplit
func pthread_attr_setstacksize(attr *pthread_attr, size uint64) int32 {
- _g_ := getg()
+ gp := getg()
// Check the validity of g because there is no g during
// newosproc0.
- if _g_ != nil {
+ if gp != nil {
r, _ := syscall2(&libpthread_attr_setstacksize, uintptr(unsafe.Pointer(attr)), uintptr(size))
return int32(r)
}
@@ -705,11 +705,11 @@ func pthread_create1(tid, attr, fn, arg uintptr) int32
//go:nosplit
func pthread_create(tid *pthread, attr *pthread_attr, fn *funcDescriptor, arg unsafe.Pointer) int32 {
- _g_ := getg()
+ gp := getg()
// Check the validity of g because there is no g during
// newosproc0.
- if _g_ != nil {
+ if gp != nil {
r, _ := syscall4(&libpthread_create, uintptr(unsafe.Pointer(tid)), uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(fn)), uintptr(arg))
return int32(r)
}
@@ -723,11 +723,11 @@ func sigprocmask1(how, new, old uintptr)
//go:nosplit
func sigprocmask(how int32, new, old *sigset) {
- _g_ := getg()
+ gp := getg()
// Check the validity of m because it might be called during a cgo
// callback early enough where m isn't available yet.
- if _g_ != nil && _g_.m != nil {
+ if gp != nil && gp.m != nil {
r, err := syscall3(&libpthread_sigthreadmask, uintptr(how), uintptr(unsafe.Pointer(new)), uintptr(unsafe.Pointer(old)))
if int32(r) != 0 {
println("syscall sigthreadmask failed: ", hex(err))
diff --git a/src/runtime/os3_plan9.go b/src/runtime/os3_plan9.go
index e901b3e9dd..8c9cbe28ec 100644
--- a/src/runtime/os3_plan9.go
+++ b/src/runtime/os3_plan9.go
@@ -14,7 +14,9 @@ import (
//
//go:nowritebarrierrec
func sighandler(_ureg *ureg, note *byte, gp *g) int {
- _g_ := getg()
+ gsignal := getg()
+ mp := gsignal.m
+
var t sigTabT
var docrash bool
var sig int
@@ -61,7 +63,7 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
if flags&_SigPanic != 0 {
// Copy the error string from sigtramp's stack into m->notesig so
// we can reliably access it from the panic routines.
- memmove(unsafe.Pointer(_g_.m.notesig), unsafe.Pointer(note), uintptr(len(notestr)+1))
+ memmove(unsafe.Pointer(mp.notesig), unsafe.Pointer(note), uintptr(len(notestr)+1))
gp.sig = uint32(sig)
gp.sigpc = c.pc()
@@ -120,8 +122,8 @@ func sighandler(_ureg *ureg, note *byte, gp *g) int {
return _NCONT
}
Throw:
- _g_.m.throwing = throwTypeRuntime
- _g_.m.caughtsig.set(gp)
+ mp.throwing = throwTypeRuntime
+ mp.caughtsig.set(gp)
startpanic_m()
print(notestr, "\n")
print("PC=", hex(c.pc()), "\n")
diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go
index 8c85b71532..76cf59772b 100644
--- a/src/runtime/os3_solaris.go
+++ b/src/runtime/os3_solaris.go
@@ -308,18 +308,17 @@ func semacreate(mp *m) {
}
var sem *semt
- _g_ := getg()
// Call libc's malloc rather than malloc. This will
// allocate space on the C heap. We can't call malloc
// here because it could cause a deadlock.
- _g_.m.libcall.fn = uintptr(unsafe.Pointer(&libc_malloc))
- _g_.m.libcall.n = 1
- _g_.m.scratch = mscratch{}
- _g_.m.scratch.v[0] = unsafe.Sizeof(*sem)
- _g_.m.libcall.args = uintptr(unsafe.Pointer(&_g_.m.scratch))
- asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&_g_.m.libcall))
- sem = (*semt)(unsafe.Pointer(_g_.m.libcall.r1))
+ mp.libcall.fn = uintptr(unsafe.Pointer(&libc_malloc))
+ mp.libcall.n = 1
+ mp.scratch = mscratch{}
+ mp.scratch.v[0] = unsafe.Sizeof(*sem)
+ mp.libcall.args = uintptr(unsafe.Pointer(&mp.scratch))
+ asmcgocall(unsafe.Pointer(&asmsysvicall6x), unsafe.Pointer(&mp.libcall))
+ sem = (*semt)(unsafe.Pointer(mp.libcall.r1))
if sem_init(sem, 0, 0) != 0 {
throw("sem_init")
}
diff --git a/src/runtime/os_js.go b/src/runtime/os_js.go
index 34cc0271f0..7ae0e8d3ec 100644
--- a/src/runtime/os_js.go
+++ b/src/runtime/os_js.go
@@ -49,13 +49,13 @@ func osyield_no_g() {
const _SIGSEGV = 0xb
func sigpanic() {
- g := getg()
- if !canpanic(g) {
+ gp := getg()
+ if !canpanic() {
throw("unexpected signal during runtime execution")
}
// js only invokes the exception handler for memory faults.
- g.sig = _SIGSEGV
+ gp.sig = _SIGSEGV
panicmem()
}
diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go
index 3cbace38f9..bb23adff07 100644
--- a/src/runtime/os_netbsd.go
+++ b/src/runtime/os_netbsd.go
@@ -152,16 +152,16 @@ func semacreate(mp *m) {
//go:nosplit
func semasleep(ns int64) int32 {
- _g_ := getg()
+ gp := getg()
var deadline int64
if ns >= 0 {
deadline = nanotime() + ns
}
for {
- v := atomic.Load(&_g_.m.waitsemacount)
+ v := atomic.Load(&gp.m.waitsemacount)
if v > 0 {
- if atomic.Cas(&_g_.m.waitsemacount, v, v-1) {
+ if atomic.Cas(&gp.m.waitsemacount, v, v-1) {
return 0 // semaphore acquired
}
continue
@@ -178,7 +178,7 @@ func semasleep(ns int64) int32 {
ts.setNsec(wait)
tsp = &ts
}
- ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&_g_.m.waitsemacount), nil)
+ ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&gp.m.waitsemacount), nil)
if ret == _ETIMEDOUT {
return -1
}
@@ -289,8 +289,8 @@ func mpreinit(mp *m) {
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
- _g_ := getg()
- _g_.m.procid = uint64(lwp_self())
+ gp := getg()
+ gp.m.procid = uint64(lwp_self())
// On NetBSD a thread created by pthread_create inherits the
// signal stack of the creating thread. We always create a
@@ -299,8 +299,8 @@ func minit() {
// created in C that calls sigaltstack and then calls a Go
// function, because we will lose track of the C code's
// sigaltstack, but it's the best we can do.
- signalstack(&_g_.m.gsignal.stack)
- _g_.m.newSigstack = true
+ signalstack(&gp.m.gsignal.stack)
+ gp.m.newSigstack = true
minitSignalMask()
}
diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go
index 2383dc8428..d43414459d 100644
--- a/src/runtime/os_openbsd.go
+++ b/src/runtime/os_openbsd.go
@@ -84,7 +84,7 @@ func semacreate(mp *m) {
//go:nosplit
func semasleep(ns int64) int32 {
- _g_ := getg()
+ gp := getg()
// Compute sleep deadline.
var tsp *timespec
@@ -95,9 +95,9 @@ func semasleep(ns int64) int32 {
}
for {
- v := atomic.Load(&_g_.m.waitsemacount)
+ v := atomic.Load(&gp.m.waitsemacount)
if v > 0 {
- if atomic.Cas(&_g_.m.waitsemacount, v, v-1) {
+ if atomic.Cas(&gp.m.waitsemacount, v, v-1) {
return 0 // semaphore acquired
}
continue
@@ -110,7 +110,7 @@ func semasleep(ns int64) int32 {
// be examined [...] immediately before blocking. If that int
// is non-zero then __thrsleep() will immediately return EINTR
// without blocking."
- ret := thrsleep(uintptr(unsafe.Pointer(&_g_.m.waitsemacount)), _CLOCK_MONOTONIC, tsp, 0, &_g_.m.waitsemacount)
+ ret := thrsleep(uintptr(unsafe.Pointer(&gp.m.waitsemacount)), _CLOCK_MONOTONIC, tsp, 0, &gp.m.waitsemacount)
if ret == _EWOULDBLOCK {
return -1
}
diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go
index f0e7c6ae70..6f4578ff48 100644
--- a/src/runtime/os_plan9.go
+++ b/src/runtime/os_plan9.go
@@ -75,13 +75,13 @@ func os_sigpipe() {
}
func sigpanic() {
- g := getg()
- if !canpanic(g) {
+ gp := getg()
+ if !canpanic() {
throw("unexpected signal during runtime execution")
}
- note := gostringnocopy((*byte)(unsafe.Pointer(g.m.notesig)))
- switch g.sig {
+ note := gostringnocopy((*byte)(unsafe.Pointer(gp.m.notesig)))
+ switch gp.sig {
case _SIGRFAULT, _SIGWFAULT:
i := indexNoFloat(note, "addr=")
if i >= 0 {
@@ -92,17 +92,17 @@ func sigpanic() {
panicmem()
}
addr := note[i:]
- g.sigcode1 = uintptr(atolwhex(addr))
- if g.sigcode1 < 0x1000 {
+ gp.sigcode1 = uintptr(atolwhex(addr))
+ if gp.sigcode1 < 0x1000 {
panicmem()
}
- if g.paniconfault {
- panicmemAddr(g.sigcode1)
+ if gp.paniconfault {
+ panicmemAddr(gp.sigcode1)
}
- print("unexpected fault address ", hex(g.sigcode1), "\n")
+ print("unexpected fault address ", hex(gp.sigcode1), "\n")
throw("fault")
case _SIGTRAP:
- if g.paniconfault {
+ if gp.paniconfault {
panicmem()
}
throw(note)
@@ -473,19 +473,19 @@ func semacreate(mp *m) {
//go:nosplit
func semasleep(ns int64) int {
- _g_ := getg()
+ gp := getg()
if ns >= 0 {
ms := timediv(ns, 1000000, nil)
if ms == 0 {
ms = 1
}
- ret := plan9_tsemacquire(&_g_.m.waitsemacount, ms)
+ ret := plan9_tsemacquire(&gp.m.waitsemacount, ms)
if ret == 1 {
return 0 // success
}
return -1 // timeout or interrupted
}
- for plan9_semacquire(&_g_.m.waitsemacount, 1) < 0 {
+ for plan9_semacquire(&gp.m.waitsemacount, 1) < 0 {
// interrupted; try again (c.f. lock_sema.go)
}
return 0 // success
diff --git a/src/runtime/panic.go b/src/runtime/panic.go
index 121f2022a4..3783e3dede 100644
--- a/src/runtime/panic.go
+++ b/src/runtime/panic.go
@@ -1190,7 +1190,7 @@ func fatalpanic(msgs *_panic) {
//
//go:nowritebarrierrec
func startpanic_m() bool {
- _g_ := getg()
+ gp := getg()
if mheap_.cachealloc.size == 0 { // very early
print("runtime: panic before malloc heap initialized\n")
}
@@ -1198,18 +1198,18 @@ func startpanic_m() bool {
// could happen in a signal handler, or in a throw, or inside
// malloc itself. We want to catch if an allocation ever does
// happen (even if we're not in one of these situations).
- _g_.m.mallocing++
+ gp.m.mallocing++
// If we're dying because of a bad lock count, set it to a
// good lock count so we don't recursively panic below.
- if _g_.m.locks < 0 {
- _g_.m.locks = 1
+ if gp.m.locks < 0 {
+ gp.m.locks = 1
}
- switch _g_.m.dying {
+ switch gp.m.dying {
case 0:
// Setting dying >0 has the side-effect of disabling this G's writebuf.
- _g_.m.dying = 1
+ gp.m.dying = 1
atomic.Xadd(&panicking, 1)
lock(&paniclk)
if debug.schedtrace > 0 || debug.scheddetail > 0 {
@@ -1220,13 +1220,13 @@ func startpanic_m() bool {
case 1:
// Something failed while panicking.
// Just print a stack trace and exit.
- _g_.m.dying = 2
+ gp.m.dying = 2
print("panic during panic\n")
return false
case 2:
// This is a genuine bug in the runtime, we couldn't even
// print the stack trace successfully.
- _g_.m.dying = 3
+ gp.m.dying = 3
print("stack trace unavailable\n")
exit(4)
fallthrough
@@ -1240,6 +1240,8 @@ func startpanic_m() bool {
var didothers bool
var deadlock mutex
+// gp is the crashing g running on this M, but may be a user G, while getg() is
+// always g0.
func dopanic_m(gp *g, pc, sp uintptr) bool {
if gp.sig != 0 {
signame := signame(gp.sig)
@@ -1252,7 +1254,6 @@ func dopanic_m(gp *g, pc, sp uintptr) bool {
}
level, all, docrash := gotraceback()
- _g_ := getg()
if level > 0 {
if gp != gp.m.curg {
all = true
@@ -1261,7 +1262,7 @@ func dopanic_m(gp *g, pc, sp uintptr) bool {
print("\n")
goroutineheader(gp)
traceback(pc, sp, 0, gp)
- } else if level >= 2 || _g_.m.throwing >= throwTypeRuntime {
+ } else if level >= 2 || gp.m.throwing >= throwTypeRuntime {
print("\nruntime stack:\n")
traceback(pc, sp, 0, gp)
}
@@ -1290,29 +1291,32 @@ func dopanic_m(gp *g, pc, sp uintptr) bool {
// panicking.
//
//go:nosplit
-func canpanic(gp *g) bool {
- // Note that g is m->gsignal, different from gp.
- // Note also that g->m can change at preemption, so m can go stale
- // if this function ever makes a function call.
- _g_ := getg()
- mp := _g_.m
+func canpanic() bool {
+ gp := getg()
+ mp := acquirem()
// Is it okay for gp to panic instead of crashing the program?
// Yes, as long as it is running Go code, not runtime code,
// and not stuck in a system call.
- if gp == nil || gp != mp.curg {
+ if gp != mp.curg {
+ releasem(mp)
return false
}
- if mp.locks != 0 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
+ // N.B. mp.locks != 1 instead of 0 to account for acquirem.
+ if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 {
+ releasem(mp)
return false
}
status := readgstatus(gp)
if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
+ releasem(mp)
return false
}
if GOOS == "windows" && mp.libcallsp != 0 {
+ releasem(mp)
return false
}
+ releasem(mp)
return true
}
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index 3991a48b10..0b3d90c5b2 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -143,11 +143,11 @@ var initSigmask sigset
// The main goroutine.
func main() {
- g := getg()
+ mp := getg().m
// Racectx of m0->g0 is used only as the parent of the main goroutine.
// It must not be used for anything else.
- g.m.g0.racectx = 0
+ mp.g0.racectx = 0
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
@@ -180,7 +180,7 @@ func main() {
// to preserve the lock.
lockOSThread()
- if g.m != &m0 {
+ if mp != &m0 {
throw("runtime.main not on m0")
}
@@ -678,9 +678,9 @@ func schedinit() {
// raceinit must be the first call to race detector.
// In particular, it must be done before mallocinit below calls racemapshadow.
- _g_ := getg()
+ gp := getg()
if raceenabled {
- _g_.racectx, raceprocctx0 = raceinit()
+ gp.racectx, raceprocctx0 = raceinit()
}
sched.maxmcount = 10000
@@ -694,14 +694,14 @@ func schedinit() {
cpuinit() // must run before alginit
alginit() // maps, hash, fastrand must not be used before this call
fastrandinit() // must run before mcommoninit
- mcommoninit(_g_.m, -1)
+ mcommoninit(gp.m, -1)
modulesinit() // provides activeModules
typelinksinit() // uses maps, activeModules
itabsinit() // uses activeModules
stkobjinit() // must run before GC starts
- sigsave(&_g_.m.sigmask)
- initSigmask = _g_.m.sigmask
+ sigsave(&gp.m.sigmask)
+ initSigmask = gp.m.sigmask
if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 {
println(offset)
@@ -733,8 +733,8 @@ func schedinit() {
if debug.cgocheck > 1 {
writeBarrier.cgo = true
writeBarrier.enabled = true
- for _, p := range allp {
- p.wbBuf.reset()
+ for _, pp := range allp {
+ pp.wbBuf.reset()
}
}
@@ -751,9 +751,9 @@ func schedinit() {
}
func dumpgstatus(gp *g) {
- _g_ := getg()
- print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
- print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
+ thisg := getg()
+ print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
+ print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
}
// sched.lock must be held.
@@ -784,10 +784,10 @@ func mReserveID() int64 {
// Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64) {
- _g_ := getg()
+ gp := getg()
// g0 stack won't make sense for user (and is not necessarily unwindable).
- if _g_ != _g_.m.g0 {
+ if gp != gp.m.g0 {
callers(1, mp.createstack[:])
}
@@ -848,7 +848,6 @@ func ready(gp *g, traceskip int, next bool) {
status := readgstatus(gp)
// Mark runnable.
- _g_ := getg()
mp := acquirem() // disable preemption because it can be holding p in a local var
if status&^_Gscan != _Gwaiting {
dumpgstatus(gp)
@@ -857,7 +856,7 @@ func ready(gp *g, traceskip int, next bool) {
// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
casgstatus(gp, _Gwaiting, _Grunnable)
- runqput(_g_.m.p.ptr(), gp, next)
+ runqput(mp.p.ptr(), gp, next)
wakep()
releasem(mp)
}
@@ -1177,11 +1176,11 @@ var gcsema uint32 = 1
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
- _g_ := getg()
+ gp := getg()
// If we hold a lock, then we won't be able to stop another M
// that is blocked trying to acquire the lock.
- if _g_.m.locks > 0 {
+ if gp.m.locks > 0 {
throw("stopTheWorld: holding locks")
}
@@ -1190,28 +1189,28 @@ func stopTheWorldWithSema() {
atomic.Store(&sched.gcwaiting, 1)
preemptall()
// stop current P
- _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
+ gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
sched.stopwait--
// try to retake all P's in Psyscall status
- for _, p := range allp {
- s := p.status
- if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
+ for _, pp := range allp {
+ s := pp.status
+ if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
if trace.enabled {
- traceGoSysBlock(p)
- traceProcStop(p)
+ traceGoSysBlock(pp)
+ traceProcStop(pp)
}
- p.syscalltick++
+ pp.syscalltick++
sched.stopwait--
}
}
// stop idle P's
now := nanotime()
for {
- p, _ := pidleget(now)
- if p == nil {
+ pp, _ := pidleget(now)
+ if pp == nil {
break
}
- p.status = _Pgcstop
+ pp.status = _Pgcstop
sched.stopwait--
}
wait := sched.stopwait > 0
@@ -1234,8 +1233,8 @@ func stopTheWorldWithSema() {
if sched.stopwait != 0 {
bad = "stopTheWorld: not stopped (stopwait != 0)"
} else {
- for _, p := range allp {
- if p.status != _Pgcstop {
+ for _, pp := range allp {
+ if pp.status != _Pgcstop {
bad = "stopTheWorld: not stopped (status != _Pgcstop)"
}
}
@@ -1354,9 +1353,9 @@ func mstart()
//go:nosplit
//go:nowritebarrierrec
func mstart0() {
- _g_ := getg()
+ gp := getg()
- osStack := _g_.stack.lo == 0
+ osStack := gp.stack.lo == 0
if osStack {
// Initialize stack bounds from system stack.
// Cgo may have left stack size in stack.hi.
@@ -1366,25 +1365,25 @@ func mstart0() {
// We set hi to &size, but there are things above
// it. The 1024 is supposed to compensate this,
// but is somewhat arbitrary.
- size := _g_.stack.hi
+ size := gp.stack.hi
if size == 0 {
size = 8192 * sys.StackGuardMultiplier
}
- _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
- _g_.stack.lo = _g_.stack.hi - size + 1024
+ gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
+ gp.stack.lo = gp.stack.hi - size + 1024
}
// Initialize stack guard so that we can start calling regular
// Go code.
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
// This is the g0, so we can also call go:systemstack
// functions, which check stackguard1.
- _g_.stackguard1 = _g_.stackguard0
+ gp.stackguard1 = gp.stackguard0
mstart1()
// Exit this thread.
if mStackIsSystemAllocated() {
// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
- // the stack, but put it in _g_.stack before mstart,
+ // the stack, but put it in gp.stack before mstart,
// so the logic above hasn't set osStack yet.
osStack = true
}
@@ -1396,9 +1395,9 @@ func mstart0() {
//
//go:noinline
func mstart1() {
- _g_ := getg()
+ gp := getg()
- if _g_ != _g_.m.g0 {
+ if gp != gp.m.g0 {
throw("bad runtime·mstart")
}
@@ -1408,26 +1407,26 @@ func mstart1() {
// so other calls can reuse the current frame.
// And goexit0 does a gogo that needs to return from mstart1
// and let mstart0 exit the thread.
- _g_.sched.g = guintptr(unsafe.Pointer(_g_))
- _g_.sched.pc = getcallerpc()
- _g_.sched.sp = getcallersp()
+ gp.sched.g = guintptr(unsafe.Pointer(gp))
+ gp.sched.pc = getcallerpc()
+ gp.sched.sp = getcallersp()
asminit()
minit()
// Install signal handlers; after minit so that minit can
// prepare the thread to be able to handle the signals.
- if _g_.m == &m0 {
+ if gp.m == &m0 {
mstartm0()
}
- if fn := _g_.m.mstartfn; fn != nil {
+ if fn := gp.m.mstartfn; fn != nil {
fn()
}
- if _g_.m != &m0 {
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
+ if gp.m != &m0 {
+ acquirep(gp.m.nextp.ptr())
+ gp.m.nextp = 0
}
schedule()
}
@@ -1461,7 +1460,7 @@ func mPark() {
// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
-// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
+// the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
@@ -1469,10 +1468,9 @@ func mPark() {
//
//go:yeswritebarrierrec
func mexit(osStack bool) {
- g := getg()
- m := g.m
+ mp := getg().m
- if m == &m0 {
+ if mp == &m0 {
// This is the main thread. Just wedge it.
//
// On Linux, exiting the main thread puts the process
@@ -1497,20 +1495,20 @@ func mexit(osStack bool) {
unminit()
// Free the gsignal stack.
- if m.gsignal != nil {
- stackfree(m.gsignal.stack)
+ if mp.gsignal != nil {
+ stackfree(mp.gsignal.stack)
// On some platforms, when calling into VDSO (e.g. nanotime)
// we store our g on the gsignal stack, if there is one.
// Now the stack is freed, unlink it from the m, so we
// won't write to it when calling VDSO code.
- m.gsignal = nil
+ mp.gsignal = nil
}
// Remove m from allm.
lock(&sched.lock)
for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
- if *pprev == m {
- *pprev = m.alllink
+ if *pprev == mp {
+ *pprev = mp.alllink
goto found
}
}
@@ -1521,17 +1519,17 @@ found:
//
// If this is using an OS stack, the OS will free it
// so there's no need for reaping.
- atomic.Store(&m.freeWait, 1)
+ atomic.Store(&mp.freeWait, 1)
// Put m on the free list, though it will not be reaped until
// freeWait is 0. Note that the free list must not be linked
// through alllink because some functions walk allm without
// locking, so may be using alllink.
- m.freelink = sched.freem
- sched.freem = m
+ mp.freelink = sched.freem
+ sched.freem = mp
}
unlock(&sched.lock)
- atomic.Xadd64(&ncgocall, int64(m.ncgocall))
+ atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
// Release the P.
handoffp(releasep())
@@ -1548,14 +1546,14 @@ found:
if GOOS == "darwin" || GOOS == "ios" {
// Make sure pendingPreemptSignals is correct when an M exits.
// For #41702.
- if atomic.Load(&m.signalPending) != 0 {
+ if atomic.Load(&mp.signalPending) != 0 {
atomic.Xadd(&pendingPreemptSignals, -1)
}
}
// Destroy all allocated resources. After this is called, we may no
// longer take any locks.
- mdestroy(m)
+ mdestroy(mp)
if osStack {
// Return from mstart and let the system thread
@@ -1567,7 +1565,7 @@ found:
// return to. Exit the thread directly. exitThread will clear
// m.freeWait when it's done with the stack and the m can be
// reaped.
- exitThread(&m.freeWait)
+ exitThread(&mp.freeWait)
}
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
@@ -1583,7 +1581,7 @@ found:
//go:systemstack
func forEachP(fn func(*p)) {
mp := acquirem()
- _p_ := getg().m.p.ptr()
+ pp := getg().m.p.ptr()
lock(&sched.lock)
if sched.safePointWait != 0 {
@@ -1593,9 +1591,9 @@ func forEachP(fn func(*p)) {
sched.safePointFn = fn
// Ask all Ps to run the safe point function.
- for _, p := range allp {
- if p != _p_ {
- atomic.Store(&p.runSafePointFn, 1)
+ for _, p2 := range allp {
+ if p2 != pp {
+ atomic.Store(&p2.runSafePointFn, 1)
}
}
preemptall()
@@ -1617,19 +1615,19 @@ func forEachP(fn func(*p)) {
unlock(&sched.lock)
// Run fn for the current P.
- fn(_p_)
+ fn(pp)
// Force Ps currently in _Psyscall into _Pidle and hand them
// off to induce safe point function execution.
- for _, p := range allp {
- s := p.status
- if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
+ for _, p2 := range allp {
+ s := p2.status
+ if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
if trace.enabled {
- traceGoSysBlock(p)
- traceProcStop(p)
+ traceGoSysBlock(p2)
+ traceProcStop(p2)
}
- p.syscalltick++
- handoffp(p)
+ p2.syscalltick++
+ handoffp(p2)
}
}
@@ -1650,8 +1648,8 @@ func forEachP(fn func(*p)) {
if sched.safePointWait != 0 {
throw("forEachP: not done")
}
- for _, p := range allp {
- if p.runSafePointFn != 0 {
+ for _, p2 := range allp {
+ if p2.runSafePointFn != 0 {
throw("forEachP: P did not run fn")
}
}
@@ -1707,20 +1705,20 @@ type cgothreadstart struct {
// id is optional pre-allocated m ID. Omit by passing -1.
//
// This function is allowed to have write barriers even if the caller
-// isn't because it borrows _p_.
+// isn't because it borrows pp.
//
//go:yeswritebarrierrec
-func allocm(_p_ *p, fn func(), id int64) *m {
+func allocm(pp *p, fn func(), id int64) *m {
allocmLock.rlock()
- // The caller owns _p_, but we may borrow (i.e., acquirep) it. We must
+ // The caller owns pp, but we may borrow (i.e., acquirep) it. We must
// disable preemption to ensure it is not stolen, which would make the
// caller lose ownership.
acquirem()
- _g_ := getg()
- if _g_.m.p == 0 {
- acquirep(_p_) // temporarily borrow p for mallocs in this function
+ gp := getg()
+ if gp.m.p == 0 {
+ acquirep(pp) // temporarily borrow p for mallocs in this function
}
// Release the free M list. We need to do this somewhere and
@@ -1761,11 +1759,11 @@ func allocm(_p_ *p, fn func(), id int64) *m {
}
mp.g0.m = mp
- if _p_ == _g_.m.p.ptr() {
+ if pp == gp.m.p.ptr() {
releasep()
}
- releasem(_g_.m)
+ releasem(gp.m)
allocmLock.runlock()
return mp
}
@@ -1859,10 +1857,10 @@ func needm() {
// scheduling stack is, but we assume there's at least 32 kB,
// which is more than enough for us.
setg(mp.g0)
- _g_ := getg()
- _g_.stack.hi = getcallersp() + 1024
- _g_.stack.lo = getcallersp() - 32*1024
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ gp := getg()
+ gp.stack.hi = getcallersp() + 1024
+ gp.stack.lo = getcallersp() - 32*1024
+ gp.stackguard0 = gp.stack.lo + _StackGuard
// Initialize this thread to use the m.
asminit()
@@ -2085,7 +2083,7 @@ var newmHandoff struct {
// id is optional pre-allocated m ID. Omit by passing -1.
//
//go:nowritebarrierrec
-func newm(fn func(), _p_ *p, id int64) {
+func newm(fn func(), pp *p, id int64) {
// allocm adds a new M to allm, but they do not start until created by
// the OS in newm1 or the template thread.
//
@@ -2098,8 +2096,8 @@ func newm(fn func(), _p_ *p, id int64) {
// start.
acquirem()
- mp := allocm(_p_, fn, id)
- mp.nextp.set(_p_)
+ mp := allocm(pp, fn, id)
+ mp.nextp.set(pp)
mp.sigmask = initSigmask
if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
// We're on a locked M or a thread that may have been
@@ -2221,24 +2219,24 @@ func templateThread() {
// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
- _g_ := getg()
+ gp := getg()
- if _g_.m.locks != 0 {
+ if gp.m.locks != 0 {
throw("stopm holding locks")
}
- if _g_.m.p != 0 {
+ if gp.m.p != 0 {
throw("stopm holding p")
}
- if _g_.m.spinning {
+ if gp.m.spinning {
throw("stopm spinning")
}
lock(&sched.lock)
- mput(_g_.m)
+ mput(gp.m)
unlock(&sched.lock)
mPark()
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
+ acquirep(gp.m.nextp.ptr())
+ gp.m.nextp = 0
}
func mspinning() {
@@ -2258,7 +2256,7 @@ func mspinning() {
// Must not have write barriers because this may be called without a P.
//
//go:nowritebarrierrec
-func startm(_p_ *p, spinning bool) {
+func startm(pp *p, spinning bool) {
// Disable preemption.
//
// Every owned P must have an owner that will eventually stop it in the
@@ -2277,9 +2275,9 @@ func startm(_p_ *p, spinning bool) {
// disable preemption before acquiring a P from pidleget below.
mp := acquirem()
lock(&sched.lock)
- if _p_ == nil {
- _p_, _ = pidleget(0)
- if _p_ == nil {
+ if pp == nil {
+ pp, _ = pidleget(0)
+ if pp == nil {
unlock(&sched.lock)
if spinning {
// The caller incremented nmspinning, but there are no idle Ps,
@@ -2314,8 +2312,8 @@ func startm(_p_ *p, spinning bool) {
// The caller incremented nmspinning, so set m.spinning in the new M.
fn = mspinning
}
- newm(fn, _p_, id)
- // Ownership transfer of _p_ committed by start in newm.
+ newm(fn, pp, id)
+ // Ownership transfer of pp committed by start in newm.
// Preemption is now safe.
releasem(mp)
return
@@ -2327,14 +2325,14 @@ func startm(_p_ *p, spinning bool) {
if nmp.nextp != 0 {
throw("startm: m has p")
}
- if spinning && !runqempty(_p_) {
+ if spinning && !runqempty(pp) {
throw("startm: p has runnable gs")
}
// The caller incremented nmspinning, so set m.spinning in the new M.
nmp.spinning = spinning
- nmp.nextp.set(_p_)
+ nmp.nextp.set(pp)
notewakeup(&nmp.park)
- // Ownership transfer of _p_ committed by wakeup. Preemption is now
+ // Ownership transfer of pp committed by wakeup. Preemption is now
// safe.
releasem(mp)
}
@@ -2343,34 +2341,34 @@ func startm(_p_ *p, spinning bool) {
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
-func handoffp(_p_ *p) {
+func handoffp(pp *p) {
// handoffp must start an M in any situation where
- // findrunnable would return a G to run on _p_.
+ // findrunnable would return a G to run on pp.
// if it has local work, start it straight away
- if !runqempty(_p_) || sched.runqsize != 0 {
- startm(_p_, false)
+ if !runqempty(pp) || sched.runqsize != 0 {
+ startm(pp, false)
return
}
// if there's trace work to do, start it straight away
if (trace.enabled || trace.shutdown) && traceReaderAvailable() {
- startm(_p_, false)
+ startm(pp, false)
return
}
// if it has GC work, start it straight away
- if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
- startm(_p_, false)
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
+ startm(pp, false)
return
}
// no local work, check that there are no spinning/idle M's,
// otherwise our help is not required
if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
- startm(_p_, true)
+ startm(pp, true)
return
}
lock(&sched.lock)
if sched.gcwaiting != 0 {
- _p_.status = _Pgcstop
+ pp.status = _Pgcstop
sched.stopwait--
if sched.stopwait == 0 {
notewakeup(&sched.stopnote)
@@ -2378,8 +2376,8 @@ func handoffp(_p_ *p) {
unlock(&sched.lock)
return
}
- if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
- sched.safePointFn(_p_)
+ if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
+ sched.safePointFn(pp)
sched.safePointWait--
if sched.safePointWait == 0 {
notewakeup(&sched.safePointNote)
@@ -2387,21 +2385,21 @@ func handoffp(_p_ *p) {
}
if sched.runqsize != 0 {
unlock(&sched.lock)
- startm(_p_, false)
+ startm(pp, false)
return
}
// If this is the last running P and nobody is polling network,
// need to wakeup another M to poll network.
if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
unlock(&sched.lock)
- startm(_p_, false)
+ startm(pp, false)
return
}
// The scheduler lock cannot be held when calling wakeNetPoller below
// because wakeNetPoller may call wakep which may call startm.
- when := nobarrierWakeTime(_p_)
- pidleput(_p_, 0)
+ when := nobarrierWakeTime(pp)
+ pidleput(pp, 0)
unlock(&sched.lock)
if when != 0 {
@@ -2425,27 +2423,27 @@ func wakep() {
// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
- _g_ := getg()
+ gp := getg()
- if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
+ if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
throw("stoplockedm: inconsistent locking")
}
- if _g_.m.p != 0 {
+ if gp.m.p != 0 {
// Schedule another M to run this p.
- _p_ := releasep()
- handoffp(_p_)
+ pp := releasep()
+ handoffp(pp)
}
incidlelocked(1)
// Wait until another thread schedules lockedg again.
mPark()
- status := readgstatus(_g_.m.lockedg.ptr())
+ status := readgstatus(gp.m.lockedg.ptr())
if status&^_Gscan != _Grunnable {
print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
- dumpgstatus(_g_.m.lockedg.ptr())
+ dumpgstatus(gp.m.lockedg.ptr())
throw("stoplockedm: not runnable")
}
- acquirep(_g_.m.nextp.ptr())
- _g_.m.nextp = 0
+ acquirep(gp.m.nextp.ptr())
+ gp.m.nextp = 0
}
// Schedules the locked m to run the locked gp.
@@ -2453,10 +2451,8 @@ func stoplockedm() {
//
//go:nowritebarrierrec
func startlockedm(gp *g) {
- _g_ := getg()
-
mp := gp.lockedm.ptr()
- if mp == _g_.m {
+ if mp == getg().m {
throw("startlockedm: locked to me")
}
if mp.nextp != 0 {
@@ -2464,8 +2460,8 @@ func startlockedm(gp *g) {
}
// directly handoff current P to the locked m
incidlelocked(-1)
- _p_ := releasep()
- mp.nextp.set(_p_)
+ pp := releasep()
+ mp.nextp.set(pp)
notewakeup(&mp.park)
stopm()
}
@@ -2473,22 +2469,22 @@ func startlockedm(gp *g) {
// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
- _g_ := getg()
+ gp := getg()
if sched.gcwaiting == 0 {
throw("gcstopm: not waiting for gc")
}
- if _g_.m.spinning {
- _g_.m.spinning = false
+ if gp.m.spinning {
+ gp.m.spinning = false
// OK to just drop nmspinning here,
// startTheWorld will unpark threads as necessary.
if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
throw("gcstopm: negative nmspinning")
}
}
- _p_ := releasep()
+ pp := releasep()
lock(&sched.lock)
- _p_.status = _Pgcstop
+ pp.status = _Pgcstop
sched.stopwait--
if sched.stopwait == 0 {
notewakeup(&sched.stopnote)
@@ -2507,7 +2503,7 @@ func gcstopm() {
//
//go:yeswritebarrierrec
func execute(gp *g, inheritTime bool) {
- _g_ := getg()
+ mp := getg().m
if goroutineProfile.active {
// Make sure that gp has had its stack written out to the goroutine
@@ -2518,19 +2514,19 @@ func execute(gp *g, inheritTime bool) {
// Assign gp.m before entering _Grunning so running Gs have an
// M.
- _g_.m.curg = gp
- gp.m = _g_.m
+ mp.curg = gp
+ gp.m = mp
casgstatus(gp, _Grunnable, _Grunning)
gp.waitsince = 0
gp.preempt = false
gp.stackguard0 = gp.stack.lo + _StackGuard
if !inheritTime {
- _g_.m.p.ptr().schedtick++
+ mp.p.ptr().schedtick++
}
// Check whether the profiler needs to be turned on or off.
hz := sched.profilehz
- if _g_.m.profilehz != hz {
+ if mp.profilehz != hz {
setThreadCPUProfiler(hz)
}
@@ -2551,19 +2547,19 @@ func execute(gp *g, inheritTime bool) {
// tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
// reader) so the caller should try to wake a P.
func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
- _g_ := getg()
+ mp := getg().m
// The conditions here and in handoffp must agree: if
// findrunnable would return a G to run, handoffp must start
// an M.
top:
- _p_ := _g_.m.p.ptr()
+ pp := mp.p.ptr()
if sched.gcwaiting != 0 {
gcstopm()
goto top
}
- if _p_.runSafePointFn != 0 {
+ if pp.runSafePointFn != 0 {
runSafePointFn()
}
@@ -2571,7 +2567,7 @@ top:
// which may steal timers. It's important that between now
// and then, nothing blocks, so these numbers remain mostly
// relevant.
- now, pollUntil, _ := checkTimers(_p_, 0)
+ now, pollUntil, _ := checkTimers(pp, 0)
// Try to schedule the trace reader.
if trace.enabled || trace.shutdown {
@@ -2585,7 +2581,7 @@ top:
// Try to schedule a GC worker.
if gcBlackenEnabled != 0 {
- gp, now = gcController.findRunnableGCWorker(_p_, now)
+ gp, now = gcController.findRunnableGCWorker(pp, now)
if gp != nil {
return gp, false, true
}
@@ -2594,9 +2590,9 @@ top:
// Check the global runnable queue once in a while to ensure fairness.
// Otherwise two goroutines can completely occupy the local runqueue
// by constantly respawning each other.
- if _p_.schedtick%61 == 0 && sched.runqsize > 0 {
+ if pp.schedtick%61 == 0 && sched.runqsize > 0 {
lock(&sched.lock)
- gp = globrunqget(_p_, 1)
+ gp = globrunqget(pp, 1)
unlock(&sched.lock)
if gp != nil {
return gp, false, false
@@ -2614,14 +2610,14 @@ top:
}
// local runq
- if gp, inheritTime := runqget(_p_); gp != nil {
+ if gp, inheritTime := runqget(pp); gp != nil {
return gp, inheritTime, false
}
// global runq
if sched.runqsize != 0 {
lock(&sched.lock)
- gp := globrunqget(_p_, 0)
+ gp := globrunqget(pp, 0)
unlock(&sched.lock)
if gp != nil {
return gp, false, false
@@ -2653,9 +2649,9 @@ top:
// This is necessary to prevent excessive CPU consumption when
// GOMAXPROCS>>1 but the program parallelism is low.
procs := uint32(gomaxprocs)
- if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
- if !_g_.m.spinning {
- _g_.m.spinning = true
+ if mp.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) {
+ if !mp.spinning {
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
@@ -2680,10 +2676,10 @@ top:
//
// If we're in the GC mark phase, can safely scan and blacken objects,
// and have work to do, run idle-time marking rather than give up the P.
- if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) && gcController.addIdleMarkWorker() {
+ if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
if node != nil {
- _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+ pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
gp := node.gp.ptr()
casgstatus(gp, _Gwaiting, _Grunnable)
if trace.enabled {
@@ -2722,19 +2718,19 @@ top:
// return P and block
lock(&sched.lock)
- if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
+ if sched.gcwaiting != 0 || pp.runSafePointFn != 0 {
unlock(&sched.lock)
goto top
}
if sched.runqsize != 0 {
- gp := globrunqget(_p_, 0)
+ gp := globrunqget(pp, 0)
unlock(&sched.lock)
return gp, false, false
}
- if releasep() != _p_ {
+ if releasep() != pp {
throw("findrunnable: wrong p")
}
- now = pidleput(_p_, now)
+ now = pidleput(pp, now)
unlock(&sched.lock)
// Delicate dance: thread transitions from spinning to non-spinning
@@ -2757,9 +2753,9 @@ top:
// we also observe no idle Ps it is OK to skip unparking a new worker
// thread: the system is fully loaded so no spinning threads are required.
// Also see "Worker thread parking/unparking" comment at the top of the file.
- wasSpinning := _g_.m.spinning
- if _g_.m.spinning {
- _g_.m.spinning = false
+ wasSpinning := mp.spinning
+ if mp.spinning {
+ mp.spinning = false
if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
throw("findrunnable: negative nmspinning")
}
@@ -2771,23 +2767,23 @@ top:
// latency. See golang.org/issue/43997.
// Check all runqueues once again.
- _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
- if _p_ != nil {
- acquirep(_p_)
- _g_.m.spinning = true
+ pp = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
+ if pp != nil {
+ acquirep(pp)
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
goto top
}
// Check for idle-priority GC work again.
- _p_, gp = checkIdleGCNoP()
- if _p_ != nil {
- acquirep(_p_)
- _g_.m.spinning = true
+ pp, gp = checkIdleGCNoP()
+ if pp != nil {
+ acquirep(pp)
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
// Run the idle worker.
- _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
+ pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
casgstatus(gp, _Gwaiting, _Grunnable)
if trace.enabled {
traceGoUnpark(gp, 0)
@@ -2807,10 +2803,10 @@ top:
// Poll network until next timer.
if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
atomic.Store64(&sched.pollUntil, uint64(pollUntil))
- if _g_.m.p != 0 {
+ if mp.p != 0 {
throw("findrunnable: netpoll with p")
}
- if _g_.m.spinning {
+ if mp.spinning {
throw("findrunnable: netpoll with spinning")
}
// Refresh now.
@@ -2836,12 +2832,12 @@ top:
goto top
}
lock(&sched.lock)
- _p_, _ = pidleget(now)
+ pp, _ = pidleget(now)
unlock(&sched.lock)
- if _p_ == nil {
+ if pp == nil {
injectglist(&list)
} else {
- acquirep(_p_)
+ acquirep(pp)
if !list.empty() {
gp := list.pop()
injectglist(&list)
@@ -2852,7 +2848,7 @@ top:
return gp, false, false
}
if wasSpinning {
- _g_.m.spinning = true
+ mp.spinning = true
atomic.Xadd(&sched.nmspinning, 1)
}
goto top
@@ -3087,11 +3083,11 @@ func wakeNetPoller(when int64) {
}
func resetspinning() {
- _g_ := getg()
- if !_g_.m.spinning {
+ gp := getg()
+ if !gp.m.spinning {
throw("resetspinning: not a spinning m")
}
- _g_.m.spinning = false
+ gp.m.spinning = false
nmspinning := atomic.Xadd(&sched.nmspinning, -1)
if int32(nmspinning) < 0 {
throw("findrunnable: negative nmspinning")
@@ -3175,31 +3171,31 @@ func injectglist(glist *gList) {
// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
func schedule() {
- _g_ := getg()
+ mp := getg().m
- if _g_.m.locks != 0 {
+ if mp.locks != 0 {
throw("schedule: holding locks")
}
- if _g_.m.lockedg != 0 {
+ if mp.lockedg != 0 {
stoplockedm()
- execute(_g_.m.lockedg.ptr(), false) // Never returns.
+ execute(mp.lockedg.ptr(), false) // Never returns.
}
// We should not schedule away from a g that is executing a cgo call,
// since the cgo call is using the m's g0 stack.
- if _g_.m.incgo {
+ if mp.incgo {
throw("schedule: in cgo")
}
top:
- pp := _g_.m.p.ptr()
+ pp := mp.p.ptr()
pp.preempt = false
// Safety check: if we are spinning, the run queue should be empty.
// Check this before calling checkTimers, as that might call
// goready to put a ready goroutine on the local run queue.
- if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
+ if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
throw("schedule: spinning with local work")
}
@@ -3208,7 +3204,7 @@ top:
// This thread is going to run a goroutine and is not spinning anymore,
// so if it was marked as spinning we need to reset it now and potentially
// start a new spinning M.
- if _g_.m.spinning {
+ if mp.spinning {
resetspinning()
}
@@ -3252,10 +3248,10 @@ top:
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.
func dropg() {
- _g_ := getg()
+ gp := getg()
- setMNoWB(&_g_.m.curg.m, nil)
- setGNoWB(&_g_.m.curg, nil)
+ setMNoWB(&gp.m.curg.m, nil)
+ setGNoWB(&gp.m.curg, nil)
}
// checkTimers runs any timers for the P that are ready.
@@ -3331,19 +3327,19 @@ func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
// park continuation on g0.
func park_m(gp *g) {
- _g_ := getg()
+ mp := getg().m
if trace.enabled {
- traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
+ traceGoPark(mp.waittraceev, mp.waittraceskip)
}
casgstatus(gp, _Grunning, _Gwaiting)
dropg()
- if fn := _g_.m.waitunlockf; fn != nil {
- ok := fn(gp, _g_.m.waitlock)
- _g_.m.waitunlockf = nil
- _g_.m.waitlock = nil
+ if fn := mp.waitunlockf; fn != nil {
+ ok := fn(gp, mp.waitlock)
+ mp.waitunlockf = nil
+ mp.waitlock = nil
if !ok {
if trace.enabled {
traceGoUnpark(gp, 2)
@@ -3470,18 +3466,18 @@ func goexit1() {
// goexit continuation on g0.
func goexit0(gp *g) {
- _g_ := getg()
- _p_ := _g_.m.p.ptr()
+ mp := getg().m
+ pp := mp.p.ptr()
casgstatus(gp, _Grunning, _Gdead)
- gcController.addScannableStack(_p_, -int64(gp.stack.hi-gp.stack.lo))
+ gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
if isSystemGoroutine(gp, false) {
atomic.Xadd(&sched.ngsys, -1)
}
gp.m = nil
locked := gp.lockedm != 0
gp.lockedm = 0
- _g_.m.lockedg = 0
+ mp.lockedg = 0
gp.preemptStop = false
gp.paniconfault = false
gp._defer = nil // should be true already but just in case.
@@ -3505,15 +3501,15 @@ func goexit0(gp *g) {
dropg()
if GOARCH == "wasm" { // no threads yet on wasm
- gfput(_p_, gp)
+ gfput(pp, gp)
schedule() // never returns
}
- if _g_.m.lockedInt != 0 {
- print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
+ if mp.lockedInt != 0 {
+ print("invalid m->lockedInt = ", mp.lockedInt, "\n")
throw("internal lockOSThread error")
}
- gfput(_p_, gp)
+ gfput(pp, gp)
if locked {
// The goroutine may have locked this thread because
// it put it in an unusual kernel state. Kill it
@@ -3522,11 +3518,11 @@ func goexit0(gp *g) {
// Return to mstart, which will release the P and exit
// the thread.
if GOOS != "plan9" { // See golang.org/issue/22227.
- gogo(&_g_.m.g0.sched)
+ gogo(&mp.g0.sched)
} else {
// Clear lockedExt on plan9 since we may end up re-using
// this thread.
- _g_.m.lockedExt = 0
+ mp.lockedExt = 0
}
}
schedule()
@@ -3541,9 +3537,9 @@ func goexit0(gp *g) {
//go:nosplit
//go:nowritebarrierrec
func save(pc, sp uintptr) {
- _g_ := getg()
+ gp := getg()
- if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal {
+ if gp == gp.m.g0 || gp == gp.m.gsignal {
// m.g0.sched is special and must describe the context
// for exiting the thread. mstart1 writes to it directly.
// m.gsignal.sched should not be used at all.
@@ -3552,14 +3548,14 @@ func save(pc, sp uintptr) {
throw("save on system g not allowed")
}
- _g_.sched.pc = pc
- _g_.sched.sp = sp
- _g_.sched.lr = 0
- _g_.sched.ret = 0
+ gp.sched.pc = pc
+ gp.sched.sp = sp
+ gp.sched.lr = 0
+ gp.sched.ret = 0
// We need to ensure ctxt is zero, but can't have a write
// barrier here. However, it should always already be zero.
// Assert that.
- if _g_.sched.ctxt != nil {
+ if gp.sched.ctxt != nil {
badctxt()
}
}
@@ -3594,7 +3590,7 @@ func save(pc, sp uintptr) {
// when syscall returns we emit traceGoSysExit and when the goroutine starts running
// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
-// we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
+// we remember current value of syscalltick in m (gp.m.syscalltick = gp.m.p.ptr().syscalltick),
// whoever emits traceGoSysBlock increments p.syscalltick afterwards;
// and we wait for the increment before emitting traceGoSysExit.
// Note that the increment is done even if tracing is not enabled,
@@ -3602,27 +3598,27 @@ func save(pc, sp uintptr) {
//
//go:nosplit
func reentersyscall(pc, sp uintptr) {
- _g_ := getg()
+ gp := getg()
// Disable preemption because during this function g is in Gsyscall status,
// but can have inconsistent g->sched, do not let GC observe it.
- _g_.m.locks++
+ gp.m.locks++
// Entersyscall must not call any function that might split/grow the stack.
// (See details in comment above.)
// Catch calls that might, by replacing the stack guard with something that
// will trip any stack check and leaving a flag to tell newstack to die.
- _g_.stackguard0 = stackPreempt
- _g_.throwsplit = true
+ gp.stackguard0 = stackPreempt
+ gp.throwsplit = true
// Leave SP around for GC and traceback.
save(pc, sp)
- _g_.syscallsp = sp
- _g_.syscallpc = pc
- casgstatus(_g_, _Grunning, _Gsyscall)
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ gp.syscallsp = sp
+ gp.syscallpc = pc
+ casgstatus(gp, _Grunning, _Gsyscall)
+ if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
systemstack(func() {
- print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
throw("entersyscall")
})
}
@@ -3640,25 +3636,25 @@ func reentersyscall(pc, sp uintptr) {
save(pc, sp)
}
- if _g_.m.p.ptr().runSafePointFn != 0 {
+ if gp.m.p.ptr().runSafePointFn != 0 {
// runSafePointFn may stack split if run on this stack
systemstack(runSafePointFn)
save(pc, sp)
}
- _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
- _g_.sysblocktraced = true
- pp := _g_.m.p.ptr()
+ gp.m.syscalltick = gp.m.p.ptr().syscalltick
+ gp.sysblocktraced = true
+ pp := gp.m.p.ptr()
pp.m = 0
- _g_.m.oldp.set(pp)
- _g_.m.p = 0
+ gp.m.oldp.set(pp)
+ gp.m.p = 0
atomic.Store(&pp.status, _Psyscall)
if sched.gcwaiting != 0 {
systemstack(entersyscall_gcwait)
save(pc, sp)
}
- _g_.m.locks--
+ gp.m.locks--
}
// Standard syscall entry used by the go syscall library and normal cgo calls.
@@ -3681,16 +3677,16 @@ func entersyscall_sysmon() {
}
func entersyscall_gcwait() {
- _g_ := getg()
- _p_ := _g_.m.oldp.ptr()
+ gp := getg()
+ pp := gp.m.oldp.ptr()
lock(&sched.lock)
- if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
+ if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
if trace.enabled {
- traceGoSysBlock(_p_)
- traceProcStop(_p_)
+ traceGoSysBlock(pp)
+ traceProcStop(pp)
}
- _p_.syscalltick++
+ pp.syscalltick++
if sched.stopwait--; sched.stopwait == 0 {
notewakeup(&sched.stopnote)
}
@@ -3702,34 +3698,34 @@ func entersyscall_gcwait() {
//
//go:nosplit
func entersyscallblock() {
- _g_ := getg()
+ gp := getg()
- _g_.m.locks++ // see comment in entersyscall
- _g_.throwsplit = true
- _g_.stackguard0 = stackPreempt // see comment in entersyscall
- _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
- _g_.sysblocktraced = true
- _g_.m.p.ptr().syscalltick++
+ gp.m.locks++ // see comment in entersyscall
+ gp.throwsplit = true
+ gp.stackguard0 = stackPreempt // see comment in entersyscall
+ gp.m.syscalltick = gp.m.p.ptr().syscalltick
+ gp.sysblocktraced = true
+ gp.m.p.ptr().syscalltick++
// Leave SP around for GC and traceback.
pc := getcallerpc()
sp := getcallersp()
save(pc, sp)
- _g_.syscallsp = _g_.sched.sp
- _g_.syscallpc = _g_.sched.pc
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ gp.syscallsp = gp.sched.sp
+ gp.syscallpc = gp.sched.pc
+ if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
sp1 := sp
- sp2 := _g_.sched.sp
- sp3 := _g_.syscallsp
+ sp2 := gp.sched.sp
+ sp3 := gp.syscallsp
systemstack(func() {
- print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
throw("entersyscallblock")
})
}
- casgstatus(_g_, _Grunning, _Gsyscall)
- if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ casgstatus(gp, _Grunning, _Gsyscall)
+ if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
systemstack(func() {
- print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
throw("entersyscallblock")
})
}
@@ -3739,7 +3735,7 @@ func entersyscallblock() {
// Resave for traceback during blocked call.
save(getcallerpc(), getcallersp())
- _g_.m.locks--
+ gp.m.locks--
}
func entersyscallblock_handoff() {
@@ -3763,16 +3759,16 @@ func entersyscallblock_handoff() {
//go:nowritebarrierrec
//go:linkname exitsyscall
func exitsyscall() {
- _g_ := getg()
+ gp := getg()
- _g_.m.locks++ // see comment in entersyscall
- if getcallersp() > _g_.syscallsp {
+ gp.m.locks++ // see comment in entersyscall
+ if getcallersp() > gp.syscallsp {
throw("exitsyscall: syscall frame is no longer valid")
}
- _g_.waitsince = 0
- oldp := _g_.m.oldp.ptr()
- _g_.m.oldp = 0
+ gp.waitsince = 0
+ oldp := gp.m.oldp.ptr()
+ gp.m.oldp = 0
if exitsyscallfast(oldp) {
// When exitsyscallfast returns success, we have a P so can now use
// write barriers
@@ -3781,33 +3777,33 @@ func exitsyscall() {
// profile, exactly as it was when the goroutine profiler first
// stopped the world.
systemstack(func() {
- tryRecordGoroutineProfileWB(_g_)
+ tryRecordGoroutineProfileWB(gp)
})
}
if trace.enabled {
- if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+ if oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick {
systemstack(traceGoStart)
}
}
// There's a cpu for us, so we can run.
- _g_.m.p.ptr().syscalltick++
+ gp.m.p.ptr().syscalltick++
// We need to cas the status and scan before resuming...
- casgstatus(_g_, _Gsyscall, _Grunning)
+ casgstatus(gp, _Gsyscall, _Grunning)
// Garbage collector isn't running (since we are),
// so okay to clear syscallsp.
- _g_.syscallsp = 0
- _g_.m.locks--
- if _g_.preempt {
+ gp.syscallsp = 0
+ gp.m.locks--
+ if gp.preempt {
// restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ gp.stackguard0 = stackPreempt
} else {
// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
+ gp.stackguard0 = gp.stack.lo + _StackGuard
}
- _g_.throwsplit = false
+ gp.throwsplit = false
- if sched.disable.user && !schedEnabled(_g_) {
+ if sched.disable.user && !schedEnabled(gp) {
// Scheduling of this goroutine is disabled.
Gosched()
}
@@ -3815,21 +3811,21 @@ func exitsyscall() {
return
}
- _g_.sysexitticks = 0
+ gp.sysexitticks = 0
if trace.enabled {
// Wait till traceGoSysBlock event is emitted.
// This ensures consistency of the trace (the goroutine is started after it is blocked).
- for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
+ for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
osyield()
}
// We can't trace syscall exit right now because we don't have a P.
// Tracing code can invoke write barriers that cannot run without a P.
// So instead we remember the syscall exit time and emit the event
// in execute when we have a P.
- _g_.sysexitticks = cputicks()
+ gp.sysexitticks = cputicks()
}
- _g_.m.locks--
+ gp.m.locks--
// Call the scheduler.
mcall(exitsyscall0)
@@ -3840,14 +3836,14 @@ func exitsyscall() {
// Must wait until now because until gosched returns
// we don't know for sure that the garbage collector
// is not running.
- _g_.syscallsp = 0
- _g_.m.p.ptr().syscalltick++
- _g_.throwsplit = false
+ gp.syscallsp = 0
+ gp.m.p.ptr().syscalltick++
+ gp.throwsplit = false
}
//go:nosplit
func exitsyscallfast(oldp *p) bool {
- _g_ := getg()
+ gp := getg()
// Freezetheworld sets stopwait but does not retake P's.
if sched.stopwait == freezeStopWait {
@@ -3871,7 +3867,7 @@ func exitsyscallfast(oldp *p) bool {
if oldp != nil {
// Wait till traceGoSysBlock event is emitted.
// This ensures consistency of the trace (the goroutine is started after it is blocked).
- for oldp.syscalltick == _g_.m.syscalltick {
+ for oldp.syscalltick == gp.m.syscalltick {
osyield()
}
}
@@ -3891,33 +3887,33 @@ func exitsyscallfast(oldp *p) bool {
//
//go:nosplit
func exitsyscallfast_reacquired() {
- _g_ := getg()
- if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
+ gp := getg()
+ if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
if trace.enabled {
- // The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
+ // The p was retaken and then entered a syscall again (since gp.m.syscalltick has changed).
// traceGoSysBlock for this syscall was already emitted,
// but here we effectively retake the p from the new syscall running on the same p.
systemstack(func() {
// Denote blocking of the new syscall.
- traceGoSysBlock(_g_.m.p.ptr())
+ traceGoSysBlock(gp.m.p.ptr())
// Denote completion of the current syscall.
traceGoSysExit(0)
})
}
- _g_.m.p.ptr().syscalltick++
+ gp.m.p.ptr().syscalltick++
}
}
func exitsyscallfast_pidle() bool {
lock(&sched.lock)
- _p_, _ := pidleget(0)
- if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
+ pp, _ := pidleget(0)
+ if pp != nil && atomic.Load(&sched.sysmonwait) != 0 {
atomic.Store(&sched.sysmonwait, 0)
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
- if _p_ != nil {
- acquirep(_p_)
+ if pp != nil {
+ acquirep(pp)
return true
}
return false
@@ -3933,12 +3929,12 @@ func exitsyscall0(gp *g) {
casgstatus(gp, _Gsyscall, _Grunnable)
dropg()
lock(&sched.lock)
- var _p_ *p
+ var pp *p
if schedEnabled(gp) {
- _p_, _ = pidleget(0)
+ pp, _ = pidleget(0)
}
var locked bool
- if _p_ == nil {
+ if pp == nil {
globrunqput(gp)
// Below, we stoplockedm if gp is locked. globrunqput releases
@@ -3952,8 +3948,8 @@ func exitsyscall0(gp *g) {
notewakeup(&sched.sysmonnote)
}
unlock(&sched.lock)
- if _p_ != nil {
- acquirep(_p_)
+ if pp != nil {
+ acquirep(pp)
execute(gp, false) // Never returns.
}
if locked {
@@ -4089,8 +4085,8 @@ func newproc(fn *funcval) {
systemstack(func() {
newg := newproc1(fn, gp, pc)
- _p_ := getg().m.p.ptr()
- runqput(_p_, newg, true)
+ pp := getg().m.p.ptr()
+ runqput(pp, newg, true)
if mainStarted {
wakep()
@@ -4102,15 +4098,13 @@ func newproc(fn *funcval) {
// address of the go statement that created this. The caller is responsible
// for adding the new g to the scheduler.
func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
- _g_ := getg()
-
if fn == nil {
fatal("go of nil func value")
}
- acquirem() // disable preemption because it can be holding p in a local var
- _p_ := _g_.m.p.ptr()
- newg := gfget(_p_)
+ mp := acquirem() // disable preemption because we hold M and P in local vars.
+ pp := mp.p.ptr()
+ newg := gfget(pp)
if newg == nil {
newg = malg(_StackMin)
casgstatus(newg, _Gidle, _Gdead)
@@ -4148,8 +4142,8 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
atomic.Xadd(&sched.ngsys, +1)
} else {
// Only user goroutines inherit pprof labels.
- if _g_.m.curg != nil {
- newg.labels = _g_.m.curg.labels
+ if mp.curg != nil {
+ newg.labels = mp.curg.labels
}
if goroutineProfile.active {
// A concurrent goroutine profile is running. It should include
@@ -4166,18 +4160,18 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
newg.tracking = true
}
casgstatus(newg, _Gdead, _Grunnable)
- gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo))
+ gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
- if _p_.goidcache == _p_.goidcacheend {
+ if pp.goidcache == pp.goidcacheend {
// Sched.goidgen is the last allocated id,
// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
// At startup sched.goidgen=0, so main goroutine receives goid=1.
- _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
- _p_.goidcache -= _GoidCacheBatch - 1
- _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
+ pp.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
+ pp.goidcache -= _GoidCacheBatch - 1
+ pp.goidcacheend = pp.goidcache + _GoidCacheBatch
}
- newg.goid = int64(_p_.goidcache)
- _p_.goidcache++
+ newg.goid = int64(pp.goidcache)
+ pp.goidcache++
if raceenabled {
newg.racectx = racegostart(callerpc)
if newg.labels != nil {
@@ -4189,7 +4183,7 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
if trace.enabled {
traceGoCreate(newg, newg.startpc)
}
- releasem(_g_.m)
+ releasem(mp)
return newg
}
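
The goidcache logic in newproc1 above amortizes contention on the global sched.goidgen counter by reserving a batch of IDs per P and handing them out locally. A standalone sketch of the same batching pattern, with hypothetical names and plain sync/atomic in place of the runtime's internal atomics:

package main

import (
	"fmt"
	"sync/atomic"
)

const idCacheBatch = 16

// idGen is the global generator, analogous to sched.goidgen: the last ID handed out.
var idGen atomic.Uint64

// idCache plays the role of p.goidcache / p.goidcacheend.
type idCache struct {
	next, end uint64
}

func (c *idCache) nextID() uint64 {
	if c.next == c.end {
		// Reserve the batch [gen+1, gen+idCacheBatch] with a single atomic add.
		c.next = idGen.Add(idCacheBatch) - idCacheBatch + 1
		c.end = c.next + idCacheBatch
	}
	id := c.next
	c.next++
	return id
}

func main() {
	var a, b idCache // e.g. one cache per worker
	fmt.Println(a.nextID(), a.nextID(), b.nextID()) // 1 2 17
}

Each refill costs one atomic operation for idCacheBatch allocations, which is the whole point of the per-P cache.
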
@@ -4230,7 +4224,7 @@ func saveAncestors(callergp *g) *[]ancestorInfo {
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
-func gfput(_p_ *p, gp *g) {
+func gfput(pp *p, gp *g) {
if readgstatus(gp) != _Gdead {
throw("gfput: bad status (not Gdead)")
}
@@ -4245,17 +4239,17 @@ func gfput(_p_ *p, gp *g) {
gp.stackguard0 = 0
}
- _p_.gFree.push(gp)
- _p_.gFree.n++
- if _p_.gFree.n >= 64 {
+ pp.gFree.push(gp)
+ pp.gFree.n++
+ if pp.gFree.n >= 64 {
var (
inc int32
stackQ gQueue
noStackQ gQueue
)
- for _p_.gFree.n >= 32 {
- gp = _p_.gFree.pop()
- _p_.gFree.n--
+ for pp.gFree.n >= 32 {
+ gp = pp.gFree.pop()
+ pp.gFree.n--
if gp.stack.lo == 0 {
noStackQ.push(gp)
} else {
@@ -4273,12 +4267,12 @@ func gfput(_p_ *p, gp *g) {
// Get from gfree list.
// If local list is empty, grab a batch from global list.
-func gfget(_p_ *p) *g {
+func gfget(pp *p) *g {
retry:
- if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
+ if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
lock(&sched.gFree.lock)
// Move a batch of free Gs to the P.
- for _p_.gFree.n < 32 {
+ for pp.gFree.n < 32 {
// Prefer Gs with stacks.
gp := sched.gFree.stack.pop()
if gp == nil {
@@ -4288,17 +4282,17 @@ retry:
}
}
sched.gFree.n--
- _p_.gFree.push(gp)
- _p_.gFree.n++
+ pp.gFree.push(gp)
+ pp.gFree.n++
}
unlock(&sched.gFree.lock)
goto retry
}
- gp := _p_.gFree.pop()
+ gp := pp.gFree.pop()
if gp == nil {
return nil
}
- _p_.gFree.n--
+ pp.gFree.n--
if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
// Deallocate old stack. We kept it in gfput because it was the
// right size when the goroutine was put on the free list, but
@@ -4331,15 +4325,15 @@ retry:
}
// Purge all cached G's from gfree list to the global list.
-func gfpurge(_p_ *p) {
+func gfpurge(pp *p) {
var (
inc int32
stackQ gQueue
noStackQ gQueue
)
- for !_p_.gFree.empty() {
- gp := _p_.gFree.pop()
- _p_.gFree.n--
+ for !pp.gFree.empty() {
+ gp := pp.gFree.pop()
+ pp.gFree.n--
if gp.stack.lo == 0 {
noStackQ.push(gp)
} else {
@@ -4368,9 +4362,9 @@ func dolockOSThread() {
if GOARCH == "wasm" {
return // no threads on wasm yet
}
- _g_ := getg()
- _g_.m.lockedg.set(_g_)
- _g_.lockedm.set(_g_.m)
+ gp := getg()
+ gp.m.lockedg.set(gp)
+ gp.lockedm.set(gp.m)
}
//go:nosplit
@@ -4396,10 +4390,10 @@ func LockOSThread() {
// while we're in a known-good state.
startTemplateThread()
}
- _g_ := getg()
- _g_.m.lockedExt++
- if _g_.m.lockedExt == 0 {
- _g_.m.lockedExt--
+ gp := getg()
+ gp.m.lockedExt++
+ if gp.m.lockedExt == 0 {
+ gp.m.lockedExt--
panic("LockOSThread nesting overflow")
}
dolockOSThread()
@@ -4420,12 +4414,12 @@ func dounlockOSThread() {
if GOARCH == "wasm" {
return // no threads on wasm yet
}
- _g_ := getg()
- if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
+ gp := getg()
+ if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
return
}
- _g_.m.lockedg = 0
- _g_.lockedm = 0
+ gp.m.lockedg = 0
+ gp.lockedm = 0
}
//go:nosplit
@@ -4443,21 +4437,21 @@ func dounlockOSThread() {
// the goroutine locked to the OS thread until the goroutine (and
// hence the thread) exits.
func UnlockOSThread() {
- _g_ := getg()
- if _g_.m.lockedExt == 0 {
+ gp := getg()
+ if gp.m.lockedExt == 0 {
return
}
- _g_.m.lockedExt--
+ gp.m.lockedExt--
dounlockOSThread()
}
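
LockOSThread/UnlockOSThread above maintain the lockedExt nesting count behind the exported runtime API. A typical user-level use is wiring a goroutine to a single OS thread for a thread-affine library or per-thread OS state; threadAffineWork here is just a placeholder:

package main

import (
	"fmt"
	"runtime"
)

// threadAffineWork stands in for anything that must stay on one OS thread,
// such as certain cgo libraries, GUI/OpenGL contexts, or per-thread OS state.
func threadAffineWork(i int) { fmt.Println("step", i) }

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		runtime.LockOSThread()
		defer runtime.UnlockOSThread() // must balance the Lock call
		for i := 0; i < 3; i++ {
			threadAffineWork(i)
		}
	}()
	<-done
}
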
//go:nosplit
func unlockOSThread() {
- _g_ := getg()
- if _g_.m.lockedInt == 0 {
+ gp := getg()
+ if gp.m.lockedInt == 0 {
systemstack(badunlockosthread)
}
- _g_.m.lockedInt--
+ gp.m.lockedInt--
dounlockOSThread()
}
@@ -4467,8 +4461,8 @@ func badunlockosthread() {
func gcount() int32 {
n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys))
- for _, _p_ := range allp {
- n -= _p_.gFree.n
+ for _, pp := range allp {
+ n -= pp.gFree.n
}
// All these variables can be changed concurrently, so the result can be inconsistent.
@@ -4630,8 +4624,8 @@ func setcpuprofilerate(hz int32) {
// Disable preemption, otherwise we can be rescheduled to another thread
// that has profiling enabled.
- _g_ := getg()
- _g_.m.locks++
+ gp := getg()
+ gp.m.locks++
// Stop profiler on this thread so that it is safe to lock prof.
// if a profiling signal came in while we had prof locked,
@@ -4655,7 +4649,7 @@ func setcpuprofilerate(hz int32) {
setThreadCPUProfiler(hz)
}
- _g_.m.locks--
+ gp.m.locks--
}
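
setcpuprofilerate is the runtime half of CPU profiling; from user code it is normally reached through runtime/pprof (or runtime.SetCPUProfileRate directly). A minimal profiling harness:

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.pprof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := pprof.StartCPUProfile(f); err != nil { // ~100 Hz sampling by default
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	// Workload to be profiled.
	sum := 0
	for i := 0; i < 10_000_000; i++ {
		sum += i
	}
	_ = sum
}

Inspect the result with: go tool pprof cpu.pprof
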
// init initializes pp, which may be a freshly allocated p or a
@@ -4852,32 +4846,32 @@ func procresize(nprocs int32) *p {
atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
}
- _g_ := getg()
- if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
+ gp := getg()
+ if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
// continue to use the current P
- _g_.m.p.ptr().status = _Prunning
- _g_.m.p.ptr().mcache.prepareForSweep()
+ gp.m.p.ptr().status = _Prunning
+ gp.m.p.ptr().mcache.prepareForSweep()
} else {
// release the current P and acquire allp[0].
//
// We must do this before destroying our current P
// because p.destroy itself has write barriers, so we
// need to do that from a valid P.
- if _g_.m.p != 0 {
+ if gp.m.p != 0 {
if trace.enabled {
// Pretend that we were descheduled
// and then scheduled again to keep
// the trace sane.
traceGoSched()
- traceProcStop(_g_.m.p.ptr())
+ traceProcStop(gp.m.p.ptr())
}
- _g_.m.p.ptr().m = 0
+ gp.m.p.ptr().m = 0
}
- _g_.m.p = 0
- p := allp[0]
- p.m = 0
- p.status = _Pidle
- acquirep(p)
+ gp.m.p = 0
+ pp := allp[0]
+ pp.m = 0
+ pp.status = _Pidle
+ acquirep(pp)
if trace.enabled {
traceGoStart()
}
@@ -4888,8 +4882,8 @@ func procresize(nprocs int32) *p {
// release resources from unused P's
for i := nprocs; i < old; i++ {
- p := allp[i]
- p.destroy()
+ pp := allp[i]
+ pp.destroy()
// can't free P itself because it can be referenced by an M in syscall
}
@@ -4904,17 +4898,17 @@ func procresize(nprocs int32) *p {
var runnablePs *p
for i := nprocs - 1; i >= 0; i-- {
- p := allp[i]
- if _g_.m.p.ptr() == p {
+ pp := allp[i]
+ if gp.m.p.ptr() == pp {
continue
}
- p.status = _Pidle
- if runqempty(p) {
- pidleput(p, now)
+ pp.status = _Pidle
+ if runqempty(pp) {
+ pidleput(pp, now)
} else {
- p.m.set(mget())
- p.link.set(runnablePs)
- runnablePs = p
+ pp.m.set(mget())
+ pp.link.set(runnablePs)
+ runnablePs = pp
}
}
stealOrder.reset(uint32(nprocs))
@@ -4930,18 +4924,18 @@ func procresize(nprocs int32) *p {
// Associate p and the current m.
//
// This function is allowed to have write barriers even if the caller
-// isn't because it immediately acquires _p_.
+// isn't because it immediately acquires pp.
//
//go:yeswritebarrierrec
-func acquirep(_p_ *p) {
+func acquirep(pp *p) {
// Do the part that isn't allowed to have write barriers.
- wirep(_p_)
+ wirep(pp)
// Have p; write barriers now allowed.
// Perform deferred mcache flush before this P can allocate
// from a potentially stale mcache.
- _p_.mcache.prepareForSweep()
+ pp.mcache.prepareForSweep()
if trace.enabled {
traceProcStart()
@@ -4949,49 +4943,49 @@ func acquirep(_p_ *p) {
}
// wirep is the first step of acquirep, which actually associates the
-// current M to _p_. This is broken out so we can disallow write
+// current M to pp. This is broken out so we can disallow write
// barriers for this part, since we don't yet have a P.
//
//go:nowritebarrierrec
//go:nosplit
-func wirep(_p_ *p) {
- _g_ := getg()
+func wirep(pp *p) {
+ gp := getg()
- if _g_.m.p != 0 {
+ if gp.m.p != 0 {
throw("wirep: already in go")
}
- if _p_.m != 0 || _p_.status != _Pidle {
+ if pp.m != 0 || pp.status != _Pidle {
id := int64(0)
- if _p_.m != 0 {
- id = _p_.m.ptr().id
+ if pp.m != 0 {
+ id = pp.m.ptr().id
}
- print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
+ print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
throw("wirep: invalid p state")
}
- _g_.m.p.set(_p_)
- _p_.m.set(_g_.m)
- _p_.status = _Prunning
+ gp.m.p.set(pp)
+ pp.m.set(gp.m)
+ pp.status = _Prunning
}
// Disassociate p and the current m.
func releasep() *p {
- _g_ := getg()
+ gp := getg()
- if _g_.m.p == 0 {
+ if gp.m.p == 0 {
throw("releasep: invalid arg")
}
- _p_ := _g_.m.p.ptr()
- if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
- print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
+ pp := gp.m.p.ptr()
+ if pp.m.ptr() != gp.m || pp.status != _Prunning {
+ print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
throw("releasep: invalid p state")
}
if trace.enabled {
- traceProcStop(_g_.m.p.ptr())
+ traceProcStop(gp.m.p.ptr())
}
- _g_.m.p = 0
- _p_.m = 0
- _p_.status = _Pidle
- return _p_
+ gp.m.p = 0
+ pp.m = 0
+ pp.status = _Pidle
+ return pp
}
func incidlelocked(v int32) {
@@ -5099,8 +5093,8 @@ func checkdead() {
}
// There are no goroutines running, so we can look at the P's.
- for _, _p_ := range allp {
- if len(_p_.timers) > 0 {
+ for _, pp := range allp {
+ if len(pp.timers) > 0 {
return
}
}
@@ -5289,23 +5283,23 @@ func retake(now int64) uint32 {
// temporarily drop the allpLock. Hence, we need to re-fetch
// allp each time around the loop.
for i := 0; i < len(allp); i++ {
- _p_ := allp[i]
- if _p_ == nil {
+ pp := allp[i]
+ if pp == nil {
// This can happen if procresize has grown
// allp but not yet created new Ps.
continue
}
- pd := &_p_.sysmontick
- s := _p_.status
+ pd := &pp.sysmontick
+ s := pp.status
sysretake := false
if s == _Prunning || s == _Psyscall {
// Preempt G if it's running for too long.
- t := int64(_p_.schedtick)
+ t := int64(pp.schedtick)
if int64(pd.schedtick) != t {
pd.schedtick = uint32(t)
pd.schedwhen = now
} else if pd.schedwhen+forcePreemptNS <= now {
- preemptone(_p_)
+ preemptone(pp)
// In case of syscall, preemptone() doesn't
// work, because there is no M wired to P.
sysretake = true
@@ -5313,7 +5307,7 @@ func retake(now int64) uint32 {
}
if s == _Psyscall {
// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
- t := int64(_p_.syscalltick)
+ t := int64(pp.syscalltick)
if !sysretake && int64(pd.syscalltick) != t {
pd.syscalltick = uint32(t)
pd.syscallwhen = now
@@ -5322,7 +5316,7 @@ func retake(now int64) uint32 {
// On the one hand we don't want to retake Ps if there is no other work to do,
// but on the other hand we want to retake them eventually
// because they can prevent the sysmon thread from deep sleep.
- if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
+ if runqempty(pp) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
continue
}
// Drop allpLock so we can take sched.lock.
@@ -5332,14 +5326,14 @@ func retake(now int64) uint32 {
// Otherwise the M from which we retake can exit the syscall,
// increment nmidle and report deadlock.
incidlelocked(-1)
- if atomic.Cas(&_p_.status, s, _Pidle) {
+ if atomic.Cas(&pp.status, s, _Pidle) {
if trace.enabled {
- traceGoSysBlock(_p_)
- traceProcStop(_p_)
+ traceGoSysBlock(pp)
+ traceProcStop(pp)
}
n++
- _p_.syscalltick++
- handoffp(_p_)
+ pp.syscalltick++
+ handoffp(pp)
}
incidlelocked(1)
lock(&allpLock)
@@ -5356,11 +5350,11 @@ func retake(now int64) uint32 {
// Returns true if preemption request was issued to at least one goroutine.
func preemptall() bool {
res := false
- for _, _p_ := range allp {
- if _p_.status != _Prunning {
+ for _, pp := range allp {
+ if pp.status != _Prunning {
continue
}
- if preemptone(_p_) {
+ if preemptone(pp) {
res = true
}
}
@@ -5377,8 +5371,8 @@ func preemptall() bool {
// The actual preemption will happen at some point in the future
// and will be indicated by the gp->status no longer being
// Grunning
-func preemptone(_p_ *p) bool {
- mp := _p_.m.ptr()
+func preemptone(pp *p) bool {
+ mp := pp.m.ptr()
if mp == nil || mp == getg().m {
return false
}
@@ -5397,7 +5391,7 @@ func preemptone(_p_ *p) bool {
// Request an async preemption of this P.
if preemptMSupported && debug.asyncpreemptoff == 0 {
- _p_.preempt = true
+ pp.preempt = true
preemptM(mp)
}
@@ -5420,16 +5414,16 @@ func schedtrace(detailed bool) {
// We must be careful while reading data from P's, M's and G's.
// Even if we hold schedlock, most data can be changed concurrently.
// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
- for i, _p_ := range allp {
- mp := _p_.m.ptr()
- h := atomic.Load(&_p_.runqhead)
- t := atomic.Load(&_p_.runqtail)
+ for i, pp := range allp {
+ mp := pp.m.ptr()
+ h := atomic.Load(&pp.runqhead)
+ t := atomic.Load(&pp.runqtail)
if detailed {
id := int64(-1)
if mp != nil {
id = mp.id
}
- print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n")
+ print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers), "\n")
} else {
// In non-detailed mode format lengths of per-P run queues as:
// [len1 len2 len3 len4]
@@ -5450,12 +5444,12 @@ func schedtrace(detailed bool) {
}
for mp := allm; mp != nil; mp = mp.alllink {
- _p_ := mp.p.ptr()
+ pp := mp.p.ptr()
gp := mp.curg
lockedg := mp.lockedg.ptr()
id1 := int32(-1)
- if _p_ != nil {
- id1 = _p_.id
+ if pp != nil {
+ id1 = pp.id
}
id2 := int64(-1)
if gp != nil {
@@ -5592,7 +5586,7 @@ func globrunqputbatch(batch *gQueue, n int32) {
// Try get a batch of G's from the global runnable queue.
// sched.lock must be held.
-func globrunqget(_p_ *p, max int32) *g {
+func globrunqget(pp *p, max int32) *g {
assertLockHeld(&sched.lock)
if sched.runqsize == 0 {
@@ -5606,8 +5600,8 @@ func globrunqget(_p_ *p, max int32) *g {
if max > 0 && n > max {
n = max
}
- if n > int32(len(_p_.runq))/2 {
- n = int32(len(_p_.runq)) / 2
+ if n > int32(len(pp.runq))/2 {
+ n = int32(len(pp.runq)) / 2
}
sched.runqsize -= n
@@ -5616,7 +5610,7 @@ func globrunqget(_p_ *p, max int32) *g {
n--
for ; n > 0; n-- {
gp1 := sched.runq.pop()
- runqput(_p_, gp1, false)
+ runqput(pp, gp1, false)
}
return gp
}
@@ -5696,21 +5690,21 @@ func updateTimerPMask(pp *p) {
// May run during STW, so write barriers are not allowed.
//
//go:nowritebarrierrec
-func pidleput(_p_ *p, now int64) int64 {
+func pidleput(pp *p, now int64) int64 {
assertLockHeld(&sched.lock)
- if !runqempty(_p_) {
+ if !runqempty(pp) {
throw("pidleput: P has non-empty run queue")
}
if now == 0 {
now = nanotime()
}
- updateTimerPMask(_p_) // clear if there are no timers.
- idlepMask.set(_p_.id)
- _p_.link = sched.pidle
- sched.pidle.set(_p_)
+ updateTimerPMask(pp) // clear if there are no timers.
+ idlepMask.set(pp.id)
+ pp.link = sched.pidle
+ sched.pidle.set(pp)
atomic.Xadd(&sched.npidle, 1)
- if !_p_.limiterEvent.start(limiterEventIdle, now) {
+ if !pp.limiterEvent.start(limiterEventIdle, now) {
throw("must be able to track idle limiter event")
}
return now
@@ -5726,33 +5720,33 @@ func pidleput(_p_ *p, now int64) int64 {
func pidleget(now int64) (*p, int64) {
assertLockHeld(&sched.lock)
- _p_ := sched.pidle.ptr()
- if _p_ != nil {
+ pp := sched.pidle.ptr()
+ if pp != nil {
// Timer may get added at any time now.
if now == 0 {
now = nanotime()
}
- timerpMask.set(_p_.id)
- idlepMask.clear(_p_.id)
- sched.pidle = _p_.link
+ timerpMask.set(pp.id)
+ idlepMask.clear(pp.id)
+ sched.pidle = pp.link
atomic.Xadd(&sched.npidle, -1)
- _p_.limiterEvent.stop(limiterEventIdle, now)
+ pp.limiterEvent.stop(limiterEventIdle, now)
}
- return _p_, now
+ return pp, now
}
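
pidleput/pidleget above push and pop idle Ps on an intrusive singly linked list (p.link / sched.pidle) under sched.lock, with sched.npidle tracking the count. A minimal sketch of that list shape, using a mutex and illustrative names rather than the runtime's lock and mask bookkeeping:

package main

import (
	"fmt"
	"sync"
)

// proc is a stand-in for the runtime's p; link makes the list intrusive,
// like p.link, so no extra nodes are allocated.
type proc struct {
	id   int32
	link *proc
}

type idleList struct {
	mu    sync.Mutex // plays the role of sched.lock
	head  *proc      // sched.pidle
	count int        // sched.npidle
}

func (l *idleList) put(pp *proc) {
	l.mu.Lock()
	pp.link = l.head
	l.head = pp
	l.count++
	l.mu.Unlock()
}

func (l *idleList) get() *proc {
	l.mu.Lock()
	defer l.mu.Unlock()
	pp := l.head
	if pp != nil {
		l.head = pp.link
		l.count--
	}
	return pp
}

func main() {
	var idle idleList
	idle.put(&proc{id: 0})
	idle.put(&proc{id: 1})
	fmt.Println(idle.get().id, idle.get().id, idle.get()) // 1 0 <nil>
}
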
-// runqempty reports whether _p_ has no Gs on its local run queue.
+// runqempty reports whether pp has no Gs on its local run queue.
// It never returns true spuriously.
-func runqempty(_p_ *p) bool {
- // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
- // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
+func runqempty(pp *p) bool {
+ // Defend against a race where 1) pp has G1 in runqnext but runqhead == runqtail,
+ // 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runqnext.
// Simply observing that runqhead == runqtail and then observing that runqnext == nil
// does not mean the queue is empty.
for {
- head := atomic.Load(&_p_.runqhead)
- tail := atomic.Load(&_p_.runqtail)
- runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
- if tail == atomic.Load(&_p_.runqtail) {
+ head := atomic.Load(&pp.runqhead)
+ tail := atomic.Load(&pp.runqtail)
+ runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
+ if tail == atomic.Load(&pp.runqtail) {
return head == tail && runnext == 0
}
}
@@ -5771,18 +5765,18 @@ const randomizeScheduler = raceenabled
// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
-// If next is true, runqput puts g in the _p_.runnext slot.
+// If next is true, runqput puts g in the pp.runnext slot.
// If the run queue is full, runnext puts g on the global queue.
// Executed only by the owner P.
-func runqput(_p_ *p, gp *g, next bool) {
+func runqput(pp *p, gp *g, next bool) {
if randomizeScheduler && next && fastrandn(2) == 0 {
next = false
}
if next {
retryNext:
- oldnext := _p_.runnext
- if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
+ oldnext := pp.runnext
+ if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
goto retryNext
}
if oldnext == 0 {
@@ -5793,14 +5787,14 @@ func runqput(_p_ *p, gp *g, next bool) {
}
retry:
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
- t := _p_.runqtail
- if t-h < uint32(len(_p_.runq)) {
- _p_.runq[t%uint32(len(_p_.runq))].set(gp)
- atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
+ t := pp.runqtail
+ if t-h < uint32(len(pp.runq)) {
+ pp.runq[t%uint32(len(pp.runq))].set(gp)
+ atomic.StoreRel(&pp.runqtail, t+1) // store-release, makes the item available for consumption
return
}
- if runqputslow(_p_, gp, h, t) {
+ if runqputslow(pp, gp, h, t) {
return
}
// the queue is not full, now the put above must succeed
@@ -5809,19 +5803,19 @@ retry:
// Put g and a batch of work from local runnable queue on global queue.
// Executed only by the owner P.
-func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
- var batch [len(_p_.runq)/2 + 1]*g
+func runqputslow(pp *p, gp *g, h, t uint32) bool {
+ var batch [len(pp.runq)/2 + 1]*g
// First, grab a batch from local queue.
n := t - h
n = n / 2
- if n != uint32(len(_p_.runq)/2) {
+ if n != uint32(len(pp.runq)/2) {
throw("runqputslow: queue is not full")
}
for i := uint32(0); i < n; i++ {
- batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
+ batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
}
- if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if !atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
return false
}
batch[n] = gp
@@ -5886,50 +5880,50 @@ func runqputbatch(pp *p, q *gQueue, qsize int) {
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice. Otherwise, it should start a new time slice.
// Executed only by the owner P.
-func runqget(_p_ *p) (gp *g, inheritTime bool) {
+func runqget(pp *p) (gp *g, inheritTime bool) {
// If there's a runnext, it's the next G to run.
- next := _p_.runnext
+ next := pp.runnext
// If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
// because other Ps can race to set runnext to 0, but only the current P can set it to non-0.
// Hence, there's no need to retry this CAS if it falls.
- if next != 0 && _p_.runnext.cas(next, 0) {
+ if next != 0 && pp.runnext.cas(next, 0) {
return next.ptr(), true
}
for {
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := _p_.runqtail
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
+ t := pp.runqtail
if t == h {
return nil, false
}
- gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
- if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume
+ gp := pp.runq[h%uint32(len(pp.runq))].ptr()
+ if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume
return gp, false
}
}
}
-// runqdrain drains the local runnable queue of _p_ and returns all goroutines in it.
+// runqdrain drains the local runnable queue of pp and returns all goroutines in it.
// Executed only by the owner P.
-func runqdrain(_p_ *p) (drainQ gQueue, n uint32) {
- oldNext := _p_.runnext
- if oldNext != 0 && _p_.runnext.cas(oldNext, 0) {
+func runqdrain(pp *p) (drainQ gQueue, n uint32) {
+ oldNext := pp.runnext
+ if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
drainQ.pushBack(oldNext.ptr())
n++
}
retry:
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := _p_.runqtail
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
+ t := pp.runqtail
qn := t - h
if qn == 0 {
return
}
- if qn > uint32(len(_p_.runq)) { // read inconsistent h and t
+ if qn > uint32(len(pp.runq)) { // read inconsistent h and t
goto retry
}
- if !atomic.CasRel(&_p_.runqhead, h, h+qn) { // cas-release, commits consume
+ if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
goto retry
}
@@ -5941,34 +5935,34 @@ retry:
// meanwhile, other P's can't access to all G's in local P's runnable queue and steal them.
// See https://groups.google.com/g/golang-dev/c/0pTKxEKhHSc/m/6Q85QjdVBQAJ for more details.
for i := uint32(0); i < qn; i++ {
- gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
+ gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
drainQ.pushBack(gp)
n++
}
return
}
-// Grabs a batch of goroutines from _p_'s runnable queue into batch.
+// Grabs a batch of goroutines from pp's runnable queue into batch.
// Batch is a ring buffer starting at batchHead.
// Returns number of grabbed goroutines.
// Can be executed by any P.
-func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
+func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
for {
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers
- t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
+ t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer
n := t - h
n = n - n/2
if n == 0 {
if stealRunNextG {
- // Try to steal from _p_.runnext.
- if next := _p_.runnext; next != 0 {
- if _p_.status == _Prunning {
- // Sleep to ensure that _p_ isn't about to run the g
+ // Try to steal from pp.runnext.
+ if next := pp.runnext; next != 0 {
+ if pp.status == _Prunning {
+ // Sleep to ensure that pp isn't about to run the g
// we are about to steal.
// The important use case here is when the g running
- // on _p_ ready()s another g and then almost
+ // on pp ready()s another g and then almost
// immediately blocks. Instead of stealing runnext
- // in this window, back off to give _p_ a chance to
+ // in this window, back off to give pp a chance to
// schedule runnext. This will avoid thrashing gs
// between different Ps.
// A sync chan send/recv takes ~50ns as of time of
@@ -5982,7 +5976,7 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
osyield()
}
}
- if !_p_.runnext.cas(next, 0) {
+ if !pp.runnext.cas(next, 0) {
continue
}
batch[batchHead%uint32(len(batch))] = next
@@ -5991,14 +5985,14 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
}
return 0
}
- if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
+ if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
continue
}
for i := uint32(0); i < n; i++ {
- g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
+ g := pp.runq[(h+i)%uint32(len(pp.runq))]
batch[(batchHead+i)%uint32(len(batch))] = g
}
- if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
+ if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
return n
}
}
@@ -6007,22 +6001,22 @@ func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool
// Steal half of elements from local runnable queue of p2
// and put onto local runnable queue of p.
// Returns one of the stolen elements (or nil if failed).
-func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
- t := _p_.runqtail
- n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
+func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
+ t := pp.runqtail
+ n := runqgrab(p2, &pp.runq, t, stealRunNextG)
if n == 0 {
return nil
}
n--
- gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
+ gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
if n == 0 {
return gp
}
- h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
- if t-h+n >= uint32(len(_p_.runq)) {
+ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
+ if t-h+n >= uint32(len(pp.runq)) {
throw("runqsteal: runq overflow")
}
- atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
+ atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the item available for consumption
return gp
}
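
runqput, runqget and runqsteal above implement a fixed-size ring plus a one-element runnext fast path, using load-acquire and CAS-release atomics so thieves can scan a victim's queue without its cooperation. The sketch below keeps only the ring arithmetic and the runnext preference and hides the lock-free details behind a mutex; all names are illustrative, not the runtime's:

package main

import (
	"fmt"
	"sync"
)

type task int

// runQueue is a deliberately simplified local run queue: a bounded ring plus
// a runnext slot. head and tail are monotonic counters, indexed modulo len.
type runQueue struct {
	mu      sync.Mutex
	ring    [8]task
	head    uint32 // next slot to pop
	tail    uint32 // next slot to push
	runnext task
	hasNext bool
}

// put adds t; if next is true it displaces runnext and pushes the old value
// onto the ring (the real runqput kicks it to the ring or the global queue).
func (q *runQueue) put(t task, next bool) bool {
	q.mu.Lock()
	defer q.mu.Unlock()
	if next {
		if q.hasNext {
			t, q.runnext = q.runnext, t
		} else {
			q.runnext, q.hasNext = t, true
			return true
		}
	}
	if q.tail-q.head >= uint32(len(q.ring)) {
		return false // full; the runtime would spill half to the global queue
	}
	q.ring[q.tail%uint32(len(q.ring))] = t
	q.tail++
	return true
}

func (q *runQueue) get() (task, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.hasNext { // runnext is always preferred
		q.hasNext = false
		return q.runnext, true
	}
	if q.head == q.tail {
		return 0, false
	}
	t := q.ring[q.head%uint32(len(q.ring))]
	q.head++
	return t, true
}

// stealHalf moves roughly half of victim's ring into q, like runqsteal.
func (q *runQueue) stealHalf(victim *runQueue) int {
	victim.mu.Lock()
	n := (victim.tail - victim.head) / 2
	stolen := make([]task, 0, n)
	for i := uint32(0); i < n; i++ {
		stolen = append(stolen, victim.ring[victim.head%uint32(len(victim.ring))])
		victim.head++
	}
	victim.mu.Unlock()
	for _, t := range stolen {
		q.put(t, false)
	}
	return int(n)
}

func main() {
	var a, b runQueue
	for i := 1; i <= 6; i++ {
		a.put(task(i), false)
	}
	a.put(task(99), true) // lands in runnext
	fmt.Println(b.stealHalf(&a)) // 3
	t, _ := a.get()
	fmt.Println(t) // 99: runnext wins over the ring
}

Stealing half rather than one item keeps thieves from returning immediately for more work, which is the same load-balancing choice the runtime makes.
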
@@ -6143,8 +6137,8 @@ func setMaxThreads(in int) (out int) {
//go:nosplit
func procPin() int {
- _g_ := getg()
- mp := _g_.m
+ gp := getg()
+ mp := gp.m
mp.locks++
return int(mp.p.ptr().id)
@@ -6152,8 +6146,8 @@ func procPin() int {
//go:nosplit
func procUnpin() {
- _g_ := getg()
- _g_.m.locks--
+ gp := getg()
+ gp.m.locks--
}
//go:linkname sync_runtime_procPin sync.runtime_procPin
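
procPin/procUnpin are linknamed into package sync, where (among other things) they let sync.Pool keep mostly uncontended per-P caches. User code never sees that machinery; the observable effect is simply that Pool's Get and Put are cheap:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

func render(s string) string {
	buf := bufPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()
		bufPool.Put(buf)
	}()
	fmt.Fprintf(buf, "hello, %s", s)
	return buf.String()
}

func main() {
	fmt.Println(render("world"))
}
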
diff --git a/src/runtime/race.go b/src/runtime/race.go
index 4694288082..a67c8b9cdf 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -67,21 +67,21 @@ func RaceReleaseMerge(addr unsafe.Pointer) {
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
func RaceDisable() {
- _g_ := getg()
- if _g_.raceignore == 0 {
- racecall(&__tsan_go_ignore_sync_begin, _g_.racectx, 0, 0, 0)
+ gp := getg()
+ if gp.raceignore == 0 {
+ racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
}
- _g_.raceignore++
+ gp.raceignore++
}
//go:nosplit
// RaceEnable re-enables handling of race events in the current goroutine.
func RaceEnable() {
- _g_ := getg()
- _g_.raceignore--
- if _g_.raceignore == 0 {
- racecall(&__tsan_go_ignore_sync_end, _g_.racectx, 0, 0, 0)
+ gp := getg()
+ gp.raceignore--
+ if gp.raceignore == 0 {
+ racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
}
}
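
RaceDisable and RaceEnable above gate per-goroutine event reporting inside the TSAN-based detector; the detector itself is enabled per build with -race. For context, a minimal program it flags:

// Build and run with the detector: go run -race racy.go
package main

import (
	"fmt"
	"time"
)

func main() {
	counter := 0
	go func() { counter++ }() // unsynchronized write...
	counter++                 // ...races with this one
	time.Sleep(100 * time.Millisecond)
	fmt.Println(counter) // under -race this prints a DATA RACE report
}
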
@@ -453,12 +453,12 @@ func racefree(p unsafe.Pointer, sz uintptr) {
//go:nosplit
func racegostart(pc uintptr) uintptr {
- _g_ := getg()
+ gp := getg()
var spawng *g
- if _g_.m.curg != nil {
- spawng = _g_.m.curg
+ if gp.m.curg != nil {
+ spawng = gp.m.curg
} else {
- spawng = _g_
+ spawng = gp
}
var racectx uintptr
@@ -478,8 +478,8 @@ func racectxend(racectx uintptr) {
//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
- _g_ := getg()
- if _g_ != _g_.m.curg {
+ gp := getg()
+ if gp != gp.m.curg {
// The call is coming from manual instrumentation of Go code running on g0/gsignal.
// Not interesting.
return
@@ -495,8 +495,8 @@ func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
- _g_ := getg()
- if _g_ != _g_.m.curg {
+ gp := getg()
+ if gp != gp.m.curg {
// The call is coming from manual instrumentation of Go code running on g0/gsignal.
// Not interesting.
return
diff --git a/src/runtime/rdebug.go b/src/runtime/rdebug.go
index 1b213f1934..7ecb2a52ee 100644
--- a/src/runtime/rdebug.go
+++ b/src/runtime/rdebug.go
@@ -15,8 +15,8 @@ func setMaxStack(in int) (out int) {
//go:linkname setPanicOnFault runtime/debug.setPanicOnFault
func setPanicOnFault(new bool) (old bool) {
- _g_ := getg()
- old = _g_.paniconfault
- _g_.paniconfault = new
+ gp := getg()
+ old = gp.paniconfault
+ gp.paniconfault = new
return old
}
diff --git a/src/runtime/runtime.go b/src/runtime/runtime.go
index 2cf93abefa..e9fd56b46d 100644
--- a/src/runtime/runtime.go
+++ b/src/runtime/runtime.go
@@ -12,6 +12,7 @@ import (
//go:generate go run wincallback.go
//go:generate go run mkduff.go
//go:generate go run mkfastlog2table.go
+//go:generate go run mklockrank.go -o lockrank.go
var ticks ticksType
diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go
index e307901fc2..b0a458d187 100644
--- a/src/runtime/runtime1.go
+++ b/src/runtime/runtime1.go
@@ -35,13 +35,13 @@ var traceback_env uint32
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
- _g_ := getg()
+ gp := getg()
t := atomic.Load(&traceback_cache)
crash = t&tracebackCrash != 0
- all = _g_.m.throwing >= throwTypeUser || t&tracebackAll != 0
- if _g_.m.traceback != 0 {
- level = int32(_g_.m.traceback)
- } else if _g_.m.throwing >= throwTypeRuntime {
+ all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
+ if gp.m.traceback != 0 {
+ level = int32(gp.m.traceback)
+ } else if gp.m.throwing >= throwTypeRuntime {
// Always include runtime frames in runtime throws unless
// otherwise overridden by m.traceback.
level = 2
@@ -474,18 +474,18 @@ func timediv(v int64, div int32, rem *int32) int32 {
//go:nosplit
func acquirem() *m {
- _g_ := getg()
- _g_.m.locks++
- return _g_.m
+ gp := getg()
+ gp.m.locks++
+ return gp.m
}
//go:nosplit
func releasem(mp *m) {
- _g_ := getg()
+ gp := getg()
mp.locks--
- if mp.locks == 0 && _g_.preempt {
+ if mp.locks == 0 && gp.preempt {
// restore the preemption request in case we've cleared it in newstack
- _g_.stackguard0 = stackPreempt
+ gp.stackguard0 = stackPreempt
}
}
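
gotraceback above decodes the GOTRACEBACK setting (plus per-M overrides) into a traceback level. From outside the runtime it is just an environment variable; a small crashing program makes the levels visible:

// Try: GOTRACEBACK=none ./crash    (suppress the goroutine dumps)
//      GOTRACEBACK=all ./crash     (dump all user goroutines)
//      GOTRACEBACK=system ./crash  (include runtime frames and system goroutines)
package main

func worker(ch chan int) {
	for v := range ch {
		_ = 10 / v // panics with a divide-by-zero runtime error when v == 0
	}
}

func main() {
	ch := make(chan int)
	go worker(ch)
	ch <- 0
	select {} // block; the unrecovered panic in worker crashes the process
}
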
diff --git a/src/runtime/runtime_test.go b/src/runtime/runtime_test.go
index 0bdd01b086..018a8dbaa6 100644
--- a/src/runtime/runtime_test.go
+++ b/src/runtime/runtime_test.go
@@ -18,7 +18,12 @@ import (
"unsafe"
)
-var flagQuick = flag.Bool("quick", false, "skip slow tests, for second run in all.bash")
+// flagQuick is set by the -quick option to skip some relatively slow tests.
+// This is used by the cmd/dist test runtime:cpu124.
+// The cmd/dist test passes both -test.short and -quick;
+// there are tests that only check testing.Short, and those tests will
+// not be skipped if only -quick is used.
+var flagQuick = flag.Bool("quick", false, "skip slow tests, for cmd/dist test runtime:cpu124")
func init() {
// We're testing the runtime, so make tracebacks show things
diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go
index 0be499b2e9..f241df69f1 100644
--- a/src/runtime/signal_unix.go
+++ b/src/runtime/signal_unix.go
@@ -433,9 +433,9 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
return
}
c := &sigctxt{info, ctx}
- g := sigFetchG(c)
- setg(g)
- if g == nil {
+ gp := sigFetchG(c)
+ setg(gp)
+ if gp == nil {
if sig == _SIGPROF {
// Some platforms (Linux) have per-thread timers, which we use in
// combination with the process-wide timer. Avoid double-counting.
@@ -462,22 +462,22 @@ func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
return
}
- setg(g.m.gsignal)
+ setg(gp.m.gsignal)
// If some non-Go code called sigaltstack, adjust.
var gsignalStack gsignalStack
- setStack := adjustSignalStack(sig, g.m, &gsignalStack)
+ setStack := adjustSignalStack(sig, gp.m, &gsignalStack)
if setStack {
- g.m.gsignal.stktopsp = getcallersp()
+ gp.m.gsignal.stktopsp = getcallersp()
}
- if g.stackguard0 == stackFork {
+ if gp.stackguard0 == stackFork {
signalDuringFork(sig)
}
c.fixsigcode(sig)
- sighandler(sig, info, ctx, g)
- setg(g)
+ sighandler(sig, info, ctx, gp)
+ setg(gp)
if setStack {
restoreGsignalStack(&gsignalStack)
}
@@ -596,7 +596,7 @@ var testSigusr1 func(gp *g) bool
// sighandler is invoked when a signal occurs. The global g will be
// set to a gsignal goroutine and we will be running on the alternate
-// signal stack. The parameter g will be the value of the global g
+// signal stack. The parameter gp will be the value of the global g
// when the signal occurred. The sig, info, and ctxt parameters are
// from the system signal handler: they are the parameters passed when
// the SA is passed to the sigaction system call.
@@ -606,9 +606,11 @@ var testSigusr1 func(gp *g) bool
//
//go:nowritebarrierrec
func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
- _g_ := getg()
+ // The g executing the signal handler. This is almost always
+ // mp.gsignal. See delayedSignal for an exception.
+ gsignal := getg()
+ mp := gsignal.m
c := &sigctxt{info, ctxt}
- mp := _g_.m
// Cgo TSAN (not the Go race detector) intercepts signals and calls the
// signal handler at a later time. When the signal handler is called, the
@@ -620,7 +622,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
// signal delivery. We use that as an indicator of delayed signals.
// For delayed signals, the handler is called on the g0 stack (see
// adjustSignalStack).
- delayedSignal := *cgo_yield != nil && mp != nil && _g_.stack == mp.g0.stack
+ delayedSignal := *cgo_yield != nil && mp != nil && gsignal.stack == mp.g0.stack
if sig == _SIGPROF {
// Some platforms (Linux) have per-thread timers, which we use in
@@ -710,8 +712,8 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
return
}
- _g_.m.throwing = throwTypeRuntime
- _g_.m.caughtsig.set(gp)
+ mp.throwing = throwTypeRuntime
+ mp.caughtsig.set(gp)
if crashing == 0 {
startpanic_m()
@@ -723,12 +725,12 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
print("Signal ", sig, "\n")
}
- print("PC=", hex(c.sigpc()), " m=", _g_.m.id, " sigcode=", c.sigcode(), "\n")
- if _g_.m.incgo && gp == _g_.m.g0 && _g_.m.curg != nil {
+ print("PC=", hex(c.sigpc()), " m=", mp.id, " sigcode=", c.sigcode(), "\n")
+ if mp.incgo && gp == mp.g0 && mp.curg != nil {
print("signal arrived during cgo execution\n")
// Switch to curg so that we get a traceback of the Go code
// leading up to the cgocall, which switched from curg to g0.
- gp = _g_.m.curg
+ gp = mp.curg
}
if sig == _SIGILL || sig == _SIGFPE {
// It would be nice to know how long the instruction is.
@@ -760,10 +762,10 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
if level > 0 {
goroutineheader(gp)
tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp)
- if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
+ if crashing > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning {
// tracebackothers on original m skipped this one; trace it now.
- goroutineheader(_g_.m.curg)
- traceback(^uintptr(0), ^uintptr(0), 0, _g_.m.curg)
+ goroutineheader(mp.curg)
+ traceback(^uintptr(0), ^uintptr(0), 0, mp.curg)
} else if crashing == 0 {
tracebackothers(gp)
print("\n")
@@ -814,34 +816,34 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
//
//go:linkname sigpanic
func sigpanic() {
- g := getg()
- if !canpanic(g) {
+ gp := getg()
+ if !canpanic() {
throw("unexpected signal during runtime execution")
}
- switch g.sig {
+ switch gp.sig {
case _SIGBUS:
- if g.sigcode0 == _BUS_ADRERR && g.sigcode1 < 0x1000 {
+ if gp.sigcode0 == _BUS_ADRERR && gp.sigcode1 < 0x1000 {
panicmem()
}
// Support runtime/debug.SetPanicOnFault.
- if g.paniconfault {
- panicmemAddr(g.sigcode1)
+ if gp.paniconfault {
+ panicmemAddr(gp.sigcode1)
}
- print("unexpected fault address ", hex(g.sigcode1), "\n")
+ print("unexpected fault address ", hex(gp.sigcode1), "\n")
throw("fault")
case _SIGSEGV:
- if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 {
+ if (gp.sigcode0 == 0 || gp.sigcode0 == _SEGV_MAPERR || gp.sigcode0 == _SEGV_ACCERR) && gp.sigcode1 < 0x1000 {
panicmem()
}
// Support runtime/debug.SetPanicOnFault.
- if g.paniconfault {
- panicmemAddr(g.sigcode1)
+ if gp.paniconfault {
+ panicmemAddr(gp.sigcode1)
}
- print("unexpected fault address ", hex(g.sigcode1), "\n")
+ print("unexpected fault address ", hex(gp.sigcode1), "\n")
throw("fault")
case _SIGFPE:
- switch g.sigcode0 {
+ switch gp.sigcode0 {
case _FPE_INTDIV:
panicdivide()
case _FPE_INTOVF:
@@ -850,11 +852,11 @@ func sigpanic() {
panicfloat()
}
- if g.sig >= uint32(len(sigtable)) {
- // can't happen: we looked up g.sig in sigtable to decide to call sigpanic
+ if gp.sig >= uint32(len(sigtable)) {
+ // can't happen: we looked up gp.sig in sigtable to decide to call sigpanic
throw("unexpected signal value")
}
- panic(errorString(sigtable[g.sig].name))
+ panic(errorString(sigtable[gp.sig].name))
}
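
sigpanic is where a hardware fault (SIGSEGV, SIGBUS, SIGFPE) on a user goroutine is converted into an ordinary Go panic carrying a runtime.Error, which is why code like the following can recover from a nil-pointer dereference:

package main

import (
	"fmt"
	"runtime"
)

type node struct{ next *node }

func main() {
	defer func() {
		if r := recover(); r != nil {
			err, ok := r.(runtime.Error) // the value sigpanic panics with
			fmt.Println("recovered:", err, ok)
		}
	}()
	var n *node
	_ = n.next.next // nil dereference -> SIGSEGV -> sigpanic -> panicmem
}
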
// dieFromSignal kills the program with a signal.
@@ -1115,8 +1117,8 @@ func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
// (1) we weren't in VDSO page,
// (2) we were in a goroutine (i.e., m.curg != nil), and
// (3) we weren't in CGO.
- g := sigFetchG(c)
- if g != nil && g.m != nil && g.m.curg != nil && !g.m.incgo {
+ gp := sigFetchG(c)
+ if gp != nil && gp.m != nil && gp.m.curg != nil && !gp.m.incgo {
return false
}
@@ -1207,15 +1209,15 @@ func minitSignals() {
// of whether it is already set). Record which choice was made in
// newSigstack, so that it can be undone in unminit.
func minitSignalStack() {
- _g_ := getg()
+ mp := getg().m
var st stackt
sigaltstack(nil, &st)
if st.ss_flags&_SS_DISABLE != 0 || !iscgo {
- signalstack(&_g_.m.gsignal.stack)
- _g_.m.newSigstack = true
+ signalstack(&mp.gsignal.stack)
+ mp.newSigstack = true
} else {
- setGsignalStack(&st, &_g_.m.goSigStack)
- _g_.m.newSigstack = false
+ setGsignalStack(&st, &mp.goSigStack)
+ mp.newSigstack = false
}
}
@@ -1297,18 +1299,18 @@ type gsignalStack struct {
//go:nosplit
//go:nowritebarrierrec
func setGsignalStack(st *stackt, old *gsignalStack) {
- g := getg()
+ gp := getg()
if old != nil {
- old.stack = g.m.gsignal.stack
- old.stackguard0 = g.m.gsignal.stackguard0
- old.stackguard1 = g.m.gsignal.stackguard1
- old.stktopsp = g.m.gsignal.stktopsp
+ old.stack = gp.m.gsignal.stack
+ old.stackguard0 = gp.m.gsignal.stackguard0
+ old.stackguard1 = gp.m.gsignal.stackguard1
+ old.stktopsp = gp.m.gsignal.stktopsp
}
stsp := uintptr(unsafe.Pointer(st.ss_sp))
- g.m.gsignal.stack.lo = stsp
- g.m.gsignal.stack.hi = stsp + st.ss_size
- g.m.gsignal.stackguard0 = stsp + _StackGuard
- g.m.gsignal.stackguard1 = stsp + _StackGuard
+ gp.m.gsignal.stack.lo = stsp
+ gp.m.gsignal.stack.hi = stsp + st.ss_size
+ gp.m.gsignal.stackguard0 = stsp + _StackGuard
+ gp.m.gsignal.stackguard1 = stsp + _StackGuard
}
// restoreGsignalStack restores the gsignal stack to the value it had
@@ -1340,9 +1342,9 @@ func signalstack(s *stack) {
//go:nosplit
//go:linkname setsigsegv
func setsigsegv(pc uintptr) {
- g := getg()
- g.sig = _SIGSEGV
- g.sigpc = pc
- g.sigcode0 = _SEGV_MAPERR
- g.sigcode1 = 0 // TODO: emulate si_addr
+ gp := getg()
+ gp.sig = _SIGSEGV
+ gp.sigpc = pc
+ gp.sigcode0 = _SEGV_MAPERR
+ gp.sigcode1 = 0 // TODO: emulate si_addr
}
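
The handlers above run on gsignal stacks inside the runtime; ordinary programs observe signals through os/signal instead:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	fmt.Println("waiting for SIGINT/SIGTERM ...")
	sig := <-ch
	fmt.Println("got", sig, "- shutting down")
}
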
diff --git a/src/runtime/signal_windows.go b/src/runtime/signal_windows.go
index c5cf38c5c2..4a0287dcfd 100644
--- a/src/runtime/signal_windows.go
+++ b/src/runtime/signal_windows.go
@@ -199,9 +199,10 @@ func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32 {
return 0 // not reached
}
+// Always called on g0. gp is the G where the exception occurred.
//go:nosplit
func winthrow(info *exceptionrecord, r *context, gp *g) {
- _g_ := getg()
+ g0 := getg()
if panicking != 0 { // traceback already printed
exit(2)
@@ -211,23 +212,23 @@ func winthrow(info *exceptionrecord, r *context, gp *g) {
// In case we're handling a g0 stack overflow, blow away the
// g0 stack bounds so we have room to print the traceback. If
// this somehow overflows the stack, the OS will trap it.
- _g_.stack.lo = 0
- _g_.stackguard0 = _g_.stack.lo + _StackGuard
- _g_.stackguard1 = _g_.stackguard0
+ g0.stack.lo = 0
+ g0.stackguard0 = g0.stack.lo + _StackGuard
+ g0.stackguard1 = g0.stackguard0
print("Exception ", hex(info.exceptioncode), " ", hex(info.exceptioninformation[0]), " ", hex(info.exceptioninformation[1]), " ", hex(r.ip()), "\n")
print("PC=", hex(r.ip()), "\n")
- if _g_.m.incgo && gp == _g_.m.g0 && _g_.m.curg != nil {
+ if g0.m.incgo && gp == g0.m.g0 && g0.m.curg != nil {
if iscgo {
print("signal arrived during external code execution\n")
}
- gp = _g_.m.curg
+ gp = g0.m.curg
}
print("\n")
- _g_.m.throwing = throwTypeRuntime
- _g_.m.caughtsig.set(gp)
+ g0.m.throwing = throwTypeRuntime
+ g0.m.caughtsig.set(gp)
level, _, docrash := gotraceback()
if level > 0 {
@@ -244,20 +245,20 @@ func winthrow(info *exceptionrecord, r *context, gp *g) {
}
func sigpanic() {
- g := getg()
- if !canpanic(g) {
+ gp := getg()
+ if !canpanic() {
throw("unexpected signal during runtime execution")
}
- switch g.sig {
+ switch gp.sig {
case _EXCEPTION_ACCESS_VIOLATION:
- if g.sigcode1 < 0x1000 {
+ if gp.sigcode1 < 0x1000 {
panicmem()
}
- if g.paniconfault {
- panicmemAddr(g.sigcode1)
+ if gp.paniconfault {
+ panicmemAddr(gp.sigcode1)
}
- print("unexpected fault address ", hex(g.sigcode1), "\n")
+ print("unexpected fault address ", hex(gp.sigcode1), "\n")
throw("fault")
case _EXCEPTION_INT_DIVIDE_BY_ZERO:
panicdivide()
diff --git a/src/runtime/testdata/testprog/vdso.go b/src/runtime/testdata/testprog/vdso.go
index d2a300d8f2..b18bc74a06 100644
--- a/src/runtime/testdata/testprog/vdso.go
+++ b/src/runtime/testdata/testprog/vdso.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Invoke signal hander in the VDSO context (see issue 32912).
+// Invoke signal handler in the VDSO context (see issue 32912).
package main
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 10436d80c2..9b12b42f11 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -232,14 +232,12 @@ func StartTrace() error {
// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
// To instruct traceEvent that it must not ignore events below, we set startingtrace.
// trace.enabled is set afterwards once we have emitted all preliminary events.
- _g_ := getg()
- _g_.m.startingtrace = true
+ mp := getg().m
+ mp.startingtrace = true
// Obtain current stack ID to use in all traceEvGoCreate events below.
- mp := acquirem()
stkBuf := make([]uintptr, traceStackSize)
stackID := traceStackID(mp, stkBuf, 2)
- releasem(mp)
profBuf := newProfBuf(2, profBufWordCount, profBufTagCount) // after the timestamp, header is [pp.id, gp.goid]
trace.cpuLogRead = profBuf
@@ -293,7 +291,7 @@ func StartTrace() error {
trace.strings = make(map[string]uint64)
trace.seqGC = 0
- _g_.m.startingtrace = false
+ mp.startingtrace = false
trace.enabled = true
// Register runtime goroutine labels.
@@ -782,19 +780,18 @@ func traceReadCPU() {
}
func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
- _g_ := getg()
- gp := mp.curg
+ gp := getg()
+ curgp := mp.curg
var nstk int
- if gp == _g_ {
+ if curgp == gp {
nstk = callers(skip+1, buf)
- } else if gp != nil {
- gp = mp.curg
- nstk = gcallers(gp, skip, buf)
+ } else if curgp != nil {
+ nstk = gcallers(curgp, skip, buf)
}
if nstk > 0 {
nstk-- // skip runtime.goexit
}
- if nstk > 0 && gp.goid == 1 {
+ if nstk > 0 && curgp.goid == 1 {
nstk-- // skip runtime.main
}
id := trace.stackTab.put(buf[:nstk])
@@ -1208,11 +1205,11 @@ func traceGCSTWDone() {
func traceGCSweepStart() {
// Delay the actual GCSweepStart event until the first span
// sweep. If we don't sweep anything, don't emit any events.
- _p_ := getg().m.p.ptr()
- if _p_.traceSweep {
+ pp := getg().m.p.ptr()
+ if pp.traceSweep {
throw("double traceGCSweepStart")
}
- _p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
+ pp.traceSweep, pp.traceSwept, pp.traceReclaimed = true, 0, 0
}
// traceGCSweepSpan traces the sweep of a single page.
@@ -1220,24 +1217,24 @@ func traceGCSweepStart() {
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
- _p_ := getg().m.p.ptr()
- if _p_.traceSweep {
- if _p_.traceSwept == 0 {
+ pp := getg().m.p.ptr()
+ if pp.traceSweep {
+ if pp.traceSwept == 0 {
traceEvent(traceEvGCSweepStart, 1)
}
- _p_.traceSwept += bytesSwept
+ pp.traceSwept += bytesSwept
}
}
func traceGCSweepDone() {
- _p_ := getg().m.p.ptr()
- if !_p_.traceSweep {
+ pp := getg().m.p.ptr()
+ if !pp.traceSweep {
throw("missing traceGCSweepStart")
}
- if _p_.traceSwept != 0 {
- traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
+ if pp.traceSwept != 0 {
+ traceEvent(traceEvGCSweepDone, -1, uint64(pp.traceSwept), uint64(pp.traceReclaimed))
}
- _p_.traceSweep = false
+ pp.traceSweep = false
}
func traceGCMarkAssistStart() {
@@ -1257,16 +1254,16 @@ func traceGoCreate(newg *g, pc uintptr) {
}
func traceGoStart() {
- _g_ := getg().m.curg
- _p_ := _g_.m.p
- _g_.traceseq++
- if _p_.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
- traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
- } else if _g_.tracelastp == _p_ {
- traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
+ gp := getg().m.curg
+ pp := gp.m.p
+ gp.traceseq++
+ if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
+ traceEvent(traceEvGoStartLabel, -1, uint64(gp.goid), gp.traceseq, trace.markWorkerLabels[pp.ptr().gcMarkWorkerMode])
+ } else if gp.tracelastp == pp {
+ traceEvent(traceEvGoStartLocal, -1, uint64(gp.goid))
} else {
- _g_.tracelastp = _p_
- traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
+ gp.tracelastp = pp
+ traceEvent(traceEvGoStart, -1, uint64(gp.goid), gp.traceseq)
}
}
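
traceGoStart and the other traceGo* functions emit the events consumed by the execution tracer. From user code the tracer is driven via runtime/trace and inspected with go tool trace:

package main

import (
	"log"
	"os"
	"runtime/trace"
	"sync"
)

func work() {
	s := 0
	for i := 0; i < 1_000_000; i++ {
		s += i
	}
	_ = s
}

func main() {
	f, err := os.Create("trace.out")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := trace.Start(f); err != nil {
		log.Fatal(err)
	}
	defer trace.Stop()

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); work() }()
	}
	wg.Wait() // then: go tool trace trace.out
}
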
@@ -1275,14 +1272,14 @@ func traceGoEnd() {
}
func traceGoSched() {
- _g_ := getg()
- _g_.tracelastp = _g_.m.p
+ gp := getg()
+ gp.tracelastp = gp.m.p
traceEvent(traceEvGoSched, 1)
}
func traceGoPreempt() {
- _g_ := getg()
- _g_.tracelastp = _g_.m.p
+ gp := getg()
+ gp.tracelastp = gp.m.p
traceEvent(traceEvGoPreempt, 1)
}
@@ -1294,12 +1291,12 @@ func traceGoPark(traceEv byte, skip int) {
}
func traceGoUnpark(gp *g, skip int) {
- _p_ := getg().m.p
+ pp := getg().m.p
gp.traceseq++
- if gp.tracelastp == _p_ {
+ if gp.tracelastp == pp {
traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
} else {
- gp.tracelastp = _p_
+ gp.tracelastp = pp
traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
}
}
@@ -1321,10 +1318,10 @@ func traceGoSysExit(ts int64) {
// aka right now), and assign a fresh time stamp to keep the log consistent.
ts = 0
}
- _g_ := getg().m.curg
- _g_.traceseq++
- _g_.tracelastp = _g_.m.p
- traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
+ gp := getg().m.curg
+ gp.traceseq++
+ gp.tracelastp = gp.m.p
+ traceEvent(traceEvGoSysExit, -1, uint64(gp.goid), gp.traceseq, uint64(ts)/traceTickDiv)
}
func traceGoSysBlock(pp *p) {
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 49147ff838..6df0bbfabe 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -923,8 +923,8 @@ func gcallers(gp *g, skip int, pcbuf []uintptr) int {
// showframe reports whether the frame with the given characteristics should
// be printed during a traceback.
func showframe(f funcInfo, gp *g, firstFrame bool, funcID, childID funcID) bool {
- g := getg()
- if g.m.throwing >= throwTypeRuntime && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
+ mp := getg().m
+ if mp.throwing >= throwTypeRuntime && gp != nil && (gp == mp.curg || gp == mp.caughtsig.ptr()) {
return true
}
return showfuncinfo(f, firstFrame, funcID, childID)
@@ -1051,10 +1051,10 @@ func tracebackothers(me *g) {
}
print("\n")
goroutineheader(gp)
- // Note: gp.m == g.m occurs when tracebackothers is
- // called from a signal handler initiated during a
- // systemstack call. The original G is still in the
- // running state, and we want to print its stack.
+ // Note: gp.m == getg().m occurs when tracebackothers is called
+ // from a signal handler initiated during a systemstack call.
+ // The original G is still in the running state, and we want to
+ // print its stack.
if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning {
print("\tgoroutine running on other thread; stack unavailable\n")
printcreatedby(gp)
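
tracebackothers is the crash-time analogue of asking every goroutine for its stack; the supported way to do that programmatically is runtime.Stack with all=true (or pprof.Lookup("goroutine")):

package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	go func() { time.Sleep(time.Minute) }() // a second goroutine so the dump has more than main

	buf := make([]byte, 1<<16)
	n := runtime.Stack(buf, true) // true: include all goroutines, like tracebackothers
	fmt.Printf("%s\n", buf[:n])
}
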