	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0
	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	// Set the maximum stack size limit.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}
	// Allow newproc to start new Ms.
	mainStarted = true
	// wasm has no OS threads yet, so sysmon is not started there.
	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
		// Called on the system stack.
		systemstack(func() {
			// Start a dedicated M whose start function is sysmon; it runs without a P.
			newm(sysmon, nil)
		})
	}
	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()
	if g.m != &m0 {
		throw("runtime.main not on m0")
	}
	doInit(&runtime_inittask) // must be before defer
	if nanotime() == 0 {
		throw("nanotime returning zero")
	}
	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()
	// Record when the world started.
	runtimeInitTime = nanotime()
	// Enable the garbage collector.
	gcenable()
	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}
	doInit(&main_inittask)
	close(main_init_done)
	needUnlock = false
	unlockOSThread()
	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	// Call the user's main function, main.main.
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		racefini()
	}
	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	// Give deferred functions run by a panicking goroutine a chance to finish.
	if atomic.Load(&runningPanicDefers) != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if atomic.Load(&runningPanicDefers) == 0 {
				break
			}
			Gosched()
		}
	}
	// If another goroutine is still panicking, park forever so it can
	// print its trace and exit on its own.
	if atomic.Load(&panicking) != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
	}
	// Normal exit.
	exit(0)
	for {
		var x *int32
		*x = 0
	}
}
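// Side note (illustrative, not part of the runtime): because runtime.main
// calls exit(0) as soon as main.main returns, other goroutines are cut off
// rather than waited for. A sketch of a user program showing that behavior:
//
//	package main
//
//	import "time"
//
//	func main() {
//		go func() {
//			time.Sleep(time.Second)
//			println("usually never printed")
//		}()
//		// main.main returns here; runtime.main then calls exit(0),
//		// so the sleeping goroutine normally never gets to print.
//	}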
// systemstack runs fn on a system stack.
// If systemstack is called from the per-OS-thread (g0) stack, or
// if systemstack is called from the signal handling (gsignal) stack,
// systemstack calls fn directly and returns.
// In other words, on the g0 stack or the signal-handling stack,
// fn is simply called in place and systemstack returns.
// Otherwise, systemstack is being called from the limited stack
// of an ordinary goroutine. In this case, systemstack switches
// to the per-OS-thread stack, calls fn, and switches back.
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to system stack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
//go:noescape
func systemstack(fn func())
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrierrec
// newm creates a new M. When called from runtime.main above, fn is sysmon.
func newm(fn func(), _p_ *p) {
	// Allocate and set up the new m.
	mp := allocm(_p_, fn)
	mp.nextp.set(_p_)
	mp.sigmask = initSigmask
	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
		// We're on a locked M or a thread that may have been
		// started by C. The kernel state of this thread may
		// be strange (the user may have locked it for that
		// purpose). We don't want to clone that into another
		// thread. Instead, ask a known-good thread to create
		// the thread for us.
		//
		// This is disabled on Plan 9. See golang.org/issue/22227.
		//
		// TODO: This may be unnecessary on Windows, which
		// doesn't model thread creation off fork.
		lock(&newmHandoff.lock)
		if newmHandoff.haveTemplateThread == 0 {
			throw("on a locked thread with no template thread")
		}
		mp.schedlink = newmHandoff.newm
		newmHandoff.newm.set(mp)
		if newmHandoff.waiting {
			newmHandoff.waiting = false
			notewakeup(&newmHandoff.wake)
		}
		unlock(&newmHandoff.lock)
		return
	}
	newm1(mp)
}
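// For context, the template thread that drains newmHandoff runs a loop
// roughly like the sketch below (simplified from templateThread; details may
// differ by Go version). It pops queued m's and calls newm1 from a
// known-good thread:
//
//	for {
//		lock(&newmHandoff.lock)
//		for newmHandoff.newm != 0 {
//			newm := newmHandoff.newm.ptr()
//			newmHandoff.newm = 0
//			unlock(&newmHandoff.lock)
//			for newm != nil {
//				next := newm.schedlink.ptr()
//				newm.schedlink = 0
//				newm1(newm) // actually create the OS thread
//				newm = next
//			}
//			lock(&newmHandoff.lock)
//		}
//		newmHandoff.waiting = true
//		noteclear(&newmHandoff.wake)
//		unlock(&newmHandoff.lock)
//		notesleep(&newmHandoff.wake)
//	}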
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
func sysmon() {
	lock(&sched.lock)
	sched.nmsys++
	// Check for deadlock, taking the running Ms into account.
	checkdead()
	unlock(&sched.lock)
	lasttrace := int64(0)
	idle := 0 // how many cycles in succession we had not woken up somebody
	delay := uint32(0)
	for {
		if idle == 0 { // start with 20us sleep...
			delay = 20
		} else if idle > 50 { // start doubling the sleep after 1ms...
			delay *= 2
		}
		if delay > 10*1000 { // up to 10ms
			delay = 10 * 1000
		}
		// Sleep for the computed delay.
		usleep(delay)
		// Current time, and when the earliest timer should fire.
		now := nanotime()
		next, _ := timeSleepUntil()
		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
			lock(&sched.lock)
			if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
				if next > now {
					atomic.Store(&sched.sysmonwait, 1)
					unlock(&sched.lock)
					// Make wake-up period small enough
					// for the sampling to be correct.
					sleep := forcegcperiod / 2
					if next-now < sleep {
						sleep = next - now
					}
					shouldRelax := sleep >= osRelaxMinNS
					if shouldRelax {
						osRelax(true)
					}
					notetsleep(&sched.sysmonnote, sleep)
					if shouldRelax {
						osRelax(false)
					}
					now = nanotime()
					next, _ = timeSleepUntil()
					lock(&sched.lock)
					atomic.Store(&sched.sysmonwait, 0)
					noteclear(&sched.sysmonnote)
				}
				idle = 0
				delay = 20
			}
			unlock(&sched.lock)
		}
		lock(&sched.sysmonlock)
		{
			// If we spent a long time blocked on sysmonlock
			// then we want to update now and next since it's
			// likely stale.
			now1 := nanotime()
			if now1-now > 50*1000 /* 50µs */ {
				next, _ = timeSleepUntil()
			}
			now = now1
		}
		// trigger libc interceptors if needed
		if *cgo_yield != nil {
			asmcgocall(*cgo_yield, nil)
		}
		// Poll the network if it has not been polled for more than 10ms;
		// any goroutines made runnable are injected into the global run queue.
		lastpoll := int64(atomic.Load64(&sched.lastpoll))
		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
			list := netpoll(0) // non-blocking - returns list of goroutines
			if !list.empty() {
				// Need to decrement number of idle locked M's
				// (pretending that one more is running) before injectglist.
				// Otherwise it can lead to the following situation:
				// injectglist grabs all P's but before it starts M's to run the P's,
				// another M returns from syscall, finishes running its G,
				// observes that there is no work to do and no other running M's
				// and reports deadlock.
				incidlelocked(-1)
				// Inject the ready goroutines into the global run queue.
				injectglist(&list)
				incidlelocked(1)
			}
		}
		if next < now {
			// There are timers that should have already run,
			// perhaps because there is an unpreemptible P.
			// Try to start an M to run them.
			startm(nil, false)
		}
		if atomic.Load(&scavenge.sysmonWake) != 0 {
			// Kick the scavenger awake if someone requested it.
			wakeScavenger()
		}
		// Retake P's blocked in syscalls and preempt long running G's:
		// retake loops over allp and reclaims or preempts as needed.
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}
		// Check if we need to force a GC.
		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
			lock(&forcegc.lock)
			forcegc.idle = 0
			var list gList
			list.push(forcegc.g)
			injectglist(&list)
			unlock(&forcegc.lock)
		}
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
		unlock(&sched.sysmonlock)
	}
}
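// A minimal sketch of the backoff schedule used at the top of the loop
// (hypothetical helper, not in the runtime): sysmon sleeps 20µs while it
// keeps finding work, and after more than 50 idle cycles the delay doubles
// each cycle, capped at 10ms.
//
//	func nextDelay(prev uint32, idle int) uint32 {
//		if idle == 0 {
//			return 20 // µs
//		}
//		if idle <= 50 {
//			return prev // stay at 20µs for the first ~1ms of idling
//		}
//		if d := prev * 2; d <= 10*1000 {
//			return d
//		}
//		return 10 * 1000 // cap at 10ms
//	}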
// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine, the background
// scavenger goroutine, and enables GC.
func gcenable() {
	// Kick off sweeping and scavenging.
	c := make(chan int, 2)
	go bgsweep(c)
	go bgscavenge(c)
	<-c
	<-c
	memstats.enablegc = true // now that runtime is initialized, GC is okay
}
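// The buffered channel above is a simple readiness handshake: bgsweep and
// bgscavenge each send one value once they have set themselves up, and
// gcenable blocks until it has received both. A standalone sketch of the
// same pattern (hypothetical setup comments, not runtime code):
//
//	ready := make(chan int, 2)
//	go func() { /* set up the sweeper */ ready <- 1 }()
//	go func() { /* set up the scavenger */ ready <- 1 }()
//	<-ready
//	<-ready
//	// both background workers are initialized; now it is safe to enable GC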
// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
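// A small usage sketch: calling runtime.Gosched in a busy-wait loop yields
// the processor to other runnable goroutines without parking the caller
// (workAvailable is a hypothetical check, not a real API):
//
//	for !workAvailable() {
//		runtime.Gosched() // yield; this goroutine remains runnable
//	}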
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
// Reason explains why the goroutine has been parked.
// It is displayed in stack traces and heap dumps.
// Reasons should be unique and descriptive.
// Do not re-use reasons, add new ones.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	// Release the m (undo acquirem).
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}
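// For context, gopark is always paired with a later goready on the same G by
// whoever makes it runnable again. Channel receive, for example, parks
// roughly like this (simplified from runtime/chan.go; exact arguments vary
// by Go version):
//
//	gopark(chanparkcommit, unsafe.Pointer(&c.lock), waitReasonChanReceive, traceEvGoBlockRecv, 2)
//	// ... later, a sender finds this G on the channel's wait queue and calls:
//	goready(gp, skip+1)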