forked from tiancaiamao/go-internals
-
Notifications
You must be signed in to change notification settings - Fork 21
/
proc.c
1910 lines (1678 loc) · 51.8 KB
/
proc.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "arch_GOARCH.h"
#include "defs_GOOS_GOARCH.h"
#include "malloc.h"
#include "os_GOOS.h"
#include "stack.h"
// True when the program is linked against cgo (C code via libcgo).
bool runtime.iscgo;

static void unwindstack(G*, byte*);
static void schedule(G*);

typedef struct Sched Sched;

M runtime.m0;			// the bootstrap m (initial OS thread)
G runtime.g0;	// idle goroutine for m0
static int32 debug = 0;
// Nonzero while stoptheworld is waiting for running m's to stop.
int32 runtime.gcwaiting;
// Go scheduler
//
// The go scheduler's job is to match ready-to-run goroutines (`g's)
// with waiting-for-work schedulers (`m's). If there are ready g's
// and no waiting m's, ready() will start a new m running in a new
// OS thread, so that all ready g's can run simultaneously, up to a limit.
// For now, m's never go away.
//
// By default, Go keeps only one kernel thread (m) running user code
// at a single time; other threads may be blocked in the operating system.
// Setting the environment variable $GOMAXPROCS or calling
// runtime.GOMAXPROCS() will change the number of user threads
// allowed to execute simultaneously. $GOMAXPROCS is thus an
// approximation of the maximum number of cores to use.
//
// Even a program that can run without deadlock in a single process
// might use more m's if given the chance. For example, the prime
// sieve will use as many m's as there are primes (up to runtime.sched.mmax),
// allowing different stages of the pipeline to execute in parallel.
// We could revisit this choice, only kicking off new m's for blocking
// system calls, but that would limit the amount of parallel computation
// that go would try to do.
//
// In general, one could imagine all sorts of refinements to the
// scheduler, but the goal now is just to get something working on
// Linux and OS X.
// Global scheduler state.  All fields except `atomic' are protected
// by the embedded Lock; `atomic' is manipulated with atomic ops
// (see the commentary on the packed fields below).
struct Sched {
	Lock;			// embedded scheduler lock

	G *gfree;	// available g's (status == Gdead)
	int32 goidgen;	// goroutine id generator

	G *ghead;	// g's waiting to run
	G *gtail;
	int32 gwait;	// number of g's waiting to run
	int32 gcount;	// number of g's that are alive
	int32 grunning;	// number of g's running on cpu or in syscall

	M *mhead;	// m's waiting for work
	int32 mwait;	// number of m's waiting for work
	int32 mcount;	// number of m's that have been created

	volatile uint32 atomic;	// atomic scheduling word; note the volatile

	int32 profilehz;	// cpu profiling rate

	bool init;	// running initialization
	bool lockmain;	// init called runtime.LockOSThread

	Note stopped;	// one g can set waitstop and wait here for m's to stop
};
// The atomic word in sched is an atomic uint32 that
// holds these fields.
// sched中的原子字段是一个原子的uint32,存放下列域
// 15位 mcpu --正在占用cpu运行的m数量 (进入syscall的m是不占用cpu的)
// 15位 mcpumax --最大允许这么多个m同时使用cpu
// 1位 waitstop --有g等待结束
// 1位 gwaiting --等待队列不为空,有g处于waiting状态
// [15 bits] mcpu number of m's executing on cpu
// [15 bits] mcpumax max number of m's allowed on cpu
// [1 bit] waitstop some g is waiting on stopped
// [1 bit] gwaiting gwait != 0
//
// 这些信息是进行系统调用和出系统调用时需要用到的,它会决定是否需要进入到调度器层面。
// 将它们打包成一个字节使得可以通过一次原子读写获取它们而不用加锁。
// 这将极大的减少那些大量使用系统调用或者cgo的多线程程序的contention
// These fields are the information needed by entersyscall
// and exitsyscall to decide whether to coordinate with the
// scheduler. Packing them into a single machine word lets
// them use a fast path with a single atomic read/write and
// no lock/unlock. This greatly reduces contention in
// syscall- or cgo-heavy multithreaded programs.
//
// 除了进出系统调用以外,操作这些域只会发生于持有调度器锁的时候,因此
// goroutines不用担心其它goroutine会对这些字段进行操作。
// Except for entersyscall and exitsyscall, the manipulations
// to these fields only happen while holding the schedlock,
// so the routines holding schedlock only need to worry about
// what entersyscall and exitsyscall do, not the other routines
// (which also use the schedlock).
//
// 特别是,进出系统调用只会读mcpumax,waitstop和gwaiting。决不会写他们。
// 因此,(持有调度器锁)写这些域时完全不用担心会发生写冲突。
// In particular, entersyscall and exitsyscall only read mcpumax,
// waitstop, and gwaiting. They never write them. Thus, writes to those
// fields can be done (holding schedlock) without fear of write conflicts.
// There may still be logic conflicts: for example, the set of waitstop must
// be conditioned on mcpu >= mcpumax or else the wait may be a
// spurious sleep. The Promela model in proc.p verifies these accesses.
// Layout of the bit fields packed into sched.atomic:
//	[15 bits] mcpu     number of m's executing on cpu
//	[15 bits] mcpumax  max number of m's allowed on cpu
//	[1 bit]   waitstop some g is waiting on sched.stopped
//	[1 bit]   gwaiting gwait != 0
enum {
	mcpuWidth = 15,
	mcpuMask = (1<<mcpuWidth) - 1,
	mcpuShift = 0,
	mcpumaxShift = mcpuShift + mcpuWidth,
	waitstopShift = mcpumaxShift + mcpuWidth,
	gwaitingShift = waitstopShift+1,

	// The max value of GOMAXPROCS is constrained
	// by the max value we can store in the bit fields
	// of the atomic word.  Reserve a few high values
	// so that we can detect accidental decrement
	// beyond zero.
	maxgomaxprocs = mcpuMask - 10,
};

// Accessors for the fields of an atomic-word snapshot v.
#define atomic_mcpu(v)		(((v)>>mcpuShift)&mcpuMask)
#define atomic_mcpumax(v)	(((v)>>mcpumaxShift)&mcpuMask)
#define atomic_waitstop(v)	(((v)>>waitstopShift)&1)
#define atomic_gwaiting(v)	(((v)>>gwaitingShift)&1)
Sched runtime.sched;		// the one global scheduler instance
int32 runtime.gomaxprocs;	// effective $GOMAXPROCS / runtime.GOMAXPROCS() value
bool runtime.singleproc;	// true when gomaxprocs == 1

static bool canaddmcpu(void);

// An m that is waiting for notewakeup(&m->havenextg).  This may
// only be accessed while the scheduler lock is held.  This is used to
// minimize the number of times we call notewakeup while the scheduler
// lock is held, since the m will normally move quickly to lock the
// scheduler itself, producing lock contention.
// (The wakeup is deferred until schedunlock releases the lock,
// precisely to avoid that contention.)
static M* mwakeup;

// Scheduling helpers.  Sched must be locked.
static void gput(G*);	// put/get on ghead/gtail
static G* gget(void);
static void mput(M*);	// put/get on mhead
static M* mget(G*);
static void gfput(G*);	// put/get on gfree
static G* gfget(void);
static void matchmg(void);	// match m's to g's
static void readylocked(G*);	// ready, but sched is locked
static void mnextg(M*, G*);
static void mcommoninit(M*);
// Set the mcpumax field of the sched.atomic word to n,
// using a CAS retry loop so no lock is needed.
void
setmcpumax(uint32 n)
{
	uint32 v, w;

	for(;;) {
		v = runtime.sched.atomic;
		w = v;
		w &= ~(mcpuMask<<mcpumaxShift);	// clear the old mcpumax bits
		w |= n<<mcpumaxShift;		// install the new value
		if(runtime.cas(&runtime.sched.atomic, v, w))
			break;	// no concurrent writer raced us; done
	}
}
// Keep trace of scavenger's goroutine for deadlock detection.
static G *scvg;

// Bootstrap order is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime.mstart
//
// The new G created above calls runtime.main when it runs.
void
runtime.schedinit(void)
{
	int32 n;
	byte *p;

	m->nomemprof++;
	runtime.mallocinit();
	mcommoninit(m);

	runtime.goargs();
	runtime.goenvs();

	// For debugging:
	// Allocate internal symbol table representation now,
	// so that we don't need to call malloc when we crash.
	// runtime.findfunc(0);

	runtime.gomaxprocs = 1;
	p = runtime.getenv("GOMAXPROCS");
	if(p != nil && (n = runtime.atoi(p)) != 0) {
		if(n > maxgomaxprocs)
			n = maxgomaxprocs;
		runtime.gomaxprocs = n;
	}
	// Wait for the main goroutine to come up before honoring
	// GOMAXPROCS; runtime.main raises mcpumax to
	// runtime.gomaxprocs later.
	setmcpumax(1);
	runtime.singleproc = runtime.gomaxprocs == 1;

	canaddmcpu();	// mcpu++ to account for the bootstrap m
	m->helpgc = 1;	// flag to tell schedule() to mcpu--
	runtime.sched.grunning++;

	mstats.enablegc = 1;
	m->nomemprof--;
}
extern void main.init(void);
extern void main.main(void);

// The main goroutine.
void
runtime.main(void)
{
	// Lock the main goroutine onto this, the main OS thread,
	// during initialization.  Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	runtime.LockOSThread();
	// From now on, new goroutines may use non-main threads.
	setmcpumax(runtime.gomaxprocs);
	runtime.sched.init = true;
	// Start the heap-scavenger goroutine for the garbage collector.
	scvg = runtime.newproc1((byte*)runtime.MHeap_Scavenger, nil, 0, 0, runtime.main);
	main.init();
	runtime.sched.init = false;
	if(!runtime.sched.lockmain)
		runtime.UnlockOSThread();

	// The deadlock detection has false negatives.
	// Let scvg start up, to eliminate the false negative
	// for the trivial program func main() { select{} }.
	runtime.gosched();

	main.main();
	runtime.exit(0);
	// Not reached; fault loudly if exit somehow returns.
	for(;;)
		*(int32*)runtime.main = 0;
}
// Lock the scheduler.
static void
schedlock(void)
{
	runtime.lock(&runtime.sched);
}

// Unlock the scheduler, then deliver any wakeup that was deferred
// into mwakeup while the lock was held (see mnextg).
static void
schedunlock(void)
{
	M *m;

	m = mwakeup;
	mwakeup = nil;
	runtime.unlock(&runtime.sched);
	// Wake the parked m only after releasing the lock, so it does
	// not immediately contend for the scheduler lock we just held.
	if(m != nil)
		runtime.notewakeup(&m->havenextg);
}
// Finish the current goroutine: mark it moribund and reenter the
// scheduler, which reaps Gmoribund g's (see the Gmoribund case
// in schedule).
void
runtime.goexit(void)
{
	g->status = Gmoribund;
	runtime.gosched();
}
// Print the one-line "goroutine N [status]:" header used by
// tracebacks, mapping g->status to a human-readable string.
void
runtime.goroutineheader(G *g)
{
	int8 *status;

	switch(g->status) {
	case Gidle:
		status = "idle";
		break;
	case Grunnable:
		status = "runnable";
		break;
	case Grunning:
		status = "running";
		break;
	case Gsyscall:
		status = "syscall";
		break;
	case Gwaiting:
		// Prefer the specific wait reason when one was recorded.
		if(g->waitreason)
			status = g->waitreason;
		else
			status = "waiting";
		break;
	case Gmoribund:
		status = "moribund";
		break;
	default:
		status = "???";
		break;
	}
	runtime.printf("goroutine %d [%s]:\n", g->goid, status);
}
// Print tracebacks of every goroutine except `me' and dead ones,
// walking the runtime.allg list.
void
runtime.tracebackothers(G *me)
{
	G *g;

	for(g = runtime.allg; g != nil; g = g->alllink) {
		if(g == me || g->status == Gdead)
			continue;
		runtime.printf("\n");
		runtime.goroutineheader(g);
		// Unwind from the g's saved scheduling context.
		runtime.traceback(g->sched.pc, g->sched.sp, 0, g);
	}
}
// Mark this g as m's idle goroutine.
// This functionality might be used in environments where programs
// are limited to a single thread, to simulate a select-driven
// network server.  It is not exposed via the standard runtime API.
void
runtime.idlegoroutine(void)
{
	if(g->idlem != nil)
		runtime.throw("g is already an idle goroutine");
	g->idlem = m;
}
// Initialization common to every m: assign an id, seed the
// per-m random state, set up the stack allocator and mcache,
// and publish the m on runtime.allm.
static void
mcommoninit(M *m)
{
	m->id = runtime.sched.mcount++;
	m->fastrand = 0x49f6428aUL + m->id + runtime.cputicks();
	m->stackalloc = runtime.malloc(sizeof(*m->stackalloc));
	runtime.FixAlloc_Init(m->stackalloc, FixedStack, runtime.SysAlloc, nil, nil);

	if(m->mcache == nil)
		m->mcache = runtime.allocmcache();

	runtime.callers(1, m->createstack, nelem(m->createstack));

	// Add to runtime.allm so garbage collector doesn't free m
	// when it is just in a register or thread-local storage.
	m->alllink = runtime.allm;
	// runtime.NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	runtime.atomicstorep(&runtime.allm, m);
}
// Try to increment mcpu.  Report whether succeeded.
// Lock-free: loops on CAS against the packed atomic word, failing
// only when mcpu has already reached mcpumax.
static bool
canaddmcpu(void)
{
	uint32 v;

	for(;;) {
		v = runtime.sched.atomic;
		if(atomic_mcpu(v) >= atomic_mcpumax(v))
			return 0;
		if(runtime.cas(&runtime.sched.atomic, v, v+(1<<mcpuShift)))
			return 1;
	}
}
// Put on `g' queue.  Sched must be locked.
static void
gput(G *g)
{
	M *m;

	// If g is wired, hand it off directly.
	if((m = g->lockedm) != nil && canaddmcpu()) {
		mnextg(m, g);
		return;
	}

	// If g is the idle goroutine for an m, hand it off.
	if(g->idlem != nil) {
		if(g->idlem->idleg != nil) {
			runtime.printf("m%d idle out of sync: g%d g%d\n",
				g->idlem->id,
				g->idlem->idleg->goid, g->goid);
			runtime.throw("runtime: double idle");
		}
		g->idlem->idleg = g;
		return;
	}

	// Append to the tail of the global run queue.
	g->schedlink = nil;
	if(runtime.sched.ghead == nil)
		runtime.sched.ghead = g;
	else
		runtime.sched.gtail->schedlink = g;
	runtime.sched.gtail = g;

	// increment gwait.
	// if it transitions to nonzero, set atomic gwaiting bit.
	if(runtime.sched.gwait++ == 0)
		runtime.xadd(&runtime.sched.atomic, 1<<gwaitingShift);
}
// Report whether gget would return something.
static bool
haveg(void)
{
	return runtime.sched.ghead != nil || m->idleg != nil;
}

// Get from `g' queue.  Sched must be locked.
// Takes from the head of the global run queue first, falling back
// to this m's idle goroutine; returns nil if neither exists.
static G*
gget(void)
{
	G *g;

	g = runtime.sched.ghead;
	if(g){
		runtime.sched.ghead = g->schedlink;
		if(runtime.sched.ghead == nil)
			runtime.sched.gtail = nil;
		// decrement gwait.
		// if it transitions to zero, clear atomic gwaiting bit.
		if(--runtime.sched.gwait == 0)
			runtime.xadd(&runtime.sched.atomic, -1<<gwaitingShift);
	} else if(m->idleg != nil) {
		g = m->idleg;
		m->idleg = nil;
	}
	return g;
}
// Put on `m' list.  Sched must be locked.
// Pushes m onto the head of the idle-m stack.
static void
mput(M *m)
{
	m->schedlink = runtime.sched.mhead;
	runtime.sched.mhead = m;
	runtime.sched.mwait++;
}

// Get an `m' to run `g'.  Sched must be locked.
// A g locked to a specific m always gets that m; otherwise pop
// from the idle-m pool (may return nil if the pool is empty).
static M*
mget(G *g)
{
	M *m;

	// if g has its own m, use it.
	if(g && (m = g->lockedm) != nil)
		return m;

	// otherwise use general m pool.
	if((m = runtime.sched.mhead) != nil){
		runtime.sched.mhead = m->schedlink;
		runtime.sched.mwait--;
	}
	return m;
}
// Mark g ready to run.
void
runtime.ready(G *g)
{
	schedlock();
	readylocked(g);
	schedunlock();
}

// Mark g ready to run.  Sched is already locked.
// G might be running already and about to stop.
// The sched lock protects g->status from changing underfoot.
// (As the name says: make g Grunnable and hang it on the run
// queue; the scheduler must already be locked by the caller,
// hence "readylocked".)
static void
readylocked(G *g)
{
	if(g->m){
		// Running on another machine.
		// Ready it when it stops.
		g->readyonstop = 1;
		return;
	}

	// Mark runnable.
	if(g->status == Grunnable || g->status == Grunning) {
		runtime.printf("goroutine %d has status %d\n", g->goid, g->status);
		runtime.throw("bad g->status in ready");
	}
	g->status = Grunnable;

	gput(g);
	matchmg();	// try to pair the newly runnable g with an m
}
// Empty function, called only to defeat inlining (see below).
static void
nop(void)
{
}

// Same as readylocked but a different symbol so that
// debuggers can set a breakpoint here and catch all
// new goroutines.
static void
newprocreadylocked(G *g)
{
	nop();	// avoid inlining in 6l
	readylocked(g);
}
// Pass g to m for running.
// Caller has already incremented mcpu.
static void
mnextg(M *m, G *g)
{
	runtime.sched.grunning++;
	m->nextg = g;
	if(m->waitnextg) {
		m->waitnextg = 0;
		// Only one m can be parked in mwakeup at a time; flush
		// any previously deferred wakeup before taking the slot.
		// The wakeup for m itself is delivered by schedunlock.
		if(mwakeup != nil)
			runtime.notewakeup(&mwakeup->havenextg);
		mwakeup = m;
	}
}
// Get the next goroutine that m should run.
// Sched must be locked on entry, is unlocked on exit.
// Makes sure that at most $GOMAXPROCS g's are
// running on cpus (not in system calls) at any given time.
// (At most GOMAXPROCS g's run simultaneously; g's blocked in system
// calls don't count.  The whole function boils down to producing
// m->nextg, the next goroutine to run.)
static G*
nextgandunlock(void)
{
	G *gp;
	uint32 v;

top:
	if(atomic_mcpu(runtime.sched.atomic) >= maxgomaxprocs)
		runtime.throw("negative mcpu");

	// If there is a g waiting as m->nextg, the mcpu++
	// happened before it was passed to mnextg.
	// (If this m already has a pending g, just take it.)
	if(m->nextg != nil) {
		gp = m->nextg;
		m->nextg = nil;
		schedunlock();
		return gp;
	}

	if(m->lockedg != nil) {
		// We can only run one g, and it's not available.
		// Make sure some other cpu is running to handle
		// the ordinary run queue.
		if(runtime.sched.gwait != 0) {
			matchmg();	// pair queued g's with m's while under the m limit
			// m->lockedg might have been on the queue.
			if(m->nextg != nil) {
				gp = m->nextg;
				m->nextg = nil;
				schedunlock();
				return gp;
			}
		}
	} else {
		// Look for work on global queue.
		while(haveg() && canaddmcpu()) {
			gp = gget();
			if(gp == nil)
				runtime.throw("gget inconsistency");

			// A locked g must run on its own m; hand it off
			// and keep looking for work for this m.
			if(gp->lockedm) {
				mnextg(gp->lockedm, gp);
				continue;
			}
			runtime.sched.grunning++;
			schedunlock();
			return gp;
		}

		// The while loop ended either because the g queue is empty
		// or because we have maxed out our m procs running go
		// code (mcpu >= mcpumax).  We need to check that
		// concurrent actions by entersyscall/exitsyscall cannot
		// invalidate the decision to end the loop.
		//
		// We hold the sched lock, so no one else is manipulating the
		// g queue or changing mcpumax.  Entersyscall can decrement
		// mcpu, but if does so when there is something on the g queue,
		// the gwait bit will be set, so entersyscall will take the slow path
		// and use the sched lock.  So it cannot invalidate our decision.
		//
		// Wait on global m queue.
		// (No runnable g, or mcpu is maxed out: park this m on the
		// idle-m queue.  Later allocation of m's — e.g. in matchmg —
		// takes from this queue first and only creates a new m when
		// the queue is empty.)
		mput(m);
	}

	// Look for deadlock situation.
	// There is a race with the scavenger that causes false negatives:
	// if the scavenger is just starting, then we have
	//	scvg != nil && grunning == 0 && gwait == 0
	// and we do not detect a deadlock.  It is possible that we should
	// add that case to the if statement here, but it is too close to Go 1
	// to make such a subtle change.  Instead, we work around the
	// false negative in trivial programs by calling runtime.gosched
	// from the main goroutine just before main.main.
	// See runtime.main above.
	//
	// On a related note, it is also possible that the scvg == nil case is
	// wrong and should include gwait, but that does not happen in
	// standard Go programs, which all start the scavenger.
	//
	if((scvg == nil && runtime.sched.grunning == 0) ||
	   (scvg != nil && runtime.sched.grunning == 1 && runtime.sched.gwait == 0 &&
	    (scvg->status == Grunning || scvg->status == Gsyscall))) {
		runtime.throw("all goroutines are asleep - deadlock!");
	}

	m->nextg = nil;
	m->waitnextg = 1;
	runtime.noteclear(&m->havenextg);

	// Stoptheworld is waiting for all but its cpu to go to stop.
	// Entersyscall might have decremented mcpu too, but if so
	// it will see the waitstop and take the slow path.
	// Exitsyscall never increments mcpu beyond mcpumax.
	v = runtime.atomicload(&runtime.sched.atomic);
	if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
		// set waitstop = 0 (known to be 1)
		runtime.xadd(&runtime.sched.atomic, -1<<waitstopShift);
		runtime.notewakeup(&runtime.sched.stopped);
	}
	schedunlock();

	// Block until mnextg hands this m a goroutine.
	runtime.notesleep(&m->havenextg);
	if(m->helpgc) {
		// Woken to assist garbage collection, not to run a g;
		// help, then go back to looking for work.
		runtime.gchelper();
		m->helpgc = 0;
		runtime.lock(&runtime.sched);
		goto top;
	}
	if((gp = m->nextg) == nil)
		runtime.throw("bad m->nextg in nextgoroutine");
	m->nextg = nil;
	return gp;
}
// Recruit idle m's to help with garbage collection.
// Returns the number of helpers woken; if extra is non-nil, sets
// *extra to whether even more CPUs could have been used.
int32
runtime.helpgc(bool *extra)
{
	M *mp;
	int32 n, max;

	// Figure out how many CPUs to use.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	max = runtime.gomaxprocs;
	if(max > runtime.ncpu)
		max = runtime.ncpu;
	if(max > MaxGcproc)
		max = MaxGcproc;

	// We're going to use one CPU no matter what.
	// Figure out the max number of additional CPUs.
	max--;

	runtime.lock(&runtime.sched);
	n = 0;
	while(n < max && (mp = mget(nil)) != nil) {
		n++;
		mp->helpgc = 1;		// tells the woken m to run gchelper
		mp->waitnextg = 0;	// it is not being handed a g
		runtime.notewakeup(&mp->havenextg);
	}
	runtime.unlock(&runtime.sched);
	if(extra)
		*extra = n != max;
	return n;
}
// Stop all user goroutines so the garbage collector can run:
// clamp mcpumax to 1 and wait until at most this m remains on cpu.
void
runtime.stoptheworld(void)
{
	uint32 v;

	schedlock();
	runtime.gcwaiting = 1;

	setmcpumax(1);

	// while mcpu > 1
	for(;;) {
		v = runtime.sched.atomic;
		if(atomic_mcpu(v) <= 1)
			break;

		// It would be unsafe for multiple threads to be using
		// the stopped note at once, but there is only
		// ever one thread doing garbage collection.
		runtime.noteclear(&runtime.sched.stopped);
		if(atomic_waitstop(v))
			runtime.throw("invalid waitstop");

		// atomic { waitstop = 1 }, predicated on mcpu <= 1 check above
		// still being true.
		if(!runtime.cas(&runtime.sched.atomic, v, v+(1<<waitstopShift)))
			continue;	// atomic word changed; re-examine mcpu

		// Release the lock while sleeping so other m's can
		// reach the scheduler and stop.
		schedunlock();
		runtime.notesleep(&runtime.sched.stopped);
		schedlock();
	}
	runtime.singleproc = runtime.gomaxprocs == 1;
	schedunlock();
}
// Undo stoptheworld: restore mcpumax and kick m's back into
// running queued g's.  If extra is true, also spin up one spare m
// to help with the next garbage collection.
void
runtime.starttheworld(bool extra)
{
	M *m;

	schedlock();
	runtime.gcwaiting = 0;
	setmcpumax(runtime.gomaxprocs);
	matchmg();
	if(extra && canaddmcpu()) {
		// Start a new m that will (we hope) be idle
		// and so available to help when the next
		// garbage collection happens.
		// canaddmcpu above did mcpu++
		// (necessary, because m will be doing various
		// initialization work so is definitely running),
		// but m is not running a specific goroutine,
		// so set the helpgc flag as a signal to m's
		// first schedule(nil) to mcpu-- and grunning--.
		m = runtime.newm();
		m->helpgc = 1;
		runtime.sched.grunning++;
	}
	schedunlock();
}
// Called to start an M.
// (mstart is the entry point of the OS thread created by
// runtime.newosproc; a new thread begins execution here.  Note that
// what runtime.newosproc passes in is g0 and g0's stack — NOT the g
// of a newly created goroutine!)
void
runtime.mstart(void)
{
	if(g != m->g0)
		runtime.throw("bad runtime.mstart");

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	// (schedule is not an ordinary call — control never returns
	// here — so later mcalls are free to reuse this region of
	// the g0 stack.)
	runtime.gosave(&m->g0->sched);
	m->g0->sched.pc = (void*)-1;	// make sure it is never used
	runtime.asminit();
	runtime.minit();	// per the original notes, sets up a dedicated G for signal handling

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if(m == &runtime.m0)
		runtime.initsig();

	schedule(nil);	// never returns
}
// When running with cgo, we call libcgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
void (*libcgo_thread_start)(void*);

// Arguments handed to libcgo_thread_start for the new thread.
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
	M *m;			// the m the new thread will run
	G *g;			// its g0
	void (*fn)(void);	// thread entry point (runtime.mstart)
};
// Kick off new m's as needed (up to mcpumax).
// Sched is locked.
// (Pure matchmaking: as long as the m count hasn't hit its limit and
// the run queue still has g's, run one g on one m.)
static void
matchmg(void)
{
	G *gp;
	M *mp;

	// Don't start new work while this m is in a delicate state.
	if(m->mallocing || m->gcing)
		return;

	// While the run queue has g's and runnable m's are under the limit.
	while(haveg() && canaddmcpu()) {
		gp = gget();
		if(gp == nil)
			runtime.throw("gget inconsistency");

		// Find the m that will run gp.
		if((mp = mget(gp)) == nil)	// take a waiting m if there is one, else create
			mp = runtime.newm();
		mnextg(mp, gp);
	}
}
// Create a new m.  It will start off with a call to runtime.mstart.
// (In essence: create a new OS thread whose entry point is mstart.
// The scheduler later binds this machine to some goroutine via
// matchmg, and mstart switches into that goroutine's context.)
M*
runtime.newm(void)
{
	M *m;

	m = runtime.malloc(sizeof(M));
	mcommoninit(m);	// common per-m initialization

	// cgo code cannot run on segmented stacks; it needs a
	// system stack, so the thread is created differently here.
	if(runtime.iscgo) {
		CgoThreadStart ts;

		if(libcgo_thread_start == nil)
			runtime.throw("libcgo_thread_start missing");
		// pthread_create will make us a stack.
		m->g0 = runtime.malg(-1);
		ts.m = m;
		ts.g = m->g0;
		ts.fn = runtime.mstart;
		runtime.asmcgocall(libcgo_thread_start, &ts);
	} else {
		if(Windows)
			// windows will layout sched stack on os stack
			m->g0 = runtime.malg(-1);
		else
			m->g0 = runtime.malg(8192);
		runtime.newosproc(m, m->g0, m->g0->stackbase, runtime.mstart);
	}

	return m;
}
// One round of scheduler: find a goroutine and run it.
// The argument is the goroutine that was running before
// schedule was called, or nil if this is the first call.
// Never returns.
static void
schedule(G *gp)
{
	int32 hz;
	uint32 v;

	schedlock();
	if(gp != nil) {
		// Just finished running gp.
		gp->m = nil;
		runtime.sched.grunning--;

		// atomic { mcpu-- }
		v = runtime.xadd(&runtime.sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime.throw("negative mcpu in scheduler");

		switch(gp->status){
		case Grunnable:
		case Gdead:
			// Shouldn't have been running!
			runtime.throw("bad gp->status in sched");
		case Grunning:
			// Preempted/yielded: back onto the run queue.
			gp->status = Grunnable;
			gput(gp);
			break;
		case Gmoribund:
			// Goroutine is exiting: mark it Gdead, detach it
			// from its m, and put it back on the free list.
			gp->status = Gdead;
			if(gp->lockedm) {
				gp->lockedm = nil;
				m->lockedg = nil;
			}
			gp->idlem = nil;
			unwindstack(gp, nil);
			gfput(gp);
			if(--runtime.sched.gcount == 0)
				runtime.exit(0);
			break;
		}

		// (exitsyscall case: if mcpu was over the limit, the g
		// leaving its syscall could not keep running and came here
		// via gosched with readyonstop set — requeue it now.)
		if(gp->readyonstop){
			gp->readyonstop = 0;
			readylocked(gp);
		}
	} else if(m->helpgc) {
		// Bootstrap m or new m started by starttheworld.
		// atomic { mcpu-- }
		v = runtime.xadd(&runtime.sched.atomic, -1<<mcpuShift);
		if(atomic_mcpu(v) > maxgomaxprocs)
			runtime.throw("negative mcpu in scheduler");
		// Compensate for increment in starttheworld().
		runtime.sched.grunning--;
		m->helpgc = 0;
	} else if(m->nextg != nil) {
		// New m started by matchmg.
	} else {
		runtime.throw("invalid m state in scheduler");
	}

	// Find (or wait for) g to run.  Unlocks runtime.sched.
	// (The code below finds a runnable g, installs it as m->curg,
	// and marks it Grunning.  Candidates come from m->nextg and
	// m->lockedg first; failing那 that, the global run queue.
	// If you arrived here from mstart, start reading from here.)
	gp = nextgandunlock();
	gp->readyonstop = 0;
	gp->status = Grunning;
	m->curg = gp;
	gp->m = m;

	// Check whether the profiler needs to be turned on or off.
	hz = runtime.sched.profilehz;
	if(m->profilehz != hz)
		runtime.resetcpuprofiler(hz);

	// A fresh g's saved pc is seeded with runtime.goexit ("kickoff"):
	// start it by calling its entry function via gogocall.  Otherwise
	// resume the saved context.  runtime.gogo behaves like a C
	// longjmp — not a function call, but a direct context switch.
	if(gp->sched.pc == (byte*)runtime.goexit) {	// kickoff
		runtime.gogocall(&gp->sched, (void(*)(void))gp->entry);
	}
	runtime.gogo(&gp->sched, 0);
}
// Enter scheduler.  If g->status is Grunning,
// re-queues g and runs everyone else who is waiting
// before running g again.  If g->status is Gmoribund,
// kills off g.
// Cannot split stack because it is called from exitsyscall.
// See comment below.
#pragma textflag 7
void
runtime.gosched(void)
{
	if(m->locks != 0)
		runtime.throw("gosched holding locks");
	if(g == m->g0)
		runtime.throw("gosched of g0");
	// Switch to the g0 stack and run schedule there.
	runtime.mcall(schedule);
}
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime.gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
// It's okay to call matchmg and notewakeup even after
// decrementing mcpu, because we haven't released the
// sched lock yet, so the garbage collector cannot be running.
/* (Repaired from garbled encoding:) After a goroutine enters a syscall, the mcpu
   count drops, so matchmg can hand out a new m to run the g's waiting on the run queue.