Source file src/runtime/trace.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Go execution tracer.
// The tracer captures a wide range of execution events like goroutine
// creation/blocking/unblocking, syscall enter/exit/block, GC-related events,
// changes of heap size, processor start/stop, etc and writes them to a buffer
// in a compact form. A precise nanosecond-precision timestamp and a stack
// trace is captured for most events.
// See https://golang.org/s/go15trace for more info.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Event types in the trace, args are given in square brackets.
const (
	traceEvNone              = 0  // unused
	traceEvBatch             = 1  // start of per-P batch of events [pid, timestamp]
	traceEvFrequency         = 2  // contains tracer timer frequency [frequency (ticks per second)]
	traceEvStack             = 3  // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}]
	traceEvGomaxprocs        = 4  // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id]
	traceEvProcStart         = 5  // start of P [timestamp, thread id]
	traceEvProcStop          = 6  // stop of P [timestamp]
	traceEvGCStart           = 7  // GC start [timestamp, seq, stack id]
	traceEvGCDone            = 8  // GC done [timestamp]
	traceEvGCSTWStart        = 9  // GC STW start [timestamp, kind]
	traceEvGCSTWDone         = 10 // GC STW done [timestamp]
	traceEvGCSweepStart      = 11 // GC sweep start [timestamp, stack id]
	traceEvGCSweepDone       = 12 // GC sweep done [timestamp, swept, reclaimed]
	traceEvGoCreate          = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id]
	traceEvGoStart           = 14 // goroutine starts running [timestamp, goroutine id, seq]
	traceEvGoEnd             = 15 // goroutine ends [timestamp]
	traceEvGoStop            = 16 // goroutine stops (like in select{}) [timestamp, stack]
	traceEvGoSched           = 17 // goroutine calls Gosched [timestamp, stack]
	traceEvGoPreempt         = 18 // goroutine is preempted [timestamp, stack]
	traceEvGoSleep           = 19 // goroutine calls Sleep [timestamp, stack]
	traceEvGoBlock           = 20 // goroutine blocks [timestamp, stack]
	traceEvGoUnblock         = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack]
	traceEvGoBlockSend       = 22 // goroutine blocks on chan send [timestamp, stack]
	traceEvGoBlockRecv       = 23 // goroutine blocks on chan recv [timestamp, stack]
	traceEvGoBlockSelect     = 24 // goroutine blocks on select [timestamp, stack]
	traceEvGoBlockSync       = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack]
	traceEvGoBlockCond       = 26 // goroutine blocks on Cond [timestamp, stack]
	traceEvGoBlockNet        = 27 // goroutine blocks on network [timestamp, stack]
	traceEvGoSysCall         = 28 // syscall enter [timestamp, stack]
	traceEvGoSysExit         = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp]
	traceEvGoSysBlock        = 30 // syscall blocks [timestamp]
	traceEvGoWaiting         = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id]
	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
	traceEvString            = 37 // string dictionary entry [ID, length, string]
	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
	traceEvGoUnblockLocal    = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack]
	traceEvGoSysExitLocal    = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp]
	traceEvGoStartLabel      = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id]
	traceEvGoBlockGC         = 42 // goroutine blocks on GC assist [timestamp, stack]
	traceEvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack]
	traceEvGCMarkAssistDone  = 44 // GC mark assist done [timestamp]
	traceEvCount             = 45
)
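
// Note (added for illustration, see traceEvent and traceFlush below): each
// event is one descriptor byte, ev|narg<<traceArgCountShift, followed by
// base-128 varint arguments. For example, traceFlush opens every batch with:
//
//	bufp.byte(traceEvBatch | 1<<traceArgCountShift) // descriptor byte 0x41
//	bufp.varint(uint64(pid))                        // P that owns this batch
//	bufp.varint(ticks)                              // base timestamp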

const (
	// Timestamps in trace are cputicks/traceTickDiv.
	// This makes absolute values of timestamp diffs smaller,
	// and so they are encoded in less number of bytes.
	// 64 on x86 is somewhat arbitrary (one tick is ~20ns on a 3GHz machine).
	// The suggested increment frequency for PowerPC's time base register is
	// 512 MHz according to Power ISA v2.07 section 6.2, so we use 16 on ppc64
	// and ppc64le.
	// Tracing won't work reliably for architectures where cputicks is emulated
	// by nanotime, so the value doesn't matter for those architectures.
	traceTickDiv = 16 + 48*(sys.Goarch386|sys.GoarchAmd64|sys.GoarchAmd64p32)
	// Maximum number of PCs in a single stack trace.
	// Since events contain only stack id rather than whole stack trace,
	// we can allow quite large values here.
	traceStackSize = 128
	// Identifier of a fake P that is used when we trace without a real P.
	traceGlobProc = -1
	// Maximum number of bytes to encode uint64 in base-128.
	traceBytesPerNumber = 10
	// Shift of the number of arguments in the first event byte.
	traceArgCountShift = 6
	// Flag passed to traceGoPark to denote that the previous wakeup of this
	// goroutine was futile. For example, a goroutine was unblocked on a mutex,
	// but another goroutine got ahead and acquired the mutex before the first
	// goroutine is scheduled, so the first goroutine has to block again.
	// Such wakeups happen on buffered channels and sync.Mutex,
	// but are generally not interesting for end user.
	traceFutileWakeup byte = 128
)
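
// Note (added for illustration): sys.Goarch386, sys.GoarchAmd64 and
// sys.GoarchAmd64p32 are 1 when compiling for the corresponding architecture
// and 0 otherwise, so traceTickDiv evaluates to 16+48 = 64 on x86 targets and
// to 16 everywhere else.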

// trace is global tracing context.
var trace struct {
	lock          mutex       // protects the following members
	lockOwner     *g          // to avoid deadlocks during recursive lock locks
	enabled       bool        // when set runtime traces events
	shutdown      bool        // set when we are waiting for trace reader to finish after setting enabled to false
	headerWritten bool        // whether ReadTrace has emitted trace header
	footerWritten bool        // whether ReadTrace has emitted trace footer
	shutdownSema  uint32      // used to wait for ReadTrace completion
	seqStart      uint64      // sequence number when tracing was started
	ticksStart    int64       // cputicks when tracing was started
	ticksEnd      int64       // cputicks when tracing was stopped
	timeStart     int64       // nanotime when tracing was started
	timeEnd       int64       // nanotime when tracing was stopped
	seqGC         uint64      // GC start/done sequencer
	reading       traceBufPtr // buffer currently handed off to user
	empty         traceBufPtr // stack of empty buffers
	fullHead      traceBufPtr // queue of full buffers
	fullTail      traceBufPtr
	reader        guintptr        // goroutine that called ReadTrace, or nil
	stackTab      traceStackTable // maps stack traces to unique ids

	// Dictionary for traceEvString.
	//
	// Currently this is used only at trace setup and for
	// func/file:line info after tracing session, so we assume
	// single-threaded access.
	strings   map[string]uint64
	stringSeq uint64

	// markWorkerLabels maps gcMarkWorkerMode to string ID.
	markWorkerLabels [len(gcMarkWorkerModeStrings)]uint64

	bufLock mutex       // protects buf
	buf     traceBufPtr // global trace buffer, used when running without a p
}

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link      traceBufPtr             // in trace.empty/full
	lastTicks uint64                  // when we wrote the last event
	pos       int                     // next write offset in arr
	stk       [traceStackSize]uintptr // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
//
//go:notinheap
type traceBuf struct {
	traceBufHeader
	arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
}
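
// Note (added for illustration): sizing arr as 64<<10 minus the header makes
// unsafe.Sizeof(traceBuf{}) exactly 64 KiB, so the sysAlloc call in traceFlush
// always requests whole 64 KiB blocks.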

// traceBufPtr is a *traceBuf that is not traced by the garbage
// collector and doesn't have write barriers. traceBufs are not
// allocated from the GC'd heap, so this is safe, and are often
// manipulated in contexts where write barriers are not allowed, so
// this is necessary.
//
// TODO: Since traceBuf is now go:notinheap, this isn't necessary.
type traceBufPtr uintptr

func (tp traceBufPtr) ptr() *traceBuf   { return (*traceBuf)(unsafe.Pointer(tp)) }
func (tp *traceBufPtr) set(b *traceBuf) { *tp = traceBufPtr(unsafe.Pointer(b)) }
func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
// Most clients should use the runtime/trace package or the testing package's
// -test.trace flag instead of calling StartTrace directly.
func StartTrace() error {
	// Stop the world, so that we can take a consistent snapshot
	// of all goroutines at the beginning of the trace.
	stopTheWorld("start tracing")

	// We are in stop-the-world, but syscalls can finish and write to trace concurrently.
	// Exitsyscall could check trace.enabled long before and then suddenly wake up
	// and decide to write to trace at a random point in time.
	// However, such syscall will use the global trace.buf buffer, because we've
	// acquired all p's by doing stop-the-world. So this protects us from such races.
	lock(&trace.bufLock)

	if trace.enabled || trace.shutdown {
		unlock(&trace.bufLock)
		startTheWorld()
		return errorString("tracing is already enabled")
	}

	// Can't set trace.enabled yet. While the world is stopped, exitsyscall could
	// already emit a delayed event (see exitTicks in exitsyscall) if we set trace.enabled here.
	// That would lead to an inconsistent trace:
	// - either GoSysExit appears before EvGoInSyscall,
	// - or GoSysExit appears for a goroutine for which we don't emit EvGoInSyscall below.
	// To instruct traceEvent that it must not ignore events below, we set startingtrace.
	// trace.enabled is set afterwards once we have emitted all preliminary events.
	_g_ := getg()
	_g_.m.startingtrace = true

	// Obtain the current stack ID once; it is reused as the event stack for
	// the synthesized GoCreate events below.
	mp := acquirem()
	stkBuf := make([]uintptr, traceStackSize)
	stackID := traceStackID(mp, stkBuf, 2)
	releasem(mp)

	for _, gp := range allgs {
		status := readgstatus(gp)
		if status != _Gdead {
			gp.traceseq = 0
			gp.tracelastp = getg().m.p
			// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
			id := trace.stackTab.put([]uintptr{gp.startpc + sys.PCQuantum})
			traceEvent(traceEvGoCreate, -1, uint64(gp.goid), uint64(id), stackID)
		}
		if status == _Gwaiting {
			// traceEvGoWaiting is implied to have seq=1.
			gp.traceseq++
			traceEvent(traceEvGoWaiting, -1, uint64(gp.goid))
		}
		if status == _Gsyscall {
			gp.traceseq++
			traceEvent(traceEvGoInSyscall, -1, uint64(gp.goid))
		} else {
			gp.sysblocktraced = false
		}
	}
	traceProcStart()
	traceGoStart()
	// Note: ticksStart needs to be set after we emit traceEvGoInSyscall events.
	// If we do it the other way around, it is possible that exitsyscall will
	// query sysexitticks after ticksStart but before traceEvGoInSyscall timestamp.
	// It will lead to a false conclusion that cputicks is broken.
	trace.ticksStart = cputicks()
	trace.timeStart = nanotime()
	trace.headerWritten = false
	trace.footerWritten = false

	// string to id mapping
	//  0 : reserved for an empty string
	//  remaining: other strings registered by traceString
	trace.stringSeq = 0
	trace.strings = make(map[string]uint64)

	trace.seqGC = 0
	_g_.m.startingtrace = false
	trace.enabled = true

	// Register mark worker labels.
	_, pid, bufp := traceAcquireBuffer()
	for i, label := range gcMarkWorkerModeStrings[:] {
		trace.markWorkerLabels[i], bufp = traceString(bufp, pid, label)
	}
	traceReleaseBuffer(pid)

	unlock(&trace.bufLock)

	startTheWorld()
	return nil
}
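
// Illustrative usage sketch (added, not part of the original source): programs
// normally reach StartTrace/StopTrace/ReadTrace through the runtime/trace
// package rather than calling them directly:
//
//	f, err := os.Create("trace.out") // hypothetical output file
//	if err != nil {
//		log.Fatal(err)
//	}
//	if err := trace.Start(f); err != nil { // drives runtime.StartTrace + ReadTrace
//		log.Fatal(err)
//	}
//	defer trace.Stop() // drives runtime.StopTrace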

// StopTrace stops tracing, if it was previously enabled.
// StopTrace only returns after all the reads for the trace have completed.
func StopTrace() {
	// Stop the world so that we can collect the trace buffers from all p's below,
	// and also to avoid races with traceEvent.
	stopTheWorld("stop tracing")

	// See the comment in StartTrace.
	lock(&trace.bufLock)

	if !trace.enabled {
		unlock(&trace.bufLock)
		startTheWorld()
		return
	}

	traceGoSched()

	// Loop over all allocated Ps because dead Ps may still have
	// trace buffers.
	for _, p := range allp[:cap(allp)] {
		buf := p.tracebuf
		if buf != 0 {
			traceFullQueue(buf)
			p.tracebuf = 0
		}
	}
	if trace.buf != 0 {
		buf := trace.buf
		trace.buf = 0
		if buf.ptr().pos != 0 {
			traceFullQueue(buf)
		}
	}

	for {
		trace.ticksEnd = cputicks()
		trace.timeEnd = nanotime()
		// Windows time can tick only every 15ms, wait for at least one tick.
		if trace.timeEnd != trace.timeStart {
			break
		}
		osyield()
	}

	trace.enabled = false
	trace.shutdown = true
	unlock(&trace.bufLock)

	startTheWorld()

	// The world is started but we've set trace.shutdown, so new tracing can't start.
	// Wait for the trace reader to flush pending buffers and stop.
	semacquire(&trace.shutdownSema)
	if raceenabled {
		raceacquire(unsafe.Pointer(&trace.shutdownSema))
	}

	// The lock protects us from races with StartTrace/StopTrace because they do stop-the-world.
	lock(&trace.lock)
	for _, p := range allp[:cap(allp)] {
		if p.tracebuf != 0 {
			throw("trace: non-empty trace buffer in proc")
		}
	}
	if trace.buf != 0 {
		throw("trace: non-empty global trace buffer")
	}
	if trace.fullHead != 0 || trace.fullTail != 0 {
		throw("trace: non-empty full trace buffer")
	}
	if trace.reading != 0 || trace.reader != 0 {
		throw("trace: reading after shutdown")
	}
	for trace.empty != 0 {
		buf := trace.empty
		trace.empty = buf.ptr().link
		sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
	}
	trace.strings = nil
	trace.shutdown = false
	unlock(&trace.lock)
}

// ReadTrace returns the next chunk of binary tracing data, blocking until data
// is available. If tracing is turned off and all the data accumulated while it
// was on has been returned, ReadTrace returns nil. The caller must copy the
// returned data before calling ReadTrace again.
// ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte {
	// This function may need to lock trace.lock recursively
	// (goparkunlock -> traceGoPark -> traceEvent -> traceFlush).
	// To allow this we use trace.lockOwner.
	// Also this function must not allocate while holding trace.lock:
	// allocation can call heap allocate, which will try to emit a trace
	// event while holding heap lock.
	lock(&trace.lock)
	trace.lockOwner = getg()

	if trace.reader != 0 {
		// More than one goroutine reads trace. This is bad.
		// But we rather do not crash the program because of tracing,
		// because tracing can be enabled at runtime on prod servers.
		trace.lockOwner = nil
		unlock(&trace.lock)
		println("runtime: ReadTrace called from multiple goroutines simultaneously")
		return nil
	}
	// Recycle the old buffer.
	if buf := trace.reading; buf != 0 {
		buf.ptr().link = trace.empty
		trace.empty = buf
		trace.reading = 0
	}
	// Write trace header.
	if !trace.headerWritten {
		trace.headerWritten = true
		trace.lockOwner = nil
		unlock(&trace.lock)
		return []byte("go 1.10 trace\x00\x00\x00")
	}
	// Wait for new data.
	if trace.fullHead == 0 && !trace.shutdown {
		trace.reader.set(getg())
		goparkunlock(&trace.lock, "trace reader (blocked)", traceEvGoBlock, 2)
		lock(&trace.lock)
	}
	// Write a buffer.
	if trace.fullHead != 0 {
		buf := traceFullDequeue()
		trace.reading = buf
		trace.lockOwner = nil
		unlock(&trace.lock)
		return buf.ptr().arr[:buf.ptr().pos]
	}
	// Write footer with timer frequency.
	if !trace.footerWritten {
		trace.footerWritten = true
		// Use float64 because (trace.ticksEnd - trace.ticksStart) * 1e9 can overflow int64.
		freq := float64(trace.ticksEnd-trace.ticksStart) * 1e9 / float64(trace.timeEnd-trace.timeStart) / traceTickDiv
		trace.lockOwner = nil
		unlock(&trace.lock)
		var data []byte
		data = append(data, traceEvFrequency|0<<traceArgCountShift)
		data = traceAppend(data, uint64(freq))
		for i := range timers {
			tb := &timers[i]
			if tb.gp != nil {
				data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
				data = traceAppend(data, uint64(tb.gp.goid))
			}
		}
		// This will emit a bunch of full buffers, we will pick them up
		// on the next iteration.
		trace.stackTab.dump()
		return data
	}
	// Done.
	if trace.shutdown {
		trace.lockOwner = nil
		unlock(&trace.lock)
		if raceenabled {
			// Model synchronization on trace.shutdownSema, which race
			// detector does not see. This is required to avoid false
			// race reports on writer passed to trace.Start.
			racerelease(unsafe.Pointer(&trace.shutdownSema))
		}
		// trace.enabled is already reset, so can call traceable functions.
		semrelease(&trace.shutdownSema)
		return nil
	}
	// Also bad, but see the comment above.
	trace.lockOwner = nil
	unlock(&trace.lock)
	println("runtime: spurious wakeup of trace reader")
	return nil
}
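
// Illustrative sketch (added, not part of the original source): per the doc
// comment above, a single reader drains the trace by calling ReadTrace until
// it returns nil, consuming each chunk before the next call (the runtime/trace
// package runs essentially this loop in a background goroutine):
//
//	for {
//		data := runtime.ReadTrace()
//		if data == nil {
//			break // tracing stopped and all buffered data returned
//		}
//		w.Write(data) // w is some io.Writer supplied by the caller
//	}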

// traceReader returns the trace reader that should be woken up, if any.
func traceReader() *g {
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		return nil
	}
	lock(&trace.lock)
	if trace.reader == 0 || (trace.fullHead == 0 && !trace.shutdown) {
		unlock(&trace.lock)
		return nil
	}
	gp := trace.reader.ptr()
	trace.reader.set(nil)
	unlock(&trace.lock)
	return gp
}

// traceProcFree frees trace buffer associated with pp.
func traceProcFree(pp *p) {
	buf := pp.tracebuf
	pp.tracebuf = 0
	if buf == 0 {
		return
	}
	lock(&trace.lock)
	traceFullQueue(buf)
	unlock(&trace.lock)
}

// traceFullQueue queues buf into queue of full buffers.
func traceFullQueue(buf traceBufPtr) {
	buf.ptr().link = 0
	if trace.fullHead == 0 {
		trace.fullHead = buf
	} else {
		trace.fullTail.ptr().link = buf
	}
	trace.fullTail = buf
}

// traceFullDequeue dequeues from queue of full buffers.
func traceFullDequeue() traceBufPtr {
	buf := trace.fullHead
	if buf == 0 {
		return 0
	}
	trace.fullHead = buf.ptr().link
	if trace.fullHead == 0 {
		trace.fullTail = 0
	}
	buf.ptr().link = 0
	return buf
}

// traceEvent writes a single event to trace buffer, flushing the buffer if necessary.
// ev is event type.
// If skip > 0, write current stack id as the last argument (skipping skip top frames).
// If skip = 0, this event doesn't require a stack, otherwise skip should be positive.
func traceEvent(ev byte, skip int, args ...uint64) {
	mp, pid, bufp := traceAcquireBuffer()
	// Double-check trace.enabled now that we've done m.locks++ and acquired bufLock.
	// This protects from races between traceEvent and StartTrace/StopTrace.

	// The caller checked that trace.enabled == true, but trace.enabled might have been
	// turned off between the check and now. Check again. traceAcquireBuffer did mp.locks++,
	// StopTrace does stopTheWorld, and stopTheWorld waits for mp.locks to go back to zero,
	// so if we see trace.enabled == true now, we know it's true for the rest of the function.
	// Exitsyscall can run even during stopTheWorld. The race with StartTrace/StopTrace
	// during tracing in exitsyscall is resolved by locking trace.bufLock in traceAcquireBuffer.
	if !trace.enabled && !mp.startingtrace {
		traceReleaseBuffer(pid)
		return
	}
	buf := (*bufp).ptr()
	const maxSize = 2 + 5*traceBytesPerNumber // event type, length, sequence, timestamp, stack id and two add params
	if buf == nil || len(buf.arr)-buf.pos < maxSize {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		(*bufp).set(buf)
	}

	ticks := uint64(cputicks()) / traceTickDiv
	tickDiff := ticks - buf.lastTicks
	buf.lastTicks = ticks
	narg := byte(len(args))
	if skip >= 0 {
		narg++
	}
	// We have only 2 bits for number of arguments.
	// If number is >= 3, then the event type is followed by event length in bytes.
	if narg > 3 {
		narg = 3
	}
	startPos := buf.pos
	buf.byte(ev | narg<<traceArgCountShift)
	var lenp *byte
	if narg == 3 {
		// Reserve the byte for length assuming that length < 128.
		buf.varint(0)
		lenp = &buf.arr[buf.pos-1]
	}
	buf.varint(tickDiff)
	for _, a := range args {
		buf.varint(a)
	}
	if skip == 0 {
		buf.varint(0)
	} else if skip > 0 {
		buf.varint(traceStackID(mp, buf.stk[:], skip))
	}
	evSize := buf.pos - startPos
	if evSize > maxSize {
		throw("invalid length of trace event")
	}
	if lenp != nil {
		// Fill in actual length.
		*lenp = byte(evSize - 2)
	}
	traceReleaseBuffer(pid)
}
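
// Worked example (added for illustration): traceGoSched below calls
// traceEvent(traceEvGoSched, 1) with no explicit args, so narg = 1 (the stack
// id counts as an argument) and the bytes emitted are:
//
//	0x51             // traceEvGoSched (17) | 1<<traceArgCountShift
//	varint(tickDiff) // timestamp delta since the last event in this buffer
//	varint(stack id) // from traceStackID, skipping 1 frame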

func traceStackID(mp *m, buf []uintptr, skip int) uint64 {
	_g_ := getg()
	gp := mp.curg
	var nstk int
	if gp == _g_ {
		nstk = callers(skip+1, buf[:])
	} else if gp != nil {
		gp = mp.curg
		nstk = gcallers(gp, skip, buf[:])
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
	}
	if nstk > 0 && gp.goid == 1 {
		nstk-- // skip runtime.main
	}
	id := trace.stackTab.put(buf[:nstk])
	return uint64(id)
}

// traceAcquireBuffer returns trace buffer to use and, if necessary, locks it.
func traceAcquireBuffer() (mp *m, pid int32, bufp *traceBufPtr) {
	mp = acquirem()
	if p := mp.p.ptr(); p != nil {
		return mp, p.id, &p.tracebuf
	}
	lock(&trace.bufLock)
	return mp, traceGlobProc, &trace.buf
}

// traceReleaseBuffer releases a buffer previously acquired with traceAcquireBuffer.
func traceReleaseBuffer(pid int32) {
	if pid == traceGlobProc {
		unlock(&trace.bufLock)
	}
	releasem(getg().m)
}

// traceFlush puts buf onto stack of full buffers and returns an empty buffer.
func traceFlush(buf traceBufPtr, pid int32) traceBufPtr {
	owner := trace.lockOwner
	dolock := owner == nil || owner != getg().m.curg
	if dolock {
		lock(&trace.lock)
	}
	if buf != 0 {
		traceFullQueue(buf)
	}
	if trace.empty != 0 {
		buf = trace.empty
		trace.empty = buf.ptr().link
	} else {
		buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
		if buf == 0 {
			throw("trace: out of memory")
		}
	}
	bufp := buf.ptr()
	bufp.link.set(nil)
	bufp.pos = 0

	// initialize the buffer for a new batch
	ticks := uint64(cputicks()) / traceTickDiv
	bufp.lastTicks = ticks
	bufp.byte(traceEvBatch | 1<<traceArgCountShift)
	bufp.varint(uint64(pid))
	bufp.varint(ticks)

	if dolock {
		unlock(&trace.lock)
	}
	return buf
}

// traceString adds a string to the trace.strings and returns the id.
func traceString(bufp *traceBufPtr, pid int32, s string) (uint64, *traceBufPtr) {
	if s == "" {
		return 0, bufp
	}
	if id, ok := trace.strings[s]; ok {
		return id, bufp
	}

	trace.stringSeq++
	id := trace.stringSeq
	trace.strings[s] = id

	// memory allocation in above may trigger tracing and
	// cause *bufp changes. Following code now works with *bufp,
	// so there must be no memory allocation or any activities
	// that causes tracing after this point.

	buf := (*bufp).ptr()
	size := 1 + 2*traceBytesPerNumber + len(s)
	if buf == nil || len(buf.arr)-buf.pos < size {
		buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
		(*bufp).set(buf)
	}
	buf.byte(traceEvString)
	buf.varint(id)
	buf.varint(uint64(len(s)))
	buf.pos += copy(buf.arr[buf.pos:], s)

	(*bufp).set(buf)
	return id, bufp
}

// traceAppend appends v to buf in little-endian-base-128 encoding.
func traceAppend(buf []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		buf = append(buf, 0x80|byte(v))
	}
	buf = append(buf, byte(v))
	return buf
}
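
// Worked example (added for illustration): each byte carries 7 bits of v,
// least-significant group first, with 0x80 set on all but the last byte.
// For v = 300 (0b1_0010_1100):
//
//	traceAppend(nil, 300) // -> []byte{0xac, 0x02}: 0x2c|0x80, then 300>>7 == 2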

// varint appends v to buf in little-endian-base-128 encoding.
func (buf *traceBuf) varint(v uint64) {
	pos := buf.pos
	for ; v >= 0x80; v >>= 7 {
		buf.arr[pos] = 0x80 | byte(v)
		pos++
	}
	buf.arr[pos] = byte(v)
	pos++
	buf.pos = pos
}

// byte appends v to buf.
func (buf *traceBuf) byte(v byte) {
	buf.arr[buf.pos] = v
	buf.pos++
}

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
	lock mutex
	seq  uint32
	mem  traceAlloc
	tab  [1 << 13]traceStackPtr
}

// traceStack is a single stack in traceStackTable.
type traceStack struct {
	link traceStackPtr
	hash uintptr
	id   uint32
	n    int
	stk  [0]uintptr // real type [n]uintptr
}

type traceStackPtr uintptr

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
	}
	// Now, double check under the mutex.
	lock(&tab.lock)
	if id := tab.find(pcs, hash); id != 0 {
		unlock(&tab.lock)
		return id
	}
	// Create new record.
	tab.seq++
	stk := tab.newStack(len(pcs))
	stk.hash = hash
	stk.id = tab.seq
	stk.n = len(pcs)
	stkpc := stk.stack()
	for i, pc := range pcs {
		stkpc[i] = pc
	}
	part := int(hash % uintptr(len(tab.tab)))
	stk.link = tab.tab[part]
	atomicstorep(unsafe.Pointer(&tab.tab[part]), unsafe.Pointer(stk))
	unlock(&tab.lock)
	return stk.id
}
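
// Usage note (added for illustration): callers intern a stack once and store
// only the returned id in the event stream, e.g. traceGoCreate below does
//
//	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
//
// Repeated calls with an equal PC slice return the same id: the lock-free
// find fast path handles the common case, and the second find under tab.lock
// resolves races between concurrent inserters.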

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
		if stk.hash == hash && stk.n == len(pcs) {
			for i, stkpc := range stk.stack() {
				if stkpc != pcs[i] {
					continue Search
				}
			}
			return stk.id
		}
	}
	return 0
}

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
}

// allFrames returns all of the Frames corresponding to pcs.
func allFrames(pcs []uintptr) []Frame {
	frames := make([]Frame, 0, len(pcs))
	ci := CallersFrames(pcs)
	for {
		f, more := ci.Next()
		frames = append(frames, f)
		if !more {
			return frames
		}
	}
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	bufp := traceFlush(0, 0)
	for _, stk := range tab.tab {
		stk := stk.ptr()
		for ; stk != nil; stk = stk.link.ptr() {
			tmpbuf := tmp[:0]
			tmpbuf = traceAppend(tmpbuf, uint64(stk.id))
			frames := allFrames(stk.stack())
			tmpbuf = traceAppend(tmpbuf, uint64(len(frames)))
			for _, f := range frames {
				var frame traceFrame
				frame, bufp = traceFrameForPC(bufp, 0, f)
				tmpbuf = traceAppend(tmpbuf, uint64(f.PC))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
				tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
			}
			// Now copy to the buffer.
			size := 1 + traceBytesPerNumber + len(tmpbuf)
			if buf := bufp.ptr(); len(buf.arr)-buf.pos < size {
				bufp = traceFlush(bufp, 0)
			}
			buf := bufp.ptr()
			buf.byte(traceEvStack | 3<<traceArgCountShift)
			buf.varint(uint64(len(tmpbuf)))
			buf.pos += copy(buf.arr[buf.pos:], tmpbuf)
		}
	}

	lock(&trace.lock)
	traceFullQueue(bufp)
	unlock(&trace.lock)

	tab.mem.drop()
	*tab = traceStackTable{}
}

type traceFrame struct {
	funcID uint64
	fileID uint64
	line   uint64
}

// traceFrameForPC records the frame information.
// It may allocate memory.
func traceFrameForPC(buf traceBufPtr, pid int32, f Frame) (traceFrame, traceBufPtr) {
	bufp := &buf
	var frame traceFrame

	fn := f.Function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, bufp = traceString(bufp, pid, fn)
	frame.line = uint64(f.Line)
	file := f.File
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
	}
	frame.fileID, bufp = traceString(bufp, pid, file)
	return frame, (*bufp)
}

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
	head traceAllocBlockPtr
	off  uintptr
}

// traceAllocBlock is a block in traceAlloc.
//
// traceAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceAllocBlocks do
// not need write barriers.
//
//go:notinheap
type traceAllocBlock struct {
	next traceAllocBlockPtr
	data [64<<10 - sys.PtrSize]byte
}

// TODO: Since traceAllocBlock is now go:notinheap, this isn't necessary.
type traceAllocBlockPtr uintptr

func (p traceAllocBlockPtr) ptr() *traceAllocBlock   { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
	if a.head == 0 || a.off+n > uintptr(len(a.head.ptr().data)) {
		if n > uintptr(len(a.head.ptr().data)) {
			throw("trace: alloc too large")
		}
		block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
		if block == nil {
			throw("trace: out of memory")
		}
		block.next.set(a.head.ptr())
		a.head.set(block)
		a.off = 0
	}
	p := &a.head.ptr().data[a.off]
	a.off += n
	return unsafe.Pointer(p)
}

// drop frees all previously allocated memory and resets the allocator.
func (a *traceAlloc) drop() {
	for a.head != 0 {
		block := a.head.ptr()
		a.head.set(block.next.ptr())
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
	}
}

// The following functions write specific events to trace.

func traceGomaxprocs(procs int32) {
	traceEvent(traceEvGomaxprocs, 1, uint64(procs))
}

func traceProcStart() {
	traceEvent(traceEvProcStart, -1, uint64(getg().m.id))
}

func traceProcStop(pp *p) {
	// Sysmon and stopTheWorld can stop Ps blocked in syscalls,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvProcStop, -1)
	mp.p = oldp
	releasem(mp)
}

func traceGCStart() {
	traceEvent(traceEvGCStart, 3, trace.seqGC)
	trace.seqGC++
}

func traceGCDone() {
	traceEvent(traceEvGCDone, -1)
}

func traceGCSTWStart(kind int) {
	traceEvent(traceEvGCSTWStart, -1, uint64(kind))
}

func traceGCSTWDone() {
	traceEvent(traceEvGCSTWDone, -1)
}

// traceGCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// traceGCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
func traceGCSweepStart() {
	// Delay the actual GCSweepStart event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		throw("double traceGCSweepStart")
	}
	_p_.traceSweep, _p_.traceSwept, _p_.traceReclaimed = true, 0, 0
}

// traceGCSweepSpan traces the sweep of a single page.
//
// This may be called outside a traceGCSweepStart/traceGCSweepDone
// pair; however, it will not emit any trace events in this case.
func traceGCSweepSpan(bytesSwept uintptr) {
	_p_ := getg().m.p.ptr()
	if _p_.traceSweep {
		if _p_.traceSwept == 0 {
			traceEvent(traceEvGCSweepStart, 1)
		}
		_p_.traceSwept += bytesSwept
	}
}

func traceGCSweepDone() {
	_p_ := getg().m.p.ptr()
	if !_p_.traceSweep {
		throw("missing traceGCSweepStart")
	}
	if _p_.traceSwept != 0 {
		traceEvent(traceEvGCSweepDone, -1, uint64(_p_.traceSwept), uint64(_p_.traceReclaimed))
	}
	_p_.traceSweep = false
}

func traceGCMarkAssistStart() {
	traceEvent(traceEvGCMarkAssistStart, 1)
}

func traceGCMarkAssistDone() {
	traceEvent(traceEvGCMarkAssistDone, -1)
}

func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}

func traceGoStart() {
	_g_ := getg().m.curg
	_p_ := _g_.m.p
	_g_.traceseq++
	if _g_ == _p_.ptr().gcBgMarkWorker.ptr() {
		traceEvent(traceEvGoStartLabel, -1, uint64(_g_.goid), _g_.traceseq, trace.markWorkerLabels[_p_.ptr().gcMarkWorkerMode])
	} else if _g_.tracelastp == _p_ {
		traceEvent(traceEvGoStartLocal, -1, uint64(_g_.goid))
	} else {
		_g_.tracelastp = _p_
		traceEvent(traceEvGoStart, -1, uint64(_g_.goid), _g_.traceseq)
	}
}

func traceGoEnd() {
	traceEvent(traceEvGoEnd, -1)
}

func traceGoSched() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSched, 1)
}

func traceGoPreempt() {
	_g_ := getg()
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoPreempt, 1)
}

func traceGoPark(traceEv byte, skip int) {
	if traceEv&traceFutileWakeup != 0 {
		traceEvent(traceEvFutileWakeup, -1)
	}
	traceEvent(traceEv & ^traceFutileWakeup, skip)
}

func traceGoUnpark(gp *g, skip int) {
	_p_ := getg().m.p
	gp.traceseq++
	if gp.tracelastp == _p_ {
		traceEvent(traceEvGoUnblockLocal, skip, uint64(gp.goid))
	} else {
		gp.tracelastp = _p_
		traceEvent(traceEvGoUnblock, skip, uint64(gp.goid), gp.traceseq)
	}
}

func traceGoSysCall() {
	traceEvent(traceEvGoSysCall, 1)
}

func traceGoSysExit(ts int64) {
	if ts != 0 && ts < trace.ticksStart {
		// There is a race between the code that initializes sysexitticks
		// (in exitsyscall, which runs without a P, and therefore is not
		// stopped with the rest of the world) and the code that initializes
		// a new trace. The recorded sysexitticks must therefore be treated
		// as "best effort". If they are valid for this trace, then great,
		// use them for greater accuracy. But if they're not valid for this
		// trace, assume that the trace was started after the actual syscall
		// exit (but before we actually managed to start the goroutine,
		// aka right now), and assign a fresh time stamp.
		ts = 0
	}
	_g_ := getg().m.curg
	_g_.traceseq++
	_g_.tracelastp = _g_.m.p
	traceEvent(traceEvGoSysExit, -1, uint64(_g_.goid), _g_.traceseq, uint64(ts)/traceTickDiv)
}

func traceGoSysBlock(pp *p) {
	// Sysmon and stopTheWorld can declare syscalls running on remote Ps as blocked,
	// to handle this we temporary employ the P.
	mp := acquirem()
	oldp := mp.p
	mp.p.set(pp)
	traceEvent(traceEvGoSysBlock, -1)
	mp.p = oldp
	releasem(mp)
}

func traceHeapAlloc() {
	traceEvent(traceEvHeapAlloc, -1, memstats.heap_live)
}

func traceNextGC() {
	if memstats.next_gc == ^uint64(0) {
		// Heap-based triggering is disabled.
		traceEvent(traceEvNextGC, -1, 0)
	} else {
		traceEvent(traceEvNextGC, -1, memstats.next_gc)
	}
}