// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package trace

import (
	"fmt"
	"internal/trace/traceviewer"
	"internal/trace/traceviewer/format"
	tracev2 "internal/trace/v2"
)

var _ generator = &threadGenerator{}

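// threadGenerator generates a thread-oriented view of the trace: goroutine
// execution and proc start/stop events are laid out on the timeline of the
// OS thread they occurred on.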
type threadGenerator struct {
	globalRangeGenerator
	globalMetricGenerator
	stackSampleGenerator[tracev2.ThreadID]

	gStates map[tracev2.GoID]*gState[tracev2.ThreadID]
	threads map[tracev2.ThreadID]struct{}
}

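// newThreadGenerator creates a threadGenerator with empty goroutine and thread
// tables, attributing stack samples to the thread the sample was taken on.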
func newThreadGenerator() *threadGenerator {
	tg := new(threadGenerator)
	tg.stackSampleGenerator.getResource = func(ev *tracev2.Event) tracev2.ThreadID {
		return ev.Thread()
	}
	tg.gStates = make(map[tracev2.GoID]*gState[tracev2.ThreadID])
	tg.threads = make(map[tracev2.ThreadID]struct{})
	return tg
}

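// Sync handles a sync point in the trace by forwarding it to the embedded
// globalRangeGenerator.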
func (g *threadGenerator) Sync() {
	g.globalRangeGenerator.Sync()
}

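// GoroutineLabel handles a goroutine label event, recording the label on the
// goroutine's state.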
func (g *threadGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) {
	l := ev.Label()
	g.gStates[l.Resource.Goroutine()].setLabel(l.Label)
}

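// GoroutineRange handles a goroutine-scoped range event, forwarding the
// begin, active, or end notification to the affected goroutine's state.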
func (g *threadGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) {
	r := ev.Range()
	switch ev.Kind() {
	case tracev2.EventRangeBegin:
		g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack())
	case tracev2.EventRangeActive:
		g.gStates[r.Scope.Goroutine()].rangeActive(r.Name)
	case tracev2.EventRangeEnd:
		gs := g.gStates[r.Scope.Goroutine()]
		gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx)
	}
}

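// GoroutineTransition handles a goroutine state transition event, updating the
// goroutine's state (attributed to the thread the event happened on) and
// emitting the transition to the viewer.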
func (g *threadGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) {
	if ev.Thread() != tracev2.NoThread {
		if _, ok := g.threads[ev.Thread()]; !ok {
			g.threads[ev.Thread()] = struct{}{}
		}
	}

	st := ev.StateTransition()
	goID := st.Resource.Goroutine()

	// If we haven't seen this goroutine before, create a new
	// gState for it.
	gs, ok := g.gStates[goID]
	if !ok {
		gs = newGState[tracev2.ThreadID](goID)
		g.gStates[goID] = gs
	}
	// If we haven't already named this goroutine, try to name it.
	gs.augmentName(st.Stack)

	// Handle the goroutine state transition.
	from, to := st.Goroutine()
	if from == to {
		// Filter out no-op events.
		return
	}
	if from.Executing() && !to.Executing() {
		if to == tracev2.GoWaiting {
			// Goroutine started blocking.
			gs.block(ev.Time(), ev.Stack(), st.Reason, ctx)
		} else {
			gs.stop(ev.Time(), ev.Stack(), ctx)
		}
	}
	if !from.Executing() && to.Executing() {
		start := ev.Time()
		if from == tracev2.GoUndetermined {
			// Back-date the event to the start of the trace.
			start = ctx.startTime
		}
		gs.start(start, ev.Thread(), ctx)
	}

	if from == tracev2.GoWaiting {
		// Goroutine was unblocked.
		gs.unblock(ev.Time(), ev.Stack(), ev.Thread(), ctx)
	}
	if from == tracev2.GoNotExist && to == tracev2.GoRunnable {
		// Goroutine was created.
		gs.created(ev.Time(), ev.Thread(), ev.Stack())
	}
	if from == tracev2.GoSyscall {
		// Exiting syscall.
		gs.syscallEnd(ev.Time(), to != tracev2.GoRunning, ctx)
	}

	// Handle syscalls.
	if to == tracev2.GoSyscall {
		start := ev.Time()
		if from == tracev2.GoUndetermined {
			// Back-date the event to the start of the trace.
			start = ctx.startTime
		}
		// Write down that we've entered a syscall. Note: we might have no P here
		// if we're in a cgo callback or this is a transition from GoUndetermined
		// (i.e. the G has been blocked in a syscall).
		gs.syscallBegin(start, ev.Thread(), ev.Stack())
	}

	// Note down the goroutine transition.
	_, inMarkAssist := gs.activeRanges["GC mark assist"]
	ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist))
}

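// ProcTransition handles a proc state transition event, emitting "proc start"
// and "proc stop" instant events on the thread's timeline and adjusting the
// running-thread count, which is approximated by running procs.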
func (g *threadGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) {
	if ev.Thread() != tracev2.NoThread {
		if _, ok := g.threads[ev.Thread()]; !ok {
			g.threads[ev.Thread()] = struct{}{}
		}
	}

	type procArg struct {
		Proc uint64 `json:"proc,omitempty"`
	}
	st := ev.StateTransition()
	viewerEv := traceviewer.InstantEvent{
		Resource: uint64(ev.Thread()),
		Stack:    ctx.Stack(viewerFrames(ev.Stack())),
		Arg:      procArg{Proc: uint64(st.Resource.Proc())},
	}

	from, to := st.Proc()
	if from == to {
		// Filter out no-op events.
		return
	}
	if to.Executing() {
		start := ev.Time()
		if from == tracev2.ProcUndetermined {
			start = ctx.startTime
		}
		viewerEv.Name = "proc start"
		viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())}
		viewerEv.Ts = ctx.elapsed(start)
		// TODO(mknyszek): We don't have a state machine for threads, so approximate
		// running threads with running Ps.
		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1)
	}
	if from.Executing() {
		start := ev.Time()
		viewerEv.Name = "proc stop"
		viewerEv.Ts = ctx.elapsed(start)
		// TODO(mknyszek): We don't have a state machine for threads, so approximate
		// running threads with running Ps.
		ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1)
	}
	// TODO(mknyszek): Consider modeling procs differently and have them
	// transition to and from NotExist when GOMAXPROCS changes. We can emit
	// events for this to clearly delineate GOMAXPROCS changes.

	if viewerEv.Name != "" {
		ctx.Instant(viewerEv)
	}
}

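// ProcRange handles a proc-scoped range event. Rendering proc ranges on a
// per-thread timeline is not yet supported; see the TODO below.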
func (g *threadGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) {
	// TODO(mknyszek): Extend procRangeGenerator to support rendering proc ranges on threads.
}

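// Finish flushes any outstanding state: it completes open global ranges and
// goroutine slices, and names each thread resource for the emitter.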
func (g *threadGenerator) Finish(ctx *traceContext) {
	ctx.SetResourceType("OS THREADS")

	// Finish off global ranges.
	g.globalRangeGenerator.Finish(ctx)

	// Finish off all the goroutine slices.
	for _, gs := range g.gStates {
		gs.finish(ctx)
	}

	// Name all the threads to the emitter.
	for id := range g.threads {
		ctx.Resource(uint64(id), fmt.Sprintf("Thread %d", id))
	}
}