internal/lsp/cache: reduce critical sections
This change reduces the sizes of the critical sections
in traces.ProcessEvent and Generation.Bind, in particular
by moving allocations ahead of Lock. According to the
trace profiler, this reduces contention.

See https://go-review.googlesource.com/c/go/+/411909 for
another reduction in contention. The largest remaining
source of contention is Handle.Get, on which thousands of
goroutines wait because we initiate typechecking top-down.

Also, add a couple of TODO comments about possible
optimizations, and delete a stale comment regarding Bind.

Change-Id: I995a0bb46e8c9bf0c23492fb62b56f4539bc32f8
Reviewed-on: https://go-review.googlesource.com/c/tools/+/411910
Run-TryBot: Hyang-Ah Hana Kim <hyangah@gmail.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Hyang-Ah Hana Kim <hyangah@gmail.com>
adonovan committed Jun 15, 2022
1 parent 27db7f4 commit 654a14b
Showing 4 changed files with 37 additions and 26 deletions.
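The recurring shape in these diffs is to prepare data before acquiring a mutex, so that the lock guards only the shared-map update. A minimal sketch of that pattern, using illustrative names (registry, record) rather than the actual gopls types:

```go
// Illustrative sketch only; registry and record are not gopls types.
package main

import (
	"fmt"
	"sync"
)

type record struct {
	name string
	tags []string
}

type registry struct {
	mu    sync.Mutex
	items map[string]*record
}

// add builds the record before acquiring the mutex, so the lock
// covers only the lazy map initialization and the insertion --
// the same shape ProcessEvent now uses when building traceData.
func (r *registry) add(name string, tags []string) {
	rec := &record{name: name, tags: append([]string(nil), tags...)} // allocate outside the lock

	r.mu.Lock()
	if r.items == nil {
		r.items = make(map[string]*record)
	}
	r.items[name] = rec
	r.mu.Unlock()
}

func main() {
	var r registry
	r.add("typecheck", []string{"package=fmt"})
	fmt.Println(len(r.items))
}
```

Holding the lock only for the map operations keeps the time other goroutines spend blocked proportional to the map work, not to allocation and label rendering.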
16 changes: 8 additions & 8 deletions internal/lsp/cache/check.go
@@ -97,14 +97,6 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so
return nil, err
}

// Do not close over the packageHandle or the snapshot in the Bind function.
// This creates a cycle, which causes the finalizers to never run on the handles.
// The possible cycles are:
//
// packageHandle.h.function -> packageHandle
// packageHandle.h.function -> snapshot -> packageHandle
//

m := ph.m
key := ph.key

@@ -121,6 +113,13 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so
}(dep)
}

// TODO(adonovan): opt: consider moving the Wait here,
// so that dependencies complete before we start to
// read+parse+typecheck this package. Although the
// read+parse can proceed, typechecking will block
// almost immediately until the imports are done.
// The effect is to increase contention.

data := &packageData{}
data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps)
// Make sure that the workers above have finished before we return,
@@ -448,6 +447,7 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour
}
typeparams.InitInstanceInfo(pkg.typesInfo)

// TODO(adonovan): opt: execute this loop in parallel.
for _, gf := range pkg.m.GoFiles {
// In the presence of line directives, we may need to report errors in
// non-compiled Go files, so we need to register them on the package.
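The first TODO above considers waiting for dependency typechecking before starting this package's own work. A rough sketch of the reordering it describes, with hypothetical helper names (loadDep, typeCheckSelf):

```go
// Illustrative sketch; loadDep and typeCheckSelf are hypothetical stand-ins.
package main

import "sync"

func build(deps []string) {
	var wg sync.WaitGroup
	for _, d := range deps {
		wg.Add(1)
		go func(d string) {
			defer wg.Done()
			loadDep(d) // check each dependency in parallel
		}(d)
	}

	// The TODO's proposed order: wait for the dependencies first, then
	// typecheck, so any blocked time is spent in wg.Wait rather than
	// inside import resolution. (The current code starts typechecking
	// before waiting, and blocks almost immediately on its imports.)
	wg.Wait()
	typeCheckSelf()
}

func loadDep(string) {}
func typeCheckSelf() {}

func main() { build([]string{"fmt", "strings"}) }
```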
2 changes: 1 addition & 1 deletion internal/lsp/cache/parse.go
@@ -278,7 +278,7 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod

tok := fset.File(file.Pos())
if tok == nil {
// file.Pos is the location of the package declaration. If there was
// file.Pos is the location of the package declaration (issue #53202). If there was
// none, we can't find the token.File that ParseFile created, and we
// have no choice but to recreate it.
tok = fset.AddFile(fh.URI().Filename(), -1, len(src))
39 changes: 23 additions & 16 deletions internal/lsp/debug/trace.go
@@ -119,20 +119,15 @@ func formatEvent(ctx context.Context, ev core.Event, lm label.Map) string {
}

func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context {
t.mu.Lock()
defer t.mu.Unlock()
span := export.GetSpan(ctx)
if span == nil {
return ctx
}

switch {
case event.IsStart(ev):
if t.sets == nil {
t.sets = make(map[string]*traceSet)
t.unfinished = make(map[export.SpanContext]*traceData)
}
// just starting, add it to the unfinished map
// Just starting: add it to the unfinished map.
// Allocate before the critical section.
td := &traceData{
TraceID: span.ID.TraceID,
SpanID: span.ID.SpanID,
@@ -141,6 +136,13 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map)
Start: span.Start().At(),
Tags: renderLabels(span.Start()),
}

t.mu.Lock()
defer t.mu.Unlock()
if t.sets == nil {
t.sets = make(map[string]*traceSet)
t.unfinished = make(map[export.SpanContext]*traceData)
}
t.unfinished[span.ID] = td
// and wire up parents if we have them
if !span.ParentID.IsValid() {
@@ -155,7 +157,19 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map)
parent.Children = append(parent.Children, td)

case event.IsEnd(ev):
// finishing, must be already in the map
// Finishing: must be already in the map.
// Allocate events before the critical section.
events := span.Events()
tdEvents := make([]traceEvent, len(events))
for i, event := range events {
tdEvents[i] = traceEvent{
Time: event.At(),
Tags: renderLabels(event),
}
}

t.mu.Lock()
defer t.mu.Unlock()
td, found := t.unfinished[span.ID]
if !found {
return ctx // if this happens we are in a bad place
@@ -164,14 +178,7 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map)

td.Finish = span.Finish().At()
td.Duration = span.Finish().At().Sub(span.Start().At())
events := span.Events()
td.Events = make([]traceEvent, len(events))
for i, event := range events {
td.Events[i] = traceEvent{
Time: event.At(),
Tags: renderLabels(event),
}
}
td.Events = tdEvents

set, ok := t.sets[span.Name]
if !ok {
6 changes: 5 additions & 1 deletion internal/memoize/memoize.go
@@ -181,8 +181,9 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter
if atomic.LoadUint32(&g.destroyed) != 0 {
panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy)
}

// Avoid 'defer Unlock' to reduce critical section.
g.store.mu.Lock()
defer g.store.mu.Unlock()
h, ok := g.store.handles[key]
if !ok {
h := &Handle{
@@ -192,8 +193,11 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter
cleanup: cleanup,
}
g.store.handles[key] = h
g.store.mu.Unlock()
return h
}
g.store.mu.Unlock()

h.mu.Lock()
defer h.mu.Unlock()
if _, ok := h.generations[g]; !ok {
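The memoize.go hunks replace `defer Unlock` with an explicit unlock on every return path, so the per-handle work below runs outside the store-wide lock. A sketch of the same shape, with a hypothetical cache/entry pair standing in for the real store and Handle:

```go
// Illustrative sketch; cache and entry are hypothetical, not the memoize API.
package main

import (
	"fmt"
	"sync"
)

type entry struct {
	mu    sync.Mutex
	value int
}

type cache struct {
	mu      sync.Mutex
	entries map[string]*entry
}

// bind returns the entry for key, creating it if needed. Avoiding
// `defer c.mu.Unlock()` means each path releases the store lock as
// soon as the map is consistent, and the per-entry update then runs
// under the narrower entry lock only.
func (c *cache) bind(key string) *entry {
	c.mu.Lock()
	e, ok := c.entries[key]
	if !ok {
		e = &entry{}
		c.entries[key] = e
		c.mu.Unlock()
		return e
	}
	c.mu.Unlock()

	e.mu.Lock()
	e.value++ // per-entry bookkeeping, outside the store lock
	e.mu.Unlock()
	return e
}

func main() {
	c := &cache{entries: make(map[string]*entry)}
	fmt.Println(c.bind("k") == c.bind("k")) // true: same entry both times
}
```

The tradeoff is that every early return must remember to unlock, which is what the `defer` previously guaranteed; the commit accepts that cost to shorten the critical section.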
