
Commit fe5c272
fix:'go routine' should be 'goroutine'
Signed-off-by: yulng <wei.yang@daocloud.io>
yulng committed Jan 3, 2023
1 parent 17ad4e1 commit fe5c272
Showing 21 changed files with 37 additions and 37 deletions.
2 changes: 1 addition & 1 deletion daemon/cmd/daemon.go
@@ -734,7 +734,7 @@ func newDaemon(ctx context.Context, cleaner *daemonCleanup,

// Stop all endpoints (its goroutines) on exit.
cleaner.cleanupFuncs.Add(func() {
log.Info("Waiting for all endpoints' go routines to be stopped.")
log.Info("Waiting for all endpoints' goroutines to be stopped.")
var wg sync.WaitGroup

eps := d.endpointManager.GetEndpoints()
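The cleanup hook above blocks until every endpoint's goroutines have stopped. A minimal sketch of that wait-on-shutdown pattern with sync.WaitGroup, using a hypothetical stopWorker helper in place of Cilium's endpoint manager:

package main

import (
	"fmt"
	"sync"
)

// stopWorker is a stand-in for stopping one endpoint's goroutines.
func stopWorker(id int) {
	fmt.Printf("worker %d stopped\n", id)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			stopWorker(id)
		}(i)
	}
	// Block until every stop goroutine has finished, mirroring the
	// cleanup function registered in newDaemon.
	wg.Wait()
	fmt.Println("all workers stopped")
}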
4 changes: 2 additions & 2 deletions operator/cmd/cilium_node.go
@@ -88,7 +88,7 @@ func (s *ciliumNodeSynchronizer) Start(ctx context.Context, wg *sync.WaitGroup)
if s.withKVStore {
// Connect to the KVStore asynchronously so that we are able to start
// the operator without relying on the KVStore to be up.
// Start a go routine to GC all CiliumNodes from the KVStore that are
// Start a goroutine to GC all CiliumNodes from the KVStore that are
// no longer running.
wg.Add(1)
go func() {
@@ -250,7 +250,7 @@ func (s *ciliumNodeSynchronizer) Start(ctx context.Context, wg *sync.WaitGroup)
// then there isn't any event handler set for CiliumNodes events.
if nodeManagerSyncHandler != nil {
go func() {
// infinite loop. run in a go routine to unblock code execution
// infinite loop. run in a goroutine to unblock code execution
for s.processNextWorkItem(ciliumNodeManagerQueue, nodeManagerSyncHandler) {
}
}()
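The comment above describes a common worker-loop pattern: an otherwise infinite loop runs in its own goroutine so the caller is not blocked. A rough sketch under the assumption of a simple channel-based queue (the real code drains a client-go workqueue):

package main

import "fmt"

// processNextWorkItem is a simplified stand-in for the sync handler loop:
// it returns false once the queue channel has been closed and drained.
func processNextWorkItem(queue <-chan string, handle func(string)) bool {
	key, ok := <-queue
	if !ok {
		return false
	}
	handle(key)
	return true
}

func main() {
	queue := make(chan string, 3)
	queue <- "node-a"
	queue <- "node-b"
	close(queue)

	done := make(chan struct{})
	go func() {
		// Infinite loop, run in a goroutine to unblock the caller,
		// mirroring the ciliumNodeSynchronizer above.
		for processNextWorkItem(queue, func(key string) { fmt.Println("synced", key) }) {
		}
		close(done)
	}()
	<-done
}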
2 changes: 1 addition & 1 deletion pkg/endpoint/api.go
@@ -518,7 +518,7 @@ func (e *Endpoint) ProcessChangeRequest(newEp *Endpoint, validPatchTransitionSta
e.replaceInformationLabels(newEp.OpLabels.OrchestrationInfo)
rev := e.replaceIdentityLabels(newEp.OpLabels.IdentityLabels())
if rev != 0 {
// Run as a go routine since the runIdentityResolver needs to get the lock
// Run as a goroutine since the runIdentityResolver needs to get the lock
go e.runIdentityResolver(e.aliveCtx, rev, false)
}

2 changes: 1 addition & 1 deletion pkg/fswatcher/fswatcher.go
@@ -86,7 +86,7 @@ func New(trackedFiles []string) (*Watcher, error) {
}

// We add all paths in the constructor avoid the need for additional
// synchronization, as the loop go routine below will call updateWatchedPath
// synchronization, as the loop goroutine below will call updateWatchedPath
// concurrently
for _, f := range trackedFiles {
err := w.updateWatchedPath(f)
4 changes: 2 additions & 2 deletions pkg/hubble/container/ring_reader.go
@@ -121,10 +121,10 @@ func (r *RingReader) NextFollow(ctx context.Context) *v1.Event {
}
}

// Close waits for any spawned go routines to finish. It is not
// Close waits for any spawned goroutines to finish. It is not
// required to call Close on a RingReader but it may be useful for specific
// situations such as testing. Must not be called concurrently with NextFollow,
// as otherwise NextFollow spawns new go routines that are not waited on.
// as otherwise NextFollow spawns new goroutines that are not waited on.
func (r *RingReader) Close() error {
r.wg.Wait()
return nil
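A sketch of the Close contract documented above: the reader owns a sync.WaitGroup, each spawned goroutine is registered on it, and Close simply waits. Type and method names here (follower, start) are illustrative, not the RingReader API:

package main

import (
	"fmt"
	"sync"
)

type follower struct {
	wg sync.WaitGroup
}

// start spawns a background goroutine and registers it on the wait group.
func (f *follower) start() {
	f.wg.Add(1)
	go func() {
		defer f.wg.Done()
		fmt.Println("following events")
	}()
}

// Close blocks until every spawned goroutine has returned. It must not be
// called concurrently with start, or a newly spawned goroutine may not be
// waited on.
func (f *follower) Close() error {
	f.wg.Wait()
	return nil
}

func main() {
	f := &follower{}
	f.start()
	_ = f.Close()
}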
4 changes: 2 additions & 2 deletions pkg/hubble/container/ring_reader_test.go
@@ -201,7 +201,7 @@ func TestRingReader_NextLost(t *testing.T) {
func TestRingReader_NextFollow(t *testing.T) {
defer goleak.VerifyNone(
t,
// ignore go routines started by the redirect we do from klog to logrus
// ignore goroutines started by the redirect we do from klog to logrus
goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("io.(*pipe).read"))
@@ -279,7 +279,7 @@ func TestRingReader_NextFollow(t *testing.T) {
func TestRingReader_NextFollow_WithEmptyRing(t *testing.T) {
defer goleak.VerifyNone(
t,
// ignore go routines started by the redirect we do from klog to logrus
// ignore goroutines started by the redirect we do from klog to logrus
goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("io.(*pipe).read"))
6 changes: 3 additions & 3 deletions pkg/hubble/container/ring_test.go
@@ -713,7 +713,7 @@ func TestRingFunctionalitySerialized(t *testing.T) {
func TestRing_ReadFrom_Test_1(t *testing.T) {
defer goleak.VerifyNone(
t,
// ignore go routines started by the redirect we do from klog to logrus
// ignore goroutines started by the redirect we do from klog to logrus
goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("io.(*pipe).read"))
@@ -774,7 +774,7 @@ func TestRing_ReadFrom_Test_1(t *testing.T) {
func TestRing_ReadFrom_Test_2(t *testing.T) {
defer goleak.VerifyNone(
t,
// ignore go routines started by the redirect we do from klog to logrus
// ignore goroutines started by the redirect we do from klog to logrus
goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("io.(*pipe).read"))
@@ -873,7 +873,7 @@ func TestRing_ReadFrom_Test_2(t *testing.T) {
func TestRing_ReadFrom_Test_3(t *testing.T) {
defer goleak.VerifyNone(
t,
// ignore go routines started by the redirect we do from klog to logrus
// ignore goroutines started by the redirect we do from klog to logrus
goleak.IgnoreTopFunction("k8s.io/klog.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("k8s.io/klog/v2.(*loggingT).flushDaemon"),
goleak.IgnoreTopFunction("io.(*pipe).read"))
4 changes: 2 additions & 2 deletions pkg/hubble/observer/local_observer.go
@@ -105,10 +105,10 @@ func NewLocalServer(

// Start implements GRPCServer.Start.
func (s *LocalObserverServer) Start() {
// We use a cancellation context here so that any Go routines spawned in the
// We use a cancellation context here so that any goroutines spawned in the
// OnMonitorEvent/OnDecodedFlow/OnDecodedEvent hooks have a signal for cancellation.
// When Start() returns, the deferred cancel() will run and we expect hooks
// to stop any Go routines that may have spawned by listening to the
// to stop any goroutines that may have spawned by listening to the
// ctx.Done() channel for the stop signal.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
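A minimal sketch of the cancellation pattern described above, assuming a hypothetical onEvent hook: goroutines spawned by hooks watch ctx.Done() and exit once the deferred cancel() fires:

package main

import (
	"context"
	"fmt"
	"sync"
)

// onEvent is a stand-in for an OnDecodedEvent-style hook that spawns work.
func onEvent(ctx context.Context, wg *sync.WaitGroup) {
	wg.Add(1)
	go func() {
		defer wg.Done()
		<-ctx.Done() // stop signal from the deferred cancel()
		fmt.Println("hook goroutine stopped")
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup

	onEvent(ctx, &wg)

	cancel()  // what the deferred cancel() does when Start() returns
	wg.Wait() // hooks are expected to have stopped their goroutines by now
}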
4 changes: 2 additions & 2 deletions pkg/hubble/recorder/service.go
@@ -113,7 +113,7 @@ func (s *Service) Record(stream recorderpb.Recorder_RecordServer) error {
ctx, cancel := context.WithCancel(stream.Context())
defer cancel()

// Spawn a go routine that forwards any received messages in order to be
// Spawn a goroutine that forwards any received messages in order to be
// able to use select on it
reqCh := make(chan *recorderpb.RecordRequest)
errCh := make(chan error, 1)
@@ -147,7 +147,7 @@ func (s *Service) Record(stream recorderpb.Recorder_RecordServer) error {
return fmt.Errorf("received invalid request %q, expected start request", req)
}

// The startRecording helper spawns a clean up go routine to remove all
// The startRecording helper spawns a clean up goroutine to remove all
// state associated with this recording when the context ctx is cancelled.
recording, filePath, err = s.startRecording(ctx, startRecording)
if err != nil {
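The forwarding pattern mentioned above, sketched with a plain recv function standing in for stream.Recv(): a goroutine pushes received messages into a channel so the main loop can select over requests, errors, and cancellation:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

// recv stands in for stream.Recv(); here it immediately reports end of stream.
func recv() (string, error) {
	return "", io.EOF
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	reqCh := make(chan string)
	errCh := make(chan error, 1)

	// Forward received messages so the select below can wait on them
	// alongside errors and context cancellation.
	go func() {
		for {
			req, err := recv()
			if err != nil {
				errCh <- err
				return
			}
			reqCh <- req
		}
	}()

	select {
	case req := <-reqCh:
		fmt.Println("got request:", req)
	case err := <-errCh:
		if errors.Is(err, io.EOF) {
			fmt.Println("stream closed")
		}
	case <-ctx.Done():
		fmt.Println("cancelled")
	}
}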
2 changes: 1 addition & 1 deletion pkg/hubble/recorder/sink/sink.go
@@ -26,7 +26,7 @@ type sink struct {
lastError error
}

// startSink creates a queue and go routine for the sink. The spawned go
// startSink creates a queue and goroutine for the sink. The spawned go
// routine will run until one of the following happens:
// - sink.stop is called
// - a p.StopCondition is reached
2 changes: 1 addition & 1 deletion pkg/idpool/idpool_race_test.go
@@ -12,7 +12,7 @@ import (
// TestAllocateID without the race detection enabled is too slow to run with
// race detector set. Thus, we need to put it in a separate file so the unit
// tests don't time out while running with race detector by having a lower
// number of parallel go routines than it would have been if we ran it without
// number of parallel goroutines than it would have been if we ran it without
// the race detector.
func (s *IDPoolTestSuite) TestAllocateID(c *C) {
s.testAllocatedID(c, 5)
2 changes: 1 addition & 1 deletion pkg/idpool/idpool_test.go
@@ -311,7 +311,7 @@ func (s *IDPoolTestSuite) testAllocatedID(c *C, nGoRoutines int) {
bufferChannelSize := 100
minID, maxID := 1, 6000
if maxID-minID < nGoRoutines+bufferChannelSize {
panic(fmt.Sprintf("Number of go routines and size of the buffered channel (%d) "+
panic(fmt.Sprintf("Number of goroutines and size of the buffered channel (%d) "+
"should be lower than the number of IDs to be tested (%d)",
nGoRoutines+bufferChannelSize, maxID-minID))
}
4 changes: 2 additions & 2 deletions pkg/ipam/node_manager.go
@@ -216,7 +216,7 @@ func (n *NodeManager) instancesAPIResync(ctx context.Context) (time.Time, bool)
}

// Start kicks of the NodeManager by performing the initial state
// synchronization and starting the background sync go routine
// synchronization and starting the background sync goroutine
func (n *NodeManager) Start(ctx context.Context) error {
// Trigger the initial resync in a blocking manner
if _, ok := n.instancesAPIResync(ctx); !ok {
@@ -502,7 +502,7 @@ func (n *NodeManager) Resync(ctx context.Context, syncTime time.Time) {
}(node, &stats)
}

// Acquire the full semaphore, this requires all go routines to
// Acquire the full semaphore, this requires all goroutines to
// complete and thus blocks until all nodes are synced
sem.Acquire(ctx, n.parallelWorkers)
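A sketch of the semaphore trick used above: every worker goroutine holds one unit of a weighted semaphore, so acquiring the full weight afterwards blocks until all workers have released, i.e. until every node is synced. This assumes golang.org/x/sync/semaphore; the worker body is a placeholder:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	const parallelWorkers = 4
	ctx := context.Background()
	sem := semaphore.NewWeighted(parallelWorkers)

	for i := 0; i < parallelWorkers; i++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			break
		}
		go func(id int) {
			defer sem.Release(1)
			fmt.Println("resyncing node", id) // placeholder for per-node sync work
		}(i)
	}

	// Acquiring the full weight blocks until every worker has released its
	// unit, so this acts as a wait-for-all barrier.
	_ = sem.Acquire(ctx, parallelWorkers)
	fmt.Println("all nodes synced")
}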

8 changes: 4 additions & 4 deletions pkg/k8s/client/client_test.go
@@ -57,7 +57,7 @@ func (s *K8sClientSuite) Test_runHeartbeat(c *C) {
)

// We need to polling for the condition instead of using a time.After to
// give the opportunity for scheduler to run the go routine inside runHeartbeat
// give the opportunity for scheduler to run the goroutine inside runHeartbeat
err := testutils.WaitUntil(func() bool {
select {
case <-called:
@@ -96,7 +96,7 @@ func (s *K8sClientSuite) Test_runHeartbeat(c *C) {
)

// We need to polling for the condition instead of using a time.After to
// give the opportunity for scheduler to run the go routine inside runHeartbeat
// give the opportunity for scheduler to run the goroutine inside runHeartbeat
err = testutils.WaitUntil(func() bool {
select {
case <-called:
@@ -151,7 +151,7 @@ func (s *K8sClientSuite) Test_runHeartbeat(c *C) {
)

// We need to polling for the condition instead of using a time.After to
// give the opportunity for scheduler to run the go routine inside runHeartbeat
// give the opportunity for scheduler to run the goroutine inside runHeartbeat
err = testutils.WaitUntil(func() bool {
select {
case <-called:
@@ -186,7 +186,7 @@ func (s *K8sClientSuite) Test_runHeartbeat(c *C) {
)

// We need to polling for the condition instead of using a time.After to
// give the opportunity for scheduler to run the go routine inside runHeartbeat
// give the opportunity for scheduler to run the goroutine inside runHeartbeat
err = testutils.WaitUntil(func() bool {
select {
case <-called:
4 changes: 2 additions & 2 deletions pkg/lock/lock_debug.go
@@ -112,8 +112,8 @@ func printStackTo(sec float64, stack []byte, writer io.Writer) {
// runtime/debug.Stack(0xc424c4a370, 0xc421f7f750, 0x1)
// /usr/local/go/src/runtime/debug/stack.go:24 +0xa7
// ...
// To know which trace belongs to which go routine we will append the
// go routine number to every line of the stack trace.
// To know which trace belongs to which goroutine we will append the
// goroutine number to every line of the stack trace.
writer.Write(bytes.Replace(
stack,
[]byte{'\n'},
6 changes: 3 additions & 3 deletions pkg/lock/stoppable_waitgroup.go
@@ -58,7 +58,7 @@ func (l *StoppableWaitGroup) WaitChannel() <-chan struct{} {
return l.noopDone
}

// Add adds the go routine to the list of routines to that Wait() will have
// Add adds the goroutine to the list of routines to that Wait() will have
// to wait before it returns.
// If the StoppableWaitGroup was stopped this will be a no-op.
func (l *StoppableWaitGroup) Add() {
@@ -69,9 +69,9 @@ func (l *StoppableWaitGroup) Add() {
}
}

// Done will decrement the number of go routines the Wait() will have to wait
// Done will decrement the number of goroutines the Wait() will have to wait
// before it returns.
// This function is a no-op once all go routines that have called 'Add()' have
// This function is a no-op once all goroutines that have called 'Add()' have
// also called 'Done()' and the StoppableWaitGroup was stopped.
func (l *StoppableWaitGroup) Done() {
select {
2 changes: 1 addition & 1 deletion pkg/maps/ctmap/ctmap.go
@@ -491,7 +491,7 @@ func doGC4(m *Map, filter *GCFilter) gcStats {
}

// We serialize the deletions in order to avoid forced map walk restarts
// when keys are being evicted underneath us from concurrent go routines.
// when keys are being evicted underneath us from concurrent goroutines.
globalDeleteLock[m.mapType].Lock()
stats.dumpError = m.DumpReliablyWithCallback(filterCallback, stats.DumpStats)
globalDeleteLock[m.mapType].Unlock()
2 changes: 1 addition & 1 deletion pkg/monitor/agent/agent.go
@@ -75,7 +75,7 @@ type Agent struct {
// Internally, the agent spawns a singleton goroutine reading events from
// the BPF perf ring buffer and provides an interface to pass in non-BPF events.
// The instance can be stopped by cancelling ctx, which will stop the perf reader
// go routine and close all registered listeners.
// goroutine and close all registered listeners.
// Note that the perf buffer reader is started only when listeners are
// connected.
func NewAgent(ctx context.Context) *Agent {
2 changes: 1 addition & 1 deletion pkg/node/manager/manager.go
@@ -711,7 +711,7 @@ func (m *Manager) StartNeighborRefresh(nh datapath.NodeHandler) {
controller.NewManager().UpdateController("neighbor-table-refresh",
controller.ControllerParams{
DoFunc: func(controllerCtx context.Context) error {
// Cancel previous go routines from previous controller run
// Cancel previous goroutines from previous controller run
cancel()
ctx, cancel = context.WithCancel(controllerCtx)
m.mutex.RLock()
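A sketch of the cancel-and-recreate pattern in this controller: each run cancels the context handed to the goroutines of the previous run before spawning new ones. The run closure below is a stand-in for the controller's DoFunc:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	run := func(parent context.Context, iteration int) {
		// Cancel goroutines spawned by the previous run, then hand the new
		// goroutine a fresh context, as the neighbor-refresh controller does.
		cancel()
		ctx, cancel = context.WithCancel(parent)

		go func(ctx context.Context, i int) {
			<-ctx.Done()
			fmt.Printf("run %d: refresh goroutine stopped\n", i)
		}(ctx, iteration)
	}

	run(context.Background(), 1)
	run(context.Background(), 2) // cancels the goroutine left over from run 1
	time.Sleep(100 * time.Millisecond)

	cancel() // stop the goroutine from run 2 as well
	time.Sleep(100 * time.Millisecond)
}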
2 changes: 1 addition & 1 deletion pkg/policy/identifier.go
@@ -46,7 +46,7 @@ func NewEndpointSet(m map[Endpoint]struct{}) *EndpointSet {
}
}

// ForEachGo runs epFunc asynchronously inside a go routine for each endpoint in
// ForEachGo runs epFunc asynchronously inside a goroutine for each endpoint in
// the EndpointSet. It signals to the provided WaitGroup when epFunc has been
// executed for each endpoint.
func (e *EndpointSet) ForEachGo(wg *sync.WaitGroup, epFunc func(epp Endpoint)) {
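The fan-out pattern ForEachGo implements, sketched with a plain string slice instead of Cilium's Endpoint type: one goroutine per element, each signalling the caller's WaitGroup when it finishes:

package main

import (
	"fmt"
	"sync"
)

// forEachGo runs fn asynchronously in a goroutine for every element and
// signals wg as each call finishes, in the spirit of EndpointSet.ForEachGo.
func forEachGo(wg *sync.WaitGroup, items []string, fn func(string)) {
	wg.Add(len(items))
	for _, it := range items {
		go func(it string) {
			defer wg.Done()
			fn(it)
		}(it)
	}
}

func main() {
	var wg sync.WaitGroup
	forEachGo(&wg, []string{"ep1", "ep2", "ep3"}, func(name string) {
		fmt.Println("regenerating", name)
	})
	wg.Wait() // the caller decides when to wait
}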
6 changes: 3 additions & 3 deletions pkg/wireguard/agent/agent_test.go
@@ -134,7 +134,7 @@ func (a *AgentSuite) TestAgent_PeerConfig(c *C) {
close(agentUpdated)
}()

// wait for the above go routine to be scheduled
// wait for the above goroutine to be scheduled
<-agentUpdatePending

ipCacheUpdated := make(chan struct{})
@@ -153,10 +153,10 @@ func (a *AgentSuite) TestAgent_PeerConfig(c *C) {
close(ipCacheUpdated)
}()

// wait for the above go routine to be scheduled
// wait for the above goroutine to be scheduled
<-ipCacheUpdatePending

// At this point we know both go routines have been scheduled. We assume
// At this point we know both goroutines have been scheduled. We assume
// that they are now both blocked by checking they haven't closed the
// channel yet. Thus once release the lock we expect them to make progress
select {