/
listener.go
178 lines (151 loc) · 4.64 KB
/
listener.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
package upgrader
import (
"context"
"fmt"
"sync"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/transport"
logging "github.com/ipfs/go-log/v2"
tec "github.com/jbenet/go-temp-err-catcher"
manet "github.com/multiformats/go-multiaddr/net"
)
var log = logging.Logger("upgrader")
// listener wraps a raw manet.Listener and upgrades every inbound connection
// (security + muxer negotiation) before exposing it through Accept.
type listener struct {
	manet.Listener
	// transport is the transport this listener belongs to; passed through to Upgrade.
	transport transport.Transport
	// upgrader performs the security/muxer negotiation and holds the
	// connection gater and accept timeout used by handleIncoming.
	upgrader *upgrader
	// rcmgr admits or rejects new inbound connections via OpenConnection.
	rcmgr network.ResourceManager
	// incoming carries fully-upgraded connections from handleIncoming to
	// Accept. It is closed by handleIncoming when the accept loop exits.
	incoming chan transport.CapableConn
	// err records why the accept loop stopped; Accept returns it once
	// incoming has been drained and closed.
	err error
	// Used for backpressure
	threshold *threshold
	// Canceling this context isn't sufficient to tear down the listener.
	// Call close.
	ctx context.Context
	cancel func()
}
// Close closes the listener.
//
// It closes the underlying manet.Listener first (so any meaningful error is
// captured), cancels the listener context to stop in-flight upgrades, then
// drains l.incoming, closing every connection that was upgraded but never
// accepted. The drain loop terminates because handleIncoming closes
// l.incoming once its accept loop and all upgrade goroutines have finished.
func (l *listener) Close() error {
	// Do this first to try to get any relevant errors.
	err := l.Listener.Close()
	l.cancel()
	// Drain and wait.
	for c := range l.incoming {
		c.Close()
	}
	return err
}
// handleIncoming runs the accept loop for this listener: it accepts raw
// connections, applies the connection gater and resource manager, and
// upgrades accepted connections in parallel goroutines, delivering the
// results to Accept via l.incoming.
//
// This function does a few interesting things that should be noted:
//
//  1. It logs and discards temporary/transient errors (errors with a Temporary()
//     function that returns true).
//  2. It stops accepting new connections once AcceptQueueLength connections have
//     been fully negotiated but not accepted. This gives us a basic backpressure
//     mechanism while still allowing us to negotiate connections in parallel.
func (l *listener) handleIncoming() {
	var wg sync.WaitGroup
	defer func() {
		// make sure we're closed
		l.Listener.Close()
		if l.err == nil {
			l.err = fmt.Errorf("listener closed")
		}
		// Wait for all in-flight upgrade goroutines before closing
		// l.incoming: only the sender may close the channel.
		wg.Wait()
		close(l.incoming)
	}()
	var catcher tec.TempErrCatcher
	for l.ctx.Err() == nil {
		maconn, err := l.Listener.Accept()
		if err != nil {
			// Note: function may pause the accept loop.
			if catcher.IsTemporary(err) {
				log.Infof("temporary accept error: %s", err)
				continue
			}
			// A permanent accept error ends the loop; record it for Accept.
			l.err = err
			return
		}
		catcher.Reset()
		// gate the connection if applicable
		if l.upgrader.connGater != nil && !l.upgrader.connGater.InterceptAccept(maconn) {
			log.Debugf("gater blocked incoming connection on local addr %s from %s",
				maconn.LocalMultiaddr(), maconn.RemoteMultiaddr())
			if err := maconn.Close(); err != nil {
				log.Warnf("failed to close incoming connection rejected by gater: %s", err)
			}
			continue
		}
		connScope, err := l.rcmgr.OpenConnection(network.DirInbound, true, maconn.RemoteMultiaddr())
		if err != nil {
			log.Debugw("resource manager blocked accept of new connection", "error", err)
			// BUGFIX: message previously read "failed to incoming connection ..."
			// (missing "close"); now matches the gater message above.
			if err := maconn.Close(); err != nil {
				log.Warnf("failed to close incoming connection rejected by resource manager: %s", err)
			}
			continue
		}
		// The goroutine below calls Release when the context is
		// canceled so there's no need to wait on it here.
		l.threshold.Wait()
		log.Debugf("listener %s got connection: %s <---> %s",
			l,
			maconn.LocalMultiaddr(),
			maconn.RemoteMultiaddr())
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Bound the whole upgrade by the accept timeout, and tie it to
			// the listener context so Close cancels in-flight upgrades.
			ctx, cancel := context.WithTimeout(l.ctx, l.upgrader.acceptTimeout)
			defer cancel()
			conn, err := l.upgrader.Upgrade(ctx, l.transport, maconn, network.DirInbound, "", connScope)
			if err != nil {
				// Don't bother bubbling this up. We just failed
				// to completely negotiate the connection.
				log.Debugf("accept upgrade error: %s (%s <--> %s)",
					err,
					maconn.LocalMultiaddr(),
					maconn.RemoteMultiaddr())
				connScope.Done()
				return
			}
			log.Debugf("listener %s accepted connection: %s", l, conn)
			// This records the fact that the connection has been
			// setup and is waiting to be accepted. This call
			// *never* blocks, even if we go over the threshold. It
			// simply ensures that calls to Wait block while we're
			// over the threshold.
			l.threshold.Acquire()
			defer l.threshold.Release()
			select {
			case l.incoming <- conn:
			case <-ctx.Done():
				if l.ctx.Err() == nil {
					// Listener *not* closed but the accept timeout expired.
					log.Warn("listener dropped connection due to slow accept")
				}
				// Wait on the context with a timeout. This way,
				// if we stop accepting connections for some reason,
				// we'll eventually close all the open ones
				// instead of hanging onto them.
				conn.Close()
			}
		}()
	}
}
// Accept returns the next fully-negotiated inbound connection, discarding any
// that were closed while waiting in the queue. Once the accept loop has
// exited and the queue is drained, it returns the recorded listener error.
func (l *listener) Accept() (transport.CapableConn, error) {
	for {
		conn, ok := <-l.incoming
		if !ok {
			// Channel closed: handleIncoming has exited; report why.
			return nil, l.err
		}
		// Could have been sitting there for a while.
		if conn.IsClosed() {
			continue
		}
		return conn, nil
	}
}
// String returns a human-readable description of the listener, including the
// transport's own Stringer output when the transport provides one.
func (l *listener) String() string {
	str, ok := l.transport.(fmt.Stringer)
	if !ok {
		return fmt.Sprintf("<stream.Listener %s>", l.Multiaddr())
	}
	return fmt.Sprintf("<stream.Listener[%s] %s>", str, l.Multiaddr())
}
// Compile-time assertion that *listener satisfies transport.Listener.
var _ transport.Listener = (*listener)(nil)