From 386020880a2062b5b53c803aab7642e9b0db90a6 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Fri, 19 Apr 2024 21:17:10 +0800 Subject: [PATCH 01/12] feat: support edge-triggered I/O (#576) Fixes #573 --- acceptor_unix.go | 12 +- client_test.go | 178 +++++++++++---- connection_bsd.go | 40 ++-- connection_linux.go | 58 +++-- connection_unix.go | 137 +++++++----- connection_windows.go | 4 +- engine_unix.go | 34 ++- eventloop_unix.go | 76 ++++--- gnet.go | 70 +++--- gnet_test.go | 206 ++++++++++++------ .../{poll_data_unix.go => defs_poller.go} | 0 .../{epoll_events.go => defs_poller_epoll.go} | 7 +- ...kqueue_events.go => defs_poller_kqueue.go} | 11 +- internal/netpoll/poll_data_bsd.go | 21 -- internal/netpoll/poll_data_linux.go | 18 -- ...ault_poller.go => poller_epoll_default.go} | 70 ++++-- ...zed_poller.go => poller_epoll_ultimate.go} | 55 +++-- ...ult_poller.go => poller_kqueue_default.go} | 36 ++- ...ed_poller.go => poller_kqueue_ultimate.go} | 22 +- internal/{netpoll => socket}/fd_unix.go | 2 +- listener_unix.go | 2 +- options.go | 14 +- pkg/buffer/elastic/elastic_buffer_test.go | 15 +- .../elastic/elastic_ring_list_buffer.go | 10 +- pkg/buffer/linkedlist/linked_list_buffer.go | 74 +++++-- pkg/buffer/linkedlist/llbuffer_test.go | 10 +- reactor_default_bsd.go | 127 ----------- reactor_default_linux.go | 134 ------------ reactor_epoll_default.go | 189 ++++++++++++++++ ...ized_linux.go => reactor_epoll_ultimate.go | 20 +- reactor_kqueue_default.go | 166 ++++++++++++++ ...mized_bsd.go => reactor_kqueue_ultimate.go | 16 +- 32 files changed, 1159 insertions(+), 675 deletions(-) rename internal/netpoll/{poll_data_unix.go => defs_poller.go} (100%) rename internal/netpoll/{epoll_events.go => defs_poller_epoll.go} (89%) rename internal/netpoll/{kqueue_events.go => defs_poller_kqueue.go} (83%) delete mode 100644 internal/netpoll/poll_data_bsd.go delete mode 100644 internal/netpoll/poll_data_linux.go rename internal/netpoll/{epoll_default_poller.go => poller_epoll_default.go} (81%) rename internal/netpoll/{epoll_optimized_poller.go => poller_epoll_ultimate.go} (85%) rename internal/netpoll/{kqueue_default_poller.go => poller_kqueue_default.go} (86%) rename internal/netpoll/{kqueue_optimized_poller.go => poller_kqueue_ultimate.go} (92%) rename internal/{netpoll => socket}/fd_unix.go (99%) delete mode 100644 reactor_default_bsd.go delete mode 100644 reactor_default_linux.go create mode 100644 reactor_epoll_default.go rename reactor_optimized_linux.go => reactor_epoll_ultimate.go (67%) create mode 100644 reactor_kqueue_default.go rename reactor_optimized_bsd.go => reactor_kqueue_ultimate.go (70%) diff --git a/acceptor_unix.go b/acceptor_unix.go index 13177a8b8..10467b4e6 100644 --- a/acceptor_unix.go +++ b/acceptor_unix.go @@ -34,7 +34,7 @@ func (eng *engine) accept1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { if err != nil { switch err { case unix.EINTR, unix.EAGAIN, unix.ECONNABORTED: - // ECONNABORTED means that a socket on the listen + // ECONNABORTED indicates that a socket on the listen // queue was closed before we Accept()ed it; // it's a silly error, so try again. 
return nil @@ -66,11 +66,11 @@ func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) return el.readUDP1(fd, ev, flags) } - nfd, sa, err := socket.Accept(el.ln.fd) + nfd, sa, err := socket.Accept(fd) if err != nil { switch err { case unix.EINTR, unix.EAGAIN, unix.ECONNABORTED: - // ECONNABORTED means that a socket on the listen + // ECONNABORTED indicates that a socket on the listen // queue was closed before we Accept()ed it; // it's a silly error, so try again. return nil @@ -87,7 +87,11 @@ func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) } c := newTCPConn(nfd, el, sa, el.ln.addr, remoteAddr) - if err = el.poller.AddRead(&c.pollAttachment); err != nil { + addEvents := el.poller.AddRead + if el.engine.opts.EdgeTriggeredIO { + addEvents = el.poller.AddReadWrite + } + if err = addEvents(&c.pollAttachment, el.engine.opts.EdgeTriggeredIO); err != nil { return err } el.connections.addConn(c, el.idx) diff --git a/client_test.go b/client_test.go index 27c2b9ff5..3aa3d1fed 100644 --- a/client_test.go +++ b/client_test.go @@ -31,7 +31,7 @@ type connHandler struct { type clientEvents struct { *BuiltinEventEngine tester *testing.T - svr *testClientServer + svr *testClient packetLen int } @@ -87,117 +87,219 @@ func (ev *clientEvents) OnShutdown(e Engine) { assert.EqualValuesf(ev.tester, fd, -1, "expected -1, but got: %d", fd) } -func TestServeWithGnetClient(t *testing.T) { +func TestClient(t *testing.T) { // start an engine // connect 10 clients // each client will pipe random data for 1-3 seconds. // the writes to the engine will be random sizes. 0KB - 1MB. // the engine will echo back the data. // waits for graceful connection closing. - t.Run("poll", func(t *testing.T) { + t.Run("poll-LT", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "tcp", ":9991", false, false, false, false, 10, RoundRobin) + runClient(t, "tcp", ":9991", false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "tcp", ":9992", false, false, true, false, 10, LeastConnections) + runClient(t, "tcp", ":9992", false, false, true, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "tcp", ":9991", false, false, false, true, 10, RoundRobin) + runClient(t, "tcp", ":9991", false, false, false, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "tcp", ":9992", false, false, true, true, 10, LeastConnections) + runClient(t, "tcp", ":9992", false, false, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "udp", ":9991", false, false, false, false, 10, RoundRobin) + runClient(t, "udp", ":9991", false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "udp", ":9992", false, false, true, false, 10, LeastConnections) + runClient(t, "udp", ":9992", false, false, true, false, 10, LeastConnections) }) }) t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "udp", ":9991", false, false, false, true, 10, RoundRobin) + runClient(t, "udp", ":9991", false, false, false, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "udp", ":9992", false, false, true, true, 10, LeastConnections) + runClient(t, "udp", 
":9992", false, false, true, true, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "unix", "gnet1.sock", false, false, false, false, 10, RoundRobin) + runClient(t, "unix", "gnet1.sock", false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "unix", "gnet2.sock", false, false, true, false, 10, SourceAddrHash) + runClient(t, "unix", "gnet2.sock", false, false, true, false, 10, SourceAddrHash) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "unix", "gnet1.sock", false, false, false, true, 10, RoundRobin) + runClient(t, "unix", "gnet1.sock", false, false, false, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "unix", "gnet2.sock", false, false, true, true, 10, SourceAddrHash) + runClient(t, "unix", "gnet2.sock", false, false, true, true, 10, SourceAddrHash) }) }) }) - t.Run("poll-reuseport", func(t *testing.T) { + t.Run("poll-ET", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "tcp", ":9991", true, true, false, false, 10, RoundRobin) + runClient(t, "tcp", ":9991", true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "tcp", ":9992", true, true, true, false, 10, LeastConnections) + runClient(t, "tcp", ":9992", true, false, true, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "tcp", ":9991", true, true, false, true, 10, RoundRobin) + runClient(t, "tcp", ":9991", true, false, false, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "tcp", ":9992", true, true, true, false, 10, LeastConnections) + runClient(t, "tcp", ":9992", true, false, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "udp", ":9991", true, true, false, false, 10, RoundRobin) + runClient(t, "udp", ":9991", true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "udp", ":9992", true, true, true, false, 10, LeastConnections) + runClient(t, "udp", ":9992", true, false, true, false, 10, LeastConnections) }) }) t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "udp", ":9991", true, true, false, false, 10, RoundRobin) + runClient(t, "udp", ":9991", true, false, false, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "udp", ":9992", true, true, true, true, 10, LeastConnections) + runClient(t, "udp", ":9992", true, false, true, true, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "unix", "gnet1.sock", true, true, false, false, 10, RoundRobin) + runClient(t, "unix", "gnet1.sock", true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "unix", "gnet2.sock", true, true, true, false, 10, LeastConnections) + runClient(t, "unix", "gnet2.sock", true, false, true, false, 10, SourceAddrHash) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServeWithGnetClient(t, "unix", "gnet1.sock", true, true, false, true, 10, RoundRobin) + 
runClient(t, "unix", "gnet1.sock", true, false, false, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServeWithGnetClient(t, "unix", "gnet2.sock", true, true, true, true, 10, LeastConnections) + runClient(t, "unix", "gnet2.sock", true, false, true, true, 10, SourceAddrHash) + }) + }) + }) + + t.Run("poll-LT-reuseport", func(t *testing.T) { + t.Run("tcp", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "tcp", ":9991", false, true, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "tcp", ":9992", false, true, true, false, 10, LeastConnections) + }) + }) + t.Run("tcp-async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "tcp", ":9991", false, true, false, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "tcp", ":9992", false, true, true, false, 10, LeastConnections) + }) + }) + t.Run("udp", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "udp", ":9991", false, true, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "udp", ":9992", false, true, true, false, 10, LeastConnections) + }) + }) + t.Run("udp-async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "udp", ":9991", false, true, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "udp", ":9992", false, true, true, true, 10, LeastConnections) + }) + }) + t.Run("unix", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "unix", "gnet1.sock", false, true, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "unix", "gnet2.sock", false, true, true, false, 10, LeastConnections) + }) + }) + t.Run("unix-async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "unix", "gnet1.sock", false, true, false, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "unix", "gnet2.sock", false, true, true, true, 10, LeastConnections) + }) + }) + }) + + t.Run("poll-ET-reuseport", func(t *testing.T) { + t.Run("tcp", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "tcp", ":9991", true, true, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "tcp", ":9992", true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("tcp-async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "tcp", ":9991", true, true, false, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "tcp", ":9992", true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("udp", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "udp", ":9991", true, true, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "udp", ":9992", true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("udp-async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "udp", ":9991", true, true, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "udp", ":9992", true, true, true, true, 10, LeastConnections) + }) + }) + t.Run("unix", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "unix", "gnet1.sock", true, true, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "unix", "gnet2.sock", true, true, true, false, 10, 
LeastConnections) + }) + }) + t.Run("unix-async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runClient(t, "unix", "gnet1.sock", true, true, false, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runClient(t, "unix", "gnet2.sock", true, true, true, true, 10, LeastConnections) }) }) }) } -type testClientServer struct { +type testClient struct { *BuiltinEventEngine client *Client tester *testing.T @@ -215,12 +317,12 @@ type testClientServer struct { udpReadHeader int32 } -func (s *testClientServer) OnBoot(eng Engine) (action Action) { +func (s *testClient) OnBoot(eng Engine) (action Action) { s.eng = eng return } -func (s *testClientServer) OnOpen(c Conn) (out []byte, action Action) { +func (s *testClient) OnOpen(c Conn) (out []byte, action Action) { c.SetContext(&sync.Once{}) atomic.AddInt32(&s.connected, 1) require.NotNil(s.tester, c.LocalAddr(), "nil local addr") @@ -228,7 +330,7 @@ func (s *testClientServer) OnOpen(c Conn) (out []byte, action Action) { return } -func (s *testClientServer) OnClose(c Conn, err error) (action Action) { +func (s *testClient) OnClose(c Conn, err error) (action Action) { if err != nil { logging.Debugf("error occurred on closed, %v\n", err) } @@ -246,13 +348,13 @@ func (s *testClientServer) OnClose(c Conn, err error) (action Action) { return } -func (s *testClientServer) OnShutdown(Engine) { +func (s *testClient) OnShutdown(Engine) { if s.network == "udp" { require.EqualValues(s.tester, int32(s.nclients), atomic.LoadInt32(&s.udpReadHeader)) } } -func (s *testClientServer) OnTraffic(c Conn) (action Action) { +func (s *testClient) OnTraffic(c Conn) (action Action) { readHeader := func() { ping := make([]byte, len(pingMsg)) n, err := io.ReadFull(c, ping) @@ -302,7 +404,7 @@ func (s *testClientServer) OnTraffic(c Conn) (action Action) { return } -func (s *testClientServer) OnTick() (delay time.Duration, action Action) { +func (s *testClient) OnTick() (delay time.Duration, action Action) { delay = time.Second / 5 if atomic.CompareAndSwapInt32(&s.started, 0, 1) { for i := 0; i < s.nclients; i++ { @@ -321,8 +423,8 @@ func (s *testClientServer) OnTick() (delay time.Duration, action Action) { return } -func testServeWithGnetClient(t *testing.T, network, addr string, reuseport, reuseaddr, multicore, async bool, nclients int, lb LoadBalancing) { - ts := &testClientServer{ +func runClient(t *testing.T, network, addr string, et, reuseport, multicore, async bool, nclients int, lb LoadBalancing) { + ts := &testClient{ tester: t, network: network, addr: addr, @@ -347,10 +449,10 @@ func testServeWithGnetClient(t *testing.T, network, addr string, reuseport, reus err = Run(ts, network+"://"+addr, + WithEdgeTriggeredIO(et), WithLockOSThread(async), WithMulticore(multicore), WithReusePort(reuseport), - WithReuseAddr(reuseaddr), WithTicker(true), WithTCPKeepAlive(time.Minute*1), WithTCPNoDelay(TCPDelay), diff --git a/connection_bsd.go b/connection_bsd.go index 507f636df..6a085c166 100644 --- a/connection_bsd.go +++ b/connection_bsd.go @@ -20,25 +20,39 @@ package gnet import ( "io" + "golang.org/x/sys/unix" + "github.com/panjf2000/gnet/v2/internal/netpoll" ) func (c *conn) handleEvents(_ int, filter int16, flags uint16) (err error) { - switch { - case flags&netpoll.EVFlagsDelete != 0: - case flags&netpoll.EVFlagsEOF != 0: - switch { - case filter == netpoll.EVFilterRead: // read the remaining data after the peer wrote and closed immediately - err = c.loop.read(c) - case filter == netpoll.EVFilterWrite && !c.outboundBuffer.IsEmpty(): - err = 
c.loop.write(c) + el := c.loop + switch filter { + case unix.EVFILT_READ: + err = el.read(c) + case unix.EVFILT_WRITE: + err = el.write(c) + } + // EV_EOF indicates that the remote has closed the connection. + // We check for EV_EOF after processing the read/write event + // to ensure that nothing is left out on this event filter. + if flags&unix.EV_EOF != 0 && c.opened && err == nil { + switch filter { + case unix.EVFILT_READ: + // Receive the event of EVFILT_READ | EV_EOF, but the previous eventloop.read + // failed to drain the socket buffer, so we make sure we get it done this time. + c.isEOF = true + err = el.read(c) + case unix.EVFILT_WRITE: + // On macOS, the kqueue in both LT and ET mode will notify with one event for the EOF + // of the TCP remote: EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. But for some reason, two + // events will be issued in ET mode for the EOF of the Unix remote in this order: + // 1) EVFILT_WRITE|EV_ADD|EV_CLEAR|EV_EOF, 2) EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. + err = el.write(c) default: - err = c.loop.close(c, io.EOF) + c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error + err = el.close(c, io.EOF) } - case filter == netpoll.EVFilterRead: - err = c.loop.read(c) - case filter == netpoll.EVFilterWrite && !c.outboundBuffer.IsEmpty(): - err = c.loop.write(c) } return } diff --git a/connection_linux.go b/connection_linux.go index 1f951394e..30af24d59 100644 --- a/connection_linux.go +++ b/connection_linux.go @@ -17,29 +17,55 @@ package gnet -import "github.com/panjf2000/gnet/v2/internal/netpoll" +import ( + "io" -func (c *conn) handleEvents(_ int, ev uint32) error { - // Don't change the ordering of processing EPOLLOUT | EPOLLRDHUP / EPOLLIN unless you're 100% - // sure what you're doing! - // Re-ordering can easily introduce bugs and bad side-effects, as I found out painfully in the past. + "golang.org/x/sys/unix" + + "github.com/panjf2000/gnet/v2/internal/netpoll" +) - // We should always check for the EPOLLOUT event first, as we must try to send the leftover data back to - // the peer when any error occurs on a connection. +func (c *conn) handleEvents(_ int, ev uint32) error { + el := c.loop + // First check for any unexpected non-IO events. + // For these events we just close the corresponding connection directly. + if ev&netpoll.ErrEvents != 0 && ev&unix.EPOLLIN == 0 && ev&unix.EPOLLOUT == 0 { + c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error + return el.close(c, io.EOF) + } + // Secondly, check for EPOLLOUT before EPOLLIN, the former has a higher priority + // than the latter regardless of the aliveness of the current connection: + // + // 1. When the connection is alive and the system is overloaded, we want to + // offload the incoming traffic by writing all pending data back to the remotes + // before continuing to read and handle requests. + // 2. When the connection is dead, we need to try writing any pending data back + // to the remote and close the connection first. // - // Either an EPOLLOUT or EPOLLERR event may be fired when a connection is refused. - // In either case write() should take care of it properly: - // 1) writing data back, - // 2) closing the connection. - if ev&netpoll.OutEvents != 0 && !c.outboundBuffer.IsEmpty() { - if err := c.loop.write(c); err != nil { + // We perform eventloop.write for EPOLLOUT because it will take good care of either case. 
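For reference, the ET paths introduced throughout this patch all follow one pattern: epoll and kqueue in edge-triggered mode only report readiness *transitions*, so a handler must keep reading (or writing) until EAGAIN, otherwise it will never be woken again for the bytes already sitting in the socket buffer. A minimal standalone sketch of that drain pattern (drainET is a hypothetical helper for illustration, not gnet API; the write side is symmetric):

	package main

	import (
		"io"

		"golang.org/x/sys/unix"
	)

	// drainET keeps reading until the kernel reports EAGAIN, which is the
	// only safe stopping point under edge-triggered notification: bytes left
	// in the socket buffer will not trigger another readiness event.
	func drainET(fd int, buf []byte) error {
		for {
			n, err := unix.Read(fd, buf)
			switch {
			case err == unix.EAGAIN:
				return nil // fully drained; wait for the next edge
			case err != nil:
				return err
			case n == 0:
				return io.EOF // the remote closed the connection
			}
			_ = buf[:n] // hand buf[:n] to the application here
		}
	}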
+ if ev&(unix.EPOLLOUT|unix.EPOLLERR) != 0 { + if err := el.write(c); err != nil { return err } } - if ev&netpoll.InEvents != 0 { - return c.loop.read(c) + // Check for EPOLLIN before EPOLLRDHUP in case that there are pending data in + // the socket buffer. + if ev&(unix.EPOLLIN|unix.EPOLLERR) != 0 { + if err := el.read(c); err != nil { + return err + } + } + // Ultimately, check for EPOLLRDHUP, this event indicates that the remote has + // either closed connection or shut down the writing half of the connection. + if ev&unix.EPOLLRDHUP != 0 && c.opened { + if ev&unix.EPOLLIN == 0 { // unreadable EPOLLRDHUP, close the connection directly + return el.close(c, io.EOF) + } + // Received the event of EPOLLIN | EPOLLRDHUP, but the previous eventloop.read + // failed to drain the socket buffer, so we make sure we get it done this time. + c.isEOF = true + return el.read(c) } - return nil } diff --git a/connection_unix.go b/connection_unix.go index b82bc9336..18fe3e26e 100644 --- a/connection_unix.go +++ b/connection_unix.go @@ -41,22 +41,23 @@ type conn struct { fd int // file descriptor gfd gfd.GFD // gnet file descriptor ctx interface{} // user-defined context - peer unix.Sockaddr // remote socket address + remote unix.Sockaddr // remote socket address localAddr net.Addr // local addr remoteAddr net.Addr // remote addr loop *eventloop // connected event-loop - outboundBuffer elastic.Buffer // buffer for data that is eligible to be sent to the peer + outboundBuffer elastic.Buffer // buffer for data that is eligible to be sent to the remote pollAttachment netpoll.PollAttachment // connection attachment for poller - inboundBuffer elastic.RingBuffer // buffer for leftover data from the peer + inboundBuffer elastic.RingBuffer // buffer for leftover data from the remote buffer []byte // buffer for the latest bytes isDatagram bool // UDP protocol opened bool // connection opened event fired + isEOF bool // whether the connection has reached EOF } func newTCPConn(fd int, el *eventloop, sa unix.Sockaddr, localAddr, remoteAddr net.Addr) (c *conn) { c = &conn{ fd: fd, - peer: sa, + remote: sa, loop: el, localAddr: localAddr, remoteAddr: remoteAddr, @@ -71,7 +72,7 @@ func newUDPConn(fd int, el *eventloop, localAddr net.Addr, sa unix.Sockaddr, con c = &conn{ fd: fd, gfd: gfd.NewGFD(fd, el.idx, 0, 0), - peer: sa, + remote: sa, loop: el, localAddr: localAddr, remoteAddr: socket.SockaddrToUDPAddr(sa), @@ -79,13 +80,14 @@ func newUDPConn(fd int, el *eventloop, localAddr net.Addr, sa unix.Sockaddr, con pollAttachment: netpoll.PollAttachment{FD: fd, Callback: el.readUDP}, } if connected { - c.peer = nil + c.remote = nil } return } func (c *conn) release() { c.opened = false + c.isEOF = false c.ctx = nil c.buffer = nil if addr, ok := c.localAddr.(*net.TCPAddr); ok && c.localAddr != c.loop.ln.addr && len(addr.Zone) > 0 { @@ -102,92 +104,122 @@ func (c *conn) release() { } c.localAddr = nil c.remoteAddr = nil - c.pollAttachment.FD, c.pollAttachment.Callback = 0, nil if !c.isDatagram { - c.peer = nil + c.remote = nil c.inboundBuffer.Done() c.outboundBuffer.Release() } } func (c *conn) open(buf []byte) error { - if c.isDatagram && c.peer == nil { + if c.isDatagram && c.remote == nil { return unix.Send(c.fd, buf, 0) } - n, err := unix.Write(c.fd, buf) - if err != nil && err == unix.EAGAIN { - _, _ = c.outboundBuffer.Write(buf) - return nil - } - - if err == nil && n < len(buf) { - _, _ = c.outboundBuffer.Write(buf[n:]) + for { + n, err := unix.Write(c.fd, buf) + if err != nil { + if err == unix.EAGAIN { + _, _ = 
c.outboundBuffer.Write(buf) + break + } + return err + } + buf = buf[n:] + if len(buf) == 0 { + break + } } - return err + return nil } func (c *conn) write(data []byte) (n int, err error) { + isET := c.loop.engine.opts.EdgeTriggeredIO n = len(data) - // If there is pending data in outbound buffer, the current data ought to be appended to the outbound buffer - // for maintaining the sequence of network packets. + // If there is pending data in outbound buffer, + // the current data ought to be appended to the + // outbound buffer for maintaining the sequence + // of network packets. if !c.outboundBuffer.IsEmpty() { _, _ = c.outboundBuffer.Write(data) + if isET { + err = c.loop.write(c) + } return } var sent int +loop: if sent, err = unix.Write(c.fd, data); err != nil { - // A temporary error occurs, append the data to outbound buffer, writing it back to the peer in the next round. + // A temporary error occurs, append the data to outbound buffer, + // writing it back to the remote in the next round for LT mode. if err == unix.EAGAIN { - _, _ = c.outboundBuffer.Write(data) - err = c.loop.poller.ModReadWrite(&c.pollAttachment) + _, err = c.outboundBuffer.Write(data) + if !isET { + err = c.loop.poller.ModReadWrite(&c.pollAttachment, false) + } return } if err := c.loop.close(c, os.NewSyscallError("write", err)); err != nil { - logging.Errorf("failed to close connection(fd=%d,peer=%+v) on conn.write: %v", + logging.Errorf("failed to close connection(fd=%d,remote=%+v) on conn.write: %v", c.fd, c.remoteAddr, err) } return 0, os.NewSyscallError("write", err) } - // Failed to send all data back to the peer, buffer the leftover data for the next round. - if sent < n { - _, _ = c.outboundBuffer.Write(data[sent:]) - err = c.loop.poller.ModReadWrite(&c.pollAttachment) + data = data[sent:] + if isET && len(data) > 0 { + goto loop } + // Failed to send all data back to the remote, buffer the leftover data for the next round. + if len(data) > 0 { + _, _ = c.outboundBuffer.Write(data) + err = c.loop.poller.ModReadWrite(&c.pollAttachment, false) + } + return } func (c *conn) writev(bs [][]byte) (n int, err error) { + isET := c.loop.engine.opts.EdgeTriggeredIO + for _, b := range bs { n += len(b) } - // If there is pending data in outbound buffer, the current data ought to be appended to the outbound buffer - // for maintaining the sequence of network packets. + // If there is pending data in outbound buffer, + // the current data ought to be appended to the + // outbound buffer for maintaining the sequence + // of network packets. if !c.outboundBuffer.IsEmpty() { _, _ = c.outboundBuffer.Writev(bs) + if isET { + err = c.loop.write(c) + } return } + remaining := n var sent int +loop: if sent, err = gio.Writev(c.fd, bs); err != nil { - // A temporary error occurs, append the data to outbound buffer, writing it back to the peer in the next round. + // A temporary error occurs, append the data to outbound buffer, + // writing it back to the remote in the next round for LT mode. 
if err == unix.EAGAIN { - _, _ = c.outboundBuffer.Writev(bs) - err = c.loop.poller.ModReadWrite(&c.pollAttachment) + _, err = c.outboundBuffer.Writev(bs) + if !isET { + err = c.loop.poller.ModReadWrite(&c.pollAttachment, false) + } return } if err := c.loop.close(c, os.NewSyscallError("writev", err)); err != nil { - logging.Errorf("failed to close connection(fd=%d,peer=%+v) on conn.writev: %v", + logging.Errorf("failed to close connection(fd=%d,remote=%+v) on conn.writev: %v", c.fd, c.remoteAddr, err) } return 0, os.NewSyscallError("writev", err) } - // Failed to send all data back to the peer, buffer the leftover data for the next round. - if sent < n { - var pos int + pos := len(bs) + if remaining -= sent; remaining > 0 { for i := range bs { bn := len(bs[i]) if sent < bn { @@ -197,9 +229,18 @@ func (c *conn) writev(bs [][]byte) (n int, err error) { } sent -= bn } - _, _ = c.outboundBuffer.Writev(bs[pos:]) - err = c.loop.poller.ModReadWrite(&c.pollAttachment) } + bs = bs[pos:] + if isET && remaining > 0 { + goto loop + } + + // Failed to send all data back to the remote, buffer the leftover data for the next round. + if remaining > 0 { + _, _ = c.outboundBuffer.Writev(bs) + err = c.loop.poller.ModReadWrite(&c.pollAttachment, false) + } + return } @@ -246,10 +287,10 @@ func (c *conn) asyncWritev(itf interface{}) (err error) { } func (c *conn) sendTo(buf []byte) error { - if c.peer == nil { + if c.remote == nil { return unix.Send(c.fd, buf, 0) } - return unix.Sendto(c.fd, buf, 0, c.peer) + return unix.Sendto(c.fd, buf, 0, c.remote) } func (c *conn) resetBuffer() { @@ -290,13 +331,13 @@ func (c *conn) Next(n int) (buf []byte, err error) { } head, tail := c.inboundBuffer.Peek(n) defer c.inboundBuffer.Discard(n) //nolint:errcheck - if len(head) >= n { + if len(head) == n { return head[:n], err } c.loop.cache.Reset() c.loop.cache.Write(head) c.loop.cache.Write(tail) - if inBufferLen >= n { + if inBufferLen == n { return c.loop.cache.Bytes(), err } @@ -317,13 +358,13 @@ func (c *conn) Peek(n int) (buf []byte, err error) { return c.buffer[:n], err } head, tail := c.inboundBuffer.Peek(n) - if len(head) >= n { + if len(head) == n { return head[:n], err } c.loop.cache.Reset() c.loop.cache.Write(head) c.loop.cache.Write(tail) - if inBufferLen >= n { + if inBufferLen == n { return c.loop.cache.Bytes(), err } @@ -389,10 +430,6 @@ func (c *conn) WriteTo(w io.Writer) (n int64, err error) { } func (c *conn) Flush() error { - if c.outboundBuffer.IsEmpty() { - return nil - } - return c.loop.write(c) } @@ -414,7 +451,7 @@ func (c *conn) RemoteAddr() net.Addr { return c.remoteAddr } // func (c *conn) Gfd() gfd.GFD { return c.gfd } func (c *conn) Fd() int { return c.fd } -func (c *conn) Dup() (fd int, err error) { fd, _, err = netpoll.Dup(c.fd); return } +func (c *conn) Dup() (fd int, err error) { fd, _, err = socket.Dup(c.fd); return } func (c *conn) SetReadBuffer(bytes int) error { return socket.SetRecvBuffer(c.fd, bytes) } func (c *conn) SetWriteBuffer(bytes int) error { return socket.SetSendBuffer(c.fd, bytes) } func (c *conn) SetLinger(sec int) error { return socket.SetLinger(c.fd, sec) } diff --git a/connection_windows.go b/connection_windows.go index 61619b5bb..21ebb64d4 100644 --- a/connection_windows.go +++ b/connection_windows.go @@ -54,8 +54,8 @@ type conn struct { buffer *bbPool.ByteBuffer // reuse memory of inbound data as a temporary buffer rawConn net.Conn // original connection localAddr net.Addr // local server addr - remoteAddr net.Addr // remote peer addr - inboundBuffer elastic.RingBuffer 
// buffer for data from the peer + remoteAddr net.Addr // remote addr + inboundBuffer elastic.RingBuffer // buffer for data from the remote } func packTCPConn(c *conn, buf []byte) *tcpConn { diff --git a/engine_unix.go b/engine_unix.go index 5040a3fe1..261b67100 100644 --- a/engine_unix.go +++ b/engine_unix.go @@ -66,13 +66,6 @@ func (eng *engine) shutdown(err error) { }) } -func (eng *engine) startEventLoops() { - eng.eventLoops.iterate(func(_ int, el *eventloop) bool { - eng.workerPool.Go(el.run) - return true - }) -} - func (eng *engine) closeEventLoops() { eng.eventLoops.iterate(func(_ int, el *eventloop) bool { el.ln.close() @@ -88,14 +81,7 @@ func (eng *engine) closeEventLoops() { } } -func (eng *engine) startSubReactors() { - eng.eventLoops.iterate(func(_ int, el *eventloop) bool { - eng.workerPool.Go(el.activateSubReactor) - return true - }) -} - -func (eng *engine) activateEventLoops(numEventLoop int) (err error) { +func (eng *engine) runEventLoops(numEventLoop int) (err error) { network, address := eng.ln.network, eng.ln.address ln := eng.ln var striker *eventloop @@ -115,7 +101,7 @@ func (eng *engine) activateEventLoops(numEventLoop int) (err error) { el.buffer = make([]byte, eng.opts.ReadBufferCap) el.connections.init() el.eventHandler = eng.eventHandler - if err = el.poller.AddRead(el.ln.packPollAttachment(el.accept)); err != nil { + if err = el.poller.AddRead(el.ln.packPollAttachment(el.accept), false); err != nil { return } eng.eventLoops.register(el) @@ -130,7 +116,10 @@ func (eng *engine) activateEventLoops(numEventLoop int) (err error) { } // Start event-loops in background. - eng.startEventLoops() + eng.eventLoops.iterate(func(_ int, el *eventloop) bool { + eng.workerPool.Go(el.run) + return true + }) eng.workerPool.Go(func() error { striker.ticker(eng.ticker.ctx) @@ -157,7 +146,10 @@ func (eng *engine) activateReactors(numEventLoop int) error { } // Start sub reactors in background. - eng.startSubReactors() + eng.eventLoops.iterate(func(_ int, el *eventloop) bool { + eng.workerPool.Go(el.orbit) + return true + }) if p, err := netpoll.OpenPoller(); err == nil { el := new(eventloop) @@ -166,13 +158,13 @@ func (eng *engine) activateReactors(numEventLoop int) error { el.engine = eng el.poller = p el.eventHandler = eng.eventHandler - if err = el.poller.AddRead(eng.ln.packPollAttachment(eng.accept)); err != nil { + if err = el.poller.AddRead(eng.ln.packPollAttachment(eng.accept), false); err != nil { return err } eng.acceptor = el // Start main reactor in background. 
- eng.workerPool.Go(el.activateMainReactor) + eng.workerPool.Go(el.rotate) } else { return err } @@ -190,7 +182,7 @@ func (eng *engine) activateReactors(numEventLoop int) error { func (eng *engine) start(numEventLoop int) error { if eng.opts.ReusePort || eng.ln.network == "udp" { - return eng.activateEventLoops(numEventLoop) + return eng.runEventLoops(numEventLoop) } return eng.activateReactors(numEventLoop) diff --git a/eventloop_unix.go b/eventloop_unix.go index e8c8eab92..2d6eae077 100644 --- a/eventloop_unix.go +++ b/eventloop_unix.go @@ -22,13 +22,14 @@ import ( "context" "errors" "fmt" + "io" "os" "strings" "time" "golang.org/x/sys/unix" - "github.com/panjf2000/gnet/v2/internal/io" + gio "github.com/panjf2000/gnet/v2/internal/io" "github.com/panjf2000/gnet/v2/internal/netpoll" "github.com/panjf2000/gnet/v2/internal/queue" errorx "github.com/panjf2000/gnet/v2/pkg/errors" @@ -75,7 +76,12 @@ func (el *eventloop) register(itf interface{}) error { defer ccb.cb() } - if err := el.poller.AddRead(&c.pollAttachment); err != nil { + addEvents := el.poller.AddRead + if el.engine.opts.EdgeTriggeredIO { + addEvents = el.poller.AddReadWrite + } + + if err := addEvents(&c.pollAttachment, el.engine.opts.EdgeTriggeredIO); err != nil { _ = unix.Close(c.fd) c.release() return err @@ -83,7 +89,7 @@ func (el *eventloop) register(itf interface{}) error { el.connections.addConn(c, el.idx) - if c.isDatagram && c.peer != nil { + if c.isDatagram && c.remote != nil { return nil } return el.open(c) @@ -99,8 +105,8 @@ func (el *eventloop) open(c *conn) error { } } - if !c.outboundBuffer.IsEmpty() { - if err := el.poller.ModReadWrite(&c.pollAttachment); err != nil { + if !c.outboundBuffer.IsEmpty() && !el.engine.opts.EdgeTriggeredIO { + if err := el.poller.ModReadWrite(&c.pollAttachment, false); err != nil { return err } } @@ -109,13 +115,19 @@ func (el *eventloop) open(c *conn) error { } func (el *eventloop) read(c *conn) error { + if !c.opened { + return nil + } + + isET := el.engine.opts.EdgeTriggeredIO +loop: n, err := unix.Read(c.fd, el.buffer) if err != nil || n == 0 { if err == unix.EAGAIN { return nil } if n == 0 { - err = unix.ECONNRESET + err = io.EOF } return el.close(c, os.NewSyscallError("read", err)) } @@ -131,6 +143,11 @@ func (el *eventloop) read(c *conn) error { } _, _ = c.inboundBuffer.Write(c.buffer) c.buffer = c.buffer[:0] + + if isET || c.isEOF { + goto loop + } + return nil } @@ -138,16 +155,22 @@ func (el *eventloop) read(c *conn) error { const iovMax = 1024 func (el *eventloop) write(c *conn) error { - iov := c.outboundBuffer.Peek(-1) + if c.outboundBuffer.IsEmpty() { + return nil + } + + isET := el.engine.opts.EdgeTriggeredIO var ( n int err error ) +loop: + iov, _ := c.outboundBuffer.Peek(-1) if len(iov) > 1 { if len(iov) > iovMax { iov = iov[:iovMax] } - n, err = io.Writev(c.fd, iov) + n, err = gio.Writev(c.fd, iov) } else { n, err = unix.Write(c.fd, iov[0]) } @@ -159,11 +182,14 @@ func (el *eventloop) write(c *conn) error { default: return el.close(c, os.NewSyscallError("write", err)) } + if isET && !c.outboundBuffer.IsEmpty() { + goto loop + } - // All data have been drained, it's no need to monitor the writable events, - // remove the writable event from poller to help the future event-loops. - if c.outboundBuffer.IsEmpty() { - _ = el.poller.ModRead(&c.pollAttachment) + // All data have been sent, it's no need to monitor the writable events for LT mode, + // remove the writable event from poller to help the future event-loops if necessary. 
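The boolean flag threaded through AddRead/AddReadWrite/ModRead/ModReadWrite in this patch boils down to two registration strategies: in LT mode the poller only watches for writability while the outbound buffer is non-empty, whereas in ET mode read and write interest are armed once, up front. A rough sketch against raw epoll (register is a hypothetical helper; gnet's pollers wrap the same calls):

	package main

	import "golang.org/x/sys/unix"

	// register arms fd in the poller. In ET mode, read/write interest plus
	// EPOLLET is set once and never modified; in LT mode only read interest
	// is armed here, and EPOLLOUT is toggled as the outbound buffer fills
	// and drains, to avoid busy-waking on a permanently writable socket.
	func register(epfd, fd int, et bool) error {
		ev := unix.EpollEvent{Fd: int32(fd), Events: unix.EPOLLIN | unix.EPOLLRDHUP}
		if et {
			ev.Events |= unix.EPOLLOUT | unix.EPOLLET
		}
		return unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, fd, &ev)
	}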
+	if !isET && c.outboundBuffer.IsEmpty() {
+		_ = el.poller.ModRead(&c.pollAttachment, false)
 	}
 
 	return nil
@@ -187,19 +213,17 @@ func (el *eventloop) close(c *conn, err error) (rerr error) {
 		return // ignore stale connections
 	}
 
-	// Send residual data in buffer back to the peer before actually closing the connection.
-	if !c.outboundBuffer.IsEmpty() {
-		for !c.outboundBuffer.IsEmpty() {
-			iov := c.outboundBuffer.Peek(0)
-			if len(iov) > iovMax {
-				iov = iov[:iovMax]
-			}
-			if n, e := io.Writev(c.fd, iov); e != nil {
-				el.getLogger().Warnf("close: error occurs when sending data back to peer, %v", e)
-				break
-			} else { //nolint:revive
-				_, _ = c.outboundBuffer.Discard(n)
-			}
+	// Send residual data in buffer back to the remote before actually closing the connection.
+	for !c.outboundBuffer.IsEmpty() {
+		iov, _ := c.outboundBuffer.Peek(0)
+		if len(iov) > iovMax {
+			iov = iov[:iovMax]
+		}
+		if n, e := gio.Writev(c.fd, iov); e != nil {
+			el.getLogger().Warnf("close: error occurs when sending data back to remote, %v", e)
+			break
+		} else { //nolint:revive
+			_, _ = c.outboundBuffer.Discard(n)
 		}
 	}
 
@@ -303,7 +327,7 @@ func (el *eventloop) readUDP1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) erro
 	}
 	c.buffer = el.buffer[:n]
 	action := el.eventHandler.OnTraffic(c)
-	if c.peer != nil {
+	if c.remote != nil {
 		c.release()
 	}
 	if action == Shutdown {
diff --git a/gnet.go b/gnet.go
index f77ee5793..d511ab5be 100644
--- a/gnet.go
+++ b/gnet.go
@@ -167,7 +167,7 @@ func (e Engine) Wake(fd gfd.GFD, cb AsyncCallback) error {
 
 // Reader is an interface that consists of a number of methods for reading that Conn must implement.
 //
-// Note that the methods in this interface are not goroutine-safe for concurrent use,
+// Note that the methods in this interface are not concurrency-safe for concurrent use,
 // you must invoke them within any method in EventHandler.
 type Reader interface {
 	io.Reader
 
 	// Next returns a slice containing the next n bytes from the buffer,
 	// advancing the buffer as if the bytes had been returned by Read.
-	// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
-	// The error is ErrBufferFull if n is larger than b's buffer size.
+	// Calling this method has the same effect as calling Peek and Discard.
+	// If the number of available bytes is less than requested, a pair of (nil, io.ErrShortBuffer)
+	// is returned.
 	//
 	// Note that the []byte buf returned by Next() is not allowed to be passed to a new goroutine,
 	// as this []byte will be reused within event-loop.
 	// If you have to use buf in a new goroutine, you should either make a copy of it or call Conn.Read([]byte)
 	// to that new goroutine.
 	Next(n int) (buf []byte, err error)
 
-	// Peek returns the next n bytes without advancing the reader. The bytes stop
-	// being valid at the next read call. If Peek returns fewer than n bytes, it
-	// also returns an error explaining why the read is short. The error is
-	// ErrBufferFull if n is larger than b's buffer size.
+	// Peek returns the next n bytes without advancing the inbound buffer, the returned bytes
+	// remain valid until a Discard is called. If the number of available bytes is
+	// less than requested, a pair of (nil, io.ErrShortBuffer) is returned.
 	//
 	// Note that the []byte buf returned by Peek() is not allowed to be passed to a new goroutine,
 	// as this []byte will be reused within event-loop.
 	// If you have to use buf in a new goroutine, you should either make a copy of it or call Conn.Read([]byte)
 	// to that new goroutine.
 	Peek(n int) (buf []byte, err error)
 
-	// Discard skips the next n bytes, returning the number of bytes discarded.
- // - // If Discard skips fewer than n bytes, it also returns an error. - // If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without - // reading from the underlying io.Reader. + // Discard advances the inbound buffer with next n bytes, returning the number of bytes discarded. Discard(n int) (discarded int, err error) // InboundBuffered returns the number of bytes that can be read from the current buffer. @@ -208,22 +204,22 @@ type Reader interface { // Writer is an interface that consists of a number of methods for writing that Conn must implement. type Writer interface { - io.Writer // not goroutine-safe - io.ReaderFrom // not goroutine-safe + io.Writer // not concurrency-safe + io.ReaderFrom // not concurrency-safe - // Writev writes multiple byte slices to peer synchronously, it's not goroutine-safe, + // Writev writes multiple byte slices to remote synchronously, it's not concurrency-safe, // you must invoke it within any method in EventHandler. Writev(bs [][]byte) (n int, err error) - // Flush writes any buffered data to the underlying connection, it's not goroutine-safe, + // Flush writes any buffered data to the underlying connection, it's not concurrency-safe, // you must invoke it within any method in EventHandler. Flush() (err error) // OutboundBuffered returns the number of bytes that can be read from the current buffer. - // it's not goroutine-safe, you must invoke it within any method in EventHandler. + // it's not concurrency-safe, you must invoke it within any method in EventHandler. OutboundBuffered() (n int) - // AsyncWrite writes bytes to peer asynchronously, it's goroutine-safe, + // AsyncWrite writes bytes to remote asynchronously, it's concurrency-safe, // you don't have to invoke it within any method in EventHandler, // usually you would call it in an individual goroutine. // @@ -234,7 +230,7 @@ type Writer interface { // just call Conn.Write to send back your data. AsyncWrite(buf []byte, callback AsyncCallback) (err error) - // AsyncWritev writes multiple byte slices to peer asynchronously, + // AsyncWritev writes multiple byte slices to remote asynchronously, // you don't have to invoke it within any method in EventHandler, // usually you would call it in an individual goroutine. AsyncWritev(bs [][]byte, callback AsyncCallback) (err error) @@ -247,7 +243,7 @@ type AsyncCallback func(c Conn, err error) error // Socket is a set of functions which manipulate the underlying file descriptor of a connection. // -// Note that the methods in this interface are goroutine-safe for concurrent use, +// Note that the methods in this interface are concurrency-safe for concurrent use, // you don't have to invoke them within any method in EventHandler. type Socket interface { // Gfd returns the gfd of socket. @@ -302,35 +298,35 @@ type Socket interface { // Conn is an interface of underlying connection. type Conn interface { - Reader // all methods in Reader are not goroutine-safe. - Writer // some methods in Writer are goroutine-safe, some are not. - Socket // all methods in Socket are goroutine-safe. + Reader // all methods in Reader are not concurrency-safe. + Writer // some methods in Writer are concurrency-safe, some are not. + Socket // all methods in Socket are concurrency-safe. - // Context returns a user-defined context, it's not goroutine-safe, + // Context returns a user-defined context, it's not concurrency-safe, // you must invoke it within any method in EventHandler. 
Context() (ctx interface{})
 
-	// SetContext sets a user-defined context, it's not goroutine-safe,
+	// SetContext sets a user-defined context, it's not concurrency-safe,
 	// you must invoke it within any method in EventHandler.
 	SetContext(ctx interface{})
 
-	// LocalAddr is the connection's local socket address, it's not goroutine-safe,
+	// LocalAddr is the connection's local socket address, it's not concurrency-safe,
 	// you must invoke it within any method in EventHandler.
 	LocalAddr() (addr net.Addr)
 
-	// RemoteAddr is the connection's remote peer address, it's not goroutine-safe,
+	// RemoteAddr is the connection's remote address, it's not concurrency-safe,
 	// you must invoke it within any method in EventHandler.
 	RemoteAddr() (addr net.Addr)
 
-	// Wake triggers a OnTraffic event for the current connection, it's goroutine-safe.
+	// Wake triggers an OnTraffic event for the current connection, it's concurrency-safe.
 	Wake(callback AsyncCallback) (err error)
 
-	// CloseWithCallback closes the current connection, it's goroutine-safe.
+	// CloseWithCallback closes the current connection, it's concurrency-safe.
 	// Usually you should provide a non-nil callback for this method,
 	// otherwise your better choice is Close().
 	CloseWithCallback(callback AsyncCallback) (err error)
 
-	// Close closes the current connection, implements net.Conn, it's goroutine-safe.
+	// Close closes the current connection, implements net.Conn, it's concurrency-safe.
 	Close() (err error)
 
 	// SetDeadline implements net.Conn.
@@ -359,15 +355,15 @@ type (
 	// OnOpen fires when a new connection has been opened.
 	//
 	// The Conn c has information about the connection such as its local and remote addresses.
-	// The parameter out is the return value which is going to be sent back to the peer.
-	// Sending large amounts of data back to the peer in OnOpen is usually not recommended.
+	// The parameter out is the return value which is going to be sent back to the remote.
+	// Sending large amounts of data back to the remote in OnOpen is usually not recommended.
 	OnOpen(c Conn) (out []byte, action Action)
 
 	// OnClose fires when a connection has been closed.
 	// The parameter err is the last known connection error.
 	OnClose(c Conn, err error) (action Action)
 
-	// OnTraffic fires when a socket receives data from the peer.
+	// OnTraffic fires when a socket receives data from the remote.
 	//
 	// Note that the []byte returned from Conn.Peek(int)/Conn.Next(int) is not allowed to be passed to a new goroutine,
 	// as this []byte will be reused within event-loop after OnTraffic() returns.
@@ -398,7 +394,7 @@ func (*BuiltinEventEngine) OnShutdown(_ Engine) {
 }
 
 // OnOpen fires when a new connection has been opened.
-// The parameter out is the return value which is going to be sent back to the peer.
+// The parameter out is the return value which is going to be sent back to the remote.
 func (*BuiltinEventEngine) OnOpen(_ Conn) (out []byte, action Action) {
 	return
 }
@@ -409,7 +405,7 @@ func (*BuiltinEventEngine) OnClose(_ Conn, _ error) (action Action) {
 	return
 }
 
-// OnTraffic fires when a local socket receives data from the peer.
+// OnTraffic fires when a local socket receives data from the remote.
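Tying the reworked docs to the new option, a minimal echo server that opts into edge-triggered I/O could look like this (a sketch; the address and struct name are arbitrary, error handling elided):

	package main

	import (
		"log"

		"github.com/panjf2000/gnet/v2"
	)

	type echoServer struct {
		gnet.BuiltinEventEngine
	}

	func (es *echoServer) OnTraffic(c gnet.Conn) gnet.Action {
		buf, err := c.Next(-1) // take all currently buffered bytes
		if err != nil {
			return gnet.Close
		}
		_, _ = c.Write(buf) // echo them back to the remote
		return gnet.None
	}

	func main() {
		log.Fatal(gnet.Run(&echoServer{}, "tcp://:9000",
			gnet.WithMulticore(true),
			gnet.WithEdgeTriggeredIO(true))) // the option introduced by this patch
	}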
func (*BuiltinEventEngine) OnTraffic(_ Conn) (action Action) { return } @@ -492,6 +488,10 @@ func Run(eventHandler EventHandler, protoAddr string, opts ...Option) (err error } defer ln.close() + if ln.network == "udp" { + options.EdgeTriggeredIO = false + } + return run(eventHandler, ln, options, protoAddr) } diff --git a/gnet_test.go b/gnet_test.go index e6150023d..8d0415079 100644 --- a/gnet_test.go +++ b/gnet_test.go @@ -30,194 +30,277 @@ var ( streamLen = 1024 * 1024 ) -func TestServe(t *testing.T) { +func TestServer(t *testing.T) { // start an engine // connect 10 clients // each client will pipe random data for 1-3 seconds. // the writes to the engine will be random sizes. 0KB - 1MB. // the engine will echo back the data. // waits for graceful connection closing. - t.Run("poll", func(t *testing.T) { + t.Run("poll-LT", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "tcp", ":9991", false, false, false, false, false, 10, RoundRobin) + runServer(t, "tcp", ":9991", false, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "tcp", ":9992", false, false, true, false, false, 10, LeastConnections) + runServer(t, "tcp", ":9992", false, false, true, false, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "tcp", ":9991", false, false, false, true, false, 10, RoundRobin) + runServer(t, "tcp", ":9991", false, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "tcp", ":9992", false, false, true, true, false, 10, LeastConnections) + runServer(t, "tcp", ":9992", false, false, true, true, false, 10, LeastConnections) }) }) t.Run("tcp-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "tcp", ":9991", false, false, false, true, true, 10, RoundRobin) + runServer(t, "tcp", ":9991", false, false, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "tcp", ":9992", false, false, true, true, true, 10, LeastConnections) + runServer(t, "tcp", ":9992", false, false, true, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "udp", ":9991", false, false, false, false, false, 10, RoundRobin) + runServer(t, "udp", ":9991", false, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "udp", ":9992", false, false, true, false, false, 10, LeastConnections) + runServer(t, "udp", ":9992", false, false, true, false, false, 10, LeastConnections) }) }) t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "udp", ":9991", false, false, false, true, false, 10, RoundRobin) + runServer(t, "udp", ":9991", false, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "udp", ":9992", false, false, true, true, false, 10, LeastConnections) + runServer(t, "udp", ":9992", false, false, true, true, false, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "unix", "gnet1.sock", false, false, false, false, false, 10, RoundRobin) + runServer(t, "unix", "gnet1.sock", false, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "unix", "gnet2.sock", false, false, true, false, false, 10, SourceAddrHash) + runServer(t, "unix", 
"gnet2.sock", false, false, true, false, false, 10, SourceAddrHash) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "unix", "gnet1.sock", false, false, false, true, false, 10, RoundRobin) + runServer(t, "unix", "gnet1.sock", false, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "unix", "gnet2.sock", false, false, true, true, false, 10, SourceAddrHash) + runServer(t, "unix", "gnet2.sock", false, false, true, true, false, 10, SourceAddrHash) }) }) t.Run("unix-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "unix", "gnet1.sock", false, false, false, true, true, 10, RoundRobin) + runServer(t, "unix", "gnet1.sock", false, false, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "unix", "gnet2.sock", false, false, true, true, true, 10, SourceAddrHash) + runServer(t, "unix", "gnet2.sock", false, false, true, true, true, 10, SourceAddrHash) }) }) }) - t.Run("poll-reuseport", func(t *testing.T) { + t.Run("poll-ET", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "tcp", ":9991", true, false, false, false, false, 10, RoundRobin) + runServer(t, "tcp", ":9991", true, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "tcp", ":9992", true, false, true, false, false, 10, LeastConnections) + runServer(t, "tcp", ":9992", true, false, true, false, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "tcp", ":9991", true, false, false, true, false, 10, RoundRobin) + runServer(t, "tcp", ":9991", true, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "tcp", ":9992", true, false, true, true, false, 10, LeastConnections) + runServer(t, "tcp", ":9992", true, false, true, true, false, 10, LeastConnections) }) }) t.Run("tcp-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "tcp", ":9991", true, false, false, true, true, 10, RoundRobin) + runServer(t, "tcp", ":9991", true, false, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "tcp", ":9992", true, false, true, true, true, 10, LeastConnections) + runServer(t, "tcp", ":9992", true, false, true, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "udp", ":9991", true, false, false, false, false, 10, RoundRobin) + runServer(t, "udp", ":9991", true, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "udp", ":9992", true, false, true, false, false, 10, LeastConnections) + runServer(t, "udp", ":9992", true, false, true, false, false, 10, LeastConnections) }) }) t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "udp", ":9991", true, false, false, true, false, 10, RoundRobin) + runServer(t, "udp", ":9991", true, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "udp", ":9992", true, false, true, true, false, 10, LeastConnections) + runServer(t, "udp", ":9992", true, false, true, true, false, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "unix", "gnet1.sock", true, false, false, false, false, 10, RoundRobin) + 
runServer(t, "unix", "gnet1.sock", true, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "unix", "gnet2.sock", true, false, true, false, false, 10, LeastConnections) + runServer(t, "unix", "gnet2.sock", true, false, true, false, false, 10, SourceAddrHash) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "unix", "gnet1.sock", true, false, false, true, false, 10, RoundRobin) + runServer(t, "unix", "gnet1.sock", true, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "unix", "gnet2.sock", true, false, true, true, false, 10, LeastConnections) + runServer(t, "unix", "gnet2.sock", true, false, true, true, false, 10, SourceAddrHash) }) }) t.Run("unix-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "unix", "gnet1.sock", true, false, false, true, true, 10, RoundRobin) + runServer(t, "unix", "gnet1.sock", true, false, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "unix", "gnet2.sock", true, false, true, true, true, 10, LeastConnections) + runServer(t, "unix", "gnet2.sock", true, false, true, true, true, 10, SourceAddrHash) }) }) }) - t.Run("poll-reuseaddr", func(t *testing.T) { + t.Run("poll-LT-reuseport", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "tcp", ":9991", false, true, false, false, false, 10, RoundRobin) + runServer(t, "tcp", ":9991", false, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "tcp", ":9992", false, true, true, false, false, 10, LeastConnections) + runServer(t, "tcp", ":9992", false, true, true, false, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "tcp", ":9991", false, true, false, true, false, 10, RoundRobin) + runServer(t, "tcp", ":9991", false, true, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "tcp", ":9992", false, true, true, false, false, 10, LeastConnections) + runServer(t, "tcp", ":9992", false, true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("tcp-async-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "tcp", ":9991", false, true, false, true, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "tcp", ":9992", false, true, true, true, true, 10, LeastConnections) + }) + }) + t.Run("udp", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "udp", ":9991", false, true, false, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "udp", ":9992", false, true, true, false, false, 10, LeastConnections) + }) + }) + t.Run("udp-async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "udp", ":9991", false, true, false, true, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "udp", ":9992", false, true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("unix", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "unix", "gnet1.sock", false, true, false, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "unix", "gnet2.sock", false, true, true, false, false, 10, LeastConnections) + }) + }) + t.Run("unix-async", func(t *testing.T) { + t.Run("1-loop", func(t 
*testing.T) { + runServer(t, "unix", "gnet1.sock", false, true, false, true, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "unix", "gnet2.sock", false, true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("unix-async-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "unix", "gnet1.sock", false, true, false, true, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "unix", "gnet2.sock", false, true, true, true, true, 10, LeastConnections) + }) + }) + }) + + t.Run("poll-ET-reuseport", func(t *testing.T) { + t.Run("tcp", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "tcp", ":9991", true, true, false, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "tcp", ":9992", true, true, true, false, false, 10, LeastConnections) + }) + }) + t.Run("tcp-async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "tcp", ":9991", true, true, false, true, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "tcp", ":9992", true, true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("tcp-async-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "tcp", ":9991", true, true, false, true, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "tcp", ":9992", true, true, true, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "udp", ":9991", false, true, false, false, false, 10, RoundRobin) + runServer(t, "udp", ":9991", true, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "udp", ":9992", false, true, true, false, false, 10, LeastConnections) + runServer(t, "udp", ":9992", true, true, true, false, false, 10, LeastConnections) }) }) t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "udp", ":9991", false, true, false, false, false, 10, RoundRobin) + runServer(t, "udp", ":9991", true, true, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "udp", ":9992", false, true, true, true, false, 10, LeastConnections) + runServer(t, "udp", ":9992", true, true, true, true, false, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "unix", "gnet1.sock", false, true, false, false, false, 10, RoundRobin) + runServer(t, "unix", "gnet1.sock", true, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "unix", "gnet2.sock", false, true, true, false, false, 10, LeastConnections) + runServer(t, "unix", "gnet2.sock", true, true, true, false, false, 10, LeastConnections) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - testServe(t, "unix", "gnet1.sock", false, true, false, true, false, 10, RoundRobin) + runServer(t, "unix", "gnet1.sock", true, true, false, true, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, "unix", "gnet2.sock", true, true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("unix-async-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, "unix", "gnet1.sock", true, true, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - testServe(t, "unix", "gnet2.sock", false, true, true, true, 
false, 10, LeastConnections) + runServer(t, "unix", "gnet2.sock", true, true, true, true, true, 10, LeastConnections) }) }) }) @@ -377,7 +460,7 @@ func (s *testServer) OnTick() (delay time.Duration, action Action) { return } -func testServe(t *testing.T, network, addr string, reuseport, reuseaddr, multicore, async, writev bool, nclients int, lb LoadBalancing) { +func runServer(t *testing.T, network, addr string, et, reuseport, multicore, async, writev bool, nclients int, lb LoadBalancing) { ts := &testServer{ tester: t, network: network, @@ -390,10 +473,10 @@ func testServe(t *testing.T, network, addr string, reuseport, reuseaddr, multico } err := Run(ts, network+"://"+addr, + WithEdgeTriggeredIO(et), WithLockOSThread(async), WithMulticore(multicore), WithReusePort(reuseport), - WithReuseAddr(reuseaddr), WithTicker(true), WithTCPKeepAlive(time.Minute*1), WithTCPNoDelay(TCPDelay), @@ -1248,7 +1331,7 @@ func (s *simServer) OnTick() (delay time.Duration, action Action) { if atomic.CompareAndSwapInt32(&s.started, 0, 1) { for i := 0; i < s.nclients; i++ { go func() { - runClient(s.tester, s.network, s.addr, s.packetSize, s.packetBatch) + runSimClient(s.tester, s.network, s.addr, s.packetSize, s.packetBatch) }() } } @@ -1339,32 +1422,32 @@ func (codec testCodec) Unpack(buf []byte) ([]byte, error) { func TestSimServer(t *testing.T) { t.Run("packet-size=128,batch=100", func(t *testing.T) { - testSimServer(t, ":7200", 10, 128, 100) + runSimServer(t, ":7200", false, 10, 128, 100) }) t.Run("packet-size=256,batch=50", func(t *testing.T) { - testSimServer(t, ":7201", 10, 256, 50) + runSimServer(t, ":7201", true, 10, 256, 50) }) t.Run("packet-size=512,batch=30", func(t *testing.T) { - testSimServer(t, ":7202", 10, 512, 30) + runSimServer(t, ":7202", false, 10, 512, 30) }) t.Run("packet-size=1024,batch=20", func(t *testing.T) { - testSimServer(t, ":7203", 10, 1024, 20) + runSimServer(t, ":7203", true, 10, 1024, 20) }) t.Run("packet-size=64*1024,batch=10", func(t *testing.T) { - testSimServer(t, ":7204", 10, 64*1024, 10) + runSimServer(t, ":7204", false, 10, 64*1024, 10) }) t.Run("packet-size=128*1024,batch=5", func(t *testing.T) { - testSimServer(t, ":7205", 10, 128*1024, 5) + runSimServer(t, ":7205", true, 10, 128*1024, 5) }) t.Run("packet-size=512*1024,batch=3", func(t *testing.T) { - testSimServer(t, ":7206", 10, 512*1024, 3) + runSimServer(t, ":7206", false, 10, 512*1024, 3) }) t.Run("packet-size=1024*1024,batch=2", func(t *testing.T) { - testSimServer(t, ":7207", 10, 1024*1024, 2) + runSimServer(t, ":7207", true, 10, 1024*1024, 2) }) } -func testSimServer(t *testing.T, addr string, nclients, packetSize, packetBatch int) { +func runSimServer(t *testing.T, addr string, et bool, nclients, packetSize, packetBatch int) { ts := &simServer{ tester: t, network: "tcp", @@ -1376,13 +1459,14 @@ func testSimServer(t *testing.T, addr string, nclients, packetSize, packetBatch } err := Run(ts, ts.network+"://"+ts.addr, + WithEdgeTriggeredIO(et), WithMulticore(ts.multicore), WithTicker(true), WithTCPKeepAlive(time.Minute*1)) assert.NoError(t, err) } -func runClient(t *testing.T, network, addr string, packetSize, batch int) { +func runSimClient(t *testing.T, network, addr string, packetSize, batch int) { rand.Seed(time.Now().UnixNano()) c, err := net.Dial(network, addr) require.NoError(t, err) diff --git a/internal/netpoll/poll_data_unix.go b/internal/netpoll/defs_poller.go similarity index 100% rename from internal/netpoll/poll_data_unix.go rename to internal/netpoll/defs_poller.go diff --git 
a/internal/netpoll/epoll_events.go b/internal/netpoll/defs_poller_epoll.go similarity index 89% rename from internal/netpoll/epoll_events.go rename to internal/netpoll/defs_poller_epoll.go index a9a5a3c7b..d67f36d6f 100644 --- a/internal/netpoll/epoll_events.go +++ b/internal/netpoll/defs_poller_epoll.go @@ -34,12 +34,11 @@ const ( // ErrEvents represents exceptional events that are not read/write, like socket being closed, // reading/writing from/to a closed socket, etc. ErrEvents = unix.EPOLLERR | unix.EPOLLHUP | unix.EPOLLRDHUP - // OutEvents combines EPOLLOUT event and some exceptional events. - OutEvents = ErrEvents | unix.EPOLLOUT - // InEvents combines EPOLLIN/EPOLLPRI events and some exceptional events. - InEvents = ErrEvents | unix.EPOLLIN | unix.EPOLLPRI ) +// PollEventHandler is the callback for I/O events notified by the poller. +type PollEventHandler func(int, uint32) error + type eventList struct { size int events []epollevent diff --git a/internal/netpoll/kqueue_events.go b/internal/netpoll/defs_poller_kqueue.go similarity index 83% rename from internal/netpoll/kqueue_events.go rename to internal/netpoll/defs_poller_kqueue.go index ba55cd7fd..0b2e883b8 100644 --- a/internal/netpoll/kqueue_events.go +++ b/internal/netpoll/defs_poller_kqueue.go @@ -31,16 +31,11 @@ const ( MinPollEventsCap = 16 // MaxAsyncTasksAtOneTime is the maximum amount of asynchronous tasks that the event-loop will process at one time. MaxAsyncTasksAtOneTime = 128 - // EVFilterWrite represents writeable events from sockets. - EVFilterWrite = unix.EVFILT_WRITE - // EVFilterRead represents readable events from sockets. - EVFilterRead = unix.EVFILT_READ - // EVFlagsDelete indicates an event has been removed from the kqueue. - EVFlagsDelete = unix.EV_DELETE - // EVFlagsEOF indicates filter-specific EOF condition. - EVFlagsEOF = unix.EV_EOF ) +// PollEventHandler is the callback for I/O events notified by the poller. +type PollEventHandler func(int, int16, uint16) error + type eventList struct { size int events []unix.Kevent_t diff --git a/internal/netpoll/poll_data_bsd.go b/internal/netpoll/poll_data_bsd.go deleted file mode 100644 index 9b605568b..000000000 --- a/internal/netpoll/poll_data_bsd.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2021 The Gnet Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build freebsd || dragonfly || netbsd || openbsd || darwin -// +build freebsd dragonfly netbsd openbsd darwin - -package netpoll - -// PollEventHandler is the callback for I/O events notified by the poller. -type PollEventHandler func(int, int16, uint16) error diff --git a/internal/netpoll/poll_data_linux.go b/internal/netpoll/poll_data_linux.go deleted file mode 100644 index e9571188e..000000000 --- a/internal/netpoll/poll_data_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) 2021 The Gnet Authors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package netpoll - -// PollEventHandler is the callback for I/O events notified by the poller. -type PollEventHandler func(int, uint32) error diff --git a/internal/netpoll/epoll_default_poller.go b/internal/netpoll/poller_epoll_default.go similarity index 81% rename from internal/netpoll/epoll_default_poller.go rename to internal/netpoll/poller_epoll_default.go index b8e686192..c21765772 100644 --- a/internal/netpoll/epoll_default_poller.go +++ b/internal/netpoll/poller_epoll_default.go @@ -57,7 +57,7 @@ func OpenPoller() (poller *Poller, err error) { return } poller.efdBuf = make([]byte, 8) - if err = poller.AddRead(&PollAttachment{FD: poller.efd}); err != nil { + if err = poller.AddRead(&PollAttachment{FD: poller.efd}, true); err != nil { _ = poller.Close() poller = nil return @@ -100,8 +100,13 @@ func (p *Poller) Trigger(priority queue.EventPriority, fn queue.TaskFunc, arg in p.urgentAsyncTaskQueue.Enqueue(task) } if atomic.CompareAndSwapInt32(&p.wakeupCall, 0, 1) { - if _, err = unix.Write(p.efd, b); err == unix.EAGAIN { - err = nil + for { + _, err = unix.Write(p.efd, b) + if err == unix.EAGAIN { + _, _ = unix.Read(p.efd, p.efdBuf) + continue + } + break } } return os.NewSyscallError("write", err) @@ -129,7 +134,6 @@ func (p *Poller) Polling(callback PollEventHandler) error { ev := &el.events[i] if fd := int(ev.Fd); fd == p.efd { // poller is awakened to run tasks in queues. doChores = true - _, _ = unix.Read(p.efd, p.efdBuf) } else { switch err = callback(fd, ev.Events); err { case nil: @@ -169,10 +173,16 @@ func (p *Poller) Polling(callback PollEventHandler) error { } atomic.StoreInt32(&p.wakeupCall, 0) if (!p.asyncTaskQueue.IsEmpty() || !p.urgentAsyncTaskQueue.IsEmpty()) && atomic.CompareAndSwapInt32(&p.wakeupCall, 0, 1) { - switch _, err = unix.Write(p.efd, b); err { - case nil, unix.EAGAIN: - default: - doChores = true + for { + _, err = unix.Write(p.efd, b) + if err == unix.EAGAIN { + _, _ = unix.Read(p.efd, p.efdBuf) + continue + } + if err != nil { + logging.Errorf("failed to notify next round of event-loop for leftover tasks, %v", os.NewSyscallError("write", err)) + } + break } } } @@ -186,39 +196,59 @@ func (p *Poller) Polling(callback PollEventHandler) error { } const ( - readEvents = unix.EPOLLPRI | unix.EPOLLIN - writeEvents = unix.EPOLLOUT + readEvents = unix.EPOLLIN | unix.EPOLLPRI | unix.EPOLLRDHUP + writeEvents = unix.EPOLLOUT | unix.EPOLLRDHUP readWriteEvents = readEvents | writeEvents ) // AddReadWrite registers the given file-descriptor with readable and writable events to the poller. 
-func (p *Poller) AddReadWrite(pa *PollAttachment) error { +func (p *Poller) AddReadWrite(pa *PollAttachment, edgeTriggered bool) error { + var ev uint32 = readWriteEvents + if edgeTriggered { + ev |= unix.EPOLLET + } return os.NewSyscallError("epoll_ctl add", - unix.EpollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: readWriteEvents})) + unix.EpollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: ev})) } // AddRead registers the given file-descriptor with readable event to the poller. -func (p *Poller) AddRead(pa *PollAttachment) error { +func (p *Poller) AddRead(pa *PollAttachment, edgeTriggered bool) error { + var ev uint32 = readEvents + if edgeTriggered { + ev |= unix.EPOLLET + } return os.NewSyscallError("epoll_ctl add", - unix.EpollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: readEvents})) + unix.EpollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: ev})) } // AddWrite registers the given file-descriptor with writable event to the poller. -func (p *Poller) AddWrite(pa *PollAttachment) error { +func (p *Poller) AddWrite(pa *PollAttachment, edgeTriggered bool) error { + var ev uint32 = writeEvents + if edgeTriggered { + ev |= unix.EPOLLET + } return os.NewSyscallError("epoll_ctl add", - unix.EpollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: writeEvents})) + unix.EpollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: ev})) } // ModRead renews the given file-descriptor with readable event in the poller. -func (p *Poller) ModRead(pa *PollAttachment) error { +func (p *Poller) ModRead(pa *PollAttachment, edgeTriggered bool) error { + var ev uint32 = readEvents + if edgeTriggered { + ev |= unix.EPOLLET + } return os.NewSyscallError("epoll_ctl mod", - unix.EpollCtl(p.fd, unix.EPOLL_CTL_MOD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: readEvents})) + unix.EpollCtl(p.fd, unix.EPOLL_CTL_MOD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: ev})) } // ModReadWrite renews the given file-descriptor with readable and writable events in the poller. -func (p *Poller) ModReadWrite(pa *PollAttachment) error { +func (p *Poller) ModReadWrite(pa *PollAttachment, edgeTriggered bool) error { + var ev uint32 = readWriteEvents + if edgeTriggered { + ev |= unix.EPOLLET + } return os.NewSyscallError("epoll_ctl mod", - unix.EpollCtl(p.fd, unix.EPOLL_CTL_MOD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: readWriteEvents})) + unix.EpollCtl(p.fd, unix.EPOLL_CTL_MOD, pa.FD, &unix.EpollEvent{Fd: int32(pa.FD), Events: ev})) } // Delete removes the given file-descriptor from the poller. 
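A note on the semantics introduced above: the new edgeTriggered flag maps to EPOLLET on epoll and to EV_CLEAR on kqueue, under which the kernel reports readiness only on state transitions rather than re-reporting it while unread data remains buffered. An edge-triggered consumer must therefore drain every notification until the socket would block. The following is a minimal illustrative sketch of that drain-until-EAGAIN contract; the package name, drainRead, its 4KB chunk size, and the handle callback are hypothetical and are not code from this patch:

package etio // hypothetical package, for illustration only

import (
	"io"

	"golang.org/x/sys/unix"
)

// drainRead keeps reading from fd until the kernel reports EAGAIN, which is
// the contract an edge-triggered consumer must honor; under level-triggered
// polling, returning early is harmless because the poller will fire again
// as long as unread data remains.
func drainRead(fd int, handle func([]byte)) error {
	buf := make([]byte, 4096) // arbitrary chunk size for illustration
	for {
		n, err := unix.Read(fd, buf)
		if n > 0 {
			handle(buf[:n]) // hand off whatever arrived in this chunk
		}
		switch {
		case err == unix.EINTR:
			continue // interrupted by a signal, retry the read
		case err == unix.EAGAIN:
			return nil // socket buffer drained, wait for the next edge
		case err != nil:
			return err // real error, the caller should close the connection
		case n == 0:
			return io.EOF // the remote closed its end
		}
	}
}

On the user-facing side, the mode is opt-in via the WithEdgeTriggeredIO option elsewhere in this patch, e.g. gnet.Run(handler, "tcp://:9000", gnet.WithEdgeTriggeredIO(true)).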
diff --git a/internal/netpoll/epoll_optimized_poller.go b/internal/netpoll/poller_epoll_ultimate.go similarity index 85% rename from internal/netpoll/epoll_optimized_poller.go rename to internal/netpoll/poller_epoll_ultimate.go index 2c5db7353..479415366 100644 --- a/internal/netpoll/epoll_optimized_poller.go +++ b/internal/netpoll/poller_epoll_ultimate.go @@ -58,7 +58,7 @@ func OpenPoller() (poller *Poller, err error) { } poller.efdBuf = make([]byte, 8) poller.epa = &PollAttachment{FD: efd} - if err = poller.AddRead(poller.epa); err != nil { + if err = poller.AddRead(poller.epa, true); err != nil { _ = poller.Close() poller = nil return @@ -101,8 +101,13 @@ func (p *Poller) Trigger(priority queue.EventPriority, fn queue.TaskFunc, arg in p.urgentAsyncTaskQueue.Enqueue(task) } if atomic.CompareAndSwapInt32(&p.wakeupCall, 0, 1) { - if _, err = unix.Write(p.epa.FD, b); err == unix.EAGAIN { - err = nil + for { + _, err = unix.Write(p.epa.FD, b) + if err == unix.EAGAIN { + _, _ = unix.Read(p.epa.FD, p.efdBuf) + continue + } + break } } return os.NewSyscallError("write", err) @@ -131,7 +136,6 @@ func (p *Poller) Polling() error { pollAttachment := *(**PollAttachment)(unsafe.Pointer(&ev.data)) if pollAttachment.FD == p.epa.FD { // poller is awakened to run tasks in queues. doChores = true - _, _ = unix.Read(p.epa.FD, p.efdBuf) } else { switch err = pollAttachment.Callback(pollAttachment.FD, ev.events); err { case nil: @@ -171,10 +175,16 @@ func (p *Poller) Polling() error { } atomic.StoreInt32(&p.wakeupCall, 0) if (!p.asyncTaskQueue.IsEmpty() || !p.urgentAsyncTaskQueue.IsEmpty()) && atomic.CompareAndSwapInt32(&p.wakeupCall, 0, 1) { - switch _, err = unix.Write(p.epa.FD, b); err { - case nil, unix.EAGAIN: - default: - doChores = true + for { + _, err = unix.Write(p.epa.FD, b) + if err == unix.EAGAIN { + _, _ = unix.Read(p.epa.FD, p.efdBuf) + continue + } + if err != nil { + logging.Errorf("failed to notify next round of event-loop for leftover tasks, %v", os.NewSyscallError("write", err)) + } + break } } } @@ -188,47 +198,62 @@ func (p *Poller) Polling() error { } const ( - readEvents = unix.EPOLLPRI | unix.EPOLLIN - writeEvents = unix.EPOLLOUT + readEvents = unix.EPOLLIN | unix.EPOLLPRI | unix.EPOLLRDHUP + writeEvents = unix.EPOLLOUT | unix.EPOLLRDHUP readWriteEvents = readEvents | writeEvents ) // AddReadWrite registers the given file-descriptor with readable and writable events to the poller. -func (p *Poller) AddReadWrite(pa *PollAttachment) error { +func (p *Poller) AddReadWrite(pa *PollAttachment, edgeTriggered bool) error { var ev epollevent ev.events = readWriteEvents + if edgeTriggered { + ev.events |= unix.EPOLLET + } *(**PollAttachment)(unsafe.Pointer(&ev.data)) = pa return os.NewSyscallError("epoll_ctl add", epollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &ev)) } // AddRead registers the given file-descriptor with readable event to the poller. -func (p *Poller) AddRead(pa *PollAttachment) error { +func (p *Poller) AddRead(pa *PollAttachment, edgeTriggered bool) error { var ev epollevent ev.events = readEvents + if edgeTriggered { + ev.events |= unix.EPOLLET + } *(**PollAttachment)(unsafe.Pointer(&ev.data)) = pa return os.NewSyscallError("epoll_ctl add", epollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &ev)) } // AddWrite registers the given file-descriptor with writable event to the poller. 
-func (p *Poller) AddWrite(pa *PollAttachment) error { +func (p *Poller) AddWrite(pa *PollAttachment, edgeTriggered bool) error { var ev epollevent ev.events = writeEvents + if edgeTriggered { + ev.events |= unix.EPOLLET + } *(**PollAttachment)(unsafe.Pointer(&ev.data)) = pa return os.NewSyscallError("epoll_ctl add", epollCtl(p.fd, unix.EPOLL_CTL_ADD, pa.FD, &ev)) } // ModRead renews the given file-descriptor with readable event in the poller. -func (p *Poller) ModRead(pa *PollAttachment) error { +func (p *Poller) ModRead(pa *PollAttachment, edgeTriggered bool) error { var ev epollevent ev.events = readEvents + if edgeTriggered { + ev.events |= unix.EPOLLET + } *(**PollAttachment)(unsafe.Pointer(&ev.data)) = pa return os.NewSyscallError("epoll_ctl mod", epollCtl(p.fd, unix.EPOLL_CTL_MOD, pa.FD, &ev)) } // ModReadWrite renews the given file-descriptor with readable and writable events in the poller. -func (p *Poller) ModReadWrite(pa *PollAttachment) error { +func (p *Poller) ModReadWrite(pa *PollAttachment, edgeTriggered bool) error { var ev epollevent ev.events = readWriteEvents + if edgeTriggered { + ev.events |= unix.EPOLLET + } *(**PollAttachment)(unsafe.Pointer(&ev.data)) = pa return os.NewSyscallError("epoll_ctl mod", epollCtl(p.fd, unix.EPOLL_CTL_MOD, pa.FD, &ev)) } diff --git a/internal/netpoll/kqueue_default_poller.go b/internal/netpoll/poller_kqueue_default.go similarity index 86% rename from internal/netpoll/kqueue_default_poller.go rename to internal/netpoll/poller_kqueue_default.go index 12f29b443..4a04c3d80 100644 --- a/internal/netpoll/kqueue_default_poller.go +++ b/internal/netpoll/poller_kqueue_default.go @@ -179,32 +179,44 @@ func (p *Poller) Polling(callback PollEventHandler) error { } // AddReadWrite registers the given file-descriptor with readable and writable events to the poller. -func (p *Poller) AddReadWrite(pa *PollAttachment) error { +func (p *Poller) AddReadWrite(pa *PollAttachment, edgeTriggered bool) error { + var flags uint16 = unix.EV_ADD + if edgeTriggered { + flags |= unix.EV_CLEAR + } _, err := unix.Kevent(p.fd, []unix.Kevent_t{ - {Ident: keventIdent(pa.FD), Flags: unix.EV_ADD, Filter: unix.EVFILT_READ}, - {Ident: keventIdent(pa.FD), Flags: unix.EV_ADD, Filter: unix.EVFILT_WRITE}, + {Ident: keventIdent(pa.FD), Flags: flags, Filter: unix.EVFILT_READ}, + {Ident: keventIdent(pa.FD), Flags: flags, Filter: unix.EVFILT_WRITE}, }, nil, nil) return os.NewSyscallError("kevent add", err) } // AddRead registers the given file-descriptor with readable event to the poller. -func (p *Poller) AddRead(pa *PollAttachment) error { +func (p *Poller) AddRead(pa *PollAttachment, edgeTriggered bool) error { + var flags uint16 = unix.EV_ADD + if edgeTriggered { + flags |= unix.EV_CLEAR + } _, err := unix.Kevent(p.fd, []unix.Kevent_t{ - {Ident: keventIdent(pa.FD), Flags: unix.EV_ADD, Filter: unix.EVFILT_READ}, + {Ident: keventIdent(pa.FD), Flags: flags, Filter: unix.EVFILT_READ}, }, nil, nil) return os.NewSyscallError("kevent add", err) } // AddWrite registers the given file-descriptor with writable event to the poller. 
-func (p *Poller) AddWrite(pa *PollAttachment) error { +func (p *Poller) AddWrite(pa *PollAttachment, edgeTriggered bool) error { + var flags uint16 = unix.EV_ADD + if edgeTriggered { + flags |= unix.EV_CLEAR + } _, err := unix.Kevent(p.fd, []unix.Kevent_t{ - {Ident: keventIdent(pa.FD), Flags: unix.EV_ADD, Filter: unix.EVFILT_WRITE}, + {Ident: keventIdent(pa.FD), Flags: flags, Filter: unix.EVFILT_WRITE}, }, nil, nil) return os.NewSyscallError("kevent add", err) } // ModRead renews the given file-descriptor with readable event in the poller. -func (p *Poller) ModRead(pa *PollAttachment) error { +func (p *Poller) ModRead(pa *PollAttachment, _ bool) error { _, err := unix.Kevent(p.fd, []unix.Kevent_t{ {Ident: keventIdent(pa.FD), Flags: unix.EV_DELETE, Filter: unix.EVFILT_WRITE}, }, nil, nil) @@ -212,9 +224,13 @@ func (p *Poller) ModRead(pa *PollAttachment) error { } // ModReadWrite renews the given file-descriptor with readable and writable events in the poller. -func (p *Poller) ModReadWrite(pa *PollAttachment) error { +func (p *Poller) ModReadWrite(pa *PollAttachment, edgeTriggered bool) error { + var flags uint16 = unix.EV_ADD + if edgeTriggered { + flags |= unix.EV_CLEAR + } _, err := unix.Kevent(p.fd, []unix.Kevent_t{ - {Ident: keventIdent(pa.FD), Flags: unix.EV_ADD, Filter: unix.EVFILT_WRITE}, + {Ident: keventIdent(pa.FD), Flags: flags, Filter: unix.EVFILT_WRITE}, }, nil, nil) return os.NewSyscallError("kevent add", err) } diff --git a/internal/netpoll/kqueue_optimized_poller.go b/internal/netpoll/poller_kqueue_ultimate.go similarity index 92% rename from internal/netpoll/kqueue_optimized_poller.go rename to internal/netpoll/poller_kqueue_ultimate.go index 1b5b69e7b..397dad052 100644 --- a/internal/netpoll/kqueue_optimized_poller.go +++ b/internal/netpoll/poller_kqueue_ultimate.go @@ -181,10 +181,13 @@ func (p *Poller) Polling() error { } // AddReadWrite registers the given file-descriptor with readable and writable events to the poller. -func (p *Poller) AddReadWrite(pa *PollAttachment) error { +func (p *Poller) AddReadWrite(pa *PollAttachment, edgeTriggered bool) error { var evs [2]unix.Kevent_t evs[0].Ident = keventIdent(pa.FD) evs[0].Flags = unix.EV_ADD + if edgeTriggered { + evs[0].Flags |= unix.EV_CLEAR + } evs[0].Filter = unix.EVFILT_READ evs[0].Udata = (*byte)(unsafe.Pointer(pa)) evs[1] = evs[0] @@ -194,10 +197,13 @@ func (p *Poller) AddReadWrite(pa *PollAttachment) error { } // AddRead registers the given file-descriptor with readable event to the poller. -func (p *Poller) AddRead(pa *PollAttachment) error { +func (p *Poller) AddRead(pa *PollAttachment, edgeTriggered bool) error { var evs [1]unix.Kevent_t evs[0].Ident = keventIdent(pa.FD) evs[0].Flags = unix.EV_ADD + if edgeTriggered { + evs[0].Flags |= unix.EV_CLEAR + } evs[0].Filter = unix.EVFILT_READ evs[0].Udata = (*byte)(unsafe.Pointer(pa)) _, err := unix.Kevent(p.fd, evs[:], nil, nil) @@ -205,10 +211,13 @@ func (p *Poller) AddRead(pa *PollAttachment) error { } // AddWrite registers the given file-descriptor with writable event to the poller. 
-func (p *Poller) AddWrite(pa *PollAttachment) error { +func (p *Poller) AddWrite(pa *PollAttachment, edgeTriggered bool) error { var evs [1]unix.Kevent_t evs[0].Ident = keventIdent(pa.FD) evs[0].Flags = unix.EV_ADD + if edgeTriggered { + evs[0].Flags |= unix.EV_CLEAR + } evs[0].Filter = unix.EVFILT_WRITE evs[0].Udata = (*byte)(unsafe.Pointer(pa)) _, err := unix.Kevent(p.fd, evs[:], nil, nil) @@ -216,7 +225,7 @@ func (p *Poller) AddWrite(pa *PollAttachment) error { } // ModRead renews the given file-descriptor with readable event in the poller. -func (p *Poller) ModRead(pa *PollAttachment) error { +func (p *Poller) ModRead(pa *PollAttachment, _ bool) error { var evs [1]unix.Kevent_t evs[0].Ident = keventIdent(pa.FD) evs[0].Flags = unix.EV_DELETE @@ -227,10 +236,13 @@ func (p *Poller) ModRead(pa *PollAttachment) error { } // ModReadWrite renews the given file-descriptor with readable and writable events in the poller. -func (p *Poller) ModReadWrite(pa *PollAttachment) error { +func (p *Poller) ModReadWrite(pa *PollAttachment, edgeTriggered bool) error { var evs [1]unix.Kevent_t evs[0].Ident = keventIdent(pa.FD) evs[0].Flags = unix.EV_ADD + if edgeTriggered { + evs[0].Flags |= unix.EV_CLEAR + } evs[0].Filter = unix.EVFILT_WRITE evs[0].Udata = (*byte)(unsafe.Pointer(pa)) _, err := unix.Kevent(p.fd, evs[:], nil, nil) diff --git a/internal/netpoll/fd_unix.go b/internal/socket/fd_unix.go similarity index 99% rename from internal/netpoll/fd_unix.go rename to internal/socket/fd_unix.go index 65fb730a0..32e1d5d10 100644 --- a/internal/netpoll/fd_unix.go +++ b/internal/socket/fd_unix.go @@ -15,7 +15,7 @@ //go:build linux || freebsd || dragonfly || netbsd || openbsd || darwin // +build linux freebsd dragonfly netbsd openbsd darwin -package netpoll +package socket import ( "sync/atomic" diff --git a/listener_unix.go b/listener_unix.go index ceb734187..95bf0171d 100644 --- a/listener_unix.go +++ b/listener_unix.go @@ -46,7 +46,7 @@ func (ln *listener) packPollAttachment(handler netpoll.PollEventHandler) *netpol } func (ln *listener) dup() (int, string, error) { - return netpoll.Dup(ln.fd) + return socket.Dup(ln.fd) } func (ln *listener) normalize() (err error) { diff --git a/options.go b/options.go index fb39e7fbf..7eb355ed3 100644 --- a/options.go +++ b/options.go @@ -68,7 +68,7 @@ type Options struct { // ============================= Options for both server-side and client-side ============================= - // ReadBufferCap is the maximum number of bytes that can be read from the peer when the readable event comes. + // ReadBufferCap is the maximum number of bytes that can be read from the remote when the readable event comes. // The default value is 64KB, it can either be reduced to avoid starving the subsequent connections or increased // to read more data from a socket. // @@ -122,6 +122,11 @@ type Options struct { // Logger is the customized logger for logging info, if it is not set, // then gnet will use the default logger powered by go.uber.org/zap. Logger logging.Logger + + // EdgeTriggeredIO enables the edge-triggered I/O for the underlying epoll/kqueue event-loop. + // Don't enable it unless you are 100% sure what you are doing. + // Note that this option is only available for TCP protocol. + EdgeTriggeredIO bool } // WithOptions sets up all options. @@ -249,3 +254,10 @@ func WithMulticastInterfaceIndex(idx int) Option { opts.MulticastInterfaceIndex = idx } } + +// WithEdgeTriggeredIO enables the edge-triggered I/O for the underlying epoll/kqueue event-loop. 
+func WithEdgeTriggeredIO(et bool) Option { + return func(opts *Options) { + opts.EdgeTriggeredIO = et + } +} diff --git a/pkg/buffer/elastic/elastic_buffer_test.go b/pkg/buffer/elastic/elastic_buffer_test.go index 971e75c27..f000ac00d 100644 --- a/pkg/buffer/elastic/elastic_buffer_test.go +++ b/pkg/buffer/elastic/elastic_buffer_test.go @@ -34,21 +34,24 @@ func TestMixedBuffer_Basic(t *testing.T) { require.EqualValues(t, newDataLen, mb.Buffered()) require.EqualValues(t, rbn, mb.ringBuffer.Buffered()) - bs := mb.Peek(-1) + bs, err := mb.Peek(-1) + require.NoError(t, err) var p []byte for _, b := range bs { p = append(p, b...) } require.EqualValues(t, data, p) - bs = mb.Peek(rbn) + bs, err = mb.Peek(rbn) + require.NoError(t, err) p = bs[0] require.EqualValues(t, data[:rbn], p) n, err = mb.Discard(rbn) require.NoError(t, err) require.EqualValues(t, rbn, n) require.NotNil(t, mb.ringBuffer) - bs = mb.Peek(newDataLen - rbn) + bs, err = mb.Peek(newDataLen - rbn) + require.NoError(t, err) p = bs[0] require.EqualValues(t, data[rbn:], p) n, err = mb.Discard(newDataLen - rbn) @@ -82,7 +85,8 @@ func TestMixedBuffer_Basic(t *testing.T) { require.NoError(t, err) require.EqualValues(t, cum-headCum, n) require.EqualValues(t, cum, mb.Buffered()) - bs = mb.Peek(-1) + bs, err = mb.Peek(-1) + require.NoError(t, err) p = p[:0] for _, b := range bs { p = append(p, b...) } @@ -125,7 +129,8 @@ func TestMixedBuffer_ReadFrom(t *testing.T) { require.NoError(t, err) require.EqualValues(t, dataLen, m) require.EqualValues(t, data, buf) - bs := mb.Peek(dataLen) + bs, err := mb.Peek(dataLen) + require.NoError(t, err) var p []byte for _, b := range bs { p = append(p, b...) } diff --git a/pkg/buffer/elastic/elastic_ring_list_buffer.go b/pkg/buffer/elastic/elastic_ring_list_buffer.go index d16fd3803..624b5af98 100644 --- a/pkg/buffer/elastic/elastic_ring_list_buffer.go +++ b/pkg/buffer/elastic/elastic_ring_list_buffer.go @@ -53,13 +53,15 @@ func (mb *Buffer) Read(p []byte) (n int, err error) { } // Peek returns n bytes as [][]byte, these bytes won't be discarded until Buffer.Discard() is called. -func (mb *Buffer) Peek(n int) [][]byte { - if n <= 0 { +func (mb *Buffer) Peek(n int) ([][]byte, error) { + if n <= 0 || n == math.MaxInt32 { n = math.MaxInt32 + } else if n > mb.Buffered() { + return nil, io.ErrShortBuffer } head, tail := mb.ringBuffer.Peek(n) - if mb.ringBuffer.Buffered() >= n { - return [][]byte{head, tail} + if mb.ringBuffer.Buffered() == n { + return [][]byte{head, tail}, nil } return mb.listBuffer.PeekWithBytes(n, head, tail) } diff --git a/pkg/buffer/linkedlist/linked_list_buffer.go b/pkg/buffer/linkedlist/linked_list_buffer.go index 1f1ea9fbe..0095422ae 100644 --- a/pkg/buffer/linkedlist/linked_list_buffer.go +++ b/pkg/buffer/linkedlist/linked_list_buffer.go @@ -58,9 +58,41 @@ func (llb *Buffer) Read(p []byte) (n int, err error) { return } } + if n == 0 { + err = io.EOF + } return } +// AllocNode allocates a []byte with the given length that is expected to +// be pushed into the Buffer. +func (llb *Buffer) AllocNode(n int) []byte { + return bsPool.Get(n) +} + +// FreeNode puts the given []byte back to the pool to free the memory. +func (llb *Buffer) FreeNode(p []byte) { + bsPool.Put(p) +} + +// Append is like PushBack but appends p without copying it. +func (llb *Buffer) Append(p []byte) { + n := len(p) + if n == 0 { + return + } + llb.pushBack(&node{buf: p}) +} + +// Pop removes and returns the buffer of the head or nil if the list is empty. 
+func (llb *Buffer) Pop() []byte { + n := llb.pop() + if n == nil { + return nil + } + return n.buf +} + // PushFront is a wrapper of pushFront, which accepts []byte as its argument. func (llb *Buffer) PushFront(p []byte) { n := len(p) @@ -85,43 +117,59 @@ func (llb *Buffer) PushBack(p []byte) { // Peek assembles the up to maxBytes of [][]byte based on the list of node, // it won't remove these nodes from l until Discard() is called. -func (llb *Buffer) Peek(maxBytes int) [][]byte { - if maxBytes <= 0 { +func (llb *Buffer) Peek(maxBytes int) ([][]byte, error) { + if maxBytes <= 0 || maxBytes == math.MaxInt32 { maxBytes = math.MaxInt32 + } else if maxBytes > llb.Buffered() { + return nil, io.ErrShortBuffer } llb.bs = llb.bs[:0] var cum int for iter := llb.head; iter != nil; iter = iter.next { - llb.bs = append(llb.bs, iter.buf) - if cum += iter.len(); cum >= maxBytes { + offset := iter.len() + if cum+offset > maxBytes { + offset = maxBytes - cum + } + llb.bs = append(llb.bs, iter.buf[:offset]) + if cum += offset; cum == maxBytes { break } } - return llb.bs + return llb.bs, nil } // PeekWithBytes is like Peek but accepts [][]byte and puts them onto head. -func (llb *Buffer) PeekWithBytes(maxBytes int, bs ...[]byte) [][]byte { - if maxBytes <= 0 { +func (llb *Buffer) PeekWithBytes(maxBytes int, bs ...[]byte) ([][]byte, error) { + if maxBytes <= 0 || maxBytes == math.MaxInt32 { maxBytes = math.MaxInt32 + } else if maxBytes > llb.Buffered() { + return nil, io.ErrShortBuffer } llb.bs = llb.bs[:0] var cum int for _, b := range bs { if n := len(b); n > 0 { - llb.bs = append(llb.bs, b) - if cum += n; cum >= maxBytes { - return llb.bs + offset := n + if cum+offset > maxBytes { + offset = maxBytes - cum + } + llb.bs = append(llb.bs, b[:offset]) + if cum += offset; cum == maxBytes { + return llb.bs, nil } } } for iter := llb.head; iter != nil; iter = iter.next { - llb.bs = append(llb.bs, iter.buf) - if cum += iter.len(); cum >= maxBytes { + offset := iter.len() + if cum+offset > maxBytes { + offset = maxBytes - cum + } + llb.bs = append(llb.bs, iter.buf[:offset]) + if cum += offset; cum == maxBytes { break } } - return llb.bs + return llb.bs, nil } // Discard removes some nodes based on n bytes. diff --git a/pkg/buffer/linkedlist/llbuffer_test.go b/pkg/buffer/linkedlist/llbuffer_test.go index fb5c61477..1a755ca0d 100644 --- a/pkg/buffer/linkedlist/llbuffer_test.go +++ b/pkg/buffer/linkedlist/llbuffer_test.go @@ -28,25 +28,27 @@ func TestLinkedListBuffer_Basic(t *testing.T) { require.EqualValues(t, maxBlocks, llb.Len()) require.EqualValues(t, cum, llb.Buffered()) - bs := llb.Peek(cum / 4) + bs, err := llb.Peek(cum / 4) + require.NoError(t, err) var p []byte for _, b := range bs { p = append(p, b...) } pn := len(p) - require.GreaterOrEqual(t, pn, cum/4) + require.EqualValues(t, pn, cum/4) require.EqualValues(t, buf.Bytes()[:pn], p) tmpA := make([]byte, cum/16) tmpB := make([]byte, cum/16) rand.Read(tmpA) rand.Read(tmpB) - bs = llb.PeekWithBytes(cum/4, tmpA, tmpB) + bs, err = llb.PeekWithBytes(cum/4, tmpA, tmpB) + require.NoError(t, err) p = p[:0] for _, b := range bs { p = append(p, b...) } pn = len(p) - require.GreaterOrEqual(t, pn, cum/4) + require.EqualValues(t, pn, cum/4) var tmpBuf bytes.Buffer tmpBuf.Write(tmpA) tmpBuf.Write(tmpB) diff --git a/reactor_default_bsd.go b/reactor_default_bsd.go deleted file mode 100644 index fa0a7d7bb..000000000 --- a/reactor_default_bsd.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright (c) 2019 The Gnet Authors. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (freebsd || dragonfly || netbsd || openbsd || darwin) && !poll_opt -// +build freebsd dragonfly netbsd openbsd darwin -// +build !poll_opt - -package gnet - -import ( - "io" - "runtime" - - "github.com/panjf2000/gnet/v2/internal/netpoll" - "github.com/panjf2000/gnet/v2/pkg/errors" -) - -func (el *eventloop) activateMainReactor() error { - if el.engine.opts.LockOSThread { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - } - - err := el.poller.Polling(el.engine.accept) - if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("main reactor is exiting in terms of the demand from user, %v", err) - err = nil - } else if err != nil { - el.engine.opts.Logger.Errorf("main reactor is exiting due to error: %v", err) - } - - el.engine.shutdown(err) - - return err -} - -func (el *eventloop) activateSubReactor() error { - if el.engine.opts.LockOSThread { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - } - - err := el.poller.Polling(func(fd int, filter int16, flags uint16) (err error) { - if c := el.connections.getConn(fd); c != nil { - switch { - case flags&netpoll.EVFlagsDelete != 0: - case flags&netpoll.EVFlagsEOF != 0: - switch { - case filter == netpoll.EVFilterRead: // read the remaining data after the peer wrote and closed immediately - err = el.read(c) - case filter == netpoll.EVFilterWrite && !c.outboundBuffer.IsEmpty(): - err = el.write(c) - default: - err = el.close(c, io.EOF) - } - case filter == netpoll.EVFilterRead: - err = el.read(c) - case filter == netpoll.EVFilterWrite && !c.outboundBuffer.IsEmpty(): - err = el.write(c) - } - } - return - }) - if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) - err = nil - } else if err != nil { - el.engine.opts.Logger.Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) - } - - el.closeConns() - el.engine.shutdown(err) - - return err -} - -func (el *eventloop) run() error { - if el.engine.opts.LockOSThread { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - } - - err := el.poller.Polling(func(fd int, filter int16, flags uint16) (err error) { - if c := el.connections.getConn(fd); c != nil { - switch { - case flags&netpoll.EVFlagsDelete != 0: - case flags&netpoll.EVFlagsEOF != 0: - switch { - case filter == netpoll.EVFilterRead: // read the remaining data after the peer wrote and closed immediately - err = el.read(c) - case filter == netpoll.EVFilterWrite && !c.outboundBuffer.IsEmpty(): - err = el.write(c) - default: - err = el.close(c, io.EOF) - } - case filter == netpoll.EVFilterRead: - err = el.read(c) - case filter == netpoll.EVFilterWrite && !c.outboundBuffer.IsEmpty(): - err = el.write(c) - } - return - } - return el.accept(fd, filter, flags) - }) - if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) - err = nil - } else if err != 
nil { - el.engine.opts.Logger.Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) - } - - el.closeConns() - el.engine.shutdown(err) - - return err -} diff --git a/reactor_default_linux.go b/reactor_default_linux.go deleted file mode 100644 index 7fb863f11..000000000 --- a/reactor_default_linux.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2019 The Gnet Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !poll_opt -// +build !poll_opt - -package gnet - -import ( - "runtime" - - "github.com/panjf2000/gnet/v2/internal/netpoll" - "github.com/panjf2000/gnet/v2/pkg/errors" -) - -func (el *eventloop) activateMainReactor() error { - if el.engine.opts.LockOSThread { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - } - - err := el.poller.Polling(el.engine.accept) - if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("main reactor is exiting in terms of the demand from user, %v", err) - err = nil - } else if err != nil { - el.engine.opts.Logger.Errorf("main reactor is exiting due to error: %v", err) - } - - el.engine.shutdown(err) - - return err -} - -func (el *eventloop) activateSubReactor() error { - if el.engine.opts.LockOSThread { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - } - - err := el.poller.Polling(func(fd int, ev uint32) error { - if c := el.connections.getConn(fd); c != nil { - // Don't change the ordering of processing EPOLLOUT | EPOLLRDHUP / EPOLLIN unless you're 100% - // sure what you're doing! - // Re-ordering can easily introduce bugs and bad side-effects, as I found out painfully in the past. - - // We should always check for the EPOLLOUT event first, as we must try to send the leftover data back to - // the peer when any error occurs on a connection. - // - // Either an EPOLLOUT or EPOLLERR event may be fired when a connection is refused. - // In either case write() should take care of it properly: - // 1) writing data back, - // 2) closing the connection. - if ev&netpoll.OutEvents != 0 && !c.outboundBuffer.IsEmpty() { - if err := el.write(c); err != nil { - return err - } - } - if ev&netpoll.InEvents != 0 { - return el.read(c) - } - return nil - } - return nil - }) - - if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) - err = nil - } else if err != nil { - el.engine.opts.Logger.Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) - } - - el.closeConns() - el.engine.shutdown(err) - - return err -} - -func (el *eventloop) run() error { - if el.engine.opts.LockOSThread { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - } - - err := el.poller.Polling(func(fd int, ev uint32) error { - if c := el.connections.getConn(fd); c != nil { - // Don't change the ordering of processing EPOLLOUT | EPOLLRDHUP / EPOLLIN unless you're 100% - // sure what you're doing! - // Re-ordering can easily introduce bugs and bad side-effects, as I found out painfully in the past. 
- - // We should always check for the EPOLLOUT event first, as we must try to send the leftover data back to - // the peer when any error occurs on a connection. - // - // Either an EPOLLOUT or EPOLLERR event may be fired when a connection is refused. - // In either case write() should take care of it properly: - // 1) writing data back, - // 2) closing the connection. - if ev&netpoll.OutEvents != 0 && !c.outboundBuffer.IsEmpty() { - if err := el.write(c); err != nil { - return err - } - } - if ev&netpoll.InEvents != 0 { - return el.read(c) - } - return nil - } - return el.accept(fd, ev) - }) - - if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) - err = nil - } else if err != nil { - el.engine.opts.Logger.Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) - } - - el.closeConns() - el.engine.shutdown(err) - - return err -} diff --git a/reactor_epoll_default.go b/reactor_epoll_default.go new file mode 100644 index 000000000..b2c514dd5 --- /dev/null +++ b/reactor_epoll_default.go @@ -0,0 +1,189 @@ +// Copyright (c) 2019 The Gnet Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux && !poll_opt +// +build linux,!poll_opt + +package gnet + +import ( + "io" + "runtime" + + "golang.org/x/sys/unix" + + "github.com/panjf2000/gnet/v2/internal/netpoll" + "github.com/panjf2000/gnet/v2/pkg/errors" +) + +func (el *eventloop) rotate() error { + if el.engine.opts.LockOSThread { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + } + + err := el.poller.Polling(el.engine.accept) + if err == errors.ErrEngineShutdown { + el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) + err = nil + } else if err != nil { + el.getLogger().Errorf("main reactor is exiting due to error: %v", err) + } + + el.engine.shutdown(err) + + return err +} + +func (el *eventloop) orbit() error { + if el.engine.opts.LockOSThread { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + } + + err := el.poller.Polling(func(fd int, ev uint32) error { + c := el.connections.getConn(fd) + if c == nil { + // Somehow epoll notified us of an event for a stale fd that is not in our connection set. + // We need to delete it from the epoll set. + return el.poller.Delete(fd) + } + + // First check for any unexpected non-IO events. + // For these events we just close the corresponding connection directly. + if ev&netpoll.ErrEvents != 0 && ev&unix.EPOLLIN == 0 && ev&unix.EPOLLOUT == 0 { + c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error + return el.close(c, io.EOF) + } + // Secondly, check for EPOLLOUT before EPOLLIN, as the former has a higher priority + // than the latter regardless of the aliveness of the current connection: + // + // 1. When the connection is alive and the system is overloaded, we want to + // offload the incoming traffic by writing all pending data back to the remotes + // before continuing to read and handle requests. + // 2. When the connection is dead, we need to try writing any pending data back + // to the remote and close the connection first. + // + // We perform eventloop.write for EPOLLOUT because it will take good care of either case. + if ev&(unix.EPOLLOUT|unix.EPOLLERR) != 0 { + if err := el.write(c); err != nil { + return err + } + } + // Check for EPOLLIN before EPOLLRDHUP in case there is pending data in + // the socket buffer. + if ev&(unix.EPOLLIN|unix.EPOLLERR) != 0 { + if err := el.read(c); err != nil { + return err + } + } + // Ultimately, check for EPOLLRDHUP; this event indicates that the remote has + // either closed the connection or shut down the writing half of the connection. + if ev&unix.EPOLLRDHUP != 0 && c.opened { + if ev&unix.EPOLLIN == 0 { // unreadable EPOLLRDHUP, close the connection directly + return el.close(c, io.EOF) + } + // Received the event of EPOLLIN | EPOLLRDHUP, but the previous eventloop.read + // failed to drain the socket buffer, so we make sure we get it done this time. + c.isEOF = true + return el.read(c) + } + return nil + }) + + if err == errors.ErrEngineShutdown { + el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) + err = nil + } else if err != nil { + el.getLogger().Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) + } + + el.closeConns() + el.engine.shutdown(err) + + return err +} + +func (el *eventloop) run() error { + if el.engine.opts.LockOSThread { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + } + + err := el.poller.Polling(func(fd int, ev uint32) error { + c := el.connections.getConn(fd) + if c == nil { + if fd == el.ln.fd { + return el.accept(fd, ev) + } + // Somehow epoll notified us of an event for a stale fd that is not in our connection set. + // We need to delete it from the epoll set. + return el.poller.Delete(fd) + } + + // First check for any unexpected non-IO events. + // For these events we just close the corresponding connection directly. + if ev&netpoll.ErrEvents != 0 && ev&unix.EPOLLIN == 0 && ev&unix.EPOLLOUT == 0 { + c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error + return el.close(c, io.EOF) + } + // Secondly, check for EPOLLOUT before EPOLLIN, as the former has a higher priority + // than the latter regardless of the aliveness of the current connection: + // + // 1. When the connection is alive and the system is overloaded, we want to + // offload the incoming traffic by writing all pending data back to the remotes + // before continuing to read and handle requests. + // 2. When the connection is dead, we need to try writing any pending data back + // to the remote and close the connection first. + // + // We perform eventloop.write for EPOLLOUT because it will take good care of either case. + if ev&(unix.EPOLLOUT|unix.EPOLLERR) != 0 { + if err := el.write(c); err != nil { + return err + } + } + // Check for EPOLLIN before EPOLLRDHUP in case there is pending data in + // the socket buffer. + if ev&(unix.EPOLLIN|unix.EPOLLERR) != 0 { + if err := el.read(c); err != nil { + return err + } + } + // Ultimately, check for EPOLLRDHUP; this event indicates that the remote has + // either closed the connection or shut down the writing half of the connection. 
+ if ev&unix.EPOLLRDHUP != 0 && c.opened { + if ev&unix.EPOLLIN == 0 { // unreadable EPOLLRDHUP, close the connection directly + return el.close(c, io.EOF) + } + // Received the event of EPOLLIN | EPOLLRDHUP, but the previous eventloop.read + // failed to drain the socket buffer, so we make sure we get it done this time. + c.isEOF = true + return el.read(c) + } + return nil + }) + + if err == errors.ErrEngineShutdown { + el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) + err = nil + } else if err != nil { + el.getLogger().Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) + } + + el.closeConns() + el.engine.shutdown(err) + + return err +} diff --git a/reactor_optimized_linux.go b/reactor_epoll_ultimate.go similarity index 67% rename from reactor_optimized_linux.go rename to reactor_epoll_ultimate.go index 73312d9d1..49e1477ba 100644 --- a/reactor_optimized_linux.go +++ b/reactor_epoll_ultimate.go @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build poll_opt -// +build poll_opt +//go:build linux && poll_opt +// +build linux,poll_opt package gnet @@ -23,7 +23,7 @@ import ( "github.com/panjf2000/gnet/v2/pkg/errors" ) -func (el *eventloop) activateMainReactor() error { +func (el *eventloop) rotate() error { if el.engine.opts.LockOSThread { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -31,10 +31,10 @@ func (el *eventloop) activateMainReactor() error { err := el.poller.Polling() if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("main reactor is exiting in terms of the demand from user, %v", err) + el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil } else if err != nil { - el.engine.opts.Logger.Errorf("main reactor is exiting due to error: %v", err) + el.getLogger().Errorf("main reactor is exiting due to error: %v", err) } el.engine.shutdown(err) @@ -42,7 +42,7 @@ func (el *eventloop) activateMainReactor() error { return err } -func (el *eventloop) activateSubReactor() error { +func (el *eventloop) orbit() error { if el.engine.opts.LockOSThread { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -50,10 +50,10 @@ func (el *eventloop) activateSubReactor() error { err := el.poller.Polling() if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) + el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { - el.engine.opts.Logger.Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) + el.getLogger().Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) } el.closeConns() @@ -70,10 +70,10 @@ func (el *eventloop) run() error { err := el.poller.Polling() if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) + el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { - el.engine.opts.Logger.Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) + el.getLogger().Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) } el.closeConns() diff --git a/reactor_kqueue_default.go b/reactor_kqueue_default.go new file mode 100644 index 000000000..4cd533618 --- /dev/null +++ b/reactor_kqueue_default.go @@ -0,0 
+1,166 @@ +// Copyright (c) 2019 The Gnet Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build (freebsd || dragonfly || netbsd || openbsd || darwin) && !poll_opt +// +build freebsd dragonfly netbsd openbsd darwin +// +build !poll_opt + +package gnet + +import ( + "io" + "runtime" + + "golang.org/x/sys/unix" + + "github.com/panjf2000/gnet/v2/pkg/errors" +) + +func (el *eventloop) rotate() error { + if el.engine.opts.LockOSThread { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + } + + err := el.poller.Polling(el.engine.accept) + if err == errors.ErrEngineShutdown { + el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) + err = nil + } else if err != nil { + el.getLogger().Errorf("main reactor is exiting due to error: %v", err) + } + + el.engine.shutdown(err) + + return err +} + +func (el *eventloop) orbit() error { + if el.engine.opts.LockOSThread { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + } + + err := el.poller.Polling(func(fd int, filter int16, flags uint16) (err error) { + c := el.connections.getConn(fd) + if c == nil { + // This might happen when the connection has already been closed, + // the file descriptor will be deleted from kqueue automatically + // as documented in the manual pages, So we just print a warning log. + el.getLogger().Warnf("received event[fd=%d|filter=%d|flags=%d] of a stale connection from event-loop(%d)", fd, filter, flags, el.idx) + return + } + + switch filter { + case unix.EVFILT_READ: + err = el.read(c) + case unix.EVFILT_WRITE: + err = el.write(c) + } + // EV_EOF indicates that the remote has closed the connection. + // We check for EV_EOF after processing the read/write event + // to ensure that nothing is left out on this event filter. + if flags&unix.EV_EOF != 0 && c.opened && err == nil { + switch filter { + case unix.EVFILT_READ: + // Receive the event of EVFILT_READ | EV_EOF, but the previous eventloop.read + // failed to drain the socket buffer, so we make sure we get it done this time. + c.isEOF = true + err = el.read(c) + case unix.EVFILT_WRITE: + // On macOS, the kqueue in both LT and ET mode will notify with one event for the EOF + // of the TCP remote: EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. But for some reason, two + // events will be issued in ET mode for the EOF of the Unix remote in this order: + // 1) EVFILT_WRITE|EV_ADD|EV_CLEAR|EV_EOF, 2) EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. 
+				err = el.write(c)
+			default:
+				c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error
+				err = el.close(c, io.EOF)
+			}
+		}
+		return
+	})
+	if err == errors.ErrEngineShutdown {
+		el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err)
+		err = nil
+	} else if err != nil {
+		el.getLogger().Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err)
+	}
+
+	el.closeConns()
+	el.engine.shutdown(err)
+
+	return err
+}
+
+func (el *eventloop) run() error {
+	if el.engine.opts.LockOSThread {
+		runtime.LockOSThread()
+		defer runtime.UnlockOSThread()
+	}
+
+	err := el.poller.Polling(func(fd int, filter int16, flags uint16) (err error) {
+		c := el.connections.getConn(fd)
+		if c == nil {
+			if fd == el.ln.fd {
+				return el.accept(fd, filter, flags)
+			}
+			// This might happen when the connection has already been closed;
+			// the file descriptor will be deleted from kqueue automatically
+			// as documented in the manual pages, so we just print a warning log.
+			el.getLogger().Warnf("received event[fd=%d|filter=%d|flags=%d] of a stale connection from event-loop(%d)", fd, filter, flags, el.idx)
+			return
+		}
+
+		switch filter {
+		case unix.EVFILT_READ:
+			err = el.read(c)
+		case unix.EVFILT_WRITE:
+			err = el.write(c)
+		}
+		// EV_EOF indicates that the remote has closed the connection.
+		// We check for EV_EOF after processing the read/write event
+		// to ensure that nothing is left out on this event filter.
+		if flags&unix.EV_EOF != 0 && c.opened && err == nil {
+			switch filter {
+			case unix.EVFILT_READ:
+				// Received the event of EVFILT_READ | EV_EOF, but the previous eventloop.read
+				// failed to drain the socket buffer, so we make sure we get it done this time.
+				c.isEOF = true
+				err = el.read(c)
+			case unix.EVFILT_WRITE:
+				// On macOS, the kqueue in both LT and ET mode will notify with one event for the EOF
+				// of the TCP remote: EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. But for some reason, two
+				// events will be issued in ET mode for the EOF of the Unix remote in this order:
+				// 1) EVFILT_WRITE|EV_ADD|EV_CLEAR|EV_EOF, 2) EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF.
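+				// So we flush the pending outbound data here first; the trailing
+				// EVFILT_READ|EV_EOF event will then drain and close this connection.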
+ err = el.write(c) + default: + c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error + err = el.close(c, io.EOF) + } + } + return + }) + if err == errors.ErrEngineShutdown { + el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) + err = nil + } else if err != nil { + el.getLogger().Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) + } + + el.closeConns() + el.engine.shutdown(err) + + return err +} diff --git a/reactor_optimized_bsd.go b/reactor_kqueue_ultimate.go similarity index 70% rename from reactor_optimized_bsd.go rename to reactor_kqueue_ultimate.go index 2938fa553..e09c296df 100644 --- a/reactor_optimized_bsd.go +++ b/reactor_kqueue_ultimate.go @@ -24,7 +24,7 @@ import ( "github.com/panjf2000/gnet/v2/pkg/errors" ) -func (el *eventloop) activateMainReactor() error { +func (el *eventloop) rotate() error { if el.engine.opts.LockOSThread { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -32,10 +32,10 @@ func (el *eventloop) activateMainReactor() error { err := el.poller.Polling() if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("main reactor is exiting in terms of the demand from user, %v", err) + el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil } else if err != nil { - el.engine.opts.Logger.Errorf("main reactor is exiting due to error: %v", err) + el.getLogger().Errorf("main reactor is exiting due to error: %v", err) } el.engine.shutdown(err) @@ -43,7 +43,7 @@ func (el *eventloop) activateMainReactor() error { return err } -func (el *eventloop) activateSubReactor() error { +func (el *eventloop) orbit() error { if el.engine.opts.LockOSThread { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -51,10 +51,10 @@ func (el *eventloop) activateSubReactor() error { err := el.poller.Polling() if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) + el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { - el.engine.opts.Logger.Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) + el.getLogger().Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) } el.closeConns() @@ -71,10 +71,10 @@ func (el *eventloop) run() error { err := el.poller.Polling() if err == errors.ErrEngineShutdown { - el.engine.opts.Logger.Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) + el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { - el.engine.opts.Logger.Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) + el.getLogger().Errorf("event-loop(%d) is exiting due to error: %v", el.idx, err) } el.closeConns() From 791cc48922617e83b74b424d6d5d7326f73404fe Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Fri, 19 Apr 2024 21:46:48 +0800 Subject: [PATCH 02/12] doc: update READMEs --- README.md | 1 + README_ZH.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index 6b6ad1680..d9e9d29c9 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,7 @@ English | [中文](README_ZH.md) - [x] Flexible ticker event - [x] Implementation of `gnet` Client - [x] **Windows** platform support (For compatibility in development only, do not use it in production) +- [x] **Edge-triggered** I/O support - [ ] Multiple 
network addresses binding - [ ] **TLS** support - [ ] [io_uring](https://kernel.dk/io_uring.pdf) support diff --git a/README_ZH.md b/README_ZH.md index 7c42c4d4d..e126e2656 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -42,6 +42,7 @@ - [x] 灵活的事件定时器 - [x] 实现 `gnet` 客户端 - [x] 支持 **Windows** 平台 (仅用于开发环境的兼容性,不要在生产环境中使用) +- [x] **Edge-triggered** I/O 支持 - [ ] 多网络地址绑定 - [ ] 支持 **TLS** - [ ] 支持 [io_uring](https://kernel.dk/io_uring.pdf) From bf7121dfaf6ab1ef3dd122edcc1f459d0b5c00a6 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 21 Apr 2024 14:17:23 +0800 Subject: [PATCH 03/12] feat: support multiple network addresses binding (#578) Fixes #428 --- README.md | 2 +- README_ZH.md | 2 +- acceptor_unix.go | 10 +- acceptor_windows.go | 100 ++++----- client_test.go | 8 +- client_unix.go | 13 +- client_windows.go | 12 +- connection_unix.go | 6 +- connection_windows.go | 20 +- engine_unix.go | 156 ++++++++------ engine_windows.go | 37 +++- eventloop_unix.go | 25 +-- gnet.go | 140 +++++++++---- gnet_test.go | 407 ++++++++++++++++++++++++++----------- internal/socket/fd_unix.go | 14 +- listener_unix.go | 2 +- listener_windows.go | 14 +- os_unix_test.go | 4 +- pkg/errors/errors.go | 28 +-- reactor_epoll_default.go | 2 +- reactor_kqueue_default.go | 2 +- 21 files changed, 646 insertions(+), 358 deletions(-) diff --git a/README.md b/README.md index d9e9d29c9..0b61d897d 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ English | [中文](README_ZH.md) - [x] Implementation of `gnet` Client - [x] **Windows** platform support (For compatibility in development only, do not use it in production) - [x] **Edge-triggered** I/O support -- [ ] Multiple network addresses binding +- [x] Multiple network addresses binding - [ ] **TLS** support - [ ] [io_uring](https://kernel.dk/io_uring.pdf) support diff --git a/README_ZH.md b/README_ZH.md index e126e2656..47011a4f5 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -43,7 +43,7 @@ - [x] 实现 `gnet` 客户端 - [x] 支持 **Windows** 平台 (仅用于开发环境的兼容性,不要在生产环境中使用) - [x] **Edge-triggered** I/O 支持 -- [ ] 多网络地址绑定 +- [x] 多网络地址绑定 - [ ] 支持 **TLS** - [ ] 支持 [io_uring](https://kernel.dk/io_uring.pdf) diff --git a/acceptor_unix.go b/acceptor_unix.go index 10467b4e6..4c1d3999c 100644 --- a/acceptor_unix.go +++ b/acceptor_unix.go @@ -45,13 +45,13 @@ func (eng *engine) accept1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { } remoteAddr := socket.SockaddrToTCPOrUnixAddr(sa) - if eng.opts.TCPKeepAlive > 0 && eng.ln.network == "tcp" { + if eng.opts.TCPKeepAlive > 0 && eng.listeners[fd].network == "tcp" { err = socket.SetKeepAlivePeriod(nfd, int(eng.opts.TCPKeepAlive.Seconds())) logging.Error(err) } el := eng.eventLoops.next(remoteAddr) - c := newTCPConn(nfd, el, sa, el.ln.addr, remoteAddr) + c := newTCPConn(nfd, el, sa, el.listeners[fd].addr, remoteAddr) err = el.poller.Trigger(queue.HighPriority, el.register, c) if err != nil { eng.opts.Logger.Errorf("failed to enqueue accepted socket of high-priority: %v", err) @@ -62,7 +62,7 @@ func (eng *engine) accept1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { } func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) error { - if el.ln.network == "udp" { + if el.listeners[fd].network == "udp" { return el.readUDP1(fd, ev, flags) } @@ -81,12 +81,12 @@ func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) } remoteAddr := socket.SockaddrToTCPOrUnixAddr(sa) - if el.engine.opts.TCPKeepAlive > 0 && el.ln.network == "tcp" { + if el.engine.opts.TCPKeepAlive > 0 && el.listeners[fd].network == "tcp" { 
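+		// Propagate the engine-wide keep-alive period to the accepted socket,
+		// converting it from a time.Duration to whole seconds.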
err = socket.SetKeepAlivePeriod(nfd, int(el.engine.opts.TCPKeepAlive/time.Second)) logging.Error(err) } - c := newTCPConn(nfd, el, sa, el.ln.addr, remoteAddr) + c := newTCPConn(nfd, el, sa, el.listeners[fd].addr, remoteAddr) addEvents := el.poller.AddRead if el.engine.opts.EdgeTriggeredIO { addEvents = el.poller.AddReadWrite diff --git a/acceptor_windows.go b/acceptor_windows.go index 25717e59f..ce6005c0e 100644 --- a/acceptor_windows.go +++ b/acceptor_windows.go @@ -23,7 +23,7 @@ import ( errorx "github.com/panjf2000/gnet/v2/pkg/errors" ) -func (eng *engine) listen() (err error) { +func (eng *engine) listenStream(ln net.Listener) (err error) { if eng.opts.LockOSThread { runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -31,56 +31,64 @@ func (eng *engine) listen() (err error) { defer func() { eng.shutdown(err) }() - var buffer [0x10000]byte for { - if eng.ln.pc != nil { - // Read data from UDP socket. - n, addr, e := eng.ln.pc.ReadFrom(buffer[:]) - if e != nil { - err = e - if atomic.LoadInt32(&eng.beingShutdown) == 0 { - eng.opts.Logger.Errorf("failed to receive data from UDP fd due to error:%v", err) - } else if errors.Is(err, net.ErrClosed) { - err = errorx.ErrEngineShutdown - // TODO: errors.Join() is not supported until Go 1.20, - // we will uncomment this line after we bump up the - // minimal supported go version to 1.20. - // err = errors.Join(err, errorx.ErrEngineShutdown) + // Accept TCP socket. + tc, e := ln.Accept() + if e != nil { + err = e + if atomic.LoadInt32(&eng.beingShutdown) == 0 { + eng.opts.Logger.Errorf("Accept() fails due to error: %v", err) + } else if errors.Is(err, net.ErrClosed) { + err = errorx.ErrEngineShutdown + // TODO: errors.Join() is not supported until Go 1.20, + // we will uncomment this line after we bump up the + // minimal supported go version to 1.20. + // err = errors.Join(err, errorx.ErrEngineShutdown) + } + return + } + el := eng.eventLoops.next(tc.RemoteAddr()) + c := newTCPConn(tc, el) + el.ch <- &openConn{c: c} + go func(c *conn, tc net.Conn, el *eventloop) { + var buffer [0x10000]byte + for { + n, err := tc.Read(buffer[:]) + if err != nil { + el.ch <- &netErr{c, err} + return } - return + el.ch <- packTCPConn(c, buffer[:n]) } + }(c, tc, el) + } +} - el := eng.eventLoops.next(addr) - c := newUDPConn(el, eng.ln.addr, addr) - el.ch <- packUDPConn(c, buffer[:n]) - } else { - // Accept TCP socket. - tc, e := eng.ln.ln.Accept() - if e != nil { - err = e - if atomic.LoadInt32(&eng.beingShutdown) == 0 { - eng.opts.Logger.Errorf("Accept() fails due to error: %v", err) - } else if errors.Is(err, net.ErrClosed) { - err = errorx.ErrEngineShutdown - // TODO: ditto. - // err = errors.Join(err, errorx.ErrEngineShutdown) - } - return +func (eng *engine) ListenUDP(pc net.PacketConn) (err error) { + if eng.opts.LockOSThread { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + } + + defer func() { eng.shutdown(err) }() + + var buffer [0x10000]byte + for { + // Read data from UDP socket. + n, addr, e := pc.ReadFrom(buffer[:]) + if e != nil { + err = e + if atomic.LoadInt32(&eng.beingShutdown) == 0 { + eng.opts.Logger.Errorf("failed to receive data from UDP fd due to error:%v", err) + } else if errors.Is(err, net.ErrClosed) { + err = errorx.ErrEngineShutdown + // TODO: ditto. 
+ // err = errors.Join(err, errorx.ErrEngineShutdown) } - el := eng.eventLoops.next(tc.RemoteAddr()) - c := newTCPConn(tc, el) - el.ch <- &openConn{c: c} - go func(c *conn, tc net.Conn, el *eventloop) { - var buffer [0x10000]byte - for { - n, err := tc.Read(buffer[:]) - if err != nil { - el.ch <- &netErr{c, err} - return - } - el.ch <- packTCPConn(c, buffer[:n]) - } - }(c, tc, el) + return } + el := eng.eventLoops.next(addr) + c := newUDPConn(el, pc, pc.LocalAddr(), addr) + el.ch <- packUDPConn(c, buffer[:n]) } } diff --git a/client_test.go b/client_test.go index 3aa3d1fed..dccbf10dc 100644 --- a/client_test.go +++ b/client_test.go @@ -196,7 +196,7 @@ func TestClient(t *testing.T) { }) }) - t.Run("poll-LT-reuseport", func(t *testing.T) { + t.Run("poll-reuseport-LT", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { runClient(t, "tcp", ":9991", false, true, false, false, 10, RoundRobin) @@ -247,7 +247,7 @@ func TestClient(t *testing.T) { }) }) - t.Run("poll-ET-reuseport", func(t *testing.T) { + t.Run("poll-reuseport-ET", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { runClient(t, "tcp", ":9991", true, true, false, false, 10, RoundRobin) @@ -405,7 +405,7 @@ func (s *testClient) OnTraffic(c Conn) (action Action) { } func (s *testClient) OnTick() (delay time.Duration, action Action) { - delay = time.Second / 5 + delay = 100 * time.Millisecond if atomic.CompareAndSwapInt32(&s.started, 0, 1) { for i := 0; i < s.nclients; i++ { atomic.AddInt32(&s.clientActive, 1) @@ -484,7 +484,7 @@ func startGnetClient(t *testing.T, cli *Client, network, addr string, multicore, require.NoError(t, err) rspCh := handler.rspCh duration := time.Duration((rand.Float64()*2+1)*float64(time.Second)) / 2 - t.Logf("test duration: %dms", duration/time.Millisecond) + logging.Debugf("test duration: %v", duration) start := time.Now() for time.Since(start) < duration { reqData := make([]byte, streamLen) diff --git a/client_unix.go b/client_unix.go index 7c50d0608..10bf99c07 100644 --- a/client_unix.go +++ b/client_unix.go @@ -68,7 +68,7 @@ func NewClient(eh EventHandler, opts ...Option) (cli *Client, err error) { shutdownCtx, shutdown := context.WithCancel(context.Background()) eng := engine{ - ln: &listener{}, + listeners: make(map[int]*listener), opts: options, eventHandler: eh, workerPool: struct { @@ -82,9 +82,9 @@ func NewClient(eh EventHandler, opts ...Option) (cli *Client, err error) { eng.ticker.ctx, eng.ticker.cancel = context.WithCancel(context.Background()) } el := eventloop{ - ln: eng.ln, - engine: &eng, - poller: p, + listeners: eng.listeners, + engine: &eng, + poller: p, } rbc := options.ReadBufferCap @@ -115,7 +115,8 @@ func NewClient(eh EventHandler, opts ...Option) (cli *Client, err error) { // Start starts the client event-loop, handing IO events. func (cli *Client) Start() error { - cli.el.eventHandler.OnBoot(Engine{}) + logging.Infof("Starting gnet client with 1 event-loop") + cli.el.eventHandler.OnBoot(Engine{cli.el.engine}) cli.el.engine.workerPool.Go(cli.el.run) // Start the ticker. 
if cli.opts.Ticker { @@ -134,7 +135,7 @@ func (cli *Client) Stop() (err error) { } _ = cli.el.engine.workerPool.Wait() logging.Error(cli.el.poller.Close()) - cli.el.eventHandler.OnShutdown(Engine{}) + cli.el.eventHandler.OnShutdown(Engine{cli.el.engine}) logging.Cleanup() return } diff --git a/client_windows.go b/client_windows.go index 07d2294b2..33e0566ff 100644 --- a/client_windows.go +++ b/client_windows.go @@ -50,8 +50,8 @@ func NewClient(eh EventHandler, opts ...Option) (cli *Client, err error) { shutdownCtx, shutdown := context.WithCancel(context.Background()) eng := &engine{ - ln: &listener{}, - opts: options, + listeners: []*listener{}, + opts: options, workerPool: struct { *errgroup.Group shutdownCtx context.Context @@ -70,7 +70,7 @@ func NewClient(eh EventHandler, opts ...Option) (cli *Client, err error) { } func (cli *Client) Start() error { - cli.el.eventHandler.OnBoot(Engine{}) + cli.el.eventHandler.OnBoot(Engine{cli.el.eng}) cli.el.eng.workerPool.Go(cli.el.run) if cli.opts.Ticker { cli.el.eng.ticker.ctx, cli.el.eng.ticker.cancel = context.WithCancel(context.Background()) @@ -89,7 +89,7 @@ func (cli *Client) Stop() (err error) { cli.el.eng.ticker.cancel() } _ = cli.el.eng.workerPool.Wait() - cli.el.eventHandler.OnShutdown(Engine{}) + cli.el.eventHandler.OnShutdown(Engine{cli.el.eng}) logging.Cleanup() return } @@ -202,7 +202,7 @@ func (cli *Client) EnrollContext(nc net.Conn, ctx interface{}) (gc Conn, err err }(c, nc, cli.el) gc = c case *net.UDPConn: - c := newUDPConn(cli.el, nc.LocalAddr(), nc.RemoteAddr()) + c := newUDPConn(cli.el, nil, nc.LocalAddr(), nc.RemoteAddr()) c.SetContext(ctx) c.rawConn = nc cli.el.ch <- &openConn{c: c, isDatagram: true, cb: func() { close(connOpened) }} @@ -213,7 +213,7 @@ func (cli *Client) EnrollContext(nc net.Conn, ctx interface{}) (gc Conn, err err if err != nil { return } - c := newUDPConn(cli.el, uc.LocalAddr(), uc.RemoteAddr()) + c := newUDPConn(cli.el, nil, uc.LocalAddr(), uc.RemoteAddr()) c.SetContext(ctx) c.rawConn = uc el.ch <- packUDPConn(c, buffer[:n]) diff --git a/connection_unix.go b/connection_unix.go index 18fe3e26e..eb0ab3b59 100644 --- a/connection_unix.go +++ b/connection_unix.go @@ -90,13 +90,13 @@ func (c *conn) release() { c.isEOF = false c.ctx = nil c.buffer = nil - if addr, ok := c.localAddr.(*net.TCPAddr); ok && c.localAddr != c.loop.ln.addr && len(addr.Zone) > 0 { + if addr, ok := c.localAddr.(*net.TCPAddr); ok && len(c.loop.listeners) == 0 && len(addr.Zone) > 0 { bsPool.Put(bs.StringToBytes(addr.Zone)) } if addr, ok := c.remoteAddr.(*net.TCPAddr); ok && len(addr.Zone) > 0 { bsPool.Put(bs.StringToBytes(addr.Zone)) } - if addr, ok := c.localAddr.(*net.UDPAddr); ok && c.localAddr != c.loop.ln.addr && len(addr.Zone) > 0 { + if addr, ok := c.localAddr.(*net.UDPAddr); ok && len(c.loop.listeners) == 0 && len(addr.Zone) > 0 { bsPool.Put(bs.StringToBytes(addr.Zone)) } if addr, ok := c.remoteAddr.(*net.UDPAddr); ok && len(addr.Zone) > 0 { @@ -451,7 +451,7 @@ func (c *conn) RemoteAddr() net.Addr { return c.remoteAddr } // func (c *conn) Gfd() gfd.GFD { return c.gfd } func (c *conn) Fd() int { return c.fd } -func (c *conn) Dup() (fd int, err error) { fd, _, err = socket.Dup(c.fd); return } +func (c *conn) Dup() (fd int, err error) { return socket.Dup(c.fd) } func (c *conn) SetReadBuffer(bytes int) error { return socket.SetRecvBuffer(c.fd, bytes) } func (c *conn) SetWriteBuffer(bytes int) error { return socket.SetSendBuffer(c.fd, bytes) } func (c *conn) SetLinger(sec int) error { return socket.SetLinger(c.fd, sec) } diff --git 
a/connection_windows.go b/connection_windows.go index 21ebb64d4..e46498f9d 100644 --- a/connection_windows.go +++ b/connection_windows.go @@ -49,6 +49,7 @@ type openConn struct { } type conn struct { + pc net.PacketConn ctx interface{} // user-defined context loop *eventloop // owner event-loop buffer *bbPool.ByteBuffer // reuse memory of inbound data as a temporary buffer @@ -102,8 +103,9 @@ func (c *conn) release() { c.buffer = nil } -func newUDPConn(el *eventloop, localAddr, remoteAddr net.Addr) *conn { +func newUDPConn(el *eventloop, pc net.PacketConn, localAddr, remoteAddr net.Addr) *conn { return &conn{ + pc: pc, loop: el, buffer: bbPool.Get(), localAddr: localAddr, @@ -239,13 +241,13 @@ func (c *conn) Discard(n int) (int, error) { } func (c *conn) Write(p []byte) (int, error) { - if c.rawConn == nil && c.loop.eng.ln.pc == nil { + if c.rawConn == nil && c.pc == nil { return 0, net.ErrClosed } if c.rawConn != nil { return c.rawConn.Write(p) } - return c.loop.eng.ln.pc.WriteTo(p, c.remoteAddr) + return c.pc.WriteTo(p, c.remoteAddr) } func (c *conn) Writev(bs [][]byte) (int, error) { @@ -319,7 +321,7 @@ func (c *conn) Fd() (fd int) { } func (c *conn) Dup() (fd int, err error) { - if c.rawConn == nil && c.loop.eng.ln.pc == nil { + if c.rawConn == nil && c.pc == nil { return -1, net.ErrClosed } @@ -330,7 +332,7 @@ func (c *conn) Dup() (fd int, err error) { if c.rawConn != nil { sc, ok = c.rawConn.(syscall.Conn) } else { - sc, ok = c.loop.eng.ln.pc.(syscall.Conn) + sc, ok = c.pc.(syscall.Conn) } if !ok { @@ -365,24 +367,24 @@ func (c *conn) Dup() (fd int, err error) { } func (c *conn) SetReadBuffer(bytes int) error { - if c.rawConn == nil && c.loop.eng.ln.pc == nil { + if c.rawConn == nil && c.pc == nil { return net.ErrClosed } if c.rawConn != nil { return c.rawConn.(interface{ SetReadBuffer(int) error }).SetReadBuffer(bytes) } - return c.loop.eng.ln.pc.(interface{ SetReadBuffer(int) error }).SetReadBuffer(bytes) + return c.pc.(interface{ SetReadBuffer(int) error }).SetReadBuffer(bytes) } func (c *conn) SetWriteBuffer(bytes int) error { - if c.rawConn == nil && c.loop.eng.ln.pc == nil { + if c.rawConn == nil && c.pc == nil { return net.ErrClosed } if c.rawConn != nil { return c.rawConn.(interface{ SetWriteBuffer(int) error }).SetWriteBuffer(bytes) } - return c.loop.eng.ln.pc.(interface{ SetWriteBuffer(int) error }).SetWriteBuffer(bytes) + return c.pc.(interface{ SetWriteBuffer(int) error }).SetWriteBuffer(bytes) } func (c *conn) SetLinger(sec int) error { diff --git a/engine_unix.go b/engine_unix.go index 261b67100..ffba29c1b 100644 --- a/engine_unix.go +++ b/engine_unix.go @@ -20,6 +20,7 @@ package gnet import ( "context" "runtime" + "strings" "sync" "sync/atomic" @@ -29,14 +30,15 @@ import ( "github.com/panjf2000/gnet/v2/internal/netpoll" "github.com/panjf2000/gnet/v2/internal/queue" "github.com/panjf2000/gnet/v2/pkg/errors" + "github.com/panjf2000/gnet/v2/pkg/logging" ) type engine struct { - ln *listener // the listener for accepting new connections - opts *Options // options with engine - acceptor *eventloop // main event-loop for accepting connections - eventLoops loadBalancer // event-loops for handling events - inShutdown int32 // whether the engine is in shutdown + listeners map[int]*listener // listeners for accepting new connections + opts *Options // options with engine + acceptor *eventloop // main event-loop for accepting connections + eventLoops loadBalancer // event-loops for handling events + inShutdown int32 // whether the engine is in shutdown ticker struct { ctx 
context.Context // context for ticker cancel context.CancelFunc // function to stop the ticker @@ -68,12 +70,16 @@ func (eng *engine) shutdown(err error) { func (eng *engine) closeEventLoops() { eng.eventLoops.iterate(func(_ int, el *eventloop) bool { - el.ln.close() + for _, ln := range el.listeners { + ln.close() + } _ = el.poller.Close() return true }) if eng.acceptor != nil { - eng.ln.close() + for _, ln := range eng.listeners { + ln.close() + } err := eng.acceptor.poller.Close() if err != nil { eng.opts.Logger.Errorf("failed to close poller when stopping engine: %v", err) @@ -81,37 +87,42 @@ func (eng *engine) closeEventLoops() { } } -func (eng *engine) runEventLoops(numEventLoop int) (err error) { - network, address := eng.ln.network, eng.ln.address - ln := eng.ln - var striker *eventloop +func (eng *engine) runEventLoops(numEventLoop int) error { + var el0 *eventloop + lns := eng.listeners // Create loops locally and bind the listeners. for i := 0; i < numEventLoop; i++ { if i > 0 { - if ln, err = initListener(network, address, eng.opts); err != nil { - return + lns = make(map[int]*listener, len(eng.listeners)) + for _, l := range eng.listeners { + ln, err := initListener(l.network, l.address, eng.opts) + if err != nil { + return err + } + lns[ln.fd] = ln } } - var p *netpoll.Poller - if p, err = netpoll.OpenPoller(); err == nil { - el := new(eventloop) - el.ln = ln - el.engine = eng - el.poller = p - el.buffer = make([]byte, eng.opts.ReadBufferCap) - el.connections.init() - el.eventHandler = eng.eventHandler - if err = el.poller.AddRead(el.ln.packPollAttachment(el.accept), false); err != nil { - return + p, err := netpoll.OpenPoller() + if err != nil { + return err + } + el := new(eventloop) + el.listeners = lns + el.engine = eng + el.poller = p + el.buffer = make([]byte, eng.opts.ReadBufferCap) + el.connections.init() + el.eventHandler = eng.eventHandler + for _, ln := range lns { + if err = el.poller.AddRead(ln.packPollAttachment(el.accept), false); err != nil { + return err } - eng.eventLoops.register(el) + } + eng.eventLoops.register(el) - // Start the ticker. - if el.idx == 0 && eng.opts.Ticker { - striker = el - } - } else { - return + // Start the ticker. + if eng.opts.Ticker && el.idx == 0 { + el0 = el } } @@ -121,28 +132,30 @@ func (eng *engine) runEventLoops(numEventLoop int) (err error) { return true }) - eng.workerPool.Go(func() error { - striker.ticker(eng.ticker.ctx) - return nil - }) + if el0 != nil { + eng.workerPool.Go(func() error { + el0.ticker(eng.ticker.ctx) + return nil + }) + } - return + return nil } func (eng *engine) activateReactors(numEventLoop int) error { for i := 0; i < numEventLoop; i++ { - if p, err := netpoll.OpenPoller(); err == nil { - el := new(eventloop) - el.ln = eng.ln - el.engine = eng - el.poller = p - el.buffer = make([]byte, eng.opts.ReadBufferCap) - el.connections.init() - el.eventHandler = eng.eventHandler - eng.eventLoops.register(el) - } else { + p, err := netpoll.OpenPoller() + if err != nil { return err } + el := new(eventloop) + el.listeners = eng.listeners + el.engine = eng + el.poller = p + el.buffer = make([]byte, eng.opts.ReadBufferCap) + el.connections.init() + el.eventHandler = eng.eventHandler + eng.eventLoops.register(el) } // Start sub reactors in background. 
@@ -151,23 +164,25 @@ func (eng *engine) activateReactors(numEventLoop int) error {
 		return true
 	})
 
-	if p, err := netpoll.OpenPoller(); err == nil {
-		el := new(eventloop)
-		el.ln = eng.ln
-		el.idx = -1
-		el.engine = eng
-		el.poller = p
-		el.eventHandler = eng.eventHandler
-		if err = el.poller.AddRead(eng.ln.packPollAttachment(eng.accept), false); err != nil {
+	p, err := netpoll.OpenPoller()
+	if err != nil {
+		return err
+	}
+	el := new(eventloop)
+	el.listeners = eng.listeners
+	el.idx = -1
+	el.engine = eng
+	el.poller = p
+	el.eventHandler = eng.eventHandler
+	for _, ln := range eng.listeners {
+		if err = el.poller.AddRead(ln.packPollAttachment(eng.accept), false); err != nil {
 			return err
 		}
-		eng.acceptor = el
-
-		// Start main reactor in background.
-		eng.workerPool.Go(el.rotate)
-	} else {
-		return err
 	}
+	eng.acceptor = el
+
+	// Start main reactor in background.
+	eng.workerPool.Go(el.rotate)
 
 	// Start the ticker.
 	if eng.opts.Ticker {
@@ -181,7 +196,7 @@
 }
 
 func (eng *engine) start(numEventLoop int) error {
-	if eng.opts.ReusePort || eng.ln.network == "udp" {
+	if eng.opts.ReusePort {
 		return eng.runEventLoops(numEventLoop)
 	}
 
@@ -225,8 +240,8 @@ func (eng *engine) stop(s Engine) {
 	atomic.StoreInt32(&eng.inShutdown, 1)
 }
 
-func run(eventHandler EventHandler, listener *listener, options *Options, protoAddr string) error {
-	// Figure out the proper number of event-loops/goroutines to run.
+func run(eventHandler EventHandler, listeners []*listener, options *Options, addrs []string) error {
+	// Figure out the proper number of event-loops to run.
 	numEventLoop := 1
 	if options.Multicore {
 		numEventLoop = runtime.NumCPU()
@@ -238,10 +253,17 @@ func run(eventHandler EventHandler, listener *listener, options *Options, protoA
 		numEventLoop = gfd.EventLoopIndexMax
 	}
 
+	logging.Infof("Launching gnet with %d event-loops, listening on: %s",
+		numEventLoop, strings.Join(addrs, " | "))
+
+	lns := make(map[int]*listener, len(listeners))
+	for _, ln := range listeners {
+		lns[ln.fd] = ln
+	}
 	shutdownCtx, shutdown := context.WithCancel(context.Background())
 	eng := engine{
-		ln:   listener,
-		opts: options,
+		listeners: lns,
+		opts:      options,
 		workerPool: struct {
 			*errgroup.Group
 			shutdownCtx context.Context
@@ -277,7 +299,9 @@ func run(eventHandler EventHandler, listener *listener, options *Options, protoA
 	}
 	defer eng.stop(e)
 
-	allEngines.Store(protoAddr, &eng)
+	for _, addr := range addrs {
+		allEngines.Store(addr, &eng)
+	}
 
 	return nil
 }
diff --git a/engine_windows.go b/engine_windows.go
index d0d3cb304..304b14421 100644
--- a/engine_windows.go
+++ b/engine_windows.go
@@ -18,16 +18,18 @@ import (
 	"context"
 	"errors"
 	"runtime"
+	"strings"
 	"sync"
 	"sync/atomic"
 
 	"golang.org/x/sync/errgroup"
 
 	errorx "github.com/panjf2000/gnet/v2/pkg/errors"
+	"github.com/panjf2000/gnet/v2/pkg/logging"
 )
 
 type engine struct {
-	ln           *listener
+	listeners    []*listener
 	opts         *Options     // options with engine
 	eventLoops   loadBalancer // event-loops for handling events
 	ticker       struct {
@@ -64,7 +66,9 @@ func (eng *engine) closeEventLoops() {
 		el.ch <- errorx.ErrEngineShutdown
 		return true
 	})
-	eng.ln.close()
+	for _, ln := range eng.listeners {
+		ln.close()
+	}
 }
 
 func (eng *engine) start(numEventLoop int) error {
@@ -86,7 +90,18 @@
 		}
 	}
 
-	eng.workerPool.Go(eng.listen)
+	for _, ln := range eng.listeners {
+		l := ln
+		if l.pc != nil {
+			eng.workerPool.Go(func() error {
+				return eng.ListenUDP(l.pc)
+			})
+		} else {
+			eng.workerPool.Go(func() error {
+				return eng.listenStream(l.ln)
+			})
+		}
+	}
 
 	return nil
 }
@@ -111,7 +126,7 @@ func (eng *engine) stop(engine Engine) error {
 	return nil
 }
 
-func run(eventHandler EventHandler, listener *listener, options *Options, protoAddr string) error {
+func run(eventHandler EventHandler, listeners []*listener, options *Options, addrs []string) error {
 	// Figure out the proper number of event-loops/goroutines to run.
 	numEventLoop := 1
 	if options.Multicore {
@@ -121,11 +136,14 @@ func run(eventHandler EventHandler, listener *listener, options *Options, protoA
 		numEventLoop = options.NumEventLoop
 	}
 
+	logging.Infof("Launching gnet with %d event-loops, listening on: %s",
+		numEventLoop, strings.Join(addrs, " | "))
+
 	shutdownCtx, shutdown := context.WithCancel(context.Background())
 	eng := engine{
 		opts:         options,
 		eventHandler: eventHandler,
-		ln:           listener,
+		listeners:    listeners,
 		workerPool: struct {
 			*errgroup.Group
 			shutdownCtx context.Context
@@ -137,6 +155,11 @@ func run(eventHandler EventHandler, listener *listener, options *Options, protoA
 	switch options.LB {
 	case RoundRobin:
 		eng.eventLoops = new(roundRobinLoadBalancer)
+		// If there is more than one listener, we can't use roundRobinLoadBalancer because
+		// it's not concurrency-safe, so we replace it with leastConnectionsLoadBalancer.
+		if len(listeners) > 1 {
+			eng.eventLoops = new(leastConnectionsLoadBalancer)
+		}
 	case LeastConnections:
 		eng.eventLoops = new(leastConnectionsLoadBalancer)
 	case SourceAddrHash:
@@ -160,7 +183,9 @@ func run(eventHandler EventHandler, listener *listener, options *Options, protoA
 	}
 	defer eng.stop(engine) //nolint:errcheck
 
-	allEngines.Store(protoAddr, &eng)
+	for _, addr := range addrs {
+		allEngines.Store(addr, &eng)
+	}
 
 	return nil
 }
diff --git a/eventloop_unix.go b/eventloop_unix.go
index 2d6eae077..375b10675 100644
--- a/eventloop_unix.go
+++ b/eventloop_unix.go
@@ -37,14 +37,14 @@ import (
 )
 
 type eventloop struct {
-	ln           *listener       // listener
-	idx          int             // loop index in the engine loops list
-	cache        bytes.Buffer    // temporary buffer for scattered bytes
-	engine       *engine         // engine in loop
-	poller       *netpoll.Poller // epoll or kqueue
-	buffer       []byte          // read packet buffer whose capacity is set by user, default value is 64KB
-	connections  connMatrix      // loop connections storage
-	eventHandler EventHandler    // user eventHandler
+	listeners    map[int]*listener // listeners
+	idx          int               // loop index in the engine loops list
+	cache        bytes.Buffer      // temporary buffer for scattered bytes
+	engine       *engine           // engine in loop
+	poller       *netpoll.Poller   // epoll or kqueue
+	buffer       []byte            // read packet buffer whose capacity is set by user, default value is 64KB
+	connections  connMatrix        // loop connections storage
+	eventHandler EventHandler      // user eventHandler
 }
 
 func (el *eventloop) getLogger() logging.Logger {
@@ -198,7 +198,7 @@ loop:
 func (el *eventloop) close(c *conn, err error) (rerr error) {
 	if addr := c.localAddr; addr != nil && strings.HasPrefix(c.localAddr.Network(), "udp") {
 		rerr = el.poller.Delete(c.fd)
-		if c.fd != el.ln.fd {
+		if _, ok := el.listeners[c.fd]; !ok {
 			rerr = unix.Close(c.fd)
 			el.connections.delConn(c)
 		}
@@ -260,9 +260,6 @@ func (el *eventloop) wake(c *conn) error {
 }
 
 func (el *eventloop) ticker(ctx context.Context) {
-	if el == nil {
-		return
-	}
 	var (
 		action Action
 		delay  time.Duration
@@ -320,8 +317,8 @@ func (el *eventloop) readUDP1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) erro
 			fd, el.idx, os.NewSyscallError("recvfrom", err))
 	}
 	var c *conn
-	if fd == el.ln.fd {
-		c = newUDPConn(fd, el, el.ln.addr, sa, false)
+	if ln, ok := el.listeners[fd]; ok {
+		c = newUDPConn(fd, el, ln.addr, sa, false)
 	} else {
 		c = el.connections.getConn(fd)
 	}
diff --git a/gnet.go b/gnet.go
index d511ab5be..643cd7264 100644
--- a/gnet.go
+++ b/gnet.go
@@ -50,7 +50,7 @@ type Engine struct {
 
 // Validate checks whether the engine is available.
 func (e Engine) Validate() error {
-	if e.eng == nil {
+	if e.eng == nil || len(e.eng.listeners) == 0 {
 		return errors.ErrEmptyEngine
 	}
 	if e.eng.isInShutdown() {
@@ -76,14 +76,14 @@ func (e Engine) CountConnections() (count int) {
 // It is the caller's responsibility to close dupFD when finished.
 // Closing listener does not affect dupFD, and closing dupFD does not affect listener.
 func (e Engine) Dup() (fd int, err error) {
-	if err = e.Validate(); err != nil {
+	if err := e.Validate(); err != nil {
 		return -1, err
 	}
-
-	var sc string
-	fd, sc, err = e.eng.ln.dup()
-	if err != nil {
-		logging.Warnf("%s failed when duplicating new fd\n", sc)
+	if len(e.eng.listeners) > 1 {
+		return -1, errors.ErrUnsupportedOp
+	}
+	for _, ln := range e.eng.listeners {
+		fd, err = ln.dup()
 	}
 	return
 }
@@ -314,7 +314,7 @@ type Conn interface {
 	// you must invoke it within any method in EventHandler.
 	LocalAddr() (addr net.Addr)
 
-	// RemoteAddr is the connection's remote remote address, it's not concurrency-safe,
+	// RemoteAddr is the connection's remote address, it's not concurrency-safe,
 	// you must invoke it within any method in EventHandler.
 	RemoteAddr() (addr net.Addr)
 
@@ -419,22 +419,7 @@ func (*BuiltinEventEngine) OnTick() (delay time.Duration, action Action) {
 // MaxStreamBufferCap is the default buffer size for each stream-oriented connection(TCP/Unix).
 var MaxStreamBufferCap = 64 * 1024 // 64KB
 
-// Run starts handling events on the specified address.
-//
-// Address should use a scheme prefix and be formatted
-// like `tcp://192.168.0.10:9851` or `unix://socket`.
-// Valid network schemes:
-//
-// tcp - bind to both IPv4 and IPv6
-// tcp4 - IPv4
-// tcp6 - IPv6
-// udp - bind to both IPv4 and IPv6
-// udp4 - IPv4
-// udp6 - IPv6
-// unix - Unix Domain Socket
-//
-// The "tcp" network scheme is assumed when one is not specified.
-func Run(eventHandler EventHandler, protoAddr string, opts ...Option) (err error) {
+func createListeners(addrs []string, opts ...Option) ([]*listener, *Options, error) {
 	options := loadOptions(opts...)
 
 	logger, logFlusher := logging.GetDefaultLogger(), logging.GetDefaultFlusher()
@@ -449,8 +434,6 @@
 	}
 	logging.SetDefaultLoggerAndFlusher(logger, logFlusher)
 
-	defer logging.Cleanup()
-
 	logging.Debugf("default logging level is %s", logging.LogLevel())
 
 	// The maximum number of operating system threads that the Go program can use is initially set to 10000,
 	if options.LockOSThread && options.NumEventLoop > 10000 {
 		logging.Errorf("too many event-loops under LockOSThread mode, should be less than 10,000 "+
 			"while you are trying to set up %d\n", options.NumEventLoop)
-		return errors.ErrTooManyEventLoopThreads
+		return nil, nil, errors.ErrTooManyEventLoopThreads
 	}
 
 	rbc := options.ReadBufferCap
@@ -480,19 +463,76 @@
 		options.WriteBufferCap = math.CeilToPowerOfTwo(wbc)
 	}
 
-	network, addr := parseProtoAddr(protoAddr)
+	// If there is a UDP listener in the list, enable SO_REUSEPORT and disable edge-triggered I/O by default.
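+	// (With SO_REUSEPORT, each event-loop binds its own copy of the UDP socket,
+	// and gnet always reads UDP sockets in level-triggered mode.)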
+ for i := 0; (!options.ReusePort || options.EdgeTriggeredIO) && i < len(addrs); i++ { + proto, _, err := parseProtoAddr(addrs[i]) + if err != nil { + return nil, nil, err + } + if strings.HasPrefix(proto, "udp") { + options.ReusePort = true + options.EdgeTriggeredIO = false + } + } - var ln *listener - if ln, err = initListener(network, addr, options); err != nil { - return + listeners := make([]*listener, len(addrs)) + for i, a := range addrs { + proto, addr, err := parseProtoAddr(a) + if err != nil { + return nil, nil, err + } + ln, err := initListener(proto, addr, options) + if err != nil { + return nil, nil, err + } + listeners[i] = ln } - defer ln.close() - if ln.network == "udp" { - options.EdgeTriggeredIO = false + return listeners, options, nil +} + +// Run starts handling events on the specified address. +// +// Address should use a scheme prefix and be formatted +// like `tcp://192.168.0.10:9851` or `unix://socket`. +// Valid network schemes: +// +// tcp - bind to both IPv4 and IPv6 +// tcp4 - IPv4 +// tcp6 - IPv6 +// udp - bind to both IPv4 and IPv6 +// udp4 - IPv4 +// udp6 - IPv6 +// unix - Unix Domain Socket +// +// The "tcp" network scheme is assumed when one is not specified. +func Run(eventHandler EventHandler, protoAddr string, opts ...Option) error { + listeners, options, err := createListeners([]string{protoAddr}, opts...) + if err != nil { + return err } + defer func() { + for _, ln := range listeners { + ln.close() + } + logging.Cleanup() + }() + return run(eventHandler, listeners, options, []string{protoAddr}) +} - return run(eventHandler, ln, options, protoAddr) +// Rotate is like Run but accepts multiple network addresses. +func Rotate(eventHandler EventHandler, addrs []string, opts ...Option) error { + listeners, options, err := createListeners(addrs, opts...) + if err != nil { + return err + } + defer func() { + for _, ln := range listeners { + ln.close() + } + logging.Cleanup() + }() + return run(eventHandler, listeners, options, addrs) } var ( @@ -504,7 +544,12 @@ var ( // Stop gracefully shuts down the engine without interrupting any active event-loops, // it waits indefinitely for connections and event-loops to be closed and then shuts down. -// Deprecated: The global Stop only shuts down the last registered Engine with the same protocol and IP:Port as the previous Engine's, which can lead to leaks of Engine if you invoke gnet.Run multiple times using the same protocol and IP:Port under the condition that WithReuseAddr(true) and WithReusePort(true) are enabled. Use Engine.Stop instead. +// +// Deprecated: The global Stop only shuts down the last registered Engine with the same +// protocol and IP:Port as the previous Engine's, which can lead to leaks of Engine if +// you invoke gnet.Run multiple times using the same protocol and IP:Port under the +// condition that WithReuseAddr(true) and WithReusePort(true) are enabled. +// Use Engine.Stop instead. 
func Stop(ctx context.Context, protoAddr string) error { var eng *engine if s, ok := allEngines.Load(protoAddr); ok { @@ -533,13 +578,20 @@ func Stop(ctx context.Context, protoAddr string) error { } } -func parseProtoAddr(addr string) (network, address string) { - network = "tcp" - address = strings.ToLower(addr) - if strings.Contains(address, "://") { - pair := strings.Split(address, "://") - network = pair[0] - address = pair[1] +func parseProtoAddr(protoAddr string) (string, string, error) { + protoAddr = strings.ToLower(protoAddr) + if strings.Count(protoAddr, "://") != 1 { + return "", "", errors.ErrInvalidNetworkAddress } - return + pair := strings.SplitN(protoAddr, "://", 2) + proto, addr := pair[0], pair[1] + switch proto { + case "tcp", "tcp4", "tcp6", "udp", "udp4", "udp6", "unix": + default: + return "", "", errors.ErrUnsupportedProtocol + } + if addr == "" { + return "", "", errors.ErrInvalidNetworkAddress + } + return proto, addr, nil } diff --git a/gnet_test.go b/gnet_test.go index 8d0415079..fec73ad16 100644 --- a/gnet_test.go +++ b/gnet_test.go @@ -10,6 +10,7 @@ import ( "math/rand" "net" "runtime" + "strings" "sync/atomic" "testing" "time" @@ -40,66 +41,66 @@ func TestServer(t *testing.T) { t.Run("poll-LT", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", false, false, false, false, false, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, false, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", false, false, true, false, false, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, false, false, true, false, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", false, false, false, true, false, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, false, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", false, false, true, true, false, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, false, false, true, true, false, 10, LeastConnections) }) }) t.Run("tcp-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", false, false, false, true, true, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, false, false, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", false, false, true, true, true, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, false, false, true, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "udp", ":9991", false, false, false, false, false, 10, RoundRobin) + runServer(t, []string{"udp://:9991"}, false, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "udp", ":9992", false, false, true, false, false, 10, LeastConnections) + runServer(t, []string{"udp://:9992"}, false, false, true, false, false, 10, LeastConnections) }) }) t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "udp", ":9991", false, false, false, true, false, 10, RoundRobin) + runServer(t, []string{"udp://:9991"}, false, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "udp", ":9992", false, false, true, true, false, 10, LeastConnections) + runServer(t, 
[]string{"udp://:9992"}, false, false, true, true, false, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", false, false, false, false, false, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, false, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", false, false, true, false, false, 10, SourceAddrHash) + runServer(t, []string{"unix://gnet2.sock"}, false, false, true, false, false, 10, SourceAddrHash) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", false, false, false, true, false, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, false, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", false, false, true, true, false, 10, SourceAddrHash) + runServer(t, []string{"unix://gnet2.sock"}, false, false, true, true, false, 10, SourceAddrHash) }) }) t.Run("unix-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", false, false, false, true, true, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, false, false, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", false, false, true, true, true, 10, SourceAddrHash) + runServer(t, []string{"unix://gnet2.sock"}, false, false, true, true, true, 10, SourceAddrHash) }) }) }) @@ -107,200 +108,340 @@ func TestServer(t *testing.T) { t.Run("poll-ET", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", true, false, false, false, false, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, true, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", true, false, true, false, false, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, true, false, true, false, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", true, false, false, true, false, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, true, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", true, false, true, true, false, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, true, false, true, true, false, 10, LeastConnections) }) }) t.Run("tcp-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", true, false, false, true, true, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, true, false, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", true, false, true, true, true, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, true, false, true, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "udp", ":9991", true, false, false, false, false, 10, RoundRobin) + runServer(t, []string{"udp://:9991"}, true, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "udp", ":9992", true, false, true, false, false, 10, LeastConnections) + runServer(t, []string{"udp://:9992"}, true, false, true, false, false, 10, LeastConnections) }) }) 
t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "udp", ":9991", true, false, false, true, false, 10, RoundRobin) + runServer(t, []string{"udp://:9991"}, true, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "udp", ":9992", true, false, true, true, false, 10, LeastConnections) + runServer(t, []string{"udp://:9992"}, true, false, true, true, false, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", true, false, false, false, false, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, true, false, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", true, false, true, false, false, 10, SourceAddrHash) + runServer(t, []string{"unix://gnet2.sock"}, true, false, true, false, false, 10, SourceAddrHash) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", true, false, false, true, false, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, true, false, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", true, false, true, true, false, 10, SourceAddrHash) + runServer(t, []string{"unix://gnet2.sock"}, true, false, true, true, false, 10, SourceAddrHash) }) }) t.Run("unix-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", true, false, false, true, true, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, true, false, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", true, false, true, true, true, 10, SourceAddrHash) + runServer(t, []string{"unix://gnet2.sock"}, true, false, true, true, true, 10, SourceAddrHash) }) }) }) - t.Run("poll-LT-reuseport", func(t *testing.T) { + t.Run("poll-reuseport-LT", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", false, true, false, false, false, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, false, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", false, true, true, false, false, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, false, true, true, false, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", false, true, false, true, false, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, false, true, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", false, true, true, true, false, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, false, true, true, true, false, 10, LeastConnections) }) }) t.Run("tcp-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", false, true, false, true, true, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, false, true, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", false, true, true, true, true, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, false, true, true, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, 
"udp", ":9991", false, true, false, false, false, 10, RoundRobin) + runServer(t, []string{"udp://:9991"}, false, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "udp", ":9992", false, true, true, false, false, 10, LeastConnections) + runServer(t, []string{"udp://:9992"}, false, true, true, false, false, 10, LeastConnections) }) }) t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "udp", ":9991", false, true, false, true, false, 10, RoundRobin) + runServer(t, []string{"udp://:9991"}, false, true, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "udp", ":9992", false, true, true, true, false, 10, LeastConnections) + runServer(t, []string{"udp://:9992"}, false, true, true, true, false, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", false, true, false, false, false, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, false, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", false, true, true, false, false, 10, LeastConnections) + runServer(t, []string{"unix://gnet2.sock"}, false, true, true, false, false, 10, LeastConnections) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", false, true, false, true, false, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, false, true, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", false, true, true, true, false, 10, LeastConnections) + runServer(t, []string{"unix://gnet2.sock"}, false, true, true, true, false, 10, LeastConnections) }) }) t.Run("unix-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", false, true, false, true, true, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, false, true, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", false, true, true, true, true, 10, LeastConnections) + runServer(t, []string{"unix://gnet2.sock"}, false, true, true, true, true, 10, LeastConnections) }) }) }) - t.Run("poll-ET-reuseport", func(t *testing.T) { + t.Run("poll-reuseport-ET", func(t *testing.T) { t.Run("tcp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", true, true, false, false, false, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, true, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", true, true, true, false, false, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, true, true, true, false, false, 10, LeastConnections) }) }) t.Run("tcp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", true, true, false, true, false, 10, RoundRobin) + runServer(t, []string{"tcp://:9991"}, true, true, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", true, true, true, true, false, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, true, true, true, true, false, 10, LeastConnections) }) }) t.Run("tcp-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "tcp", ":9991", true, true, false, true, true, 10, RoundRobin) + runServer(t, 
[]string{"tcp://:9991"}, true, true, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "tcp", ":9992", true, true, true, true, true, 10, LeastConnections) + runServer(t, []string{"tcp://:9992"}, true, true, true, true, true, 10, LeastConnections) }) }) t.Run("udp", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "udp", ":9991", true, true, false, false, false, 10, RoundRobin) + runServer(t, []string{"udp://:9991"}, true, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "udp", ":9992", true, true, true, false, false, 10, LeastConnections) + runServer(t, []string{"udp://:9992"}, true, true, true, false, false, 10, LeastConnections) }) }) t.Run("udp-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "udp", ":9991", true, true, false, true, false, 10, RoundRobin) + runServer(t, []string{"udp://:9991"}, true, true, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "udp", ":9992", true, true, true, true, false, 10, LeastConnections) + runServer(t, []string{"udp://:9992"}, true, true, true, true, false, 10, LeastConnections) }) }) t.Run("unix", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", true, true, false, false, false, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, true, true, false, false, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", true, true, true, false, false, 10, LeastConnections) + runServer(t, []string{"unix://gnet2.sock"}, true, true, true, false, false, 10, LeastConnections) }) }) t.Run("unix-async", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", true, true, false, true, false, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, true, true, false, true, false, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", true, true, true, true, false, 10, LeastConnections) + runServer(t, []string{"unix://gnet2.sock"}, true, true, true, true, false, 10, LeastConnections) }) }) t.Run("unix-async-writev", func(t *testing.T) { t.Run("1-loop", func(t *testing.T) { - runServer(t, "unix", "gnet1.sock", true, true, false, true, true, 10, RoundRobin) + runServer(t, []string{"unix://gnet1.sock"}, true, true, false, true, true, 10, RoundRobin) }) t.Run("N-loop", func(t *testing.T) { - runServer(t, "unix", "gnet2.sock", true, true, true, true, true, 10, LeastConnections) + runServer(t, []string{"unix://gnet2.sock"}, true, true, true, true, true, 10, LeastConnections) + }) + }) + }) + + t.Run("poll-multi-addrs-LT", func(t *testing.T) { + t.Run("sync", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "udp://:9993", "udp://:9994", "unix://gnet1.sock", "unix://gnet2.sock"}, false, false, false, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "udp://:9997", "udp://:9998", "unix://gnet1.sock", "unix://gnet2.sock"}, false, false, true, false, false, 10, LeastConnections) + }) + }) + t.Run("sync-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "unix://gnet1.sock", "unix://gnet2.sock"}, false, false, false, false, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, 
[]string{"tcp://:9995", "tcp://:9996", "unix://gnet1.sock", "unix://gnet2.sock"}, false, false, true, false, true, 10, LeastConnections) + }) + }) + t.Run("async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "udp://:9993", "udp://:9994", "unix://gnet1.sock", "unix://gnet2.sock"}, false, false, false, true, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "udp://:9997", "udp://:9998", "unix://gnet1.sock", "unix://gnet2.sock"}, false, false, true, true, false, 10, LeastConnections) + }) + }) + t.Run("async-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "unix://gnet1.sock", "unix://gnet2.sock"}, false, false, false, true, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "unix://gnet1.sock", "unix://gnet2.sock"}, false, false, true, true, true, 10, LeastConnections) + }) + }) + }) + + t.Run("poll-multi-addrs-reuseport-LT", func(t *testing.T) { + t.Run("sync", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "udp://:9993", "udp://:9994", "unix://gnet1.sock", "unix://gnet2.sock"}, false, true, false, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "udp://:9997", "udp://:9998", "unix://gnet1.sock", "unix://gnet2.sock"}, false, true, true, false, false, 10, LeastConnections) + }) + }) + t.Run("sync-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "unix://gnet1.sock", "unix://gnet2.sock"}, false, true, false, false, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "unix://gnet1.sock", "unix://gnet2.sock"}, false, true, true, false, true, 10, LeastConnections) + }) + }) + t.Run("async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "udp://:9993", "udp://:9994", "unix://gnet1.sock", "unix://gnet2.sock"}, false, true, false, true, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "udp://:9997", "udp://:9998", "unix://gnet1.sock", "unix://gnet2.sock"}, false, true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("async-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "unix://gnet1.sock", "unix://gnet2.sock"}, false, true, false, true, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "unix://gnet1.sock", "unix://gnet2.sock"}, false, true, true, true, true, 10, LeastConnections) + }) + }) + }) + + t.Run("poll-multi-addrs-ET", func(t *testing.T) { + t.Run("sync", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "udp://:9993", "udp://:9994", "unix://gnet1.sock", "unix://gnet2.sock"}, true, false, false, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "udp://:9997", "udp://:9998", "unix://gnet1.sock", "unix://gnet2.sock"}, true, false, true, false, false, 10, LeastConnections) + }) + }) + 
t.Run("sync-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "unix://gnet1.sock", "unix://gnet2.sock"}, true, false, false, false, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "unix://gnet1.sock", "unix://gnet2.sock"}, true, false, true, false, true, 10, LeastConnections) + }) + }) + t.Run("async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "udp://:9993", "udp://:9994", "unix://gnet1.sock", "unix://gnet2.sock"}, true, false, false, true, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "udp://:9997", "udp://:9998", "unix://gnet1.sock", "unix://gnet2.sock"}, true, false, true, true, false, 10, LeastConnections) + }) + }) + t.Run("async-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "unix://gnet1.sock", "unix://gnet2.sock"}, true, false, false, true, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "unix://gnet1.sock", "unix://gnet2.sock"}, true, false, true, true, true, 10, LeastConnections) + }) + }) + }) + + t.Run("poll-multi-addrs-reuseport-ET", func(t *testing.T) { + t.Run("sync", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "udp://:9993", "udp://:9994", "unix://gnet1.sock", "unix://gnet2.sock"}, true, true, false, false, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "udp://:9997", "udp://:9998", "unix://gnet1.sock", "unix://gnet2.sock"}, true, true, true, false, false, 10, LeastConnections) + }) + }) + t.Run("sync-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "unix://gnet1.sock", "unix://gnet2.sock"}, true, true, false, false, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "unix://gnet1.sock", "unix://gnet2.sock"}, true, true, true, false, true, 10, LeastConnections) + }) + }) + t.Run("async", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "udp://:9993", "udp://:9994", "unix://gnet1.sock", "unix://gnet2.sock"}, true, true, false, true, false, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "udp://:9997", "udp://:9998", "unix://gnet1.sock", "unix://gnet2.sock"}, true, true, true, true, false, 10, LeastConnections) + }) + }) + t.Run("async-writev", func(t *testing.T) { + t.Run("1-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9991", "tcp://:9992", "unix://gnet1.sock", "unix://gnet2.sock"}, true, true, false, true, true, 10, RoundRobin) + }) + t.Run("N-loop", func(t *testing.T) { + runServer(t, []string{"tcp://:9995", "tcp://:9996", "unix://gnet1.sock", "unix://gnet2.sock"}, true, true, true, true, true, 10, LeastConnections) }) }) }) @@ -310,25 +451,28 @@ type testServer struct { *BuiltinEventEngine tester *testing.T eng Engine - network string - addr string + addrs []string multicore bool async bool writev bool nclients int started int32 connected int32 - clientActive int32 disconnected int32 + clientActive int32 workerPool 
*goPool.Pool } func (s *testServer) OnBoot(eng Engine) (action Action) { s.eng = eng fd, err := s.eng.Dup() - require.NoErrorf(s.tester, err, "dup error") - assert.Greaterf(s.tester, fd, 2, "expected fd: > 2, but got: %d", fd) - assert.NoErrorf(s.tester, SysClose(fd), "close fd error") + if len(s.addrs) > 1 { + assert.ErrorIsf(s.tester, err, errorx.ErrUnsupportedOp, "dup error") + } else { + require.NoErrorf(s.tester, err, "dup error") + assert.Greaterf(s.tester, fd, 2, "expected fd: > 2, but got: %d", fd) + assert.NoErrorf(s.tester, SysClose(fd), "close fd error") + } return } @@ -343,25 +487,24 @@ func (s *testServer) OnOpen(c Conn) (out []byte, action Action) { func (s *testServer) OnShutdown(_ Engine) { fd, err := s.eng.Dup() - require.NoErrorf(s.tester, err, "dup error") - assert.Greaterf(s.tester, fd, 2, "expected fd: > 2, but got: %d", fd) - assert.NoErrorf(s.tester, SysClose(fd), "close fd error") + if len(s.addrs) > 1 { + assert.ErrorIsf(s.tester, err, errorx.ErrUnsupportedOp, "dup error") + } else { + require.NoErrorf(s.tester, err, "dup error") + assert.Greaterf(s.tester, fd, 2, "expected fd: > 2, but got: %d", fd) + assert.NoErrorf(s.tester, SysClose(fd), "close fd error") + } } func (s *testServer) OnClose(c Conn, err error) (action Action) { if err != nil { logging.Debugf("error occurred on closed, %v\n", err) } - if s.network != "udp" { + if c.LocalAddr().Network() != "udp" { require.Equal(s.tester, c.Context(), c, "invalid context") } - if disconnected := atomic.AddInt32(&s.disconnected, 1); disconnected == atomic.LoadInt32(&s.connected) && disconnected == int32(s.nclients) { //nolint:gocritic - require.EqualValues(s.tester, 0, s.eng.CountConnections()) - action = Shutdown - s.workerPool.Release() - } - + atomic.AddInt32(&s.disconnected, 1) return } @@ -369,8 +512,7 @@ func (s *testServer) OnTraffic(c Conn) (action Action) { if s.async { buf := bbPool.Get() _, _ = c.WriteTo(buf) - - if s.network == "tcp" || s.network == "unix" { + if c.LocalAddr().Network() == "tcp" || c.LocalAddr().Network() == "unix" { // just for test _ = c.InboundBuffered() _ = c.OutboundBuffered() @@ -401,7 +543,7 @@ func (s *testServer) OnTraffic(c Conn) (action Action) { } }) return - } else if s.network == "udp" { + } else if c.LocalAddr().Network() == "udp" { _ = s.workerPool.Submit( func() { _ = c.AsyncWrite(buf.Bytes(), nil) @@ -412,7 +554,12 @@ func (s *testServer) OnTraffic(c Conn) (action Action) { } buf, _ := c.Next(-1) - _, _ = c.Write(buf) + if s.writev { + mid := len(buf) / 2 + _, _ = c.Writev([][]byte{buf[:mid], buf[mid:]}) + } else { + _, _ = c.Write(buf) + } // Only for code coverage of testing. if !s.multicore { @@ -425,14 +572,14 @@ func (s *testServer) OnTraffic(c Conn) (action Action) { // TODO(panjf2000): somehow these two system calls will fail with Unix Domain Socket, // returning "invalid argument" error on macOS in Github actions intermittently, // try to figure it out. 
- if s.network == "unix" && runtime.GOOS == "darwin" { + if c.LocalAddr().Network() == "unix" && runtime.GOOS == "darwin" { _ = c.SetReadBuffer(streamLen) _ = c.SetWriteBuffer(streamLen) } else { assert.NoErrorf(s.tester, c.SetReadBuffer(streamLen), "set read buffer error") assert.NoErrorf(s.tester, c.SetWriteBuffer(streamLen), "set write buffer error") } - if s.network == "tcp" { + if c.LocalAddr().Network() == "tcp" { assert.NoErrorf(s.tester, c.SetLinger(1), "set linger error") assert.NoErrorf(s.tester, c.SetNoDelay(false), "set no delay error") assert.NoErrorf(s.tester, c.SetKeepAlivePeriod(time.Minute), "set keep alive period error") @@ -443,44 +590,72 @@ func (s *testServer) OnTraffic(c Conn) (action Action) { } func (s *testServer) OnTick() (delay time.Duration, action Action) { - delay = time.Second / 5 + delay = 100 * time.Millisecond if atomic.CompareAndSwapInt32(&s.started, 0, 1) { - for i := 0; i < s.nclients; i++ { - atomic.AddInt32(&s.clientActive, 1) - go func() { - startClient(s.tester, s.network, s.addr, s.multicore, s.async) - atomic.AddInt32(&s.clientActive, -1) - }() + for _, protoAddr := range s.addrs { + proto, addr, err := parseProtoAddr(protoAddr) + assert.NoError(s.tester, err) + for i := 0; i < s.nclients; i++ { + atomic.AddInt32(&s.clientActive, 1) + go func() { + startClient(s.tester, proto, addr, s.multicore, s.async) + atomic.AddInt32(&s.clientActive, -1) + }() + } } } - if s.network == "udp" && atomic.LoadInt32(&s.clientActive) == 0 { - action = Shutdown - return + if atomic.LoadInt32(&s.clientActive) == 0 { + var streamAddrs int + for _, addr := range s.addrs { + if !strings.HasPrefix(addr, "udp") { + streamAddrs++ + } + } + streamConns := s.nclients * streamAddrs + disconnected := atomic.LoadInt32(&s.disconnected) + if int(disconnected) == streamConns && disconnected == atomic.LoadInt32(&s.connected) { + action = Shutdown + s.workerPool.Release() + require.EqualValues(s.tester, 0, s.eng.CountConnections()) + } } return } -func runServer(t *testing.T, network, addr string, et, reuseport, multicore, async, writev bool, nclients int, lb LoadBalancing) { +func runServer(t *testing.T, addrs []string, et, reuseport, multicore, async, writev bool, nclients int, lb LoadBalancing) { ts := &testServer{ tester: t, - network: network, - addr: addr, + addrs: addrs, multicore: multicore, async: async, writev: writev, nclients: nclients, workerPool: goPool.Default(), } - err := Run(ts, - network+"://"+addr, - WithEdgeTriggeredIO(et), - WithLockOSThread(async), - WithMulticore(multicore), - WithReusePort(reuseport), - WithTicker(true), - WithTCPKeepAlive(time.Minute*1), - WithTCPNoDelay(TCPDelay), - WithLoadBalancing(lb)) + var err error + if len(addrs) > 1 { + err = Rotate(ts, + addrs, + WithEdgeTriggeredIO(et), + WithLockOSThread(async), + WithMulticore(multicore), + WithReusePort(reuseport), + WithTicker(true), + WithTCPKeepAlive(time.Minute), + WithTCPNoDelay(TCPDelay), + WithLoadBalancing(lb)) + } else { + err = Run(ts, + addrs[0], + WithEdgeTriggeredIO(et), + WithLockOSThread(async), + WithMulticore(multicore), + WithReusePort(reuseport), + WithTicker(true), + WithTCPKeepAlive(time.Minute), + WithTCPNoDelay(TCPDelay), + WithLoadBalancing(lb)) + } assert.NoError(t, err) } @@ -496,7 +671,7 @@ func startClient(t *testing.T, network, addr string, multicore, async bool) { require.Equal(t, string(msg), "sweetness\r\n", "bad header") } duration := time.Duration((rand.Float64()*2+1)*float64(time.Second)) / 2 - t.Logf("test duration: %dms", duration/time.Millisecond) + 
logging.Debugf("test duration: %v", duration) start := time.Now() for time.Since(start) < duration { reqData := make([]byte, streamLen) @@ -546,11 +721,11 @@ func (t *testBadAddrServer) OnBoot(_ Engine) (action Action) { func TestBadAddresses(t *testing.T) { events := new(testBadAddrServer) err := Run(events, "tulip://howdy") - assert.Error(t, err) + assert.ErrorIs(t, err, errorx.ErrUnsupportedProtocol) err = Run(events, "howdy") - assert.Error(t, err) + assert.ErrorIs(t, err, errorx.ErrInvalidNetworkAddress) err = Run(events, "tcp://") - assert.NoError(t, err) + assert.ErrorIs(t, err, errorx.ErrInvalidNetworkAddress) } func TestTick(t *testing.T) { @@ -1487,7 +1662,7 @@ func runSimClient(t *testing.T, network, addr string, packetSize, batch int) { default: duration = 5 * time.Second } - t.Logf("test duration: %ds", duration/time.Second) + logging.Debugf("test duration: %v", duration) start := time.Now() for time.Since(start) < duration { batchSendAndRecv(t, c, rd, packetSize, batch) diff --git a/internal/socket/fd_unix.go b/internal/socket/fd_unix.go index 32e1d5d10..82de8ce27 100644 --- a/internal/socket/fd_unix.go +++ b/internal/socket/fd_unix.go @@ -25,7 +25,7 @@ import ( ) // Dup is the wrapper for dupCloseOnExec. -func Dup(fd int) (int, string, error) { +func Dup(fd int) (int, error) { return dupCloseOnExec(fd) } @@ -34,11 +34,11 @@ func Dup(fd int) (int, string, error) { var tryDupCloexec = int32(1) // dupCloseOnExec dups fd and marks it close-on-exec. -func dupCloseOnExec(fd int) (int, string, error) { +func dupCloseOnExec(fd int) (int, error) { if atomic.LoadInt32(&tryDupCloexec) == 1 { r, err := unix.FcntlInt(uintptr(fd), unix.F_DUPFD_CLOEXEC, 0) if err == nil { - return r, "", nil + return r, nil } switch err.(syscall.Errno) { case unix.EINVAL, unix.ENOSYS: @@ -47,7 +47,7 @@ func dupCloseOnExec(fd int) (int, string, error) { // now on. atomic.StoreInt32(&tryDupCloexec, 0) default: - return -1, "fcntl", err + return -1, err } } return dupCloseOnExecOld(fd) @@ -55,13 +55,13 @@ func dupCloseOnExec(fd int) (int, string, error) { // dupCloseOnExecOld is the traditional way to dup an fd and // set its O_CLOEXEC bit, using two system calls. 
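// (Editor's note, not part of the patch: the "two system calls" are dup(2) followed by setting the
// close-on-exec flag via fcntl(2), i.e. the syscall.Dup and syscall.CloseOnExec calls below, taken
// under syscall.ForkLock so a concurrently forked child cannot inherit the fd before the flag is set.)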
-func dupCloseOnExecOld(fd int) (int, string, error) { +func dupCloseOnExecOld(fd int) (int, error) { syscall.ForkLock.RLock() defer syscall.ForkLock.RUnlock() newFD, err := syscall.Dup(fd) if err != nil { - return -1, "dup", err + return -1, err } syscall.CloseOnExec(newFD) - return newFD, "", nil + return newFD, nil } diff --git a/listener_unix.go b/listener_unix.go index 95bf0171d..e2d497b61 100644 --- a/listener_unix.go +++ b/listener_unix.go @@ -45,7 +45,7 @@ func (ln *listener) packPollAttachment(handler netpoll.PollEventHandler) *netpol return ln.pollAttachment } -func (ln *listener) dup() (int, string, error) { +func (ln *listener) dup() (int, error) { return socket.Dup(ln.fd) } diff --git a/listener_windows.go b/listener_windows.go index 1e92cdf46..b1a898131 100644 --- a/listener_windows.go +++ b/listener_windows.go @@ -37,9 +37,9 @@ type listener struct { addr net.Addr } -func (l *listener) dup() (int, string, error) { +func (l *listener) dup() (int, error) { if l.ln == nil && l.pc == nil { - return -1, "dup", errorx.ErrUnsupportedOp + return -1, errorx.ErrUnsupportedOp } var ( @@ -53,11 +53,11 @@ func (l *listener) dup() (int, string, error) { } if !ok { - return -1, "dup", errors.New("failed to convert net.Conn to syscall.Conn") + return -1, errors.New("failed to convert net.Conn to syscall.Conn") } rc, err := sc.SyscallConn() if err != nil { - return -1, "dup", errors.New("failed to get syscall.RawConn from net.Conn") + return -1, errors.New("failed to get syscall.RawConn from net.Conn") } var dupHandle windows.Handle @@ -74,13 +74,13 @@ func (l *listener) dup() (int, string, error) { ) }) if err != nil { - return -1, "dup", err + return -1, err } if e != nil { - return -1, "dup", e + return -1, e } - return int(dupHandle), "dup", nil + return int(dupHandle), nil } func (l *listener) close() { diff --git a/os_unix_test.go b/os_unix_test.go index c8acc8bc3..f3d0f0724 100644 --- a/os_unix_test.go +++ b/os_unix_test.go @@ -17,6 +17,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" + + "github.com/panjf2000/gnet/v2/pkg/logging" ) var ( @@ -111,7 +113,7 @@ func (s *testMcastServer) startMcastClient() { ch := make(chan []byte, 10000) s.mcast.Store(c.LocalAddr().String(), ch) duration := time.Duration((rand.Float64()*2+1)*float64(time.Second)) / 2 - s.t.Logf("test duration: %dms", duration/time.Millisecond) + logging.Debugf("test duration: %v", duration) start := time.Now() for time.Since(start) < duration { reqData := make([]byte, 1024) diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index cc0a24f7d..835ddedf0 100644 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -18,29 +18,31 @@ import "errors" var ( // ErrEmptyEngine occurs when trying to do something with an empty engine. - ErrEmptyEngine = errors.New("the internal engine is empty") + ErrEmptyEngine = errors.New("gnet: the internal engine is empty") // ErrEngineShutdown occurs when server is closing. - ErrEngineShutdown = errors.New("server is going to be shutdown") + ErrEngineShutdown = errors.New("gnet: server is going to be shutdown") // ErrEngineInShutdown occurs when attempting to shut the server down more than once. - ErrEngineInShutdown = errors.New("server is already in shutdown") + ErrEngineInShutdown = errors.New("gnet: server is already in shutdown") // ErrAcceptSocket occurs when acceptor does not accept the new connection properly. 
- ErrAcceptSocket = errors.New("accept a new connection error") + ErrAcceptSocket = errors.New("gnet: accept a new connection error") // ErrTooManyEventLoopThreads occurs when attempting to set up more than 10,000 event-loop goroutines under LockOSThread mode. - ErrTooManyEventLoopThreads = errors.New("too many event-loops under LockOSThread mode") + ErrTooManyEventLoopThreads = errors.New("gnet: too many event-loops under LockOSThread mode") // ErrUnsupportedProtocol occurs when trying to use protocol that is not supported. - ErrUnsupportedProtocol = errors.New("only unix, tcp/tcp4/tcp6, udp/udp4/udp6 are supported") + ErrUnsupportedProtocol = errors.New("gnet: only unix, tcp/tcp4/tcp6, udp/udp4/udp6 are supported") // ErrUnsupportedTCPProtocol occurs when trying to use an unsupported TCP protocol. - ErrUnsupportedTCPProtocol = errors.New("only tcp/tcp4/tcp6 are supported") + ErrUnsupportedTCPProtocol = errors.New("gnet: only tcp/tcp4/tcp6 are supported") // ErrUnsupportedUDPProtocol occurs when trying to use an unsupported UDP protocol. - ErrUnsupportedUDPProtocol = errors.New("only udp/udp4/udp6 are supported") + ErrUnsupportedUDPProtocol = errors.New("gnet: only udp/udp4/udp6 are supported") // ErrUnsupportedUDSProtocol occurs when trying to use an unsupported Unix protocol. - ErrUnsupportedUDSProtocol = errors.New("only unix is supported") + ErrUnsupportedUDSProtocol = errors.New("gnet: only unix is supported") // ErrUnsupportedPlatform occurs when running gnet on an unsupported platform. - ErrUnsupportedPlatform = errors.New("unsupported platform in gnet") + ErrUnsupportedPlatform = errors.New("gnet: unsupported platform in gnet") // ErrUnsupportedOp occurs when calling some methods that has not been implemented yet. - ErrUnsupportedOp = errors.New("unsupported operation") + ErrUnsupportedOp = errors.New("gnet: unsupported operation") // ErrNegativeSize occurs when trying to pass a negative size to a buffer. - ErrNegativeSize = errors.New("negative size is invalid") + ErrNegativeSize = errors.New("gnet: negative size is not allowed") // ErrNoIPv4AddressOnInterface occurs when an IPv4 multicast address is set on an interface but IPv4 is not configured. - ErrNoIPv4AddressOnInterface = errors.New("no IPv4 address on interface") + ErrNoIPv4AddressOnInterface = errors.New("gnet: no IPv4 address on interface") + // ErrInvalidNetworkAddress occurs when the network address is invalid. + ErrInvalidNetworkAddress = errors.New("gnet: invalid network address") ) diff --git a/reactor_epoll_default.go b/reactor_epoll_default.go index b2c514dd5..bd0e7d88d 100644 --- a/reactor_epoll_default.go +++ b/reactor_epoll_default.go @@ -124,7 +124,7 @@ func (el *eventloop) run() error { err := el.poller.Polling(func(fd int, ev uint32) error { c := el.connections.getConn(fd) if c == nil { - if fd == el.ln.fd { + if _, ok := el.listeners[fd]; ok { return el.accept(fd, ev) } // Somehow epoll notify with an event for a stale fd that is not in our connection set. 
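[Editor's note] The epoll hunk above and the kqueue hunk below replace the single-listener check (fd == el.ln.fd) with a lookup in a listener map, so one event loop can poll several listening sockets. A minimal sketch of the resulting dispatch order on Linux, assuming the identifiers visible in this diff (connections, listeners, accept, poller come from the patch; dispatch is a hypothetical name used only for illustration):

func (el *eventloop) dispatch(fd int, ev uint32) error {
	c := el.connections.getConn(fd)
	if c == nil {
		if _, ok := el.listeners[fd]; ok {
			return el.accept(fd, ev) // a listening socket: accept new connections
		}
		// A stale fd belonging to neither set: purge it from the poller.
		return el.poller.Delete(fd)
	}
	return c.handleEvents(fd, ev) // an established connection: perform the I/O
}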
diff --git a/reactor_kqueue_default.go b/reactor_kqueue_default.go index 4cd533618..bdc103f19 100644 --- a/reactor_kqueue_default.go +++ b/reactor_kqueue_default.go @@ -113,7 +113,7 @@ func (el *eventloop) run() error { err := el.poller.Polling(func(fd int, filter int16, flags uint16) (err error) { c := el.connections.getConn(fd) if c == nil { - if fd == el.ln.fd { + if _, ok := el.listeners[fd]; ok { return el.accept(fd, filter, flags) } // This might happen when the connection has already been closed, From 39f60023c42d301189bf352828e7d60700656c34 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 21 Apr 2024 17:04:05 +0800 Subject: [PATCH 04/12] opt: only enable SO_REUSEPORT on Linux and FreeBSD (#580) Fixes #579 --- gnet.go | 18 ++++++++++++++++++ internal/socket/sockopts_freebsd.go | 29 +++++++++++++++++++++++++++++ internal/socket/sockopts_posix.go | 5 ----- internal/socket/sockopts_unix1.go | 29 +++++++++++++++++++++++++++++ 4 files changed, 76 insertions(+), 5 deletions(-) create mode 100644 internal/socket/sockopts_freebsd.go create mode 100644 internal/socket/sockopts_unix1.go diff --git a/gnet.go b/gnet.go index 643cd7264..c1b809649 100644 --- a/gnet.go +++ b/gnet.go @@ -18,6 +18,7 @@ import ( "context" "io" "net" + "runtime" "strings" "sync" "time" @@ -463,6 +464,23 @@ func createListeners(addrs []string, opts ...Option) ([]*listener, *Options, err options.WriteBufferCap = math.CeilToPowerOfTwo(wbc) } + // SO_REUSEPORT enables duplicate address and port bindings across various + // Unix-like OSs, whereas there is platform-specific inconsistency: + // Linux implemented SO_REUSEPORT with load balancing for incoming connections + // while *BSD implemented it for only binding to the same address and port, which + // makes it pointless to enable SO_REUSEPORT on *BSD and Darwin for gnet with + // multiple event-loops because only the first event-loop will be constantly woken + // up to accept incoming connections and handle I/O events while the rest of event + // loops remain idle. + // Thus, we disable SO_REUSEPORT on *BSD and Darwin by default. + // + // Note that FreeBSD 12 introduced a new socket option named SO_REUSEPORT_LB + // with the capability of load balancing, it's the equivalent of Linux's SO_REUSEPORT. + goos := runtime.GOOS + if (options.Multicore || options.NumEventLoop > 1) && options.ReusePort && goos != "linux" && goos != "freebsd" { + options.ReusePort = false + } + // If there is UDP listener in the list, enable SO_REUSEPORT and disable edge-triggered I/O by default. for i := 0; (!options.ReusePort || options.EdgeTriggeredIO) && i < len(addrs); i++ { proto, _, err := parseProtoAddr(addrs[i]) diff --git a/internal/socket/sockopts_freebsd.go b/internal/socket/sockopts_freebsd.go new file mode 100644 index 000000000..01026fd72 --- /dev/null +++ b/internal/socket/sockopts_freebsd.go @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2024 The Gnet Authors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package socket + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// SetReuseport enables SO_REUSEPORT_LB option on socket. +func SetReuseport(fd, reusePort int) error { + return os.NewSyscallError("setsockopt", unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT_LB, reusePort)) +} diff --git a/internal/socket/sockopts_posix.go b/internal/socket/sockopts_posix.go index 321ac8a1b..1280285f2 100644 --- a/internal/socket/sockopts_posix.go +++ b/internal/socket/sockopts_posix.go @@ -48,11 +48,6 @@ func SetSendBuffer(fd, size int) error { return unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, size) } -// SetReuseport enables SO_REUSEPORT option on socket. -func SetReuseport(fd, reusePort int) error { - return os.NewSyscallError("setsockopt", unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, reusePort)) -} - // SetReuseAddr enables SO_REUSEADDR option on socket. func SetReuseAddr(fd, reuseAddr int) error { return os.NewSyscallError("setsockopt", unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEADDR, reuseAddr)) diff --git a/internal/socket/sockopts_unix1.go b/internal/socket/sockopts_unix1.go new file mode 100644 index 000000000..0f7be1da9 --- /dev/null +++ b/internal/socket/sockopts_unix1.go @@ -0,0 +1,29 @@ +// Copyright (c) 2021 The Gnet Authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux || dragonfly || netbsd || openbsd || darwin +// +build linux dragonfly netbsd openbsd darwin + +package socket + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// SetReuseport enables SO_REUSEPORT option on socket. +func SetReuseport(fd, reusePort int) error { + return os.NewSyscallError("setsockopt", unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, reusePort)) +} From 4033b4788d5226f411178a57af83ba84a7a4d886 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 21 Apr 2024 18:38:52 +0800 Subject: [PATCH 05/12] opt: don't disable SO_REUSEPORT on DragonFlyBSD (#583) Fixes #582 --- gnet.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/gnet.go b/gnet.go index c1b809649..cd5441d20 100644 --- a/gnet.go +++ b/gnet.go @@ -469,15 +469,18 @@ func createListeners(addrs []string, opts ...Option) ([]*listener, *Options, err // Linux implemented SO_REUSEPORT with load balancing for incoming connections // while *BSD implemented it for only binding to the same address and port, which // makes it pointless to enable SO_REUSEPORT on *BSD and Darwin for gnet with - // multiple event-loops because only the first event-loop will be constantly woken - // up to accept incoming connections and handle I/O events while the rest of event - // loops remain idle. + // multiple event-loops because only the first or last event-loop will be constantly + // woken up to accept incoming connections and handle I/O events while the rest of + // event-loops remain idle. // Thus, we disable SO_REUSEPORT on *BSD and Darwin by default. 
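// (Editor's note, illustration only: on FreeBSD the series switches to the load-balancing variant
// instead of disabling the option; the sockopts_freebsd.go file added above issues
// unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT_LB, reusePort) for that purpose.)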
//
// Note that FreeBSD 12 introduced a new socket option named SO_REUSEPORT_LB
// with the capability of load balancing, it's the equivalent of Linux's SO_REUSEPORT.
+ // Also note that DragonFlyBSD 3.6.0 extended SO_REUSEPORT to distribute workload to
+ // available sockets, which makes it the same as Linux's SO_REUSEPORT.
goos := runtime.GOOS
- if (options.Multicore || options.NumEventLoop > 1) && options.ReusePort && goos != "linux" && goos != "freebsd" {
+ if (options.Multicore || options.NumEventLoop > 1) && options.ReusePort &&
+ goos != "linux" && goos != "dragonfly" && goos != "freebsd" {
options.ReusePort = false
}

From fcc7933e6cad70c78cde17dd29e5735ff31949b8 Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Sun, 21 Apr 2024 20:03:52 +0800
Subject: [PATCH 06/12] opt: disable SO_REUSEPORT on Unix domain sockets (#584)

Fixes #581
---
gnet.go    | 29 ++++++++++++++++++-----------
options.go |  2 +-
2 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/gnet.go b/gnet.go
index cd5441d20..a5214eede 100644
--- a/gnet.go
+++ b/gnet.go
@@ -464,6 +464,16 @@ func createListeners(addrs []string, opts ...Option) ([]*listener, *Options, err
options.WriteBufferCap = math.CeilToPowerOfTwo(wbc)
}
+ var hasUDP, hasUnix bool
+ for _, addr := range addrs {
+ proto, _, err := parseProtoAddr(addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ hasUDP = hasUDP || strings.HasPrefix(proto, "udp")
+ hasUnix = hasUnix || proto == "unix"
+ }
+
// SO_REUSEPORT enables duplicate address and port bindings across various
// Unix-like OSs, whereas there is platform-specific inconsistency:
// Linux implemented SO_REUSEPORT with load balancing for incoming connections
// while *BSD implemented it for only binding to the same address and port, which
// makes it pointless to enable SO_REUSEPORT on *BSD and Darwin for gnet with
@@ -478,22 +488,19 @@ func createListeners(addrs []string, opts ...Option) ([]*listener, *Options, err
// with the capability of load balancing, it's the equivalent of Linux's SO_REUSEPORT.
// Also note that DragonFlyBSD 3.6.0 extended SO_REUSEPORT to distribute workload to
// available sockets, which makes it the same as Linux's SO_REUSEPORT.
+ // AF_LOCAL with SO_REUSEPORT enables duplicate address and port bindings without
+ // load balancing on Linux and *BSD. Therefore, disable it for Unix domain sockets.
goos := runtime.GOOS
if (options.Multicore || options.NumEventLoop > 1) && options.ReusePort &&
- goos != "linux" && goos != "dragonfly" && goos != "freebsd" {
+ ((goos != "linux" && goos != "dragonfly" && goos != "freebsd") || hasUnix) {
options.ReusePort = false
}
- // If there is UDP listener in the list, enable SO_REUSEPORT and disable edge-triggered I/O by default.
- for i := 0; (!options.ReusePort || options.EdgeTriggeredIO) && i < len(addrs); i++ {
- proto, _, err := parseProtoAddr(addrs[i])
- if err != nil {
- return nil, nil, err
- }
- if strings.HasPrefix(proto, "udp") {
- options.ReusePort = true
- options.EdgeTriggeredIO = false
- }
+ // If there is a UDP address in the list, we have no choice but to enable SO_REUSEPORT anyway;
+ // we also disable edge-triggered I/O for UDP by default.
+ if hasUDP {
+ options.ReusePort = true
+ options.EdgeTriggeredIO = false
}

listeners := make([]*listener, len(addrs))

diff --git a/options.go b/options.go
index 7eb355ed3..86caef7fb 100644
--- a/options.go
+++ b/options.go
@@ -125,7 +125,7 @@ type Options struct {
// EdgeTriggeredIO enables the edge-triggered I/O for the underlying epoll/kqueue event-loop.
// Don't enable it unless you are 100% sure what you are doing.
- // Note that this option is only available for TCP protocol.
+ // Note that this option is only available for stream-oriented protocols.
EdgeTriggeredIO bool
}

From 050672ff089240d3775d21aa956ffae1eb5ce31d Mon Sep 17 00:00:00 2001
From: Andy Pan
Date: Sun, 21 Apr 2024 20:36:36 +0800
Subject: [PATCH 07/12] opt: enable ET mode on listener event-loop by default (#585)

---
acceptor_unix.go  | 68 ++++++++++++++++++++++++++---------------------
engine_unix.go    |  2 +-
eventloop_unix.go |  2 +-
3 files changed, 39 insertions(+), 33 deletions(-)

diff --git a/acceptor_unix.go b/acceptor_unix.go
index 4c1d3999c..ea37b5254 100644
--- a/acceptor_unix.go
+++ b/acceptor_unix.go
@@ -26,39 +26,43 @@ import (
"github.com/panjf2000/gnet/v2/internal/queue"
"github.com/panjf2000/gnet/v2/internal/socket"
"github.com/panjf2000/gnet/v2/pkg/errors"
- "github.com/panjf2000/gnet/v2/pkg/logging"
)

func (eng *engine) accept1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error {
- nfd, sa, err := socket.Accept(fd)
- if err != nil {
- switch err {
- case unix.EINTR, unix.EAGAIN, unix.ECONNABORTED:
- // ECONNABORTED indicates that a socket on the listen
- // queue was closed before we Accept()ed it;
- // it's a silly error, so try again.
- return nil
- default:
- eng.opts.Logger.Errorf("Accept() failed due to error: %v", err)
- return errors.ErrAcceptSocket
+ for {
+ nfd, sa, err := socket.Accept(fd)
+ if err != nil {
+ switch err {
+ case unix.EAGAIN: // the Accept queue has been drained, we can return now
+ return nil
+ case unix.EINTR, unix.ECONNRESET, unix.ECONNABORTED:
+ // ECONNRESET or ECONNABORTED could indicate that a socket
+ // in the Accept queue was closed before we Accept()ed it.
+ // It's a silly error, let's retry it.
+ continue
+ default:
+ eng.opts.Logger.Errorf("Accept() failed due to error: %v", err)
+ return errors.ErrAcceptSocket
+ }
}
- }
- remoteAddr := socket.SockaddrToTCPOrUnixAddr(sa)
- if eng.opts.TCPKeepAlive > 0 && eng.listeners[fd].network == "tcp" {
- err = socket.SetKeepAlivePeriod(nfd, int(eng.opts.TCPKeepAlive.Seconds()))
- logging.Error(err)
- }
+ remoteAddr := socket.SockaddrToTCPOrUnixAddr(sa)
+ if eng.opts.TCPKeepAlive > 0 && eng.listeners[fd].network == "tcp" {
+ err = socket.SetKeepAlivePeriod(nfd, int(eng.opts.TCPKeepAlive.Seconds()))
+ if err != nil {
+ eng.opts.Logger.Errorf("failed to set TCP keepalive on fd=%d: %v", fd, err)
+ }
+ }
- el := eng.eventLoops.next(remoteAddr)
- c := newTCPConn(nfd, el, sa, el.listeners[fd].addr, remoteAddr)
- err = el.poller.Trigger(queue.HighPriority, el.register, c)
- if err != nil {
- eng.opts.Logger.Errorf("failed to enqueue accepted socket of high-priority: %v", err)
- _ = unix.Close(nfd)
- c.release()
+ el := eng.eventLoops.next(remoteAddr)
+ c := newTCPConn(nfd, el, sa, el.listeners[fd].addr, remoteAddr)
+ err = el.poller.Trigger(queue.HighPriority, el.register, c)
+ if err != nil {
+ eng.opts.Logger.Errorf("failed to enqueue accepted socket of high-priority: %v", err)
+ _ = unix.Close(nfd)
+ c.release()
+ }
}
- return nil
}

func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) error {
@@ -69,10 +73,10 @@ func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags)
nfd, sa, err := socket.Accept(fd)
if err != nil {
switch err {
- case unix.EINTR, unix.EAGAIN, unix.ECONNABORTED:
- // ECONNABORTED indicates that a socket on the listen
- // queue was closed before we Accept()ed it;
- // it's a silly error, so try again.
+ case unix.EINTR, unix.EAGAIN, unix.ECONNRESET, unix.ECONNABORTED: + // ECONNRESET or ECONNABORTED could indicate that a socket + // in the Accept queue was closed before we Accept()ed it. + // It's a silly error, let's retry it. return nil default: el.getLogger().Errorf("Accept() failed due to error: %v", err) @@ -83,7 +87,9 @@ func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) remoteAddr := socket.SockaddrToTCPOrUnixAddr(sa) if el.engine.opts.TCPKeepAlive > 0 && el.listeners[fd].network == "tcp" { err = socket.SetKeepAlivePeriod(nfd, int(el.engine.opts.TCPKeepAlive/time.Second)) - logging.Error(err) + if err != nil { + el.getLogger().Errorf("failed to set TCP keepalive on fd=%d: %v", fd, err) + } } c := newTCPConn(nfd, el, sa, el.listeners[fd].addr, remoteAddr) diff --git a/engine_unix.go b/engine_unix.go index ffba29c1b..5d380e70f 100644 --- a/engine_unix.go +++ b/engine_unix.go @@ -175,7 +175,7 @@ func (eng *engine) activateReactors(numEventLoop int) error { el.poller = p el.eventHandler = eng.eventHandler for _, ln := range eng.listeners { - if err = el.poller.AddRead(ln.packPollAttachment(eng.accept), false); err != nil { + if err = el.poller.AddRead(ln.packPollAttachment(eng.accept), true); err != nil { return err } } diff --git a/eventloop_unix.go b/eventloop_unix.go index 375b10675..d9ef142bc 100644 --- a/eventloop_unix.go +++ b/eventloop_unix.go @@ -310,7 +310,7 @@ func (el *eventloop) handleAction(c *conn, action Action) error { func (el *eventloop) readUDP1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { n, sa, err := unix.Recvfrom(fd, el.buffer, 0) if err != nil { - if err == unix.EAGAIN || err == unix.EWOULDBLOCK { + if err == unix.EAGAIN { return nil } return fmt.Errorf("failed to read UDP packet from fd=%d in event-loop(%d), %v", From d473f260f36679dc73918ea89c1a342f32d1351b Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 21 Apr 2024 20:58:23 +0800 Subject: [PATCH 08/12] chore: don't print warning log in eventloop.close --- eventloop_unix.go | 1 - 1 file changed, 1 deletion(-) diff --git a/eventloop_unix.go b/eventloop_unix.go index d9ef142bc..41341be2d 100644 --- a/eventloop_unix.go +++ b/eventloop_unix.go @@ -220,7 +220,6 @@ func (el *eventloop) close(c *conn, err error) (rerr error) { iov = iov[:iovMax] } if n, e := gio.Writev(c.fd, iov); e != nil { - el.getLogger().Warnf("close: error occurs when sending data back to remote, %v", e) break } else { //nolint:revive _, _ = c.outboundBuffer.Discard(n) From 39c175b2cce959660bb063e409506e5322d344f2 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Sun, 21 Apr 2024 22:52:46 +0800 Subject: [PATCH 09/12] opt: refine the code of I/O handlers (#586) --- acceptor_bsd.go | 28 ----------------------- acceptor_linux.go | 28 ----------------------- acceptor_unix.go | 21 +++++++++-------- connection_bsd.go | 6 +---- connection_linux.go | 6 +---- connection_unix.go | 2 +- engine_unix.go | 18 +++++++-------- eventloop_unix.go | 2 +- internal/netpoll/defs_poller.go | 3 +++ internal/netpoll/defs_poller_epoll.go | 3 --- internal/netpoll/defs_poller_kqueue.go | 3 --- internal/netpoll/poller_epoll_default.go | 2 +- internal/netpoll/poller_epoll_ultimate.go | 2 +- reactor_epoll_default.go | 12 +++++----- reactor_kqueue_default.go | 7 +++--- 15 files changed, 40 insertions(+), 103 deletions(-) delete mode 100644 acceptor_bsd.go delete mode 100644 acceptor_linux.go diff --git a/acceptor_bsd.go b/acceptor_bsd.go deleted file mode 100644 index 3b717e3b0..000000000 --- a/acceptor_bsd.go +++ /dev/null @@ -1,28 +0,0 
@@ -// Copyright (c) 2023 The Gnet Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build freebsd || dragonfly || netbsd || openbsd || darwin -// +build freebsd dragonfly netbsd openbsd darwin - -package gnet - -import "github.com/panjf2000/gnet/v2/internal/netpoll" - -func (eng *engine) accept(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) error { - return eng.accept1(fd, filter, flags) -} - -func (el *eventloop) accept(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) error { - return el.accept1(fd, filter, flags) -} diff --git a/acceptor_linux.go b/acceptor_linux.go deleted file mode 100644 index 04931b64c..000000000 --- a/acceptor_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2023 The Gnet Authors. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package gnet - -import "github.com/panjf2000/gnet/v2/internal/netpoll" - -func (eng *engine) accept(fd int, ev netpoll.IOEvent) error { - return eng.accept1(fd, ev, 0) -} - -func (el *eventloop) accept(fd int, ev netpoll.IOEvent) error { - return el.accept1(fd, ev, 0) -} diff --git a/acceptor_unix.go b/acceptor_unix.go index ea37b5254..1afad82b4 100644 --- a/acceptor_unix.go +++ b/acceptor_unix.go @@ -28,7 +28,7 @@ import ( "github.com/panjf2000/gnet/v2/pkg/errors" ) -func (eng *engine) accept1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { +func (el *eventloop) accept0(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { for { nfd, sa, err := socket.Accept(fd) if err != nil { @@ -41,33 +41,33 @@ func (eng *engine) accept1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { // It's a silly error, let's retry it. 
continue default: - eng.opts.Logger.Errorf("Accept() failed due to error: %v", err) + el.getLogger().Errorf("Accept() failed due to error: %v", err) return errors.ErrAcceptSocket } } remoteAddr := socket.SockaddrToTCPOrUnixAddr(sa) - if eng.opts.TCPKeepAlive > 0 && eng.listeners[fd].network == "tcp" { - err = socket.SetKeepAlivePeriod(nfd, int(eng.opts.TCPKeepAlive.Seconds())) + if el.engine.opts.TCPKeepAlive > 0 && el.listeners[fd].network == "tcp" { + err = socket.SetKeepAlivePeriod(nfd, int(el.engine.opts.TCPKeepAlive.Seconds())) if err != nil { - eng.opts.Logger.Errorf("failed to set TCP keepalive on fd=%d: %v", fd, err) + el.getLogger().Errorf("failed to set TCP keepalive on fd=%d: %v", fd, err) } } - el := eng.eventLoops.next(remoteAddr) + el := el.engine.eventLoops.next(remoteAddr) c := newTCPConn(nfd, el, sa, el.listeners[fd].addr, remoteAddr) err = el.poller.Trigger(queue.HighPriority, el.register, c) if err != nil { - eng.opts.Logger.Errorf("failed to enqueue accepted socket of high-priority: %v", err) + el.getLogger().Errorf("failed to enqueue the accepted socket fd=%d to poller: %v", c.fd, err) _ = unix.Close(nfd) c.release() } } } -func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) error { +func (el *eventloop) accept(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) error { if el.listeners[fd].network == "udp" { - return el.readUDP1(fd, ev, flags) + return el.readUDP(fd, ev, flags) } nfd, sa, err := socket.Accept(fd) @@ -98,6 +98,9 @@ func (el *eventloop) accept1(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) addEvents = el.poller.AddReadWrite } if err = addEvents(&c.pollAttachment, el.engine.opts.EdgeTriggeredIO); err != nil { + el.getLogger().Errorf("failed to register the accepted socket fd=%d to poller: %v", c.fd, err) + _ = unix.Close(c.fd) + c.release() return err } el.connections.addConn(c, el.idx) diff --git a/connection_bsd.go b/connection_bsd.go index 6a085c166..b24becffb 100644 --- a/connection_bsd.go +++ b/connection_bsd.go @@ -25,7 +25,7 @@ import ( "github.com/panjf2000/gnet/v2/internal/netpoll" ) -func (c *conn) handleEvents(_ int, filter int16, flags uint16) (err error) { +func (c *conn) processIO(_ int, filter netpoll.IOEvent, flags netpoll.IOFlags) (err error) { el := c.loop switch filter { case unix.EVFILT_READ: @@ -56,7 +56,3 @@ func (c *conn) handleEvents(_ int, filter int16, flags uint16) (err error) { } return } - -func (el *eventloop) readUDP(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) error { - return el.readUDP1(fd, filter, flags) -} diff --git a/connection_linux.go b/connection_linux.go index 30af24d59..ac8376e4c 100644 --- a/connection_linux.go +++ b/connection_linux.go @@ -25,7 +25,7 @@ import ( "github.com/panjf2000/gnet/v2/internal/netpoll" ) -func (c *conn) handleEvents(_ int, ev uint32) error { +func (c *conn) processIO(_ int, ev netpoll.IOEvent, _ netpoll.IOFlags) error { el := c.loop // First check for any unexpected non-IO events. // For these events we just close the corresponding connection directly. 
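// (Editor's note: the "unexpected non-IO events" are the error bits that epoll may deliver
// without EPOLLIN or EPOLLOUT; this series groups them as
// ErrEvents = unix.EPOLLERR | unix.EPOLLHUP | unix.EPOLLRDHUP in defs_poller_epoll.go.)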
@@ -68,7 +68,3 @@ func (c *conn) handleEvents(_ int, ev uint32) error { } return nil } - -func (el *eventloop) readUDP(fd int, ev netpoll.IOEvent) error { - return el.readUDP1(fd, ev, 0) -} diff --git a/connection_unix.go b/connection_unix.go index eb0ab3b59..101a51e96 100644 --- a/connection_unix.go +++ b/connection_unix.go @@ -63,7 +63,7 @@ func newTCPConn(fd int, el *eventloop, sa unix.Sockaddr, localAddr, remoteAddr n remoteAddr: remoteAddr, pollAttachment: netpoll.PollAttachment{FD: fd}, } - c.pollAttachment.Callback = c.handleEvents + c.pollAttachment.Callback = c.processIO c.outboundBuffer.Reset(el.engine.opts.WriteBufferCap) return } diff --git a/engine_unix.go b/engine_unix.go index 5d380e70f..18f46c89d 100644 --- a/engine_unix.go +++ b/engine_unix.go @@ -34,9 +34,9 @@ import ( ) type engine struct { - listeners map[int]*listener // listeners for accepting new connections + listeners map[int]*listener // listeners for accepting incoming connections opts *Options // options with engine - acceptor *eventloop // main event-loop for accepting connections + ingress *eventloop // main event-loop that monitors all listeners eventLoops loadBalancer // event-loops for handling events inShutdown int32 // whether the engine is in shutdown ticker struct { @@ -76,11 +76,11 @@ func (eng *engine) closeEventLoops() { _ = el.poller.Close() return true }) - if eng.acceptor != nil { + if eng.ingress != nil { for _, ln := range eng.listeners { ln.close() } - err := eng.acceptor.poller.Close() + err := eng.ingress.poller.Close() if err != nil { eng.opts.Logger.Errorf("failed to close poller when stopping engine: %v", err) } @@ -175,11 +175,11 @@ func (eng *engine) activateReactors(numEventLoop int) error { el.poller = p el.eventHandler = eng.eventHandler for _, ln := range eng.listeners { - if err = el.poller.AddRead(ln.packPollAttachment(eng.accept), true); err != nil { + if err = el.poller.AddRead(ln.packPollAttachment(el.accept0), true); err != nil { return err } } - eng.acceptor = el + eng.ingress = el // Start main reactor in background. eng.workerPool.Go(el.rotate) @@ -187,7 +187,7 @@ func (eng *engine) activateReactors(numEventLoop int) error { // Start the ticker. 
if eng.opts.Ticker { eng.workerPool.Go(func() error { - eng.acceptor.ticker(eng.ticker.ctx) + eng.ingress.ticker(eng.ticker.ctx) return nil }) } @@ -217,8 +217,8 @@ func (eng *engine) stop(s Engine) { } return true }) - if eng.acceptor != nil { - err := eng.acceptor.poller.Trigger(queue.HighPriority, func(_ interface{}) error { return errors.ErrEngineShutdown }, nil) + if eng.ingress != nil { + err := eng.ingress.poller.Trigger(queue.HighPriority, func(_ interface{}) error { return errors.ErrEngineShutdown }, nil) if err != nil { eng.opts.Logger.Errorf("failed to enqueue shutdown signal of high-priority for main event-loop: %v", err) } diff --git a/eventloop_unix.go b/eventloop_unix.go index 41341be2d..c0b326804 100644 --- a/eventloop_unix.go +++ b/eventloop_unix.go @@ -306,7 +306,7 @@ func (el *eventloop) handleAction(c *conn, action Action) error { } } -func (el *eventloop) readUDP1(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { +func (el *eventloop) readUDP(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error { n, sa, err := unix.Recvfrom(fd, el.buffer, 0) if err != nil { if err == unix.EAGAIN { diff --git a/internal/netpoll/defs_poller.go b/internal/netpoll/defs_poller.go index 86d5a51d2..79f27d2ea 100644 --- a/internal/netpoll/defs_poller.go +++ b/internal/netpoll/defs_poller.go @@ -20,6 +20,9 @@ package netpoll // IOFlags represents the flags of IO events. type IOFlags = uint16 +// PollEventHandler is the callback for I/O events notified by the poller. +type PollEventHandler func(int, IOEvent, IOFlags) error + // PollAttachment is the user data which is about to be stored in "void *ptr" of epoll_data or "void *udata" of kevent. type PollAttachment struct { FD int diff --git a/internal/netpoll/defs_poller_epoll.go b/internal/netpoll/defs_poller_epoll.go index d67f36d6f..81501524b 100644 --- a/internal/netpoll/defs_poller_epoll.go +++ b/internal/netpoll/defs_poller_epoll.go @@ -36,9 +36,6 @@ const ( ErrEvents = unix.EPOLLERR | unix.EPOLLHUP | unix.EPOLLRDHUP ) -// PollEventHandler is the callback for I/O events notified by the poller. -type PollEventHandler func(int, uint32) error - type eventList struct { size int events []epollevent diff --git a/internal/netpoll/defs_poller_kqueue.go b/internal/netpoll/defs_poller_kqueue.go index 0b2e883b8..89ea29ae8 100644 --- a/internal/netpoll/defs_poller_kqueue.go +++ b/internal/netpoll/defs_poller_kqueue.go @@ -33,9 +33,6 @@ const ( MaxAsyncTasksAtOneTime = 128 ) -// PollEventHandler is the callback for I/O events notified by the poller. -type PollEventHandler func(int, int16, uint16) error - type eventList struct { size int events []unix.Kevent_t diff --git a/internal/netpoll/poller_epoll_default.go b/internal/netpoll/poller_epoll_default.go index c21765772..20b9cb213 100644 --- a/internal/netpoll/poller_epoll_default.go +++ b/internal/netpoll/poller_epoll_default.go @@ -135,7 +135,7 @@ func (p *Poller) Polling(callback PollEventHandler) error { if fd := int(ev.Fd); fd == p.efd { // poller is awakened to run tasks in queues. 
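// (Editor's note: p.efd is the poller's wake-up descriptor, an eventfd in the default epoll
// poller if I read the surrounding code correctly; Trigger() writes to it so that a blocked
// epoll_wait returns and the queued tasks, the "chores", get run on the loop thread.)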
doChores = true } else { - switch err = callback(fd, ev.Events); err { + switch err = callback(fd, ev.Events, 0); err { case nil: case errors.ErrAcceptSocket, errors.ErrEngineShutdown: return err diff --git a/internal/netpoll/poller_epoll_ultimate.go b/internal/netpoll/poller_epoll_ultimate.go index 479415366..844914d8a 100644 --- a/internal/netpoll/poller_epoll_ultimate.go +++ b/internal/netpoll/poller_epoll_ultimate.go @@ -137,7 +137,7 @@ func (p *Poller) Polling() error { if pollAttachment.FD == p.epa.FD { // poller is awakened to run tasks in queues. doChores = true } else { - switch err = pollAttachment.Callback(pollAttachment.FD, ev.events); err { + switch err = pollAttachment.Callback(pollAttachment.FD, ev.events, 0); err { case nil: case errors.ErrAcceptSocket, errors.ErrEngineShutdown: return err diff --git a/reactor_epoll_default.go b/reactor_epoll_default.go index bd0e7d88d..83be6d525 100644 --- a/reactor_epoll_default.go +++ b/reactor_epoll_default.go @@ -33,7 +33,7 @@ func (el *eventloop) rotate() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(el.engine.accept) + err := el.poller.Polling(el.accept0) if err == errors.ErrEngineShutdown { el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil @@ -52,10 +52,10 @@ func (el *eventloop) orbit() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(func(fd int, ev uint32) error { + err := el.poller.Polling(func(fd int, ev netpoll.IOEvent, _ netpoll.IOFlags) error { c := el.connections.getConn(fd) if c == nil { - // Somehow epoll notify with an event for a stale fd that is not in our connection set. + // Somehow epoll notified with an event for a stale fd that is not in our connection set. // We need to delete it from the epoll set. return el.poller.Delete(fd) } @@ -121,13 +121,13 @@ func (el *eventloop) run() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(func(fd int, ev uint32) error { + err := el.poller.Polling(func(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) error { c := el.connections.getConn(fd) if c == nil { if _, ok := el.listeners[fd]; ok { - return el.accept(fd, ev) + return el.accept(fd, ev, flags) } - // Somehow epoll notify with an event for a stale fd that is not in our connection set. + // Somehow epoll notified with an event for a stale fd that is not in our connection set. // We need to delete it from the epoll set. 
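// (Editor's note: a stale fd can show up when a connection is closed while further events
// for the same fd are still sitting in the current epoll_wait batch; deleting it here keeps
// the interest set consistent.)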
return el.poller.Delete(fd) diff --git a/reactor_kqueue_default.go b/reactor_kqueue_default.go index bdc103f19..19ca75602 100644 --- a/reactor_kqueue_default.go +++ b/reactor_kqueue_default.go @@ -24,6 +24,7 @@ import ( "golang.org/x/sys/unix" + "github.com/panjf2000/gnet/v2/internal/netpoll" "github.com/panjf2000/gnet/v2/pkg/errors" ) @@ -33,7 +34,7 @@ func (el *eventloop) rotate() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(el.engine.accept) + err := el.poller.Polling(el.accept0) if err == errors.ErrEngineShutdown { el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil @@ -52,7 +53,7 @@ func (el *eventloop) orbit() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(func(fd int, filter int16, flags uint16) (err error) { + err := el.poller.Polling(func(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) (err error) { c := el.connections.getConn(fd) if c == nil { // This might happen when the connection has already been closed, @@ -110,7 +111,7 @@ func (el *eventloop) run() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(func(fd int, filter int16, flags uint16) (err error) { + err := el.poller.Polling(func(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) (err error) { c := el.connections.getConn(fd) if c == nil { if _, ok := el.listeners[fd]; ok { From 3594d228efa61f6bd9ea63ed5c2512b4375eaa6e Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Mon, 22 Apr 2024 12:44:29 +0800 Subject: [PATCH 10/12] opt: reduce duplicate code of I/O processing (#587) --- acceptor_unix.go | 15 +----- connection_bsd.go | 8 +-- connection_linux.go | 10 ++-- eventloop_unix.go | 6 +-- eventloop_windows.go | 13 ++--- gnet_test.go | 2 +- reactor_epoll_default.go | 101 +++---------------------------------- reactor_epoll_ultimate.go | 9 ++-- reactor_kqueue_default.go | 80 ++++------------------------- reactor_kqueue_ultimate.go | 9 ++-- 10 files changed, 51 insertions(+), 202 deletions(-) diff --git a/acceptor_unix.go b/acceptor_unix.go index 1afad82b4..fe7d67cab 100644 --- a/acceptor_unix.go +++ b/acceptor_unix.go @@ -33,7 +33,7 @@ func (el *eventloop) accept0(fd int, _ netpoll.IOEvent, _ netpoll.IOFlags) error nfd, sa, err := socket.Accept(fd) if err != nil { switch err { - case unix.EAGAIN: // the Accept queue has been drained, we can return now + case unix.EAGAIN: // the Accept queue has been drained out, we can return now return nil case unix.EINTR, unix.ECONNRESET, unix.ECONNABORTED: // ECONNRESET or ECONNABORTED could indicate that a socket @@ -93,16 +93,5 @@ func (el *eventloop) accept(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) e } c := newTCPConn(nfd, el, sa, el.listeners[fd].addr, remoteAddr) - addEvents := el.poller.AddRead - if el.engine.opts.EdgeTriggeredIO { - addEvents = el.poller.AddReadWrite - } - if err = addEvents(&c.pollAttachment, el.engine.opts.EdgeTriggeredIO); err != nil { - el.getLogger().Errorf("failed to register the accepted socket fd=%d to poller: %v", c.fd, err) - _ = unix.Close(c.fd) - c.release() - return err - } - el.connections.addConn(c, el.idx) - return el.open(c) + return el.register0(c) } diff --git a/connection_bsd.go b/connection_bsd.go index b24becffb..3223f01e4 100644 --- a/connection_bsd.go +++ b/connection_bsd.go @@ -39,14 +39,14 @@ func (c *conn) processIO(_ int, filter netpoll.IOEvent, flags netpoll.IOFlags) ( if flags&unix.EV_EOF != 0 && c.opened && err == nil { switch filter { case unix.EVFILT_READ: - // Receive the event of EVFILT_READ | EV_EOF, but the 
previous eventloop.read + // Received the event of EVFILT_READ|EV_EOF, but the previous eventloop.read // failed to drain the socket buffer, so we make sure we get it done this time. c.isEOF = true err = el.read(c) case unix.EVFILT_WRITE: - // On macOS, the kqueue in both LT and ET mode will notify with one event for the EOF - // of the TCP remote: EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. But for some reason, two - // events will be issued in ET mode for the EOF of the Unix remote in this order: + // On macOS, the kqueue in either LT or ET mode will notify with one event for the + // EOF of the TCP remote: EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. But for some reason, + // two events will be issued in ET mode for the EOF of the Unix remote in this order: // 1) EVFILT_WRITE|EV_ADD|EV_CLEAR|EV_EOF, 2) EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. err = el.write(c) default: diff --git a/connection_linux.go b/connection_linux.go index ac8376e4c..d14e1f32e 100644 --- a/connection_linux.go +++ b/connection_linux.go @@ -28,7 +28,7 @@ import ( func (c *conn) processIO(_ int, ev netpoll.IOEvent, _ netpoll.IOFlags) error { el := c.loop // First check for any unexpected non-IO events. - // For these events we just close the corresponding connection directly. + // For these events we just close the connection directly. if ev&netpoll.ErrEvents != 0 && ev&unix.EPOLLIN == 0 && ev&unix.EPOLLOUT == 0 { c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error return el.close(c, io.EOF) @@ -40,9 +40,9 @@ func (c *conn) processIO(_ int, ev netpoll.IOEvent, _ netpoll.IOFlags) error { // offload the incoming traffic by writing all pending data back to the remotes // before continuing to read and handle requests. // 2. When the connection is dead, we need to try writing any pending data back - // to the remote and close the connection first. + // to the remote first and then close the connection. // - // We perform eventloop.write for EPOLLOUT because it will take good care of either case. + // We perform eventloop.write for EPOLLOUT because it can take good care of either case. if ev&(unix.EPOLLOUT|unix.EPOLLERR) != 0 { if err := el.write(c); err != nil { return err @@ -61,8 +61,8 @@ func (c *conn) processIO(_ int, ev netpoll.IOEvent, _ netpoll.IOFlags) error { if ev&unix.EPOLLIN == 0 { // unreadable EPOLLRDHUP, close the connection directly return el.close(c, io.EOF) } - // Received the event of EPOLLIN | EPOLLRDHUP, but the previous eventloop.read - // failed to drain the socket buffer, so we make sure we get it done this time. + // Received the event of EPOLLIN|EPOLLRDHUP, but the previous eventloop.read + // failed to drain the socket buffer, so we ensure to get it done this time. 
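// (Editor's note: isEOF appears to record that the peer has already signaled EOF, so the
// next read pass drains whatever remains in the socket buffer and then surfaces the close,
// rather than waiting for another edge notification that will never arrive.)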
diff --git a/eventloop_unix.go index c0b326804..b7530a966 100644 --- a/eventloop_unix.go +++ b/eventloop_unix.go @@ -75,20 +75,20 @@ func (el *eventloop) register(itf interface{}) error { c = ccb.c defer ccb.cb() } + return el.register0(c) +} +func (el *eventloop) register0(c *conn) error { addEvents := el.poller.AddRead if el.engine.opts.EdgeTriggeredIO { addEvents = el.poller.AddReadWrite } - if err := addEvents(&c.pollAttachment, el.engine.opts.EdgeTriggeredIO); err != nil { _ = unix.Close(c.fd) c.release() return err } - el.connections.addConn(c, el.idx) - if c.isDatagram && c.remote != nil { return nil } diff --git a/eventloop_windows.go index 074318c58..ea0f87377 100644 --- a/eventloop_windows.go +++ b/eventloop_windows.go @@ -17,12 +17,13 @@ package gnet import ( "bytes" "context" + "errors" "runtime" "strings" "sync/atomic" "time" - "github.com/panjf2000/gnet/v2/pkg/errors" + errorx "github.com/panjf2000/gnet/v2/pkg/errors" "github.com/panjf2000/gnet/v2/pkg/logging" ) @@ -79,7 +80,7 @@ func (el *eventloop) run() (err error) { err = v() } - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) break } else if err != nil { @@ -121,7 +122,7 @@ func (el *eventloop) read(c *conn) error { case Close: return el.close(c, nil) case Shutdown: - return errors.ErrEngineShutdown + return errorx.ErrEngineShutdown } _, _ = c.inboundBuffer.Write(c.buffer.B) c.buffer.Reset() @@ -132,7 +133,7 @@ func (el *eventloop) readUDP(c *conn) error { action := el.eventHandler.OnTraffic(c) if action == Shutdown { - return errors.ErrEngineShutdown + return errorx.ErrEngineShutdown } c.release() return nil } @@ -160,7 +161,7 @@ func (el *eventloop) ticker(ctx context.Context) { case Shutdown: if !shutdown { shutdown = true - el.ch <- errors.ErrEngineShutdown + el.ch <- errorx.ErrEngineShutdown el.getLogger().Debugf("stopping ticker in event-loop(%d) from Tick()", el.idx) } } @@ -220,7 +221,7 @@ func (el *eventloop) handleAction(c *conn, action Action) error { case Close: return el.close(c, nil) case Shutdown: - return errors.ErrEngineShutdown + return errorx.ErrEngineShutdown default: return nil } diff --git a/gnet_test.go index fec73ad16..11c7df997 100644 --- a/gnet_test.go +++ b/gnet_test.go @@ -1484,7 +1484,7 @@ func (s *simServer) OnTraffic(c Conn) (action Action) { var packets [][]byte for { data, err := codec.Decode(c) - if err == errIncompletePacket { + if errors.Is(err, errIncompletePacket) { break } if err != nil {
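A note on the mechanical change that runs through the rest of this commit: every comparison of the form err == errors.ErrEngineShutdown becomes errors.Is(err, errorx.ErrEngineShutdown), with gnet's error package imported under the alias errorx so the standard library's errors can sit next to it. Unlike ==, errors.Is keeps matching the sentinel after a caller has wrapped it. A runnable sketch (the sentinel text and the wrapping site are invented, not gnet code):

package main

import (
	"errors"
	"fmt"
)

var errShutdown = errors.New("engine is shutting down")

func main() {
	// Wrap the sentinel with context, as callers commonly do via %w.
	wrapped := fmt.Errorf("event loop exited: %w", errShutdown)

	fmt.Println(wrapped == errShutdown)          // false: identity check fails once wrapped
	fmt.Println(errors.Is(wrapped, errShutdown)) // true: errors.Is walks the wrap chain
}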
diff --git a/reactor_epoll_default.go index 83be6d525..4f150e6ec 100644 --- a/reactor_epoll_default.go +++ b/reactor_epoll_default.go @@ -18,13 +18,11 @@ package gnet import ( - "io" + "errors" "runtime" - "golang.org/x/sys/unix" - "github.com/panjf2000/gnet/v2/internal/netpoll" - "github.com/panjf2000/gnet/v2/pkg/errors" + errorx "github.com/panjf2000/gnet/v2/pkg/errors" ) func (el *eventloop) rotate() error { @@ -34,7 +32,7 @@ } err := el.poller.Polling(el.accept0) - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil } else if err != nil { @@ -52,57 +50,16 @@ func (el *eventloop) orbit() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(func(fd int, ev netpoll.IOEvent, _ netpoll.IOFlags) error { + err := el.poller.Polling(func(fd int, ev netpoll.IOEvent, flags netpoll.IOFlags) error { c := el.connections.getConn(fd) if c == nil { // Somehow epoll notified with an event for a stale fd that is not in our connection set. // We need to delete it from the epoll set. return el.poller.Delete(fd) } - - // First check for any unexpected non-IO events. - // For these events we just close the corresponding connection directly. - if ev&netpoll.ErrEvents != 0 && ev&unix.EPOLLIN == 0 && ev&unix.EPOLLOUT == 0 { - c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error - return el.close(c, io.EOF) - } - // Secondly, check for EPOLLOUT before EPOLLIN, the former has a higher priority - // than the latter regardless of the aliveness of the current connection: - // - // 1. When the connection is alive and the system is overloaded, we want to - // offload the incoming traffic by writing all pending data back to the remotes - // before continuing to read and handle requests. - // 2. When the connection is dead, we need to try writing any pending data back - // to the remote and close the connection first. - // - // We perform eventloop.write for EPOLLOUT because it will take good care of either case. - if ev&(unix.EPOLLOUT|unix.EPOLLERR) != 0 { - if err := el.write(c); err != nil { - return err - } - } - // Check for EPOLLIN before EPOLLRDHUP in case that there are pending data in - // the socket buffer. - if ev&(unix.EPOLLIN|unix.EPOLLERR) != 0 { - if err := el.read(c); err != nil { - return err - } - } - // Ultimately, check for EPOLLRDHUP, this event indicates that the remote has - // either closed connection or shut down the writing half of the connection. - if ev&unix.EPOLLRDHUP != 0 && c.opened { - if ev&unix.EPOLLIN == 0 { // unreadable EPOLLRDHUP, close the connection directly - return el.close(c, io.EOF) - } - // Received the event of EPOLLIN | EPOLLRDHUP, but the previous eventloop.read - // failed to drain the socket buffer, so we make sure we get it done this time. - c.isEOF = true - return el.read(c) - } - return nil + return c.processIO(fd, ev, flags) }) - - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { @@ -130,52 +87,10 @@ func (el *eventloop) run() error { // Somehow epoll notified with an event for a stale fd that is not in our connection set. // We need to delete it from the epoll set. return el.poller.Delete(fd) - - } - - // First check for any unexpected non-IO events. - // For these events we just close the corresponding connection directly. - if ev&netpoll.ErrEvents != 0 && ev&unix.EPOLLIN == 0 && ev&unix.EPOLLOUT == 0 { - c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error - return el.close(c, io.EOF) - } - // Secondly, check for EPOLLOUT before EPOLLIN, the former has a higher priority - // than the latter regardless of the aliveness of the current connection: - // - // 1. When the connection is alive and the system is overloaded, we want to - // offload the incoming traffic by writing all pending data back to the remotes - // before continuing to read and handle requests. - // 2. When the connection is dead, we need to try writing any pending data back - // to the remote and close the connection first.
- // - // We perform eventloop.write for EPOLLOUT because it will take good care of either case. - if ev&(unix.EPOLLOUT|unix.EPOLLERR) != 0 { - if err := el.write(c); err != nil { - return err - } } - // Check for EPOLLIN before EPOLLRDHUP in case that there are pending data in - // the socket buffer. - if ev&(unix.EPOLLIN|unix.EPOLLERR) != 0 { - if err := el.read(c); err != nil { - return err - } - } - // Ultimately, check for EPOLLRDHUP, this event indicates that the remote has - // either closed connection or shut down the writing half of the connection. - if ev&unix.EPOLLRDHUP != 0 && c.opened { - if ev&unix.EPOLLIN == 0 { // unreadable EPOLLRDHUP, close the connection directly - return el.close(c, io.EOF) - } - // Received the event of EPOLLIN | EPOLLRDHUP, but the previous eventloop.read - // failed to drain the socket buffer, so we make sure we get it done this time. - c.isEOF = true - return el.read(c) - } - return nil + return c.processIO(fd, ev, flags) }) - - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { diff --git a/reactor_epoll_ultimate.go b/reactor_epoll_ultimate.go index 49e1477ba..2fd3dd693 100644 --- a/reactor_epoll_ultimate.go +++ b/reactor_epoll_ultimate.go @@ -18,9 +18,10 @@ package gnet import ( + "errors" "runtime" - "github.com/panjf2000/gnet/v2/pkg/errors" + errorx "github.com/panjf2000/gnet/v2/pkg/errors" ) func (el *eventloop) rotate() error { @@ -30,7 +31,7 @@ func (el *eventloop) rotate() error { } err := el.poller.Polling() - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil } else if err != nil { @@ -49,7 +50,7 @@ func (el *eventloop) orbit() error { } err := el.poller.Polling() - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { @@ -69,7 +70,7 @@ func (el *eventloop) run() error { } err := el.poller.Polling() - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { diff --git a/reactor_kqueue_default.go b/reactor_kqueue_default.go index 19ca75602..7426c74e0 100644 --- a/reactor_kqueue_default.go +++ b/reactor_kqueue_default.go @@ -19,13 +19,11 @@ package gnet import ( - "io" + "errors" "runtime" - "golang.org/x/sys/unix" - "github.com/panjf2000/gnet/v2/internal/netpoll" - "github.com/panjf2000/gnet/v2/pkg/errors" + errorx "github.com/panjf2000/gnet/v2/pkg/errors" ) func (el *eventloop) rotate() error { @@ -35,7 +33,7 @@ func (el *eventloop) rotate() error { } err := el.poller.Polling(el.accept0) - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil } else if err != nil { @@ -53,46 +51,18 @@ func (el *eventloop) orbit() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(func(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) (err error) { + err := el.poller.Polling(func(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) error { c := el.connections.getConn(fd) if c == nil { 
diff --git a/reactor_kqueue_default.go index 19ca75602..7426c74e0 100644 --- a/reactor_kqueue_default.go +++ b/reactor_kqueue_default.go @@ -19,13 +19,11 @@ package gnet import ( - "io" + "errors" "runtime" - "golang.org/x/sys/unix" - "github.com/panjf2000/gnet/v2/internal/netpoll" - "github.com/panjf2000/gnet/v2/pkg/errors" + errorx "github.com/panjf2000/gnet/v2/pkg/errors" ) func (el *eventloop) rotate() error { @@ -35,7 +33,7 @@ } err := el.poller.Polling(el.accept0) - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil } else if err != nil { @@ -53,46 +51,18 @@ func (el *eventloop) orbit() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(func(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) (err error) { + err := el.poller.Polling(func(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) error { c := el.connections.getConn(fd) if c == nil {
// This might happen when the connection has already been closed, // the file descriptor will be deleted from kqueue automatically // as documented in the manual pages, so we just print a warning log. el.getLogger().Warnf("received event[fd=%d|filter=%d|flags=%d] of a stale connection from event-loop(%d)", fd, filter, flags, el.idx) - return - } - - switch filter { - case unix.EVFILT_READ: - err = el.read(c) - case unix.EVFILT_WRITE: - err = el.write(c) - } - // EV_EOF indicates that the remote has closed the connection. - // We check for EV_EOF after processing the read/write event - // to ensure that nothing is left out on this event filter. - if flags&unix.EV_EOF != 0 && c.opened && err == nil { - switch filter { - case unix.EVFILT_READ: - // Receive the event of EVFILT_READ | EV_EOF, but the previous eventloop.read - // failed to drain the socket buffer, so we make sure we get it done this time. - c.isEOF = true - err = el.read(c) - case unix.EVFILT_WRITE: - // On macOS, the kqueue in both LT and ET mode will notify with one event for the EOF - // of the TCP remote: EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. But for some reason, two - // events will be issued in ET mode for the EOF of the Unix remote in this order: - // 1) EVFILT_WRITE|EV_ADD|EV_CLEAR|EV_EOF, 2) EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. - err = el.write(c) - default: - c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error - err = el.close(c, io.EOF) - } + return nil } - return + return c.processIO(fd, filter, flags) }) - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { @@ -111,7 +81,7 @@ func (el *eventloop) run() error { defer runtime.UnlockOSThread() } - err := el.poller.Polling(func(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) (err error) { + err := el.poller.Polling(func(fd int, filter netpoll.IOEvent, flags netpoll.IOFlags) error { c := el.connections.getConn(fd) if c == nil { if _, ok := el.listeners[fd]; ok { @@ -121,39 +91,11 @@ func (el *eventloop) run() error { // the file descriptor will be deleted from kqueue automatically // as documented in the manual pages, so we just print a warning log. el.getLogger().Warnf("received event[fd=%d|filter=%d|flags=%d] of a stale connection from event-loop(%d)", fd, filter, flags, el.idx) - return - } - - switch filter { - case unix.EVFILT_READ: - err = el.read(c) - case unix.EVFILT_WRITE: - err = el.write(c) - } - // EV_EOF indicates that the remote has closed the connection. - // We check for EV_EOF after processing the read/write event - // to ensure that nothing is left out on this event filter. - if flags&unix.EV_EOF != 0 && c.opened && err == nil { - switch filter { - case unix.EVFILT_READ: - // Receive the event of EVFILT_READ | EV_EOF, but the previous eventloop.read - // failed to drain the socket buffer, so we make sure we get it done this time. - c.isEOF = true - err = el.read(c) - case unix.EVFILT_WRITE: - // On macOS, the kqueue in both LT and ET mode will notify with one event for the EOF - // of the TCP remote: EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF. But for some reason, two - // events will be issued in ET mode for the EOF of the Unix remote in this order: - // 1) EVFILT_WRITE|EV_ADD|EV_CLEAR|EV_EOF, 2) EVFILT_READ|EV_ADD|EV_CLEAR|EV_EOF.
- err = el.write(c) - default: - c.outboundBuffer.Release() // don't bother to write to a connection with some unknown error - err = el.close(c, io.EOF) - } + return nil } - return + return c.processIO(fd, filter, flags) }) - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil {
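The kqueue-specific branching deleted above now lives in connection_bsd.go's processIO, shown earlier in this commit: handle the triggering filter first, then EV_EOF, so that bytes delivered together with the EOF notification are not left behind. Its shape reduces to the following simplified sketch (BSD/macOS only, not the actual gnet code; read, write, and closeConn stand in for the eventloop methods and are not gnet identifiers):

//go:build darwin || dragonfly || freebsd || netbsd || openbsd

package main

import "golang.org/x/sys/unix"

// dispatch mirrors the order of operations: the triggering filter first,
// then a second pass if the event also carried EV_EOF.
func dispatch(filter int16, flags uint16, read, write, closeConn func() error) error {
	var err error
	switch filter {
	case unix.EVFILT_READ:
		err = read()
	case unix.EVFILT_WRITE:
		err = write()
	}
	if flags&unix.EV_EOF != 0 && err == nil {
		switch filter {
		case unix.EVFILT_READ:
			err = read() // one more read to drain what arrived with the EOF
		case unix.EVFILT_WRITE:
			err = write() // a Unix-domain EOF can surface on the write filter first
		default:
			err = closeConn() // unknown filter plus EOF: give up on the connection
		}
	}
	return err
}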
diff --git a/reactor_kqueue_ultimate.go index e09c296df..93b998625 100644 --- a/reactor_kqueue_ultimate.go +++ b/reactor_kqueue_ultimate.go @@ -19,9 +19,10 @@ package gnet import ( + "errors" "runtime" - "github.com/panjf2000/gnet/v2/pkg/errors" + errorx "github.com/panjf2000/gnet/v2/pkg/errors" ) func (el *eventloop) rotate() error { @@ -31,7 +32,7 @@ } err := el.poller.Polling() - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("main reactor is exiting in terms of the demand from user, %v", err) err = nil } else if err != nil { @@ -50,7 +51,7 @@ } err := el.poller.Polling() - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil { @@ -70,7 +71,7 @@ } err := el.poller.Polling() - if err == errors.ErrEngineShutdown { + if errors.Is(err, errorx.ErrEngineShutdown) { el.getLogger().Debugf("event-loop(%d) is exiting in terms of the demand from user, %v", el.idx, err) err = nil } else if err != nil {
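One more illustration before the README-only commits below: the register0 helper introduced in this commit arms level-triggered connections for reads only (AddRead) but edge-triggered ones for reads and writes up front (AddReadWrite), since an ET poller reports writability only on state transitions instead of re-announcing it while the send buffer has room. Expressed directly against epoll, that choice looks roughly like this (Linux-only sketch; epfd/fd are assumed valid, and gnet's own poller methods carry extra bookkeeping):

package main

import "golang.org/x/sys/unix"

// register arms fd in the epoll interest list. Level-triggered starts with
// read interest only; edge-triggered registers read+write once, up front.
func register(epfd, fd int, edgeTriggered bool) error {
	ev := unix.EpollEvent{Fd: int32(fd), Events: unix.EPOLLIN | unix.EPOLLRDHUP}
	if edgeTriggered {
		ev.Events |= unix.EPOLLOUT | unix.EPOLLET
	}
	return unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, fd, &ev)
}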
From 561d15ed2446cc5e9122fa844de3434a9d5dfc3a Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Mon, 22 Apr 2024 13:15:25 +0800 Subject: [PATCH 11/12] chore: update READMEs --- README.md | 2 +- README_ZH.md | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md index 0b61d897d..f49911c25 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@
- +
diff --git a/README_ZH.md b/README_ZH.md index 47011a4f5..12bcedcde 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -3,7 +3,7 @@
- +
@@ -44,8 +44,8 @@ - [x] 支持 **Windows** 平台 (仅用于开发环境的兼容性,不要在生产环境中使用) - [x] **Edge-triggered** I/O 支持 - [x] 多网络地址绑定 -- [ ] 支持 **TLS** -- [ ] 支持 [io_uring](https://kernel.dk/io_uring.pdf) +- [ ] **TLS** 支持 +- [ ] [io_uring](https://kernel.dk/io_uring.pdf) 支持 # 🎬 开始 From 5cca785f21c3a19a6b189cd2f5687727c77ecf39 Mon Sep 17 00:00:00 2001 From: Andy Pan Date: Mon, 22 Apr 2024 13:18:40 +0800 Subject: [PATCH 12/12] chore: update READMEs --- README.md | 2 +- README_ZH.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f49911c25..5934bf1b9 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@
- +
diff --git a/README_ZH.md b/README_ZH.md index 12bcedcde..43db28d00 100644 --- a/README_ZH.md +++ b/README_ZH.md @@ -3,7 +3,7 @@
- +