Skip to content

Commit

Permalink
Merge pull request #10 from mwitkow/bugfix/streaming-fix
Browse files Browse the repository at this point in the history
Streaming Fix: handle bidirectional case
  • Loading branch information
Michal Witkowski committed Mar 26, 2017
2 parents c2f7c98 + de4d3db commit af55d61
Show file tree
Hide file tree
Showing 4 changed files with 162 additions and 45 deletions.
49 changes: 35 additions & 14 deletions proxy/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ package proxy
import (
"io"

"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/transport"
Expand Down Expand Up @@ -64,27 +65,49 @@ func (s *handler) handler(srv interface{}, serverStream grpc.ServerStream) error
return grpc.Errorf(codes.Internal, "lowLevelServerStream not exists in context")
}
fullMethodName := lowLevelServerStream.Method()
clientCtx, clientCancel := context.WithCancel(serverStream.Context())
backendConn, err := s.director(serverStream.Context(), fullMethodName)
if err != nil {
return err
}
// TODO(mwitkow): Add a `forwarded` header to metadata, https://en.wikipedia.org/wiki/X-Forwarded-For.
clientStream, err := grpc.NewClientStream(serverStream.Context(), clientStreamDescForProxying, backendConn, fullMethodName)
clientStream, err := grpc.NewClientStream(clientCtx, clientStreamDescForProxying, backendConn, fullMethodName)
if err != nil {
return err
}
defer clientStream.CloseSend() // always close this!
s2cErr := <-s.forwardServerToClient(serverStream, clientStream)
c2sErr := <-s.forwardClientToServer(clientStream, serverStream)
if s2cErr != io.EOF {
return grpc.Errorf(codes.Internal, "failed proxying s2c: %v", s2cErr)
}
serverStream.SetTrailer(clientStream.Trailer())
// c2sErr will contain RPC error from client code. If not io.EOF return the RPC error as server stream error.
if c2sErr != io.EOF {
return c2sErr
s2cErrChan := s.forwardServerToClient(serverStream, clientStream)
defer close(s2cErrChan)
c2sErrChan := s.forwardClientToServer(clientStream, serverStream)
defer close(c2sErrChan)
// We don't know which side is going to stop sending first, so we need a select between the two.
for i := 0; i < 2; i++ {
select {
case s2cErr := <-s2cErrChan:
if s2cErr == io.EOF {
// this is the happy case where the sender has encountered io.EOF, and won't be sending anymore./
// the clientStream>serverStream may continue pumping though.
clientStream.CloseSend()
break
} else {
// however, we may have gotten a receive error (stream disconnected, a read error etc) in which case we need
// to cancel the clientStream to the backend, let all of its goroutines be freed up by the CancelFunc and
// exit with an error to the stack
clientCancel()
return grpc.Errorf(codes.Internal, "failed proxying s2c: %v", s2cErr)
}
case c2sErr := <-c2sErrChan:
// This happens when the clientStream has nothing else to offer (io.EOF), returned a gRPC error. In those two
// cases we may have received Trailers as part of the call. In case of other errors (stream closed) the trailers
// will be nil.
serverStream.SetTrailer(clientStream.Trailer())
// c2sErr will contain RPC error from client code. If not io.EOF return the RPC error as server stream error.
if c2sErr != io.EOF {
return c2sErr
}
return nil
}
}
return nil
return grpc.Errorf(codes.Internal, "gRPC proxying should never reach this stage.")
}

func (s *handler) forwardClientToServer(src grpc.ClientStream, dst grpc.ServerStream) chan error {
Expand Down Expand Up @@ -115,7 +138,6 @@ func (s *handler) forwardClientToServer(src grpc.ClientStream, dst grpc.ServerSt
break
}
}
close(ret)
}()
return ret
}
Expand All @@ -134,7 +156,6 @@ func (s *handler) forwardServerToClient(src grpc.ServerStream, dst grpc.ClientSt
break
}
}
close(ret)
}()
return ret
}
57 changes: 42 additions & 15 deletions proxy/handler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"

"fmt"

pb "github.com/mwitkow/grpc-proxy/testservice"
)

Expand Down Expand Up @@ -72,6 +74,27 @@ func (s *assertingService) PingList(ping *pb.PingRequest, stream pb.TestService_
return nil
}

// PingStream implements the bidirectional-streaming TestService RPC for the
// asserting test backend: it echoes every received PingRequest back as a
// PingResponse carrying a monotonically increasing Counter, sends a known
// header before the first message and a known trailer after the last, so the
// proxy tests can verify metadata passes through intact.
func (s *assertingService) PingStream(stream pb.TestService_PingStreamServer) error {
	// Header must go out before any response message so the client-side
	// test can assert it arrives first.
	err := stream.SendHeader(metadata.Pairs(serverHeaderMdKey, "I like turtles."))
	require.NoError(s.t, err, "can't fail sending headers")
	counter := int32(0)
	for {
		ping, err := stream.Recv()
		if err == io.EOF {
			// Client closed its send direction: normal end of stream.
			break
		}
		if err != nil {
			require.NoError(s.t, err, "can't fail reading stream")
			return err
		}
		pong := &pb.PingResponse{Value: ping.Value, Counter: counter}
		if err := stream.Send(pong); err != nil {
			require.NoError(s.t, err, "can't fail sending back a pong")
		}
		counter++
	}
	// Trailer is attached after all messages; the test asserts it survives
	// the proxy hop.
	stream.SetTrailer(metadata.Pairs(serverTrailerMdKey, "I like ending turtles."))
	return nil
}

// ProxyHappySuite tests the "happy" path of handling: that everything works in absence of connection issues.
type ProxyHappySuite struct {
suite.Suite
Expand Down Expand Up @@ -125,24 +148,28 @@ func (s *ProxyHappySuite) TestDirectorErrorIsPropagated() {
assert.Equal(s.T(), "testing rejection", grpc.ErrorDesc(err))
}

func (s *ProxyHappySuite) TestPingListStreamsAll() {
stream, err := s.testClient.PingList(s.ctx(), &pb.PingRequest{Value: "foo"})
require.NoError(s.T(), err, "PingList request should be successful.")
// Check that the header arrives before all entries.
headerMd, err := stream.Header()
require.NoError(s.T(), err, "PingList headers should not error.")
assert.Len(s.T(), headerMd, 1, "PingList response headers user contain metadata")
count := 0
for {
func (s *ProxyHappySuite) TestPingStream_FullDuplexWorks() {
stream, err := s.testClient.PingStream(s.ctx())
require.NoError(s.T(), err, "PingStream request should be successful.")

for i := 0; i < countListResponses; i++ {
ping := &pb.PingRequest{Value: fmt.Sprintf("foo:%d", i)}
require.NoError(s.T(), stream.Send(ping), "sending to PingStream must not fail")
resp, err := stream.Recv()
if err == io.EOF {
break
}
require.NoError(s.T(), err, "PingList stream should not be interrupted.")
require.Equal(s.T(), "foo", resp.Value)
count = count + 1
if i == 0 {
// Check that the header arrives before all entries.
headerMd, err := stream.Header()
require.NoError(s.T(), err, "PingStream headers should not error.")
assert.Len(s.T(), headerMd, 1, "PingStream response headers user contain metadata")
}
assert.EqualValues(s.T(), i, resp.Counter, "ping roundtrip must succeed with the correct id")
}
assert.Equal(s.T(), countListResponses, count, "PingList must successfully return all outputs")
require.NoError(s.T(), stream.CloseSend(), "no error on close send")
_, err = stream.Recv()
require.Equal(s.T(), io.EOF, err, "stream should close with io.EOF, meaining OK")
// Check that the trailer headers are here.
trailerMd := stream.Trailer()
assert.Len(s.T(), trailerMd, 1, "PingList trailer headers user contain metadata")
Expand Down Expand Up @@ -183,12 +210,12 @@ func (s *ProxyHappySuite) SetupSuite() {
"Ping")

// Start the serving loops.
s.T().Logf("starting grpc.Server at: %v", s.serverListener.Addr().String())
go func() {
s.T().Logf("starting grpc.Server at: %v", s.serverListener.Addr().String())
s.server.Serve(s.serverListener)
}()
s.T().Logf("starting grpc.Proxy at: %v", s.proxyListener.Addr().String())
go func() {
s.T().Logf("starting grpc.Proxy at: %v", s.proxyListener.Addr().String())
s.proxy.Serve(s.proxyListener)
}()

Expand Down
98 changes: 82 additions & 16 deletions testservice/test.pb.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions testservice/test.proto
Original file line number Diff line number Diff line change
Expand Up @@ -22,5 +22,8 @@ service TestService {
rpc PingError(PingRequest) returns (Empty) {}

rpc PingList(PingRequest) returns (stream PingResponse) {}

rpc PingStream(stream PingRequest) returns (stream PingResponse) {}

}

0 comments on commit af55d61

Please sign in to comment.