Commit

Merge pull request #334 from cloudwego/optimize/failure_retry
optimize: improve retry success rate when doing failure retry
YangruiEmma committed Feb 16, 2022
2 parents f6af0f0 + 17f72cb commit 83e8798
Showing 2 changed files with 4 additions and 1 deletion.
client/rpctimeout.go (2 changes: 1 addition, 1 deletion)

```diff
@@ -109,7 +109,7 @@ func rpcTimeoutMW(mwCtx context.Context) endpoint.Middleware {
 				e := panicToErr(ctx, panicInfo, ri)
 				done <- e
 			}
-			if !errors.Is(err, kerrors.ErrRPCFinish) {
+			if err == nil || !errors.Is(err, kerrors.ErrRPCFinish) {
 				// Don't regards ErrRPCFinish as normal error, it happens in retry scene,
 				// ErrRPCFinish means previous call returns first but is decoding.
 				close(done)
```
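For context on this hunk: errors.Is reports false for a nil error against a non-nil target, so the added err == nil check short-circuits the common success path and makes the intent explicit at a glance, while a genuine kerrors.ErrRPCFinish still skips close(done). Below is a minimal, self-contained sketch of the done-channel pattern around this guard; the callWithTimeout wrapper and the local errRPCFinish sentinel are illustrative assumptions, not Kitex code.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// errRPCFinish is a local stand-in for kerrors.ErrRPCFinish.
var errRPCFinish = errors.New("rpc finished by another call")

// callWithTimeout sketches the done-channel pattern: the call runs in a
// goroutine, and done is closed only when its result should be delivered to
// the select below. errors.Is(nil, errRPCFinish) is false, so the explicit
// err == nil check is a short-circuit for the success path; ErrRPCFinish
// itself must not close done, because another (retried) call owns the result.
func callWithTimeout(ctx context.Context, timeout time.Duration, call func(context.Context) error) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	done := make(chan error, 1)
	go func() {
		err := call(ctx)
		if err != nil {
			done <- err
		}
		if err == nil || !errors.Is(err, errRPCFinish) {
			close(done)
		}
	}()

	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	err := callWithTimeout(context.Background(), 50*time.Millisecond, func(ctx context.Context) error {
		time.Sleep(10 * time.Millisecond) // pretend to do the RPC
		return nil
	})
	fmt.Println("result:", err) // result: <nil>
}
```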
pkg/remote/codec/default_codec.go (3 changes: 3 additions, 0 deletions)

```diff
@@ -247,6 +247,9 @@ func checkRPCState(ctx context.Context, message remote.Message) error {
 	if message.RPCRole() == remote.Server {
 		return nil
 	}
+	if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled {
+		return kerrors.ErrRPCFinish
+	}
 	if respOp, ok := ctx.Value(retry.CtxRespOp).(*int32); ok {
 		if !atomic.CompareAndSwapInt32(respOp, retry.OpNo, retry.OpDoing) {
 			// previous call is being handling or done
```
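This hunk adds an early exit to checkRPCState: once the request context is already cancelled or past its deadline, the function returns kerrors.ErrRPCFinish before reaching the CAS on the retry response flag. Below is a self-contained sketch of the same guard; errRPCFinish, ctxRespOp, opNo, and opDoing are hypothetical stand-ins for the Kitex identifiers seen in the diff.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync/atomic"
	"time"
)

// Local stand-ins for kerrors.ErrRPCFinish, retry.CtxRespOp, retry.OpNo and
// retry.OpDoing; the values here are illustrative only.
var errRPCFinish = errors.New("rpc finished by another call")

type ctxKey int

const ctxRespOp ctxKey = 0

const (
	opNo int32 = iota
	opDoing
)

// checkRPCState sketches the decode-side guard: if the context is already
// cancelled or timed out, or another concurrent (retried) call has claimed
// the response slot via CAS, the current decode is abandoned.
func checkRPCState(ctx context.Context) error {
	if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled {
		return errRPCFinish
	}
	if respOp, ok := ctx.Value(ctxRespOp).(*int32); ok {
		if !atomic.CompareAndSwapInt32(respOp, opNo, opDoing) {
			// another call is handling, or has already handled, the response
			return errRPCFinish
		}
	}
	return nil
}

func main() {
	op := opNo
	ctx := context.WithValue(context.Background(), ctxRespOp, &op)

	fmt.Println(checkRPCState(ctx)) // first caller wins the CAS: <nil>
	fmt.Println(checkRPCState(ctx)) // second caller sees opDoing: rpc finished by another call

	ctx2, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
	defer cancel()
	time.Sleep(time.Millisecond)
	fmt.Println(checkRPCState(ctx2)) // deadline exceeded: rpc finished by another call
}
```

Comparing ctx.Err() directly with == mirrors the diff; the CAS ensures that only one of the concurrent calls flips the shared flag from OpNo to OpDoing, so only that call goes on to handle the response.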
