diff --git a/_vendor/src/github.com/pingcap/kvproto/pkg/pdpb/pdpb.pb.go b/_vendor/src/github.com/pingcap/kvproto/pkg/pdpb/pdpb.pb.go index da0ede880a7..2039c377514 100644 --- a/_vendor/src/github.com/pingcap/kvproto/pkg/pdpb/pdpb.pb.go +++ b/_vendor/src/github.com/pingcap/kvproto/pkg/pdpb/pdpb.pb.go @@ -781,6 +781,8 @@ type RegionHeartbeatRequest struct { // Keys read/written during this period. KeysWritten uint64 `protobuf:"varint,8,opt,name=keys_written,json=keysWritten,proto3" json:"keys_written,omitempty"` KeysRead uint64 `protobuf:"varint,9,opt,name=keys_read,json=keysRead,proto3" json:"keys_read,omitempty"` + // Approximate region size. + ApproximateSize uint64 `protobuf:"varint,10,opt,name=approximate_size,json=approximateSize,proto3" json:"approximate_size,omitempty"` } func (m *RegionHeartbeatRequest) Reset() { *m = RegionHeartbeatRequest{} } @@ -851,6 +853,13 @@ func (m *RegionHeartbeatRequest) GetKeysRead() uint64 { return 0 } +func (m *RegionHeartbeatRequest) GetApproximateSize() uint64 { + if m != nil { + return m.ApproximateSize + } + return 0 +} + type ChangePeer struct { Peer *metapb.Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` // FIXME: replace with actual ConfChangeType once eraftpb uses proto3. @@ -1097,6 +1106,10 @@ type StoreStats struct { BytesWritten uint64 `protobuf:"varint,11,opt,name=bytes_written,json=bytesWritten,proto3" json:"bytes_written,omitempty"` // Keys written for the store. KeysWritten uint64 `protobuf:"varint,12,opt,name=keys_written,json=keysWritten,proto3" json:"keys_written,omitempty"` + // Bytes read for the store. + BytesRead uint64 `protobuf:"varint,13,opt,name=bytes_read,json=bytesRead,proto3" json:"bytes_read,omitempty"` + // Keys read for the store. + KeysRead uint64 `protobuf:"varint,14,opt,name=keys_read,json=keysRead,proto3" json:"keys_read,omitempty"` } func (m *StoreStats) Reset() { *m = StoreStats{} } @@ -1188,6 +1201,20 @@ func (m *StoreStats) GetKeysWritten() uint64 { return 0 } +func (m *StoreStats) GetBytesRead() uint64 { + if m != nil { + return m.BytesRead + } + return 0 +} + +func (m *StoreStats) GetKeysRead() uint64 { + if m != nil { + return m.KeysRead + } + return 0 +} + type StoreHeartbeatRequest struct { Header *RequestHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` Stats *StoreStats `protobuf:"bytes,2,opt,name=stats" json:"stats,omitempty"` @@ -2909,6 +2936,11 @@ func (m *RegionHeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.KeysRead)) } + if m.ApproximateSize != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintPdpb(dAtA, i, uint64(m.ApproximateSize)) + } return i, nil } @@ -3290,6 +3322,16 @@ func (m *StoreStats) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintPdpb(dAtA, i, uint64(m.KeysWritten)) } + if m.BytesRead != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintPdpb(dAtA, i, uint64(m.BytesRead)) + } + if m.KeysRead != 0 { + dAtA[i] = 0x70 + i++ + i = encodeVarintPdpb(dAtA, i, uint64(m.KeysRead)) + } return i, nil } @@ -3788,6 +3830,9 @@ func (m *RegionHeartbeatRequest) Size() (n int) { if m.KeysRead != 0 { n += 1 + sovPdpb(uint64(m.KeysRead)) } + if m.ApproximateSize != 0 { + n += 1 + sovPdpb(uint64(m.ApproximateSize)) + } return n } @@ -3944,6 +3989,12 @@ func (m *StoreStats) Size() (n int) { if m.KeysWritten != 0 { n += 1 + sovPdpb(uint64(m.KeysWritten)) } + if m.BytesRead != 0 { + n += 1 + sovPdpb(uint64(m.BytesRead)) + } + if m.KeysRead != 0 { + n += 1 + sovPdpb(uint64(m.KeysRead)) + } return n } @@ -7115,6 +7166,25 @@ func (m 
*RegionHeartbeatRequest) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ApproximateSize", wireType) + } + m.ApproximateSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ApproximateSize |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipPdpb(dAtA[iNdEx:]) @@ -8325,6 +8395,44 @@ func (m *StoreStats) Unmarshal(dAtA []byte) error { break } } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesRead", wireType) + } + m.BytesRead = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BytesRead |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeysRead", wireType) + } + m.KeysRead = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPdpb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeysRead |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipPdpb(dAtA[iNdEx:]) @@ -8653,113 +8761,115 @@ var ( func init() { proto.RegisterFile("pdpb.proto", fileDescriptorPdpb) } var fileDescriptorPdpb = []byte{ - // 1725 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xcd, 0x6e, 0x1b, 0xc9, - 0x11, 0xd6, 0xf0, 0x9f, 0xc5, 0x5f, 0xb7, 0x65, 0x69, 0x96, 0xb6, 0x14, 0x6d, 0x7b, 0x11, 0x38, - 0xce, 0x9a, 0xf1, 0x2a, 0x48, 0x10, 0x60, 0xb1, 0xc1, 0x52, 0x3f, 0x5e, 0x0b, 0x5e, 0x8b, 0x42, - 0x93, 0x8b, 0xc5, 0x5e, 0xc2, 0x0c, 0x39, 0x6d, 0x6a, 0x22, 0x72, 0x66, 0x76, 0xba, 0x29, 0x81, - 0x7b, 0xca, 0x29, 0x97, 0x04, 0x48, 0x4e, 0x41, 0x5e, 0x23, 0x0f, 0x90, 0x7b, 0x8e, 0x79, 0x84, - 0xc0, 0x79, 0x8b, 0x9c, 0x82, 0xfe, 0x99, 0xe1, 0xcc, 0x90, 0x72, 0x94, 0x71, 0xf6, 0xc4, 0xe9, - 0xaa, 0xea, 0xaf, 0xab, 0xbe, 0xae, 0xea, 0xee, 0x22, 0x80, 0x6f, 0xfb, 0xe3, 0xae, 0x1f, 0x78, - 0xdc, 0x43, 0x05, 0xf1, 0xdd, 0xa9, 0xcf, 0x29, 0xb7, 0x42, 0x59, 0x67, 0x7b, 0xea, 0x4d, 0x3d, - 0xf9, 0xf9, 0x13, 0xf1, 0xa5, 0xa4, 0xb8, 0x0b, 0x0d, 0x42, 0xbf, 0x5d, 0x50, 0xc6, 0x5f, 0x52, - 0xcb, 0xa6, 0x01, 0xda, 0x03, 0x98, 0xcc, 0x16, 0x8c, 0xd3, 0x60, 0xe4, 0xd8, 0xa6, 0x71, 0x60, - 0x3c, 0x29, 0x90, 0xaa, 0x96, 0x9c, 0xd9, 0x98, 0x40, 0x93, 0x50, 0xe6, 0x7b, 0x2e, 0xa3, 0x77, - 0x9a, 0x80, 0x3e, 0x84, 0x22, 0x0d, 0x02, 0x2f, 0x30, 0x73, 0x07, 0xc6, 0x93, 0xda, 0x61, 0xad, - 0x2b, 0xdd, 0x3c, 0x15, 0x22, 0xa2, 0x34, 0xf8, 0x05, 0x14, 0xe5, 0x18, 0x3d, 0x86, 0x02, 0x5f, - 0xfa, 0x54, 0x82, 0x34, 0x0f, 0x5b, 0x31, 0xd3, 0xe1, 0xd2, 0xa7, 0x44, 0x2a, 0x91, 0x09, 0xe5, - 0x39, 0x65, 0xcc, 0x9a, 0x52, 0x09, 0x59, 0x25, 0xe1, 0x10, 0xf7, 0x01, 0x86, 0xcc, 0xd3, 0xe1, - 0xa0, 0x1f, 0x43, 0xe9, 0x52, 0x7a, 0x28, 0xe1, 0x6a, 0x87, 0xf7, 0x15, 0x5c, 0x22, 0x5a, 0xa2, - 0x4d, 0xd0, 0x36, 0x14, 0x27, 0xde, 0xc2, 0xe5, 0x12, 0xb2, 0x41, 0xd4, 0x00, 0xf7, 0xa0, 0x3a, - 0x74, 0xe6, 0x94, 0x71, 0x6b, 0xee, 0xa3, 0x0e, 0x54, 0xfc, 0xcb, 0x25, 0x73, 0x26, 0xd6, 0x4c, - 0x22, 0xe6, 0x49, 0x34, 0x16, 0x3e, 0xcd, 0xbc, 0xa9, 0x54, 0xe5, 0xa4, 0x2a, 0x1c, 0xe2, 0xdf, - 0x1a, 0x50, 0x93, 0x4e, 0x29, 0xce, 0xd0, 0xc7, 0x29, 0xaf, 0xb6, 
0x43, 0xaf, 0xe2, 0x9c, 0xbe, - 0xdb, 0x2d, 0xf4, 0x0c, 0xaa, 0x3c, 0x74, 0xcb, 0xcc, 0x4b, 0x18, 0xcd, 0x55, 0xe4, 0x2d, 0x59, - 0x59, 0xe0, 0x3f, 0x18, 0xd0, 0x3e, 0xf2, 0x3c, 0xce, 0x78, 0x60, 0xf9, 0x99, 0xd8, 0x79, 0x0c, - 0x45, 0xc6, 0xbd, 0x80, 0xea, 0x3d, 0x6c, 0x74, 0x75, 0x62, 0x0d, 0x84, 0x90, 0x28, 0x1d, 0xfa, - 0x21, 0x94, 0x02, 0x3a, 0x75, 0x3c, 0x57, 0xbb, 0xd4, 0x0c, 0xad, 0x88, 0x94, 0x12, 0xad, 0xc5, - 0x3d, 0xb8, 0x17, 0xf3, 0x26, 0x0b, 0x2d, 0xf8, 0x04, 0x1e, 0x9c, 0xb1, 0x08, 0xc4, 0xa7, 0x76, - 0x96, 0xa8, 0xf0, 0x6f, 0x60, 0x27, 0x8d, 0x92, 0x69, 0x93, 0x30, 0xd4, 0xc7, 0x31, 0x14, 0x49, - 0x52, 0x85, 0x24, 0x64, 0xf8, 0x33, 0x68, 0xf6, 0x66, 0x33, 0x6f, 0x72, 0x76, 0x92, 0xc9, 0xd5, - 0x3e, 0xb4, 0xa2, 0xe9, 0x99, 0x7c, 0x6c, 0x42, 0xce, 0x51, 0x9e, 0x15, 0x48, 0xce, 0xb1, 0xf1, - 0x37, 0xd0, 0xfa, 0x82, 0x72, 0xb5, 0x7f, 0x59, 0x32, 0xe2, 0x03, 0xa8, 0xc8, 0x5d, 0x1f, 0x45, - 0xa8, 0x65, 0x39, 0x3e, 0xb3, 0x31, 0x85, 0xf6, 0x0a, 0x3a, 0x93, 0xb3, 0x77, 0x49, 0x37, 0x3c, - 0x81, 0xd6, 0xc5, 0xe2, 0x3d, 0x22, 0xb8, 0xd3, 0x22, 0x9f, 0x43, 0x7b, 0xb5, 0x48, 0xa6, 0x54, - 0xfd, 0x95, 0x64, 0x43, 0x97, 0x40, 0x16, 0x3f, 0xf7, 0x00, 0x54, 0xe1, 0x8c, 0xae, 0xe8, 0x52, - 0x3a, 0x5b, 0x27, 0x55, 0x25, 0x79, 0x45, 0x97, 0xf8, 0x8f, 0x06, 0xdc, 0x8b, 0x2d, 0x90, 0x89, - 0xef, 0x55, 0xe5, 0xe6, 0xde, 0x55, 0xb9, 0xe8, 0x23, 0x28, 0xcd, 0x14, 0xaa, 0xaa, 0xf0, 0x7a, - 0x68, 0x77, 0x41, 0x05, 0x9a, 0xd2, 0xe1, 0x5f, 0xc3, 0x76, 0xe4, 0xd0, 0xd1, 0x32, 0x5b, 0xc2, - 0xa3, 0x87, 0xa0, 0x63, 0x5c, 0x25, 0x58, 0x45, 0x09, 0xce, 0x6c, 0xfc, 0x02, 0x76, 0xbf, 0xa0, - 0xfc, 0x58, 0x5d, 0x31, 0xc7, 0x9e, 0xfb, 0xc6, 0x99, 0x66, 0xaa, 0x2a, 0x06, 0xe6, 0x3a, 0x4e, - 0x26, 0x06, 0x7f, 0x04, 0x65, 0x7d, 0xe3, 0x69, 0x0a, 0x5b, 0x21, 0x35, 0x1a, 0x9d, 0x84, 0x7a, - 0xfc, 0x2d, 0xec, 0x5e, 0x2c, 0xde, 0xdf, 0xf9, 0xff, 0x65, 0xc9, 0x97, 0x60, 0xae, 0x2f, 0x99, - 0x29, 0x9b, 0x6f, 0xa0, 0xf4, 0x9a, 0xce, 0xc7, 0x34, 0x40, 0x08, 0x0a, 0xae, 0x35, 0x57, 0x57, - 0x75, 0x95, 0xc8, 0x6f, 0xb1, 0x69, 0x73, 0xa9, 0x8d, 0x6d, 0x9a, 0x12, 0x9c, 0xd9, 0x42, 0xe9, - 0x53, 0x1a, 0x8c, 0x16, 0xc1, 0x8c, 0x99, 0xf9, 0x83, 0xfc, 0x93, 0x2a, 0xa9, 0x08, 0xc1, 0x57, - 0xc1, 0x8c, 0xa1, 0x1f, 0x40, 0x6d, 0x32, 0x73, 0xa8, 0xcb, 0x95, 0xba, 0x20, 0xd5, 0xa0, 0x44, - 0xc2, 0x00, 0x7f, 0x2e, 0xb3, 0x5c, 0xad, 0xcd, 0x32, 0x6d, 0xf6, 0x9f, 0x0c, 0x40, 0x71, 0x88, - 0x8c, 0x95, 0x52, 0x56, 0x01, 0x31, 0x33, 0x77, 0x90, 0x97, 0x25, 0x20, 0xcd, 0x15, 0x2a, 0x09, - 0x95, 0x1b, 0x2a, 0x25, 0x6e, 0x16, 0x56, 0xca, 0x05, 0x54, 0x45, 0xe5, 0x0c, 0xb8, 0xc5, 0x19, - 0x3a, 0x80, 0x82, 0xa0, 0x43, 0xbb, 0x91, 0x2c, 0x2d, 0xa9, 0x41, 0x1f, 0x42, 0xdd, 0xf6, 0x6e, - 0xdc, 0x11, 0xa3, 0x13, 0xcf, 0xb5, 0x99, 0x66, 0xb8, 0x26, 0x64, 0x03, 0x25, 0xc2, 0xff, 0xce, - 0xc1, 0x8e, 0xaa, 0xbc, 0x97, 0xd4, 0x0a, 0xf8, 0x98, 0x5a, 0x3c, 0x53, 0x72, 0xfd, 0x5f, 0x4f, - 0x04, 0xd4, 0x05, 0x90, 0x8e, 0x8b, 0x28, 0xd4, 0xe6, 0x46, 0x0f, 0x96, 0x28, 0x7e, 0x52, 0x15, - 0x26, 0x62, 0xc8, 0xd0, 0x27, 0xd0, 0xf0, 0xa9, 0x6b, 0x3b, 0xee, 0x54, 0x4f, 0x29, 0x6a, 0xae, - 0xe3, 0xe0, 0x75, 0x6d, 0xa2, 0xa6, 0x3c, 0x86, 0xc6, 0x78, 0xc9, 0x29, 0x1b, 0xdd, 0x04, 0x0e, - 0xe7, 0xd4, 0x35, 0x4b, 0x92, 0x9c, 0xba, 0x14, 0x7e, 0xad, 0x64, 0xe2, 0x28, 0x55, 0x46, 0x01, - 0xb5, 0x6c, 0xb3, 0xac, 0x5e, 0xaa, 0x52, 0x42, 0xa8, 0x25, 0x5e, 0xaa, 0xf5, 0x2b, 0xba, 0x5c, - 0x41, 0x54, 0x14, 0xbf, 0x42, 0x16, 0x22, 0x3c, 0x84, 0xaa, 0x34, 0x91, 0x00, 0x55, 0x95, 0xe1, - 0x42, 0x20, 0xe6, 0x63, 0x0a, 0x70, 0x7c, 0x69, 0xb9, 0x53, 0x2a, 0x5c, 0xba, 0xc3, 0x7e, 
0xfe, - 0x0c, 0x6a, 0x13, 0x69, 0x3f, 0x92, 0x8f, 0xde, 0x9c, 0x7c, 0xf4, 0xea, 0xfc, 0x13, 0x55, 0xaa, - 0xc0, 0xe4, 0xcb, 0x17, 0x26, 0xd1, 0x37, 0x3e, 0x84, 0xe6, 0x30, 0xb0, 0x5c, 0xf6, 0x86, 0x06, - 0x5f, 0x2a, 0x7e, 0xff, 0xeb, 0x52, 0xf8, 0x6f, 0x39, 0xd8, 0x5d, 0xcb, 0x8b, 0x4c, 0x15, 0xf0, - 0x49, 0xe4, 0xb4, 0x5c, 0x52, 0xa5, 0x47, 0x5b, 0x3b, 0x1d, 0x45, 0x1f, 0x3a, 0x2c, 0x99, 0xf8, - 0x0c, 0x5a, 0x5c, 0x3b, 0x3c, 0x4a, 0x64, 0x8b, 0x5e, 0x29, 0x19, 0x0d, 0x69, 0xf2, 0x64, 0x74, - 0x89, 0xab, 0xa0, 0x90, 0xbc, 0x0a, 0xd0, 0xcf, 0xa1, 0xae, 0x95, 0xd4, 0xf7, 0x26, 0x97, 0x66, - 0x51, 0xe7, 0x76, 0x22, 0x5d, 0x4f, 0x85, 0x8a, 0xd4, 0x82, 0xd5, 0x00, 0x3d, 0x83, 0x1a, 0xb7, - 0x82, 0x29, 0xe5, 0x2a, 0x8c, 0xd2, 0x06, 0xe6, 0x40, 0x19, 0x88, 0x6f, 0xfc, 0x06, 0x5a, 0x3d, - 0x76, 0x35, 0xf0, 0x67, 0xce, 0xf7, 0x5a, 0x4f, 0xf8, 0x77, 0x06, 0xb4, 0x57, 0x0b, 0x65, 0x7c, - 0x8d, 0x36, 0x5c, 0x7a, 0x33, 0x4a, 0xdf, 0x9e, 0x35, 0x97, 0xde, 0x90, 0x90, 0xb5, 0x03, 0xa8, - 0x0b, 0x1b, 0x79, 0x1e, 0x3b, 0xb6, 0x3a, 0x8e, 0x0b, 0x04, 0x5c, 0x7a, 0x23, 0xa2, 0x3d, 0xb3, - 0x19, 0xfe, 0xbd, 0x01, 0x88, 0x50, 0xdf, 0x0b, 0x78, 0xf6, 0xa0, 0x31, 0x14, 0x66, 0xf4, 0x0d, - 0xbf, 0x25, 0x64, 0xa9, 0x43, 0x1f, 0x41, 0x31, 0x70, 0xa6, 0x97, 0xfc, 0x96, 0x9e, 0x41, 0x29, - 0xf1, 0x31, 0xdc, 0x4f, 0x38, 0x93, 0xe9, 0xee, 0xfa, 0x6b, 0x1e, 0x40, 0xbe, 0xe4, 0xd4, 0x79, - 0x1b, 0x7f, 0xc1, 0x1a, 0x89, 0x17, 0xac, 0xe8, 0xf4, 0x26, 0x96, 0x6f, 0x4d, 0x1c, 0xbe, 0x0c, - 0xaf, 0xb1, 0x70, 0x8c, 0x1e, 0x41, 0xd5, 0xba, 0xb6, 0x9c, 0x99, 0x35, 0x9e, 0x51, 0xe9, 0x74, - 0x81, 0xac, 0x04, 0xe2, 0x08, 0xd1, 0xc4, 0xab, 0xb6, 0xad, 0x20, 0xdb, 0x36, 0x9d, 0x79, 0xc7, - 0xb2, 0x79, 0xfb, 0x18, 0x10, 0xd3, 0x87, 0x1b, 0x73, 0x2d, 0x5f, 0x1b, 0x16, 0xa5, 0x61, 0x5b, - 0x6b, 0x06, 0xae, 0xe5, 0x2b, 0xeb, 0xe7, 0xb0, 0x1d, 0xd0, 0x09, 0x75, 0xae, 0x53, 0xf6, 0x25, - 0x69, 0x8f, 0x22, 0xdd, 0x6a, 0xc6, 0x1e, 0x00, 0xe3, 0x56, 0xc0, 0x47, 0xa2, 0x01, 0x94, 0x87, - 0x5c, 0x83, 0x54, 0xa5, 0x44, 0x34, 0x87, 0xa8, 0x0b, 0xf7, 0x2d, 0xdf, 0x9f, 0x2d, 0x53, 0x78, - 0x15, 0x69, 0x77, 0x2f, 0x54, 0xad, 0xe0, 0x76, 0xa1, 0xec, 0xb0, 0xd1, 0x78, 0xc1, 0x96, 0xf2, - 0xbc, 0xab, 0x90, 0x92, 0xc3, 0x8e, 0x16, 0x6c, 0x29, 0xca, 0x72, 0xc1, 0xa8, 0x3d, 0x62, 0xce, - 0x77, 0xd4, 0x04, 0xc5, 0x92, 0x10, 0x0c, 0x9c, 0xef, 0xe8, 0xfa, 0x71, 0x5c, 0xdb, 0x70, 0x1c, - 0xa7, 0xcf, 0xdb, 0xfa, 0xda, 0x79, 0x8b, 0x67, 0xf0, 0x40, 0x6e, 0xd9, 0xfb, 0xde, 0x66, 0x45, - 0x26, 0xf6, 0x3c, 0x79, 0x5a, 0xad, 0x72, 0x81, 0x28, 0x35, 0x7e, 0x01, 0x3b, 0xe9, 0xd5, 0xb2, - 0x64, 0xda, 0x53, 0x0a, 0xd5, 0xe8, 0x4f, 0x0b, 0x54, 0x82, 0x5c, 0xff, 0x55, 0x7b, 0x0b, 0xd5, - 0xa0, 0xfc, 0xd5, 0xf9, 0xab, 0xf3, 0xfe, 0xd7, 0xe7, 0x6d, 0x03, 0x6d, 0x43, 0xfb, 0xbc, 0x3f, - 0x1c, 0x1d, 0xf5, 0xfb, 0xc3, 0xc1, 0x90, 0xf4, 0x2e, 0x2e, 0x4e, 0x4f, 0xda, 0x39, 0x74, 0x1f, - 0x5a, 0x83, 0x61, 0x9f, 0x9c, 0x8e, 0x86, 0xfd, 0xd7, 0x47, 0x83, 0x61, 0xff, 0xfc, 0xb4, 0x9d, - 0x47, 0x26, 0x6c, 0xf7, 0xbe, 0x24, 0xa7, 0xbd, 0x93, 0x6f, 0x92, 0xe6, 0x85, 0xa7, 0xcf, 0xa0, - 0x99, 0xbc, 0x26, 0xc4, 0x1a, 0x3d, 0xdb, 0x3e, 0xf7, 0x6c, 0xda, 0xde, 0x42, 0x4d, 0x00, 0x42, - 0xe7, 0xde, 0x35, 0x95, 0x63, 0xe3, 0xf0, 0xcf, 0x15, 0xc8, 0x5d, 0x9c, 0xa0, 0x1e, 0xc0, 0xea, - 0x19, 0x84, 0x76, 0x55, 0x20, 0x6b, 0x6f, 0xab, 0x8e, 0xb9, 0xae, 0x50, 0xb1, 0xe2, 0x2d, 0xf4, - 0x1c, 0xf2, 0x43, 0xe6, 0x21, 0xcd, 0xe3, 0xea, 0x2f, 0x97, 0xce, 0xbd, 0x98, 0x24, 0xb4, 0x7e, - 0x62, 0x3c, 0x37, 0xd0, 0x2f, 0xa1, 0x1a, 0x35, 0xda, 0x68, 0x47, 0x59, 0xa5, 0xff, 0x92, 0xe8, - 0xec, 0xae, 0xc9, 
0xa3, 0x15, 0x5f, 0x43, 0x33, 0xd9, 0xaa, 0xa3, 0x87, 0xca, 0x78, 0xe3, 0xdf, - 0x00, 0x9d, 0x47, 0x9b, 0x95, 0x11, 0xdc, 0x2f, 0xa0, 0xac, 0xdb, 0x69, 0xa4, 0x77, 0x32, 0xd9, - 0x9c, 0x77, 0x1e, 0xa4, 0xa4, 0xd1, 0xcc, 0x4f, 0xa1, 0x12, 0x36, 0xb7, 0xe8, 0x41, 0x44, 0x51, - 0xbc, 0x0b, 0xed, 0xec, 0xa4, 0xc5, 0xf1, 0xc9, 0x61, 0x37, 0x19, 0x4e, 0x4e, 0xb5, 0xb0, 0xe1, - 0xe4, 0x74, 0xd3, 0xa9, 0x28, 0x48, 0x26, 0x67, 0x48, 0xc1, 0xc6, 0x02, 0x09, 0x29, 0xd8, 0x9c, - 0xcf, 0x78, 0x0b, 0x0d, 0xa1, 0x95, 0x7a, 0x10, 0xa0, 0x47, 0x61, 0x52, 0x6f, 0x7a, 0x3f, 0x76, - 0xf6, 0x6e, 0xd1, 0xa6, 0xf7, 0x39, 0xea, 0xfd, 0xd0, 0x8a, 0x88, 0x44, 0xfb, 0xdb, 0xd9, 0x5d, - 0x93, 0x47, 0x5e, 0xbd, 0x80, 0x46, 0xa2, 0x77, 0x44, 0x9d, 0x94, 0x6d, 0xac, 0xa1, 0x7c, 0x17, - 0xce, 0xa7, 0x50, 0x09, 0xaf, 0xd1, 0x90, 0xe9, 0xd4, 0xfd, 0x1d, 0x32, 0x9d, 0xbe, 0x6d, 0xf1, - 0x16, 0x3a, 0x81, 0x5a, 0xec, 0xb6, 0x41, 0x66, 0x18, 0x78, 0xfa, 0x36, 0xec, 0x7c, 0xb0, 0x41, - 0x13, 0xa1, 0x0c, 0x64, 0xe3, 0x9f, 0x68, 0xba, 0xd0, 0x5e, 0xe4, 0xf1, 0xa6, 0xfe, 0xaf, 0xb3, - 0x7f, 0x9b, 0x3a, 0x0e, 0x9a, 0xee, 0xe4, 0x42, 0xd0, 0x5b, 0x9a, 0xca, 0x10, 0xf4, 0xb6, 0x06, - 0x10, 0x6f, 0x1d, 0x3d, 0xfd, 0xfb, 0xdb, 0x7d, 0xe3, 0x1f, 0x6f, 0xf7, 0x8d, 0x7f, 0xbe, 0xdd, - 0x37, 0xfe, 0xf2, 0xaf, 0xfd, 0x2d, 0x30, 0x27, 0xde, 0xbc, 0xeb, 0x3b, 0xee, 0x74, 0x62, 0xf9, - 0x5d, 0xee, 0x5c, 0x5d, 0x77, 0xaf, 0xae, 0xe5, 0xdf, 0xc5, 0xe3, 0x92, 0xfc, 0xf9, 0xe9, 0x7f, - 0x02, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xdd, 0x36, 0xc7, 0x6d, 0x16, 0x00, 0x00, + // 1757 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, + 0x11, 0xd6, 0xf0, 0xcd, 0xe2, 0xd3, 0x6d, 0x59, 0x9a, 0xa5, 0x2d, 0x45, 0xdb, 0x5e, 0x04, 0x5e, + 0x67, 0xcd, 0x78, 0x15, 0x24, 0x08, 0xb0, 0xd8, 0x60, 0xa9, 0x87, 0xd7, 0x82, 0xd7, 0xa2, 0xd0, + 0xe4, 0x62, 0xb1, 0x97, 0x30, 0x43, 0x4e, 0x9b, 0x9a, 0x88, 0x9c, 0x99, 0x9d, 0x6e, 0x4a, 0xa1, + 0x4f, 0x39, 0xe5, 0x92, 0x00, 0xc9, 0x29, 0xc8, 0x1f, 0xc8, 0xcf, 0xc8, 0x3d, 0xc7, 0xfc, 0x84, + 0xc0, 0x39, 0xe7, 0x3f, 0x04, 0xfd, 0x98, 0xe1, 0xcc, 0x90, 0x72, 0x9c, 0x71, 0xf6, 0x44, 0x76, + 0x55, 0xf5, 0xd7, 0x55, 0xd5, 0xf5, 0xe8, 0x1a, 0x00, 0xdf, 0xf6, 0xc7, 0x5d, 0x3f, 0xf0, 0xb8, + 0x87, 0x0a, 0xe2, 0x7f, 0xa7, 0x3e, 0xa7, 0xdc, 0x0a, 0x69, 0x9d, 0xed, 0xa9, 0x37, 0xf5, 0xe4, + 0xdf, 0x1f, 0x8b, 0x7f, 0x8a, 0x8a, 0xbb, 0xd0, 0x20, 0xf4, 0xbb, 0x05, 0x65, 0xfc, 0x39, 0xb5, + 0x6c, 0x1a, 0xa0, 0x3d, 0x80, 0xc9, 0x6c, 0xc1, 0x38, 0x0d, 0x46, 0x8e, 0x6d, 0x1a, 0x07, 0xc6, + 0xa3, 0x02, 0xa9, 0x6a, 0xca, 0x99, 0x8d, 0x09, 0x34, 0x09, 0x65, 0xbe, 0xe7, 0x32, 0xfa, 0x4e, + 0x1b, 0xd0, 0x87, 0x50, 0xa4, 0x41, 0xe0, 0x05, 0x66, 0xee, 0xc0, 0x78, 0x54, 0x3b, 0xac, 0x75, + 0xa5, 0x9a, 0xa7, 0x82, 0x44, 0x14, 0x07, 0x3f, 0x83, 0xa2, 0x5c, 0xa3, 0x87, 0x50, 0xe0, 0x4b, + 0x9f, 0x4a, 0x90, 0xe6, 0x61, 0x2b, 0x26, 0x3a, 0x5c, 0xfa, 0x94, 0x48, 0x26, 0x32, 0xa1, 0x3c, + 0xa7, 0x8c, 0x59, 0x53, 0x2a, 0x21, 0xab, 0x24, 0x5c, 0xe2, 0x3e, 0xc0, 0x90, 0x79, 0xda, 0x1c, + 0xf4, 0x23, 0x28, 0x5d, 0x4a, 0x0d, 0x25, 0x5c, 0xed, 0xf0, 0xae, 0x82, 0x4b, 0x58, 0x4b, 0xb4, + 0x08, 0xda, 0x86, 0xe2, 0xc4, 0x5b, 0xb8, 0x5c, 0x42, 0x36, 0x88, 0x5a, 0xe0, 0x1e, 0x54, 0x87, + 0xce, 0x9c, 0x32, 0x6e, 0xcd, 0x7d, 0xd4, 0x81, 0x8a, 0x7f, 0xb9, 0x64, 0xce, 0xc4, 0x9a, 0x49, + 0xc4, 0x3c, 0x89, 0xd6, 0x42, 0xa7, 0x99, 0x37, 0x95, 0xac, 0x9c, 0x64, 0x85, 0x4b, 0xfc, 0x5b, + 0x03, 0x6a, 0x52, 0x29, 0xe5, 0x33, 0xf4, 0x49, 0x4a, 0xab, 0xed, 0x50, 0xab, 0xb8, 0x4f, 0xdf, + 0xae, 0x16, 
0x7a, 0x02, 0x55, 0x1e, 0xaa, 0x65, 0xe6, 0x25, 0x8c, 0xf6, 0x55, 0xa4, 0x2d, 0x59, + 0x49, 0xe0, 0x3f, 0x18, 0xd0, 0x3e, 0xf2, 0x3c, 0xce, 0x78, 0x60, 0xf9, 0x99, 0xbc, 0xf3, 0x10, + 0x8a, 0x8c, 0x7b, 0x01, 0xd5, 0x77, 0xd8, 0xe8, 0xea, 0xc0, 0x1a, 0x08, 0x22, 0x51, 0x3c, 0xf4, + 0x43, 0x28, 0x05, 0x74, 0xea, 0x78, 0xae, 0x56, 0xa9, 0x19, 0x4a, 0x11, 0x49, 0x25, 0x9a, 0x8b, + 0x7b, 0x70, 0x27, 0xa6, 0x4d, 0x16, 0xb7, 0xe0, 0x13, 0xb8, 0x77, 0xc6, 0x22, 0x10, 0x9f, 0xda, + 0x59, 0xac, 0xc2, 0xbf, 0x86, 0x9d, 0x34, 0x4a, 0xa6, 0x4b, 0xc2, 0x50, 0x1f, 0xc7, 0x50, 0xa4, + 0x93, 0x2a, 0x24, 0x41, 0xc3, 0x9f, 0x43, 0xb3, 0x37, 0x9b, 0x79, 0x93, 0xb3, 0x93, 0x4c, 0xaa, + 0xf6, 0xa1, 0x15, 0x6d, 0xcf, 0xa4, 0x63, 0x13, 0x72, 0x8e, 0xd2, 0xac, 0x40, 0x72, 0x8e, 0x8d, + 0xbf, 0x85, 0xd6, 0x97, 0x94, 0xab, 0xfb, 0xcb, 0x12, 0x11, 0x1f, 0x40, 0x45, 0xde, 0xfa, 0x28, + 0x42, 0x2d, 0xcb, 0xf5, 0x99, 0x8d, 0x29, 0xb4, 0x57, 0xd0, 0x99, 0x94, 0x7d, 0x97, 0x70, 0xc3, + 0x13, 0x68, 0x5d, 0x2c, 0xde, 0xc3, 0x82, 0x77, 0x3a, 0xe4, 0x0b, 0x68, 0xaf, 0x0e, 0xc9, 0x14, + 0xaa, 0xbf, 0x94, 0xde, 0xd0, 0x29, 0x90, 0x45, 0xcf, 0x3d, 0x00, 0x95, 0x38, 0xa3, 0x2b, 0xba, + 0x94, 0xca, 0xd6, 0x49, 0x55, 0x51, 0x5e, 0xd0, 0x25, 0xfe, 0xa3, 0x01, 0x77, 0x62, 0x07, 0x64, + 0xf2, 0xf7, 0x2a, 0x73, 0x73, 0x6f, 0xcb, 0x5c, 0xf4, 0x11, 0x94, 0x66, 0x0a, 0x55, 0x65, 0x78, + 0x3d, 0x94, 0xbb, 0xa0, 0x02, 0x4d, 0xf1, 0xf0, 0xaf, 0x60, 0x3b, 0x52, 0xe8, 0x68, 0x99, 0x2d, + 0xe0, 0xd1, 0x7d, 0xd0, 0x36, 0xae, 0x02, 0xac, 0xa2, 0x08, 0x67, 0x36, 0x7e, 0x06, 0xbb, 0x5f, + 0x52, 0x7e, 0xac, 0x5a, 0xcc, 0xb1, 0xe7, 0xbe, 0x72, 0xa6, 0x99, 0xb2, 0x8a, 0x81, 0xb9, 0x8e, + 0x93, 0xc9, 0x83, 0x1f, 0x43, 0x59, 0x77, 0x3c, 0xed, 0xc2, 0x56, 0xe8, 0x1a, 0x8d, 0x4e, 0x42, + 0x3e, 0xfe, 0x0e, 0x76, 0x2f, 0x16, 0xef, 0xaf, 0xfc, 0xff, 0x72, 0xe4, 0x73, 0x30, 0xd7, 0x8f, + 0xcc, 0x14, 0xcd, 0x37, 0x50, 0x7a, 0x49, 0xe7, 0x63, 0x1a, 0x20, 0x04, 0x05, 0xd7, 0x9a, 0xab, + 0x56, 0x5d, 0x25, 0xf2, 0xbf, 0xb8, 0xb4, 0xb9, 0xe4, 0xc6, 0x2e, 0x4d, 0x11, 0xce, 0x6c, 0xc1, + 0xf4, 0x29, 0x0d, 0x46, 0x8b, 0x60, 0xc6, 0xcc, 0xfc, 0x41, 0xfe, 0x51, 0x95, 0x54, 0x04, 0xe1, + 0xeb, 0x60, 0xc6, 0xd0, 0x0f, 0xa0, 0x36, 0x99, 0x39, 0xd4, 0xe5, 0x8a, 0x5d, 0x90, 0x6c, 0x50, + 0x24, 0x21, 0x80, 0xbf, 0x90, 0x51, 0xae, 0xce, 0x66, 0x99, 0x2e, 0xfb, 0x4f, 0x06, 0xa0, 0x38, + 0x44, 0xc6, 0x4c, 0x29, 0x2b, 0x83, 0x98, 0x99, 0x3b, 0xc8, 0xcb, 0x14, 0x90, 0xe2, 0x0a, 0x95, + 0x84, 0xcc, 0x0d, 0x99, 0x12, 0x17, 0x0b, 0x33, 0xe5, 0x02, 0xaa, 0x22, 0x73, 0x06, 0xdc, 0xe2, + 0x0c, 0x1d, 0x40, 0x41, 0xb8, 0x43, 0xab, 0x91, 0x4c, 0x2d, 0xc9, 0x41, 0x1f, 0x42, 0xdd, 0xf6, + 0x6e, 0xdc, 0x11, 0xa3, 0x13, 0xcf, 0xb5, 0x99, 0xf6, 0x70, 0x4d, 0xd0, 0x06, 0x8a, 0x84, 0xff, + 0x9a, 0x87, 0x1d, 0x95, 0x79, 0xcf, 0xa9, 0x15, 0xf0, 0x31, 0xb5, 0x78, 0xa6, 0xe0, 0xfa, 0xbf, + 0x56, 0x04, 0xd4, 0x05, 0x90, 0x8a, 0x0b, 0x2b, 0xd4, 0xe5, 0x46, 0x0f, 0x96, 0xc8, 0x7e, 0x52, + 0x15, 0x22, 0x62, 0xc9, 0xd0, 0xa7, 0xd0, 0xf0, 0xa9, 0x6b, 0x3b, 0xee, 0x54, 0x6f, 0x29, 0x6a, + 0x5f, 0xc7, 0xc1, 0xeb, 0x5a, 0x44, 0x6d, 0x79, 0x08, 0x8d, 0xf1, 0x92, 0x53, 0x36, 0xba, 0x09, + 0x1c, 0xce, 0xa9, 0x6b, 0x96, 0xa4, 0x73, 0xea, 0x92, 0xf8, 0x8d, 0xa2, 0x89, 0x52, 0xaa, 0x84, + 0x02, 0x6a, 0xd9, 0x66, 0x59, 0xbd, 0x54, 0x25, 0x85, 0x50, 0x4b, 0xbc, 0x54, 0xeb, 0x57, 0x74, + 0xb9, 0x82, 0xa8, 0x28, 0xff, 0x0a, 0x5a, 0x88, 0x70, 0x1f, 0xaa, 0x52, 0x44, 0x02, 0x54, 0x55, + 0x84, 0x0b, 0x82, 0xdc, 0xff, 0x31, 0xb4, 0x2d, 0xdf, 0x0f, 0xbc, 0xdf, 0x38, 0x73, 0x8b, 0xd3, + 0x11, 0x73, 0x5e, 0x53, 0x13, 0xa4, 
0x4c, 0x2b, 0x46, 0x1f, 0x38, 0xaf, 0x29, 0xa6, 0x00, 0xc7, + 0x97, 0x96, 0x3b, 0xa5, 0x42, 0xfb, 0x77, 0xb8, 0xfa, 0x9f, 0x42, 0x6d, 0x22, 0xe5, 0x47, 0xf2, + 0x7d, 0x9c, 0x93, 0xef, 0x63, 0x1d, 0xaa, 0x22, 0xa1, 0x15, 0x98, 0x7c, 0x24, 0xc3, 0x24, 0xfa, + 0x8f, 0x0f, 0xa1, 0x39, 0x0c, 0x2c, 0x97, 0xbd, 0xa2, 0xc1, 0x57, 0xea, 0x2a, 0xfe, 0xeb, 0x51, + 0xf8, 0x6f, 0x39, 0xd8, 0x5d, 0x0b, 0xa1, 0x4c, 0xc9, 0xf2, 0x69, 0xa4, 0xb4, 0x3c, 0x52, 0x45, + 0x52, 0x5b, 0x2b, 0x1d, 0x59, 0x1f, 0x2a, 0x2c, 0x3d, 0xf1, 0x39, 0xb4, 0xb8, 0x56, 0x78, 0x94, + 0x08, 0x2c, 0x7d, 0x52, 0xd2, 0x1a, 0xd2, 0xe4, 0x49, 0xeb, 0x12, 0x5d, 0xa3, 0x90, 0xec, 0x1a, + 0xe8, 0x67, 0x50, 0xd7, 0x4c, 0xea, 0x7b, 0x93, 0x4b, 0xb3, 0xa8, 0xd3, 0x20, 0x11, 0xd9, 0xa7, + 0x82, 0x45, 0x6a, 0xc1, 0x6a, 0x81, 0x9e, 0x40, 0x8d, 0x5b, 0xc1, 0x94, 0x72, 0x65, 0x46, 0x69, + 0x83, 0xe7, 0x40, 0x09, 0x88, 0xff, 0xf8, 0x15, 0xb4, 0x7a, 0xec, 0x6a, 0xe0, 0xcf, 0x9c, 0xef, + 0x35, 0xf5, 0xf0, 0xef, 0x0c, 0x68, 0xaf, 0x0e, 0xca, 0xf8, 0x70, 0x6d, 0xb8, 0xf4, 0x66, 0x94, + 0x6e, 0xb4, 0x35, 0x97, 0xde, 0x90, 0xd0, 0x6b, 0x07, 0x50, 0x17, 0x32, 0xb2, 0x74, 0x3b, 0xb6, + 0xaa, 0xdc, 0x05, 0x02, 0x2e, 0xbd, 0x11, 0xd6, 0x9e, 0xd9, 0x0c, 0xff, 0xde, 0x00, 0x44, 0xa8, + 0xef, 0x05, 0x3c, 0xbb, 0xd1, 0x18, 0x0a, 0x33, 0xfa, 0x8a, 0xdf, 0x62, 0xb2, 0xe4, 0xa1, 0x8f, + 0xa0, 0x18, 0x38, 0xd3, 0x4b, 0x7e, 0xcb, 0x78, 0xa1, 0x98, 0xf8, 0x18, 0xee, 0x26, 0x94, 0xc9, + 0xd4, 0xe6, 0xfe, 0x9d, 0x07, 0x90, 0x8f, 0x3e, 0x55, 0x9a, 0xe3, 0x8f, 0x5d, 0x23, 0xf1, 0xd8, + 0x15, 0x43, 0xe1, 0xc4, 0xf2, 0xad, 0x89, 0xc3, 0x97, 0x61, 0xc7, 0x0b, 0xd7, 0xe8, 0x01, 0x54, + 0xad, 0x6b, 0xcb, 0x99, 0x59, 0xe3, 0x19, 0x95, 0x4a, 0x17, 0xc8, 0x8a, 0x20, 0xaa, 0x8d, 0x76, + 0xbc, 0x9a, 0xf0, 0x0a, 0x72, 0xc2, 0xd3, 0x91, 0x77, 0x2c, 0xe7, 0xbc, 0x4f, 0x00, 0x31, 0x5d, + 0x07, 0x99, 0x6b, 0xf9, 0x5a, 0xb0, 0x28, 0x05, 0xdb, 0x9a, 0x33, 0x70, 0x2d, 0x5f, 0x49, 0x3f, + 0x85, 0xed, 0x80, 0x4e, 0xa8, 0x73, 0x9d, 0x92, 0x2f, 0x49, 0x79, 0x14, 0xf1, 0x56, 0x3b, 0xf6, + 0x00, 0x18, 0xb7, 0x02, 0x3e, 0x12, 0xb3, 0xa2, 0xac, 0x87, 0x0d, 0x52, 0x95, 0x14, 0x31, 0x47, + 0xa2, 0x2e, 0xdc, 0xb5, 0x7c, 0x7f, 0xb6, 0x4c, 0xe1, 0x55, 0xa4, 0xdc, 0x9d, 0x90, 0xb5, 0x82, + 0xdb, 0x85, 0xb2, 0xc3, 0x46, 0xe3, 0x05, 0x5b, 0xca, 0xd2, 0x58, 0x21, 0x25, 0x87, 0x1d, 0x2d, + 0xd8, 0x52, 0xa4, 0xe5, 0x82, 0x51, 0x3b, 0x5e, 0x11, 0x2b, 0x82, 0x20, 0x4a, 0xe1, 0x7a, 0xe5, + 0xae, 0x6d, 0xa8, 0xdc, 0xe9, 0xd2, 0x5c, 0x5f, 0x2f, 0xcd, 0xc9, 0xe2, 0xde, 0x48, 0x17, 0xf7, + 0x44, 0xe5, 0x6e, 0x26, 0x2b, 0x37, 0x9e, 0xc1, 0x3d, 0x79, 0xdd, 0xef, 0xdb, 0x34, 0x8b, 0x4c, + 0xc4, 0x4b, 0xb2, 0xd2, 0xad, 0xe2, 0x88, 0x28, 0x36, 0x7e, 0x06, 0x3b, 0xe9, 0xd3, 0xb2, 0x44, + 0xe9, 0x63, 0x0a, 0xd5, 0xe8, 0xdb, 0x08, 0x2a, 0x41, 0xae, 0xff, 0xa2, 0xbd, 0x85, 0x6a, 0x50, + 0xfe, 0xfa, 0xfc, 0xc5, 0x79, 0xff, 0x9b, 0xf3, 0xb6, 0x81, 0xb6, 0xa1, 0x7d, 0xde, 0x1f, 0x8e, + 0x8e, 0xfa, 0xfd, 0xe1, 0x60, 0x48, 0x7a, 0x17, 0x17, 0xa7, 0x27, 0xed, 0x1c, 0xba, 0x0b, 0xad, + 0xc1, 0xb0, 0x4f, 0x4e, 0x47, 0xc3, 0xfe, 0xcb, 0xa3, 0xc1, 0xb0, 0x7f, 0x7e, 0xda, 0xce, 0x23, + 0x13, 0xb6, 0x7b, 0x5f, 0x91, 0xd3, 0xde, 0xc9, 0xb7, 0x49, 0xf1, 0xc2, 0xe3, 0x27, 0xd0, 0x4c, + 0xb6, 0x18, 0x71, 0x46, 0xcf, 0xb6, 0xcf, 0x3d, 0x9b, 0xb6, 0xb7, 0x50, 0x13, 0x80, 0xd0, 0xb9, + 0x77, 0x4d, 0xe5, 0xda, 0x38, 0xfc, 0x73, 0x05, 0x72, 0x17, 0x27, 0xa8, 0x07, 0xb0, 0x7a, 0x6d, + 0xa1, 0x5d, 0x65, 0xc8, 0xda, 0x13, 0xae, 0x63, 0xae, 0x33, 0x94, 0xad, 0x78, 0x0b, 0x3d, 0x85, + 0xfc, 0x90, 0x79, 0x48, 0xfb, 0x71, 0xf5, 0x65, 0xa7, 0x73, 
0x27, 0x46, 0x09, 0xa5, 0x1f, 0x19, + 0x4f, 0x0d, 0xf4, 0x0b, 0xa8, 0x46, 0xf3, 0x3c, 0xda, 0x51, 0x52, 0xe9, 0x2f, 0x1f, 0x9d, 0xdd, + 0x35, 0x7a, 0x74, 0xe2, 0x4b, 0x68, 0x26, 0xbf, 0x08, 0xa0, 0xfb, 0x4a, 0x78, 0xe3, 0xd7, 0x86, + 0xce, 0x83, 0xcd, 0xcc, 0x08, 0xee, 0xe7, 0x50, 0xd6, 0x53, 0x3b, 0xd2, 0x37, 0x99, 0xfc, 0x06, + 0xd0, 0xb9, 0x97, 0xa2, 0x46, 0x3b, 0x3f, 0x83, 0x4a, 0x38, 0x43, 0xa3, 0x7b, 0x91, 0x8b, 0xe2, + 0xc3, 0x6e, 0x67, 0x27, 0x4d, 0x8e, 0x6f, 0x0e, 0x87, 0xd6, 0x70, 0x73, 0x6a, 0x52, 0x0e, 0x37, + 0xa7, 0x67, 0x5b, 0xe5, 0x82, 0x64, 0x70, 0x86, 0x2e, 0xd8, 0x98, 0x20, 0xa1, 0x0b, 0x36, 0xc7, + 0x33, 0xde, 0x42, 0x43, 0x68, 0xa5, 0x1e, 0x13, 0xe8, 0x41, 0x18, 0xd4, 0x9b, 0x9e, 0xa9, 0x9d, + 0xbd, 0x5b, 0xb8, 0xe9, 0x7b, 0x8e, 0x46, 0x4c, 0xb4, 0x72, 0x44, 0x62, 0xca, 0xee, 0xec, 0xae, + 0xd1, 0x23, 0xad, 0x9e, 0x41, 0x23, 0x31, 0xa2, 0xa2, 0x4e, 0x4a, 0x36, 0x36, 0xb7, 0xbe, 0x0d, + 0xe7, 0x33, 0xa8, 0x84, 0x2d, 0x38, 0xf4, 0x74, 0xaa, 0xf7, 0x87, 0x9e, 0x4e, 0x77, 0x6a, 0xbc, + 0x85, 0x4e, 0xa0, 0x16, 0xeb, 0x54, 0xc8, 0x0c, 0x0d, 0x4f, 0x77, 0xd2, 0xce, 0x07, 0x1b, 0x38, + 0x11, 0xca, 0x40, 0x7e, 0x5f, 0x48, 0xcc, 0x76, 0x68, 0x2f, 0xd2, 0x78, 0xd3, 0x98, 0xd9, 0xd9, + 0xbf, 0x8d, 0x1d, 0x07, 0x4d, 0x0f, 0x8c, 0x21, 0xe8, 0x2d, 0xb3, 0x6b, 0x08, 0x7a, 0xdb, 0x9c, + 0x89, 0xb7, 0x8e, 0x1e, 0xff, 0xfd, 0xcd, 0xbe, 0xf1, 0x8f, 0x37, 0xfb, 0xc6, 0x3f, 0xdf, 0xec, + 0x1b, 0x7f, 0xf9, 0xd7, 0xfe, 0x16, 0x98, 0x13, 0x6f, 0xde, 0xf5, 0x1d, 0x77, 0x3a, 0xb1, 0xfc, + 0x2e, 0x77, 0xae, 0xae, 0xbb, 0x57, 0xd7, 0xf2, 0xab, 0xf4, 0xb8, 0x24, 0x7f, 0x7e, 0xf2, 0x9f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x20, 0xf4, 0x81, 0xa3, 0xd4, 0x16, 0x00, 0x00, } diff --git a/glide.lock b/glide.lock index a77abcd683d..26d5ad26add 100644 --- a/glide.lock +++ b/glide.lock @@ -149,7 +149,7 @@ imports: - name: github.com/pingcap/check version: ce8a2f822ab1e245a4eefcef2996531c79c943f1 - name: github.com/pingcap/kvproto - version: ebe86abfa6519ffefdf2946fba762a894fe88256 + version: aa89780c80ffbad78c6aaaf0c53829547cf3a5c2 subpackages: - pkg/coprocessor - pkg/eraftpb diff --git a/glide.yaml b/glide.yaml index dc8899d222d..7db6a69ab49 100644 --- a/glide.yaml +++ b/glide.yaml @@ -118,7 +118,7 @@ import: - package: github.com/pingcap/check version: ce8a2f822ab1e245a4eefcef2996531c79c943f1 - package: github.com/pingcap/kvproto - version: ebe86abfa6519ffefdf2946fba762a894fe88256 + version: aa89780c80ffbad78c6aaaf0c53829547cf3a5c2 subpackages: - pkg/coprocessor - pkg/eraftpb diff --git a/pdctl/command/hot_command.go b/pdctl/command/hot_command.go index fc5629178a0..fba6a365ea7 100644 --- a/pdctl/command/hot_command.go +++ b/pdctl/command/hot_command.go @@ -21,8 +21,9 @@ import ( ) const ( - hotRegionsPrefix = "pd/api/v1/hotspot/regions" - hotStoresPrefix = "pd/api/v1/hotspot/stores" + hotReadRegionsPrefix = "pd/api/v1/hotspot/regions/read" + hotWriteRegionsPrefix = "pd/api/v1/hotspot/regions/write" + hotStoresPrefix = "pd/api/v1/hotspot/stores" ) // NewHotSpotCommand return a hot subcommand of rootCmd @@ -31,23 +32,43 @@ func NewHotSpotCommand() *cobra.Command { Use: "hot", Short: "show the hotspot status of the cluster", } - cmd.AddCommand(NewHotRegionCommand()) + cmd.AddCommand(NewHotWriteRegionCommand()) + cmd.AddCommand(NewHotReadRegionCommand()) cmd.AddCommand(NewHotStoreCommand()) return cmd } -// NewHotRegionCommand return a hot regions subcommand of hotSpotCmd -func NewHotRegionCommand() *cobra.Command { +// NewHotWriteRegionCommand return a hot regions subcommand of hotSpotCmd +func 
NewHotWriteRegionCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "region", - Short: "show the hot regions", - Run: showHotRegionsCommandFunc, + Use: "write", + Short: "show the hot write regions", + Run: showHotWriteRegionsCommandFunc, } return cmd } -func showHotRegionsCommandFunc(cmd *cobra.Command, args []string) { - r, err := doRequest(cmd, hotRegionsPrefix, http.MethodGet) +func showHotWriteRegionsCommandFunc(cmd *cobra.Command, args []string) { + r, err := doRequest(cmd, hotWriteRegionsPrefix, http.MethodGet) + if err != nil { + fmt.Printf("Failed to get hotspot: %s\n", err) + return + } + fmt.Println(r) +} + +// NewHotReadRegionCommand return a hot read regions subcommand of hotSpotCmd +func NewHotReadRegionCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "read", + Short: "show the hot read regions", + Run: showHotReadRegionsCommandFunc, + } + return cmd +} + +func showHotReadRegionsCommandFunc(cmd *cobra.Command, args []string) { + r, err := doRequest(cmd, hotReadRegionsPrefix, http.MethodGet) if err != nil { fmt.Printf("Failed to get hotspot: %s\n", err) return diff --git a/server/api/hot_status.go b/server/api/hot_status.go index 0266835bd66..fa92365d8a7 100644 --- a/server/api/hot_status.go +++ b/server/api/hot_status.go @@ -32,8 +32,12 @@ func newHotStatusHandler(handler *server.Handler, rd *render.Render) *hotStatusH } } -func (h *hotStatusHandler) GetHotRegions(w http.ResponseWriter, r *http.Request) { - h.rd.JSON(w, http.StatusOK, h.GetHotWriteRegions()) +func (h *hotStatusHandler) GetHotWriteRegions(w http.ResponseWriter, r *http.Request) { + h.rd.JSON(w, http.StatusOK, h.Handler.GetHotWriteRegions()) +} + +func (h *hotStatusHandler) GetHotReadRegions(w http.ResponseWriter, r *http.Request) { + h.rd.JSON(w, http.StatusOK, h.Handler.GetHotReadRegions()) } func (h *hotStatusHandler) GetHotStores(w http.ResponseWriter, r *http.Request) { diff --git a/server/api/router.go b/server/api/router.go index 6d6c959ffcc..0676dd864b6 100644 --- a/server/api/router.go +++ b/server/api/router.go @@ -67,7 +67,8 @@ func createRouter(prefix string, svr *server.Server) *mux.Router { router.HandleFunc("/api/v1/labels/stores", labelsHandler.GetStores).Methods("GET") hotStatusHandler := newHotStatusHandler(handler, rd) - router.HandleFunc("/api/v1/hotspot/regions", hotStatusHandler.GetHotRegions).Methods("GET") + router.HandleFunc("/api/v1/hotspot/regions/write", hotStatusHandler.GetHotWriteRegions).Methods("GET") + router.HandleFunc("/api/v1/hotspot/regions/read", hotStatusHandler.GetHotReadRegions).Methods("GET") router.HandleFunc("/api/v1/hotspot/stores", hotStatusHandler.GetHotStores).Methods("GET") regionHandler := newRegionHandler(svr, rd) diff --git a/server/balancer_test.go b/server/balancer_test.go index a1f729e03d6..efdd38fa180 100644 --- a/server/balancer_test.go +++ b/server/balancer_test.go @@ -869,16 +869,16 @@ func checkTransferLeaderFrom(c *C, op *schedule.Operator, sourceID uint64) { c.Assert(op.Step(0).(schedule.TransferLeader).FromStore, Equals, sourceID) } -var _ = Suite(&testBalanceHotRegionSchedulerSuite{}) +var _ = Suite(&testBalanceHotWriteRegionSchedulerSuite{}) -type testBalanceHotRegionSchedulerSuite struct{} +type testBalanceHotWriteRegionSchedulerSuite struct{} -func (s *testBalanceHotRegionSchedulerSuite) TestBalance(c *C) { +func (s *testBalanceHotWriteRegionSchedulerSuite) TestBalance(c *C) { cluster := newClusterInfo(newMockIDAllocator()) tc := newTestClusterInfo(cluster) _, opt := newTestScheduleConfig() - hb, err := 
schedule.CreateScheduler("hotRegion", opt) + hb, err := schedule.CreateScheduler("hotWriteRegion", opt) c.Assert(err, IsNil) // Add stores 1, 2, 3, 4, 5 with region counts 3, 2, 2, 2, 0. @@ -929,3 +929,81 @@ func (s *testBalanceHotRegionSchedulerSuite) TestBalance(c *C) { // so one of the leader will transfer to another store. checkTransferLeaderFrom(c, hb.Schedule(cluster), 1) } + +func (c *testClusterInfo) updateStorageReadBytes(storeID uint64, BytesRead uint64) { + store := c.GetStore(storeID) + store.Stats.BytesRead = BytesRead + c.putStore(store) +} + +func (c *testClusterInfo) addLeaderRegionWithReadInfo(regionID uint64, leaderID uint64, readBytes uint64, followerIds ...uint64) { + region := &metapb.Region{Id: regionID} + leader, _ := c.AllocPeer(leaderID) + region.Peers = []*metapb.Peer{leader} + for _, id := range followerIds { + peer, _ := c.AllocPeer(id) + region.Peers = append(region.Peers, peer) + } + r := core.NewRegionInfo(region, leader) + r.ReadBytes = readBytes + c.updateReadStatus(r) + c.putRegion(r) +} + +var _ = Suite(&testBalanceHotReadRegionSchedulerSuite{}) + +type testBalanceHotReadRegionSchedulerSuite struct{} + +func (s *testBalanceHotReadRegionSchedulerSuite) TestBalance(c *C) { + cluster := newClusterInfo(newMockIDAllocator()) + tc := newTestClusterInfo(cluster) + + _, opt := newTestScheduleConfig() + hb, err := schedule.CreateScheduler("hotReadRegion", opt) + c.Assert(err, IsNil) + + // Add stores 1, 2, 3, 4, 5 with region counts 3, 2, 2, 2, 0. + tc.addRegionStore(1, 3) + tc.addRegionStore(2, 2) + tc.addRegionStore(3, 2) + tc.addRegionStore(4, 2) + tc.addRegionStore(5, 0) + + // Report store read bytes. + tc.updateStorageReadBytes(1, 75*1024*1024) + tc.updateStorageReadBytes(2, 45*1024*1024) + tc.updateStorageReadBytes(3, 45*1024*1024) + tc.updateStorageReadBytes(4, 60*1024*1024) + tc.updateStorageReadBytes(5, 0) + + // Region 1, 2 and 3 are hot regions. + //| region_id | leader_sotre | follower_store | follower_store | read_bytes | + //|-----------|--------------|----------------|----------------|---------------| + //| 1 | 1 | 2 | 3 | 512KB | + //| 2 | 2 | 1 | 3 | 512KB | + //| 3 | 1 | 2 | 3 | 512KB | + tc.addLeaderRegionWithReadInfo(1, 1, 512*1024*regionHeartBeatReportInterval, 2, 3) + tc.addLeaderRegionWithReadInfo(2, 2, 512*1024*regionHeartBeatReportInterval, 1, 3) + tc.addLeaderRegionWithReadInfo(3, 1, 512*1024*regionHeartBeatReportInterval, 2, 3) + hotRegionLowThreshold = 0 + + // Will transfer a hot region leader from store 1 to store 3, because the total count of peers + // which is hot for store 1 is more larger than other stores. 
+ checkTransferLeader(c, hb.Schedule(cluster), 1, 3) + // Assume the operator has been applied. + tc.addLeaderRegionWithReadInfo(3, 3, 512*1024*regionHeartBeatReportInterval, 1, 2) + + // After transferring a hot region leader from store 1 to store 3, + // the three region leaders will be evenly distributed across three stores. + tc.updateStorageReadBytes(1, 60*1024*1024) + tc.updateStorageReadBytes(2, 30*1024*1024) + tc.updateStorageReadBytes(3, 60*1024*1024) + tc.updateStorageReadBytes(4, 30*1024*1024) + tc.updateStorageReadBytes(5, 30*1024*1024) + tc.addLeaderRegionWithReadInfo(4, 1, 512*1024*regionHeartBeatReportInterval, 2, 3) + tc.addLeaderRegionWithReadInfo(5, 4, 512*1024*regionHeartBeatReportInterval, 2, 5) + + // Now two hot read regions appear on stores 1 and 4. + // We will transfer a peer from store 1 to store 5. + checkTransferPeerWithLeaderTransfer(c, hb.Schedule(cluster), 1, 5) +} diff --git a/server/cache.go b/server/cache.go index 432eb6f06e6..c3d9e05c45b 100644 --- a/server/cache.go +++ b/server/cache.go @@ -126,6 +126,16 @@ func (s *storesInfo) totalWrittenBytes() uint64 { return totalWrittenBytes } +func (s *storesInfo) totalReadBytes() uint64 { + var totalReadBytes uint64 + for _, s := range s.stores { + if s.IsUp() { + totalReadBytes += s.Stats.GetBytesRead() + } + } + return totalReadBytes +} + // regionMap wraps a map[uint64]*core.RegionInfo and supports randomly pick a region. type regionMap struct { m map[uint64]*regionEntry @@ -347,6 +357,7 @@ type clusterInfo struct { activeRegions int writeStatistics cache.Cache + readStatistics cache.Cache } func newClusterInfo(id IDAllocator) *clusterInfo { @@ -354,7 +365,8 @@ id: id, stores: newStoresInfo(), regions: newRegionsInfo(), - writeStatistics: cache.NewDefaultCache(writeStatCacheMaxLen), + readStatistics: cache.NewDefaultCache(statCacheMaxLen), + writeStatistics: cache.NewDefaultCache(statCacheMaxLen), } } @@ -499,6 +511,16 @@ func (c *clusterInfo) getStoresWriteStat() map[uint64]uint64 { return res } +func (c *clusterInfo) getStoresReadStat() map[uint64]uint64 { + c.RLock() + defer c.RUnlock() + res := make(map[uint64]uint64) + for _, s := range c.stores.stores { + res[s.GetId()] = s.Stats.GetBytesRead() + } + return res +} + // GetRegions searches for a region by ID.
func (c *clusterInfo) GetRegion(regionID uint64) *core.RegionInfo { c.RLock() @@ -513,7 +535,7 @@ func (c *clusterInfo) updateWriteStatCache(region *core.RegionInfo, hotRegionThr value, isExist := c.writeStatistics.Peek(key) newItem := &core.RegionStat{ RegionID: region.GetId(), - WrittenBytes: region.WrittenBytes, + FlowBytes: region.WrittenBytes, LastUpdateTime: time.Now(), StoreID: region.Leader.GetStoreId(), Version: region.GetRegionEpoch().GetVersion(), @@ -536,11 +558,46 @@ // eliminate some noise newItem.HotDegree = v.HotDegree - 1 newItem.AntiCount = v.AntiCount - 1 - newItem.WrittenBytes = v.WrittenBytes + newItem.FlowBytes = v.FlowBytes } c.writeStatistics.Put(key, newItem) } +// updateReadStatCache updates the statistics of a region if it is hot, or removes it from the statistics once it cools down +func (c *clusterInfo) updateReadStatCache(region *core.RegionInfo, hotRegionThreshold uint64) { + var v *core.RegionStat + key := region.GetId() + value, isExist := c.readStatistics.Peek(key) + newItem := &core.RegionStat{ + RegionID: region.GetId(), + FlowBytes: region.ReadBytes, + LastUpdateTime: time.Now(), + StoreID: region.Leader.GetStoreId(), + Version: region.GetRegionEpoch().GetVersion(), + AntiCount: hotRegionAntiCount, + } + + if isExist { + v = value.(*core.RegionStat) + newItem.HotDegree = v.HotDegree + 1 + } + + if region.ReadBytes < hotRegionThreshold { + if !isExist { + return + } + if v.AntiCount <= 0 { + c.readStatistics.Remove(key) + return + } + // eliminate some noise + newItem.HotDegree = v.HotDegree - 1 + newItem.AntiCount = v.AntiCount - 1 + newItem.FlowBytes = v.FlowBytes + } + c.readStatistics.Put(key, newItem) +} + // RegionWriteStats returns hot region's write stats. func (c *clusterInfo) RegionWriteStats() []*core.RegionStat { elements := c.writeStatistics.Elems() @@ -551,6 +608,16 @@ return stats } +// RegionReadStats returns hot region's read stats. +func (c *clusterInfo) RegionReadStats() []*core.RegionStat { + elements := c.readStatistics.Elems() + stats := make([]*core.RegionStat, len(elements)) + for i := range elements { + stats[i] = elements[i].Value.(*core.RegionStat) + } + return stats +} + // IsRegionHot checks if a region is in hot state.
func (c *clusterInfo) IsRegionHot(id uint64) bool { c.RLock() @@ -767,6 +834,7 @@ func (c *clusterInfo) handleRegionHeartbeat(region *core.RegionInfo) error { } c.updateWriteStatus(region) + c.updateReadStatus(region) return nil } @@ -786,14 +854,40 @@ region.WrittenBytes = WrittenBytesPerSec // hotRegionThreshold is use to pick hot region - // suppose the number of the hot regions is writeStatCacheMaxLen + // suppose the number of the hot regions is statCacheMaxLen // and we use total written Bytes past storeHeartBeatReportInterval seconds to divide the number of hot regions // divide 2 because the store reports data about two times than the region record write to rocksdb - divisor := float64(writeStatCacheMaxLen) * 2 * storeHeartBeatReportInterval + divisor := float64(statCacheMaxLen) * 2 * storeHeartBeatReportInterval hotRegionThreshold := uint64(float64(c.stores.totalWrittenBytes()) / divisor) - if hotRegionThreshold < hotRegionMinWriteRate { - hotRegionThreshold = hotRegionMinWriteRate + if hotRegionThreshold < hotRegionMinFlowRate { + hotRegionThreshold = hotRegionMinFlowRate } c.updateWriteStatCache(region, hotRegionThreshold) } + +func (c *clusterInfo) updateReadStatus(region *core.RegionInfo) { + var ReadBytesPerSec uint64 + v, isExist := c.readStatistics.Peek(region.GetId()) + if isExist { + interval := time.Now().Sub(v.(*core.RegionStat).LastUpdateTime).Seconds() + if interval < minHotRegionReportInterval { + return + } + ReadBytesPerSec = uint64(float64(region.ReadBytes) / interval) + } else { + ReadBytesPerSec = uint64(float64(region.ReadBytes) / float64(regionHeartBeatReportInterval)) + } + region.ReadBytes = ReadBytesPerSec + + // hotRegionThreshold is used to pick hot regions + // suppose the number of the hot regions is statCacheMaxLen + // and we divide the total read bytes of the past storeHeartBeatReportInterval seconds by the number of hot regions + divisor := float64(statCacheMaxLen) * storeHeartBeatReportInterval + hotRegionThreshold := uint64(float64(c.stores.totalReadBytes()) / divisor) + + if hotRegionThreshold < hotRegionMinFlowRate { + hotRegionThreshold = hotRegionMinFlowRate + } + c.updateReadStatCache(region, hotRegionThreshold) +} diff --git a/server/coordinator.go b/server/coordinator.go index 36a7d2c7e59..1fb3d947e79 100644 --- a/server/coordinator.go +++ b/server/coordinator.go @@ -39,14 +39,15 @@ const ( minSlowScheduleInterval = time.Second * 3 scheduleIntervalFactor = 1.3 - writeStatCacheMaxLen = 1000 - hotRegionMinWriteRate = 16 * 1024 + statCacheMaxLen = 1000 + hotRegionMinFlowRate = 16 * 1024 regionHeartBeatReportInterval = 60 regionheartbeatSendChanCap = 1024 storeHeartBeatReportInterval = 10 minHotRegionReportInterval = 3 hotRegionAntiCount = 1 - hotRegionScheduleName = "balance-hot-region-scheduler" + hotWriteRegionScheduleName = "balance-hot-write-region-scheduler" + hotReadRegionScheduleName = "balance-hot-read-region-scheduler" ) var ( @@ -138,7 +139,9 @@ func (c *coordinator) run() { c.addScheduler(s, minScheduleInterval) s, _ = schedule.CreateScheduler("balanceRegion", c.opt) c.addScheduler(s, minScheduleInterval) - s, _ = schedule.CreateScheduler("hotRegion", c.opt) + s, _ = schedule.CreateScheduler("hotWriteRegion", c.opt) + c.addScheduler(s, minSlowScheduleInterval) + s, _ = schedule.CreateScheduler("hotReadRegion", c.opt) c.addScheduler(s, minSlowScheduleInterval) } @@ -156,7 +159,20 @@ type hasHotStatus interface { func (c *coordinator) getHotWriteRegions()
*core.StoreHotRegionInfos { c.RLock() defer c.RUnlock() - s, ok := c.schedulers[hotRegionScheduleName] + s, ok := c.schedulers[hotWriteRegionScheduleName] + if !ok { + return nil + } + if h, ok := s.Scheduler.(hasHotStatus); ok { + return h.GetStatus() + } + return nil +} + +func (c *coordinator) getHotReadRegions() *core.StoreHotRegionInfos { + c.RLock() + defer c.RUnlock() + s, ok := c.schedulers[hotReadRegionScheduleName] if !ok { return nil } @@ -195,14 +211,15 @@ func (c *coordinator) collectSchedulerMetrics() { func (c *coordinator) collectHotSpotMetrics() { c.RLock() defer c.RUnlock() - s, ok := c.schedulers[hotRegionScheduleName] + // collect hot write region metrics + s, ok := c.schedulers[hotWriteRegionScheduleName] if !ok { return } status := s.Scheduler.(hasHotStatus).GetStatus() for storeID, stat := range status.AsPeer { store := fmt.Sprintf("store_%d", storeID) - totalWriteBytes := float64(stat.WrittenBytes) + totalWriteBytes := float64(stat.TotalFlowBytes) hotWriteRegionCount := float64(stat.RegionsCount) hotSpotStatusGauge.WithLabelValues(store, "total_written_bytes_as_peer").Set(totalWriteBytes) @@ -210,12 +227,27 @@ func (c *coordinator) collectHotSpotMetrics() { } for storeID, stat := range status.AsLeader { store := fmt.Sprintf("store_%d", storeID) - totalWriteBytes := float64(stat.WrittenBytes) + totalWriteBytes := float64(stat.TotalFlowBytes) hotWriteRegionCount := float64(stat.RegionsCount) hotSpotStatusGauge.WithLabelValues(store, "total_written_bytes_as_leader").Set(totalWriteBytes) hotSpotStatusGauge.WithLabelValues(store, "hot_write_region_as_leader").Set(hotWriteRegionCount) } + + // collect hot read region metrics + s, ok = c.schedulers[hotReadRegionScheduleName] + if !ok { + return + } + status = s.Scheduler.(hasHotStatus).GetStatus() + for storeID, stat := range status.AsLeader { + store := fmt.Sprintf("store_%d", storeID) + totalReadBytes := float64(stat.TotalFlowBytes) + hotReadRegionCount := float64(stat.RegionsCount) + + hotSpotStatusGauge.WithLabelValues(store, "total_read_bytes_as_leader").Set(totalReadBytes) + hotSpotStatusGauge.WithLabelValues(store, "hot_read_region_as_leader").Set(hotReadRegionCount) + } } func (c *coordinator) shouldRun() bool { diff --git a/server/coordinator_test.go b/server/coordinator_test.go index 01a9ddd047e..509e08f870c 100644 --- a/server/coordinator_test.go +++ b/server/coordinator_test.go @@ -310,10 +310,11 @@ func (s *testCoordinatorSuite) TestAddScheduler(c *C) { co.run() defer co.stop() - c.Assert(co.schedulers, HasLen, 3) + c.Assert(co.schedulers, HasLen, 4) c.Assert(co.removeScheduler("balance-leader-scheduler"), IsNil) c.Assert(co.removeScheduler("balance-region-scheduler"), IsNil) - c.Assert(co.removeScheduler("balance-hot-region-scheduler"), IsNil) + c.Assert(co.removeScheduler("balance-hot-write-region-scheduler"), IsNil) + c.Assert(co.removeScheduler("balance-hot-read-region-scheduler"), IsNil) c.Assert(co.schedulers, HasLen, 0) stream := newMockHeartbeatStream() diff --git a/server/core/region.go b/server/core/region.go index 0460721c1f2..5e1b3a17be2 100644 --- a/server/core/region.go +++ b/server/core/region.go @@ -28,6 +28,7 @@ type RegionInfo struct { DownPeers []*pdpb.PeerStats PendingPeers []*metapb.Peer WrittenBytes uint64 + ReadBytes uint64 } // NewRegionInfo creates RegionInfo with region's meta and leader peer. 
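The hotspot status wired up above is served over the two new routes registered in server/api/router.go. A minimal sketch of querying them from Go; the PD address (127.0.0.1:2379) is an assumption for illustration, not part of this patch:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Routes added in server/api/router.go, reached through the "pd" prefix
	// that pdctl/command/hot_command.go also uses.
	for _, url := range []string{
		"http://127.0.0.1:2379/pd/api/v1/hotspot/regions/read",
		"http://127.0.0.1:2379/pd/api/v1/hotspot/regions/write",
		"http://127.0.0.1:2379/pd/api/v1/hotspot/stores",
	} {
		resp, err := http.Get(url)
		if err != nil {
			fmt.Println(err)
			continue
		}
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		// Region responses now report "total_flow_bytes" rather than
		// "total_written_bytes"; see the RegionStat changes below.
		fmt.Printf("%s:\n%s\n", url, body)
	}
}

The same data is reachable from pdctl through the new "hot read" and "hot write" subcommands added in pdctl/command/hot_command.go.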
@@ -54,6 +55,7 @@ func (r *RegionInfo) Clone() *RegionInfo { DownPeers: downPeers, PendingPeers: pendingPeers, WrittenBytes: r.WrittenBytes, + ReadBytes: r.ReadBytes, } } @@ -142,8 +144,8 @@ func (r *RegionInfo) GetFollower() *metapb.Peer { // RegionStat records each hot region's statistics type RegionStat struct { - RegionID uint64 `json:"region_id"` - WrittenBytes uint64 `json:"written_bytes"` + RegionID uint64 `json:"region_id"` + FlowBytes uint64 `json:"flow_bytes"` // HotDegree records the hot region update times HotDegree int `json:"hot_degree"` // LastUpdateTime used to calculate average write @@ -160,11 +162,11 @@ type RegionsStat []RegionStat func (m RegionsStat) Len() int { return len(m) } func (m RegionsStat) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m RegionsStat) Less(i, j int) bool { return m[i].WrittenBytes < m[j].WrittenBytes } +func (m RegionsStat) Less(i, j int) bool { return m[i].FlowBytes < m[j].FlowBytes } // HotRegionsStat records all hot regions statistics type HotRegionsStat struct { - WrittenBytes uint64 `json:"total_written_bytes"` - RegionsCount int `json:"regions_count"` - RegionsStat RegionsStat `json:"statistics"` + TotalFlowBytes uint64 `json:"total_flow_bytes"` + RegionsCount int `json:"regions_count"` + RegionsStat RegionsStat `json:"statistics"` } diff --git a/server/grpc_service.go b/server/grpc_service.go index d16fc244eac..9eab522dc3f 100644 --- a/server/grpc_service.go +++ b/server/grpc_service.go @@ -271,6 +271,7 @@ func (s *Server) RegionHeartbeat(server pdpb.PD_RegionHeartbeatServer) error { region.DownPeers = request.GetDownPeers() region.PendingPeers = request.GetPendingPeers() region.WrittenBytes = request.GetBytesWritten() + region.ReadBytes = request.GetBytesRead() if region.GetId() == 0 { msg := fmt.Sprintf("invalid request region, %v", request) hbStreams.sendErr(region, pdpb.ErrorType_UNKNOWN, msg) diff --git a/server/handler.go b/server/handler.go index fd2efd0328a..90a430097d4 100644 --- a/server/handler.go +++ b/server/handler.go @@ -53,7 +53,7 @@ func (h *Handler) GetSchedulers() ([]string, error) { return c.getSchedulers(), nil } -// GetHotWriteRegions gets all hot regions status +// GetHotWriteRegions gets all hot write regions status func (h *Handler) GetHotWriteRegions() *core.StoreHotRegionInfos { c, err := h.getCoordinator() if err != nil { @@ -62,6 +62,15 @@ func (h *Handler) GetHotWriteRegions() *core.StoreHotRegionInfos { return c.getHotWriteRegions() } +// GetHotReadRegions gets all hot read regions status +func (h *Handler) GetHotReadRegions() *core.StoreHotRegionInfos { + c, err := h.getCoordinator() + if err != nil { + return nil + } + return c.getHotReadRegions() +} + // GetHotWriteStores gets all hot write stores status func (h *Handler) GetHotWriteStores() map[uint64]uint64 { return h.s.cluster.cachedCluster.getStoresWriteStat() diff --git a/server/schedule/scheduler.go b/server/schedule/scheduler.go index 86253a8d252..de54427f81d 100644 --- a/server/schedule/scheduler.go +++ b/server/schedule/scheduler.go @@ -38,6 +38,7 @@ type Cluster interface { IsRegionHot(id uint64) bool RegionWriteStats() []*core.RegionStat + RegionReadStats() []*core.RegionStat // TODO: it should be removed. Schedulers don't need to know anything // about peers. 
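Before the new scheduler file: the read statistics it consumes are gated by the hotRegionThreshold computed in updateReadStatus (server/cache.go above). A minimal sketch of that arithmetic in isolation, using the constants from server/coordinator.go; the cluster-wide read flow is an invented input:

package main

import "fmt"

const (
	statCacheMaxLen              = 1000      // capacity of the read/write stat caches
	storeHeartBeatReportInterval = 10        // seconds covered by one store heartbeat
	hotRegionMinFlowRate         = 16 * 1024 // 16KB/s floor for the threshold
)

func main() {
	totalReadBytes := uint64(300 * 1024 * 1024) // assumed total read flow of all up stores

	// Unlike the write path, there is no extra factor of 2 in the divisor;
	// updateWriteStatus divides by 2 more because stores report roughly twice
	// the write bytes that regions record.
	divisor := float64(statCacheMaxLen) * storeHeartBeatReportInterval
	hotRegionThreshold := uint64(float64(totalReadBytes) / divisor)
	if hotRegionThreshold < hotRegionMinFlowRate {
		hotRegionThreshold = hotRegionMinFlowRate
	}
	fmt.Println(hotRegionThreshold) // 31457 bytes/s for the assumed input
}

A region whose per-second ReadBytes stays below this threshold long enough is dropped from readStatistics once its AntiCount reaches zero, as implemented in updateReadStatCache above.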
diff --git a/server/schedulers/hot_read_region.go b/server/schedulers/hot_read_region.go new file mode 100644 index 00000000000..1bd29dc6281 --- /dev/null +++ b/server/schedulers/hot_read_region.go @@ -0,0 +1,322 @@ +package schedulers + +import ( + "math" + "math/rand" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/pd/server/core" + "github.com/pingcap/pd/server/schedule" +) + +func init() { + schedule.RegisterScheduler("hotReadRegion", func(opt schedule.Options, args []string) (schedule.Scheduler, error) { + return newBalanceHotReadRegionsScheduler(opt), nil + }) +} + +type balanceHotReadRegionsScheduler struct { + sync.RWMutex + opt schedule.Options + limit uint64 + + // store id -> hot regions statistics as the role of leader + statisticsAsLeader map[uint64]*core.HotRegionsStat + r *rand.Rand +} + +func newBalanceHotReadRegionsScheduler(opt schedule.Options) *balanceHotReadRegionsScheduler { + return &balanceHotReadRegionsScheduler{ + opt: opt, + limit: 1, + statisticsAsLeader: make(map[uint64]*core.HotRegionsStat), + r: rand.New(rand.NewSource(time.Now().UnixNano())), + } +} + +func (h *balanceHotReadRegionsScheduler) GetName() string { + return "balance-hot-read-region-scheduler" +} + +func (h *balanceHotReadRegionsScheduler) GetResourceKind() core.ResourceKind { + return core.PriorityKind +} + +func (h *balanceHotReadRegionsScheduler) GetResourceLimit() uint64 { + return h.limit +} + +func (h *balanceHotReadRegionsScheduler) Prepare(cluster schedule.Cluster) error { return nil } + +func (h *balanceHotReadRegionsScheduler) Cleanup(cluster schedule.Cluster) {} + +func (h *balanceHotReadRegionsScheduler) Schedule(cluster schedule.Cluster) *schedule.Operator { + schedulerCounter.WithLabelValues(h.GetName(), "schedule").Inc() + h.calcScore(cluster) + + // balance by leader + srcRegion, newLeader := h.balanceByLeader(cluster) + if srcRegion != nil { + schedulerCounter.WithLabelValues(h.GetName(), "move_leader").Inc() + step := schedule.TransferLeader{FromStore: srcRegion.Leader.GetStoreId(), ToStore: newLeader.GetStoreId()} + return schedule.NewOperator("transferHotReadLeader", srcRegion.GetId(), core.PriorityKind, step) + } + + // balance by peer + srcRegion, srcPeer, destPeer := h.balanceByPeer(cluster) + if srcRegion != nil { + schedulerCounter.WithLabelValues(h.GetName(), "move_peer").Inc() + return schedule.CreateMovePeerOperator("moveHotReadRegion", srcRegion, core.PriorityKind, srcPeer.GetStoreId(), destPeer.GetStoreId(), destPeer.GetId()) + } + + schedulerCounter.WithLabelValues(h.GetName(), "skip").Inc() + return nil +} + +func (h *balanceHotReadRegionsScheduler) calcScore(cluster schedule.Cluster) { + h.Lock() + defer h.Unlock() + + h.statisticsAsLeader = make(map[uint64]*core.HotRegionsStat) + items := cluster.RegionReadStats() + for _, r := range items { + if r.HotDegree < h.opt.GetHotRegionLowThreshold() { + continue + } + + regionInfo := cluster.GetRegion(r.RegionID) + leaderStoreID := regionInfo.Leader.GetStoreId() + leaderStat, ok := h.statisticsAsLeader[leaderStoreID] + if !ok { + leaderStat = &core.HotRegionsStat{ + RegionsStat: make(core.RegionsStat, 0, storeHotRegionsDefaultLen), + } + h.statisticsAsLeader[leaderStoreID] = leaderStat + } + + stat := core.RegionStat{ + RegionID: r.RegionID, + FlowBytes: r.FlowBytes, + HotDegree: r.HotDegree, + LastUpdateTime: r.LastUpdateTime, + StoreID: leaderStoreID, + AntiCount: r.AntiCount, + Version: r.Version, + } + leaderStat.TotalFlowBytes += r.FlowBytes + 
leaderStat.RegionsCount++ + leaderStat.RegionsStat = append(leaderStat.RegionsStat, stat) + } +} + +func (h *balanceHotReadRegionsScheduler) balanceByPeer(cluster schedule.Cluster) (*core.RegionInfo, *metapb.Peer, *metapb.Peer) { + var ( + maxReadBytes uint64 + srcStoreID uint64 + maxHotStoreRegionCount int + ) + + // get the srcStoreId + for storeID, statistics := range h.statisticsAsLeader { + count, readBytes := statistics.RegionsStat.Len(), statistics.TotalFlowBytes + if count >= 2 && (count > maxHotStoreRegionCount || (count == maxHotStoreRegionCount && readBytes > maxReadBytes)) { + maxHotStoreRegionCount = count + maxReadBytes = readBytes + srcStoreID = storeID + } + } + if srcStoreID == 0 { + return nil, nil, nil + } + + stores := cluster.GetStores() + var destStoreID uint64 + for _, i := range h.r.Perm(h.statisticsAsLeader[srcStoreID].RegionsStat.Len()) { + rs := h.statisticsAsLeader[srcStoreID].RegionsStat[i] + srcRegion := cluster.GetRegion(rs.RegionID) + if len(srcRegion.DownPeers) != 0 || len(srcRegion.PendingPeers) != 0 { + continue + } + + filters := []schedule.Filter{ + schedule.NewExcludedFilter(srcRegion.GetStoreIds(), srcRegion.GetStoreIds()), + schedule.NewDistinctScoreFilter(h.opt.GetLocationLabels(), stores, cluster.GetLeaderStore(srcRegion)), + schedule.NewStateFilter(h.opt), + schedule.NewStorageThresholdFilter(h.opt), + } + destStoreIDs := make([]uint64, 0, len(stores)) + for _, store := range stores { + if schedule.FilterTarget(store, filters) { + continue + } + destStoreIDs = append(destStoreIDs, store.GetId()) + } + + destStoreID = h.selectDestStoreByPeer(destStoreIDs, srcRegion, srcStoreID) + if destStoreID != 0 { + srcRegion.ReadBytes = rs.FlowBytes + h.adjustBalanceLimit(srcStoreID, byLeader) + + var srcPeer *metapb.Peer + for _, peer := range srcRegion.GetPeers() { + if peer.GetStoreId() == srcStoreID { + srcPeer = peer + break + } + } + + if srcPeer == nil { + return nil, nil, nil + } + + destPeer, err := cluster.AllocPeer(destStoreID) + if err != nil { + log.Errorf("failed to allocate peer: %v", err) + return nil, nil, nil + } + + return srcRegion, srcPeer, destPeer + } + } + + return nil, nil, nil +} + +func (h *balanceHotReadRegionsScheduler) selectDestStoreByPeer(candidateStoreIDs []uint64, srcRegion *core.RegionInfo, srcStoreID uint64) uint64 { + sr := h.statisticsAsLeader[srcStoreID] + srcReadBytes := sr.TotalFlowBytes + srcHotRegionsCount := sr.RegionsStat.Len() + + var ( + destStoreID uint64 + minReadBytes uint64 = math.MaxUint64 + ) + minRegionsCount := int(math.MaxInt32) + for _, storeID := range candidateStoreIDs { + if s, ok := h.statisticsAsLeader[storeID]; ok { + if srcHotRegionsCount-s.RegionsStat.Len() > 1 && minRegionsCount > s.RegionsStat.Len() { + destStoreID = storeID + minReadBytes = s.TotalFlowBytes + minRegionsCount = s.RegionsStat.Len() + continue + } + if minRegionsCount == s.RegionsStat.Len() && minReadBytes > s.TotalFlowBytes && + uint64(float64(srcReadBytes)*hotRegionScheduleFactor) > s.TotalFlowBytes+2*srcRegion.ReadBytes { + minReadBytes = s.TotalFlowBytes + destStoreID = storeID + } + } else { + destStoreID = storeID + break + } + } + return destStoreID +} + +func (h *balanceHotReadRegionsScheduler) adjustBalanceLimit(storeID uint64, t BalanceType) { + srcStatistics := h.statisticsAsLeader[storeID] + allStatistics := h.statisticsAsLeader + + var hotRegionTotalCount float64 + for _, m := range allStatistics { + hotRegionTotalCount += float64(m.RegionsStat.Len()) + } + + avgRegionCount := hotRegionTotalCount / 
float64(len(allStatistics)) + // Multiplied by hotRegionLimitFactor to avoid transfer back and forth + limit := uint64((float64(srcStatistics.RegionsStat.Len()) - avgRegionCount) * hotRegionLimitFactor) + h.limit = maxUint64(1, limit) +} + +func (h *balanceHotReadRegionsScheduler) balanceByLeader(cluster schedule.Cluster) (*core.RegionInfo, *metapb.Peer) { + var ( + maxReadBytes uint64 + srcStoreID uint64 + maxHotStoreRegionCount int + ) + + // select srcStoreId by leader + for storeID, statistics := range h.statisticsAsLeader { + if statistics.RegionsStat.Len() < 2 { + continue + } + + if maxHotStoreRegionCount < statistics.RegionsStat.Len() { + maxHotStoreRegionCount = statistics.RegionsStat.Len() + maxReadBytes = statistics.TotalFlowBytes + srcStoreID = storeID + continue + } + + if maxHotStoreRegionCount == statistics.RegionsStat.Len() && maxReadBytes < statistics.TotalFlowBytes { + maxReadBytes = statistics.TotalFlowBytes + srcStoreID = storeID + } + } + if srcStoreID == 0 { + return nil, nil + } + + // select destPeer + for _, i := range h.r.Perm(h.statisticsAsLeader[srcStoreID].RegionsStat.Len()) { + rs := h.statisticsAsLeader[srcStoreID].RegionsStat[i] + srcRegion := cluster.GetRegion(rs.RegionID) + if len(srcRegion.DownPeers) != 0 || len(srcRegion.PendingPeers) != 0 { + continue + } + + destPeer := h.selectDestStoreByLeader(srcRegion) + if destPeer != nil { + h.adjustBalanceLimit(srcStoreID, byLeader) + return srcRegion, destPeer + } + } + return nil, nil +} + +func (h *balanceHotReadRegionsScheduler) selectDestStoreByLeader(srcRegion *core.RegionInfo) *metapb.Peer { + sr := h.statisticsAsLeader[srcRegion.Leader.GetStoreId()] + srcReadBytes := sr.TotalFlowBytes + srcHotRegionsCount := sr.RegionsStat.Len() + + var ( + destPeer *metapb.Peer + minReadBytes uint64 = math.MaxUint64 + ) + minRegionsCount := int(math.MaxInt32) + for storeID, peer := range srcRegion.GetFollowers() { + if s, ok := h.statisticsAsLeader[storeID]; ok { + if srcHotRegionsCount-s.RegionsStat.Len() > 1 && minRegionsCount > s.RegionsStat.Len() { + destPeer = peer + minReadBytes = s.TotalFlowBytes + minRegionsCount = s.RegionsStat.Len() + continue + } + if minRegionsCount == s.RegionsStat.Len() && minReadBytes > s.TotalFlowBytes && + uint64(float64(srcReadBytes)*hotRegionScheduleFactor) > s.TotalFlowBytes+2*srcRegion.ReadBytes { + minReadBytes = s.TotalFlowBytes + destPeer = peer + } + } else { + destPeer = peer + break + } + } + return destPeer +} + +func (h *balanceHotReadRegionsScheduler) GetStatus() *core.StoreHotRegionInfos { + h.RLock() + defer h.RUnlock() + asLeader := make(map[uint64]*core.HotRegionsStat, len(h.statisticsAsLeader)) + for id, stat := range h.statisticsAsLeader { + clone := *stat + asLeader[id] = &clone + } + return &core.StoreHotRegionInfos{ + AsLeader: asLeader, + } +} diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_write_region.go similarity index 77% rename from server/schedulers/hot_region.go rename to server/schedulers/hot_write_region.go index a97d6c1eace..598d74c396c 100644 --- a/server/schedulers/hot_region.go +++ b/server/schedulers/hot_write_region.go @@ -26,8 +26,8 @@ import ( ) func init() { - schedule.RegisterScheduler("hotRegion", func(opt schedule.Options, args []string) (schedule.Scheduler, error) { - return newBalanceHotRegionScheduler(opt), nil + schedule.RegisterScheduler("hotWriteRegion", func(opt schedule.Options, args []string) (schedule.Scheduler, error) { + return newBalanceHotWriteRegionScheduler(opt), nil }) } @@ -45,7 +45,7 @@ const ( byLeader 
diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_write_region.go
similarity index 77%
rename from server/schedulers/hot_region.go
rename to server/schedulers/hot_write_region.go
index a97d6c1eace..598d74c396c 100644
--- a/server/schedulers/hot_region.go
+++ b/server/schedulers/hot_write_region.go
@@ -26,8 +26,8 @@ import (
 )
 
 func init() {
-	schedule.RegisterScheduler("hotRegion", func(opt schedule.Options, args []string) (schedule.Scheduler, error) {
-		return newBalanceHotRegionScheduler(opt), nil
+	schedule.RegisterScheduler("hotWriteRegion", func(opt schedule.Options, args []string) (schedule.Scheduler, error) {
+		return newBalanceHotWriteRegionScheduler(opt), nil
 	})
 }
 
@@ -45,7 +45,7 @@ const (
 	byLeader
 )
 
-type balanceHotRegionScheduler struct {
+type balanceHotWriteRegionScheduler struct {
 	sync.RWMutex
 	opt   schedule.Options
 	limit uint64
@@ -57,10 +57,10 @@ type balanceHotRegionScheduler struct {
 	r *rand.Rand
 }
 
-// newBalanceHotRegionScheduler creates a scheduler that keeps hot regions on
+// newBalanceHotWriteRegionScheduler creates a scheduler that keeps hot regions on
 // each stores balanced.
-func newBalanceHotRegionScheduler(opt schedule.Options) schedule.Scheduler {
-	return &balanceHotRegionScheduler{
+func newBalanceHotWriteRegionScheduler(opt schedule.Options) schedule.Scheduler {
+	return &balanceHotWriteRegionScheduler{
 		opt:              opt,
 		limit:            1,
 		statisticsAsPeer: make(map[uint64]*core.HotRegionsStat),
@@ -69,23 +69,23 @@ func newBalanceHotRegionScheduler(opt schedule.Options) schedule.Scheduler {
 	}
 }
 
-func (h *balanceHotRegionScheduler) GetName() string {
-	return "balance-hot-region-scheduler"
+func (h *balanceHotWriteRegionScheduler) GetName() string {
+	return "balance-hot-write-region-scheduler"
 }
 
-func (h *balanceHotRegionScheduler) GetResourceKind() core.ResourceKind {
+func (h *balanceHotWriteRegionScheduler) GetResourceKind() core.ResourceKind {
 	return core.PriorityKind
 }
 
-func (h *balanceHotRegionScheduler) GetResourceLimit() uint64 {
+func (h *balanceHotWriteRegionScheduler) GetResourceLimit() uint64 {
 	return h.limit
 }
 
-func (h *balanceHotRegionScheduler) Prepare(cluster schedule.Cluster) error { return nil }
+func (h *balanceHotWriteRegionScheduler) Prepare(cluster schedule.Cluster) error { return nil }
 
-func (h *balanceHotRegionScheduler) Cleanup(cluster schedule.Cluster) {}
+func (h *balanceHotWriteRegionScheduler) Cleanup(cluster schedule.Cluster) {}
 
-func (h *balanceHotRegionScheduler) Schedule(cluster schedule.Cluster) *schedule.Operator {
+func (h *balanceHotWriteRegionScheduler) Schedule(cluster schedule.Cluster) *schedule.Operator {
 	schedulerCounter.WithLabelValues(h.GetName(), "schedule").Inc()
 	h.calcScore(cluster)
 
@@ -93,7 +93,7 @@ func (h *balanceHotRegionScheduler) Schedule(cluster schedule.Cluster) *schedule
 	srcRegion, srcPeer, destPeer := h.balanceByPeer(cluster)
 	if srcRegion != nil {
 		schedulerCounter.WithLabelValues(h.GetName(), "move_peer").Inc()
-		return schedule.CreateMovePeerOperator("moveHotRegion", srcRegion, core.PriorityKind, srcPeer.GetStoreId(), destPeer.GetStoreId(), destPeer.GetId())
+		return schedule.CreateMovePeerOperator("moveHotWriteRegion", srcRegion, core.PriorityKind, srcPeer.GetStoreId(), destPeer.GetStoreId(), destPeer.GetId())
 	}
 
 	// balance by leader
@@ -101,14 +101,14 @@ func (h *balanceHotRegionScheduler) Schedule(cluster schedule.Cluster) *schedule
 	if srcRegion != nil {
 		schedulerCounter.WithLabelValues(h.GetName(), "move_leader").Inc()
 		step := schedule.TransferLeader{FromStore: srcRegion.Leader.GetStoreId(), ToStore: newLeader.GetStoreId()}
-		return schedule.NewOperator("transferHotLeader", srcRegion.GetId(), core.PriorityKind, step)
+		return schedule.NewOperator("transferHotWriteLeader", srcRegion.GetId(), core.PriorityKind, step)
 	}
 
 	schedulerCounter.WithLabelValues(h.GetName(), "skip").Inc()
 	return nil
 }
 
-func (h *balanceHotRegionScheduler) calcScore(cluster schedule.Cluster) {
+func (h *balanceHotWriteRegionScheduler) calcScore(cluster schedule.Cluster) {
 	h.Lock()
 	defer h.Unlock()
 
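Schedule above first tries balanceByPeer (emitting a moveHotWriteRegion operator) and only falls back to balanceByLeader (a transferHotWriteLeader step). Both helpers pick the source store the same way: the store with the most hot regions wins, ties are broken by total flow bytes, and stores holding fewer than two hot regions are skipped so a lone hot region is never shuffled around. A self-contained sketch of that selection, using a hypothetical storeStat type in place of *core.HotRegionsStat:

package main

import "fmt"

// storeStat is a hypothetical stand-in for *core.HotRegionsStat.
type storeStat struct {
	hotRegions     int
	totalFlowBytes uint64
}

// pickSourceStore returns the store with the most hot regions, breaking ties
// by total flow bytes; stores with fewer than two hot regions are skipped.
// A return value of 0 means no candidate was found.
func pickSourceStore(stats map[uint64]storeStat) uint64 {
	var (
		srcStoreID   uint64
		maxCount     int
		maxFlowBytes uint64
	)
	for storeID, s := range stats {
		if s.hotRegions < 2 { // leave a store's last hot region alone
			continue
		}
		if s.hotRegions > maxCount ||
			(s.hotRegions == maxCount && s.totalFlowBytes > maxFlowBytes) {
			maxCount = s.hotRegions
			maxFlowBytes = s.totalFlowBytes
			srcStoreID = storeID
		}
	}
	return srcStoreID
}

func main() {
	stats := map[uint64]storeStat{
		1: {hotRegions: 5, totalFlowBytes: 512 << 20},
		2: {hotRegions: 5, totalFlowBytes: 768 << 20},
		3: {hotRegions: 1, totalFlowBytes: 900 << 20},
	}
	fmt.Println(pickSourceStore(stats)) // 2: same count as store 1, more flow
}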
@@ -141,19 +141,19 @@ func (h *balanceHotRegionScheduler) calcScore(cluster schedule.Cluster) {
 
 			stat := core.RegionStat{
 				RegionID:       r.RegionID,
-				WrittenBytes:   r.WrittenBytes,
+				FlowBytes:      r.FlowBytes,
 				HotDegree:      r.HotDegree,
 				LastUpdateTime: r.LastUpdateTime,
 				StoreID:        storeID,
 				AntiCount:      r.AntiCount,
 				Version:        r.Version,
 			}
-			peerStat.WrittenBytes += r.WrittenBytes
+			peerStat.TotalFlowBytes += r.FlowBytes
 			peerStat.RegionsCount++
 			peerStat.RegionsStat = append(peerStat.RegionsStat, stat)
 
 			if storeID == leaderStoreID {
-				leaderStat.WrittenBytes += r.WrittenBytes
+				leaderStat.TotalFlowBytes += r.FlowBytes
 				leaderStat.RegionsCount++
 				leaderStat.RegionsStat = append(leaderStat.RegionsStat, stat)
 			}
@@ -161,7 +161,7 @@ func (h *balanceHotRegionScheduler) calcScore(cluster schedule.Cluster) {
 		}
 	}
 }
 
-func (h *balanceHotRegionScheduler) balanceByPeer(cluster schedule.Cluster) (*core.RegionInfo, *metapb.Peer, *metapb.Peer) {
+func (h *balanceHotWriteRegionScheduler) balanceByPeer(cluster schedule.Cluster) (*core.RegionInfo, *metapb.Peer, *metapb.Peer) {
 	var (
 		maxWrittenBytes uint64
 		srcStoreID      uint64
@@ -170,7 +170,7 @@ func (h *balanceHotRegionScheduler) balanceByPeer(cluster schedule.Cluster) (*co
 
 	// get the srcStoreId
 	for storeID, statistics := range h.statisticsAsPeer {
-		count, writtenBytes := statistics.RegionsStat.Len(), statistics.WrittenBytes
+		count, writtenBytes := statistics.RegionsStat.Len(), statistics.TotalFlowBytes
 		if count >= 2 && (count > maxHotStoreRegionCount || (count == maxHotStoreRegionCount && writtenBytes > maxWrittenBytes)) {
 			maxHotStoreRegionCount = count
 			maxWrittenBytes = writtenBytes
@@ -206,7 +206,7 @@ func (h *balanceHotRegionScheduler) balanceByPeer(cluster schedule.Cluster) (*co
 
 		destStoreID = h.selectDestStoreByPeer(destStoreIDs, srcRegion, srcStoreID)
 		if destStoreID != 0 {
-			srcRegion.WrittenBytes = rs.WrittenBytes
+			srcRegion.WrittenBytes = rs.FlowBytes
 			h.adjustBalanceLimit(srcStoreID, byPeer)
 
 			var srcPeer *metapb.Peer
@@ -234,9 +234,9 @@ func (h *balanceHotRegionScheduler) balanceByPeer(cluster schedule.Cluster) (*co
 	return nil, nil, nil
 }
 
-func (h *balanceHotRegionScheduler) selectDestStoreByPeer(candidateStoreIDs []uint64, srcRegion *core.RegionInfo, srcStoreID uint64) uint64 {
+func (h *balanceHotWriteRegionScheduler) selectDestStoreByPeer(candidateStoreIDs []uint64, srcRegion *core.RegionInfo, srcStoreID uint64) uint64 {
 	sr := h.statisticsAsPeer[srcStoreID]
-	srcWrittenBytes := sr.WrittenBytes
+	srcWrittenBytes := sr.TotalFlowBytes
 	srcHotRegionsCount := sr.RegionsStat.Len()
 
 	var (
@@ -248,13 +248,13 @@ func (h *balanceHotRegionScheduler) selectDestStoreByPeer(candidateStoreIDs []ui
 		if s, ok := h.statisticsAsPeer[storeID]; ok {
 			if srcHotRegionsCount-s.RegionsStat.Len() > 1 && minRegionsCount > s.RegionsStat.Len() {
 				destStoreID = storeID
-				minWrittenBytes = s.WrittenBytes
+				minWrittenBytes = s.TotalFlowBytes
 				minRegionsCount = s.RegionsStat.Len()
 				continue
 			}
-			if minRegionsCount == s.RegionsStat.Len() && minWrittenBytes > s.WrittenBytes &&
-				uint64(float64(srcWrittenBytes)*hotRegionScheduleFactor) > s.WrittenBytes+2*srcRegion.WrittenBytes {
-				minWrittenBytes = s.WrittenBytes
+			if minRegionsCount == s.RegionsStat.Len() && minWrittenBytes > s.TotalFlowBytes &&
+				uint64(float64(srcWrittenBytes)*hotRegionScheduleFactor) > s.TotalFlowBytes+2*srcRegion.WrittenBytes {
+				minWrittenBytes = s.TotalFlowBytes
 				destStoreID = storeID
 			}
 		} else {
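In selectDestStoreByPeer above (and in selectDestStoreByLeader below), a candidate with an equally small hot-region count is accepted only if it stays clearly under the source's flow even after receiving the region, whose bytes are counted twice for headroom; hotRegionScheduleFactor discounts the source side. A sketch of just that guard, assuming the factor is 0.9 (its value is defined outside this diff):

package main

import "fmt"

// Assumed value; the real constant lives elsewhere in this package.
const hotRegionScheduleFactor = 0.9

// worthTransferring reports whether moving a hot region with regionFlowBytes
// from a source store carrying srcFlowBytes to a candidate carrying
// candFlowBytes is safe against oscillation: the candidate, even after
// receiving the region (counted twice for headroom), must stay below a
// discounted view of the source's flow.
func worthTransferring(srcFlowBytes, candFlowBytes, regionFlowBytes uint64) bool {
	return uint64(float64(srcFlowBytes)*hotRegionScheduleFactor) >
		candFlowBytes+2*regionFlowBytes
}

func main() {
	fmt.Println(worthTransferring(1000, 500, 100)) // true: 900 > 700
	fmt.Println(worthTransferring(1000, 800, 100)) // false: 900 <= 1000
}

Counting the incoming region's bytes twice biases the check against marginal transfers, which is what keeps two stores from trading the same hot region every round.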
@@ -265,7 +265,7 @@ func (h *balanceHotRegionScheduler) selectDestStoreByPeer(candidateStoreIDs []ui
 	return destStoreID
 }
 
-func (h *balanceHotRegionScheduler) adjustBalanceLimit(storeID uint64, t BalanceType) {
+func (h *balanceHotWriteRegionScheduler) adjustBalanceLimit(storeID uint64, t BalanceType) {
 	var srcStatistics *core.HotRegionsStat
 	var allStatistics map[uint64]*core.HotRegionsStat
 	switch t {
@@ -288,7 +288,7 @@ func (h *balanceHotRegionScheduler) adjustBalanceLimit(storeID uint64, t Balance
 	h.limit = maxUint64(1, limit)
 }
 
-func (h *balanceHotRegionScheduler) balanceByLeader(cluster schedule.Cluster) (*core.RegionInfo, *metapb.Peer) {
+func (h *balanceHotWriteRegionScheduler) balanceByLeader(cluster schedule.Cluster) (*core.RegionInfo, *metapb.Peer) {
 	var (
 		maxWrittenBytes uint64
 		srcStoreID      uint64
@@ -303,13 +303,13 @@ func (h *balanceHotRegionScheduler) balanceByLeader(cluster schedule.Cluster) (*
 
 		if maxHotStoreRegionCount < statistics.RegionsStat.Len() {
 			maxHotStoreRegionCount = statistics.RegionsStat.Len()
-			maxWrittenBytes = statistics.WrittenBytes
+			maxWrittenBytes = statistics.TotalFlowBytes
 			srcStoreID = storeID
 			continue
 		}
 
-		if maxHotStoreRegionCount == statistics.RegionsStat.Len() && maxWrittenBytes < statistics.WrittenBytes {
-			maxWrittenBytes = statistics.WrittenBytes
+		if maxHotStoreRegionCount == statistics.RegionsStat.Len() && maxWrittenBytes < statistics.TotalFlowBytes {
+			maxWrittenBytes = statistics.TotalFlowBytes
 			srcStoreID = storeID
 		}
 	}
@@ -334,9 +334,9 @@ func (h *balanceHotRegionScheduler) balanceByLeader(cluster schedule.Cluster) (*
 	return nil, nil
 }
 
-func (h *balanceHotRegionScheduler) selectDestStoreByLeader(srcRegion *core.RegionInfo) *metapb.Peer {
+func (h *balanceHotWriteRegionScheduler) selectDestStoreByLeader(srcRegion *core.RegionInfo) *metapb.Peer {
 	sr := h.statisticsAsLeader[srcRegion.Leader.GetStoreId()]
-	srcWrittenBytes := sr.WrittenBytes
+	srcWrittenBytes := sr.TotalFlowBytes
 	srcHotRegionsCount := sr.RegionsStat.Len()
 
 	var (
@@ -348,13 +348,13 @@ func (h *balanceHotRegionScheduler) selectDestStoreByLeader(srcRegion *core.Regi
 		if s, ok := h.statisticsAsLeader[storeID]; ok {
 			if srcHotRegionsCount-s.RegionsStat.Len() > 1 && minRegionsCount > s.RegionsStat.Len() {
 				destPeer = peer
-				minWrittenBytes = s.WrittenBytes
+				minWrittenBytes = s.TotalFlowBytes
 				minRegionsCount = s.RegionsStat.Len()
 				continue
 			}
-			if minRegionsCount == s.RegionsStat.Len() && minWrittenBytes > s.WrittenBytes &&
-				uint64(float64(srcWrittenBytes)*hotRegionScheduleFactor) > s.WrittenBytes+2*srcRegion.WrittenBytes {
-				minWrittenBytes = s.WrittenBytes
+			if minRegionsCount == s.RegionsStat.Len() && minWrittenBytes > s.TotalFlowBytes &&
+				uint64(float64(srcWrittenBytes)*hotRegionScheduleFactor) > s.TotalFlowBytes+2*srcRegion.WrittenBytes {
+				minWrittenBytes = s.TotalFlowBytes
 				destPeer = peer
 			}
 		} else {
@@ -365,7 +365,7 @@ func (h *balanceHotRegionScheduler) selectDestStoreByLeader(srcRegion *core.Regi
 	return destPeer
 }
 
-func (h *balanceHotRegionScheduler) GetStatus() *core.StoreHotRegionInfos {
+func (h *balanceHotWriteRegionScheduler) GetStatus() *core.StoreHotRegionInfos {
 	h.RLock()
 	defer h.RUnlock()
 	asPeer := make(map[uint64]*core.HotRegionsStat, len(h.statisticsAsPeer))