*: more test code fixes + cleanups
Release note (<category, see below>): <what> <show> <why>
irfansharif committed Jun 16, 2020
1 parent b710b86 commit 251678f
Showing 10 changed files with 185 additions and 215 deletions.
12 changes: 6 additions & 6 deletions pkg/cli/haproxy_test.go
@@ -124,12 +124,12 @@ func TestNodeStatusToNodeInfoConversion(t *testing.T) {
{Desc: roachpb.NodeDescriptor{NodeID: 6}},
},
LivenessByNodeID: map[roachpb.NodeID]kvserverpb.NodeLivenessStatus{
1: kvserverpb.NodeLivenessStatus_DEAD,
2: kvserverpb.NodeLivenessStatus_DECOMMISSIONING,
3: kvserverpb.NodeLivenessStatus_UNKNOWN,
4: kvserverpb.NodeLivenessStatus_UNAVAILABLE,
5: kvserverpb.NodeLivenessStatus_LIVE,
6: kvserverpb.NodeLivenessStatus_DECOMMISSIONED,
1: kvserverpb.NodeLivenessStatus_DEPRECATED_DEAD,
2: kvserverpb.NodeLivenessStatus_DEPRECATED_DECOMMISSIONING,
3: kvserverpb.NodeLivenessStatus_DEPRECATED_UNKNOWN,
4: kvserverpb.NodeLivenessStatus_DEPRECATED_UNAVAILABLE,
5: kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE,
6: kvserverpb.NodeLivenessStatus_DEPRECATED_DECOMMISSIONED,
},
},
[]haProxyNodeInfo{
13 changes: 6 additions & 7 deletions pkg/cli/node.go
@@ -359,8 +359,6 @@ func runDecommissionNodeImpl(
replicaCount += status.ReplicaCount
allDecommissioning = allDecommissioning && status.CommissionStatus.Decommissioning() ||
status.CommissionStatus.Decommissioned()
// XXX: Write tests for recommissioning only canceling out extant
// decommissioning attempts, and no more.
}
if replicaCount == 0 {
// We now mark the node as fully decommissioned.
@@ -373,11 +371,12 @@ func runDecommissionNodeImpl(
fmt.Fprintln(stderr)
return errors.Wrap(err, "while trying to mark as decommissioned")
}
// We print out the final commission_status stating
// `decommissioned`. This is checked in tests.
fmt.Fprintln(stderr)
if err := printDecommissionStatus(*resp); err != nil {
return err
if !reflect.DeepEqual(&prevResponse, resp) {
fmt.Fprintln(stderr)
if err := printDecommissionStatus(*resp); err != nil {
return err
}
prevResponse = *resp
}
}
if replicaCount == 0 && allDecommissioning {
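The rewritten loop in node.go above stops reprinting identical status tables: it remembers the previously printed response and only prints again when the new one differs. A minimal, self-contained sketch of that dedup pattern — the `response` type and field names here are hypothetical stand-ins, not the actual serverpb ones:

```go
package main

import (
	"fmt"
	"reflect"
)

// response is a hypothetical stand-in for the decommission status response
// proto that the real loop in node.go compares.
type response struct {
	NodeID       int
	ReplicaCount int
	Draining     bool
}

// printIfChanged prints a status line only when the response differs from
// the last one printed, so a polling loop doesn't spam identical rows.
func printIfChanged(prev *response, cur response) {
	if !reflect.DeepEqual(*prev, cur) {
		fmt.Printf("n%d: replicas=%d draining=%t\n", cur.NodeID, cur.ReplicaCount, cur.Draining)
		*prev = cur
	}
}

func main() {
	var prev response
	for _, cur := range []response{
		{NodeID: 2, ReplicaCount: 12, Draining: true},
		{NodeID: 2, ReplicaCount: 12, Draining: true}, // unchanged: suppressed
		{NodeID: 2, ReplicaCount: 0, Draining: true},  // changed: printed
	} {
		printIfChanged(&prev, cur)
	}
}
```

Since the final `decommissioned` status is itself a change from the prior response, it presumably still gets printed, which is what the removed comment ("This is checked in tests") relied on.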
22 changes: 22 additions & 0 deletions pkg/cmd/roachtest/decommission.go
@@ -260,6 +260,26 @@ func execCLI(
func runDecommissionAcceptance(ctx context.Context, t *test, c *cluster) {
t.Skip("TODO(irfansharif)", "Rewrite this test to incorporate new recommission semantics")

// XXX: Write tests for recommissioning only canceling out extant
// decommissioning attempts, and no more.

// XXX: Test all the error conditions for setCommissionStatusInternal.

// XXX: Add a test for the following scenario.
// - Decommission n2 from n1, where n2 has no liveness record. n1 is
// running v20.2.
// - n2 is running v20.1 and tries to update its liveness record. It finds
// its own liveness record written by n1 (but using the new proto, which it
// doesn't know how to read fully). Will it parse properly? It's important
// for v20.2 nodes to be able to read both v20.1 and v20.2 representations,
// and for v20.2 to write a representation understood by both v20.1 and v20.2.

// XXX: What's the guarantee that v21.1 nodes will never see the v20.1
// representation? A: Heartbeats are only ever sent out by live nodes, and
// operators aren't allowed to deploy clusters such that there's a v20.1
// node heartbeating a v21.1 cluster. Still, can we foolproof this somehow?

args := startArgs("--env=COCKROACH_SCAN_MAX_IDLE_TIME=5ms")
c.Put(ctx, cockroach, "./cockroach")
c.Start(ctx, t, args)
@@ -376,6 +396,8 @@ func runDecommissionAcceptance(ctx context.Context, t *test, c *cluster) {
return res
}

// XXX: Write tests for what gets inserted into the event log, and what doesn't.

// XXX: In this test we're able to issue decommission commands through nodes
// that have been fully decommissioned.

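The XXX notes above pin down semantics the skipped test should eventually cover. One possible reading of the recommission rule — it cancels an extant decommissioning attempt and nothing more, so a fully decommissioned node stays that way — sketched as a pure transition function (the type and constant names are invented stand-ins, not the kvserverpb ones):

```go
package main

import "fmt"

// commissionStatus is a hypothetical stand-in for the kvserverpb enum.
type commissionStatus int

const (
	active commissionStatus = iota
	decommissioning
	decommissioned
)

func (c commissionStatus) String() string {
	return [...]string{"active", "decommissioning", "decommissioned"}[c]
}

// recommission encodes one reading of the intended rule: it cancels an
// extant decommissioning attempt, is a no-op on an active node, and must
// refuse to resurrect a fully decommissioned one.
func recommission(cur commissionStatus) (commissionStatus, error) {
	switch cur {
	case decommissioning:
		return active, nil // cancel the in-flight attempt
	case decommissioned:
		return cur, fmt.Errorf("node is decommissioned; recommissioning is not allowed")
	default:
		return cur, nil // already active: nothing to cancel
	}
}

func main() {
	for _, s := range []commissionStatus{active, decommissioning, decommissioned} {
		next, err := recommission(s)
		fmt.Println(s, "->", next, err)
	}
}
```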
40 changes: 20 additions & 20 deletions pkg/kv/kvserver/allocator_test.go
@@ -309,7 +309,7 @@ func createTestAllocator(
stopper, g, manual, storePool, _ := createTestStorePool(
TestTimeUntilStoreDeadOff, deterministic,
func() int { return numNodes },
kvserverpb.NodeLivenessStatus_LIVE)
kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE)
a := MakeAllocator(storePool, func(string) (time.Duration, bool) {
return 0, true
})
@@ -332,39 +332,39 @@ func mockStorePool(
liveNodeSet := map[roachpb.NodeID]kvserverpb.NodeLivenessStatus{}
storePool.detailsMu.storeDetails = map[roachpb.StoreID]*storeDetail{}
for _, storeID := range aliveStoreIDs {
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_LIVE
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE
detail := storePool.getStoreDetailLocked(storeID)
detail.desc = &roachpb.StoreDescriptor{
StoreID: storeID,
Node: roachpb.NodeDescriptor{NodeID: roachpb.NodeID(storeID)},
}
}
for _, storeID := range unavailableStoreIDs {
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_UNAVAILABLE
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_DEPRECATED_UNAVAILABLE
detail := storePool.getStoreDetailLocked(storeID)
detail.desc = &roachpb.StoreDescriptor{
StoreID: storeID,
Node: roachpb.NodeDescriptor{NodeID: roachpb.NodeID(storeID)},
}
}
for _, storeID := range deadStoreIDs {
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_DEAD
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_DEPRECATED_DEAD
detail := storePool.getStoreDetailLocked(storeID)
detail.desc = &roachpb.StoreDescriptor{
StoreID: storeID,
Node: roachpb.NodeDescriptor{NodeID: roachpb.NodeID(storeID)},
}
}
for _, storeID := range decommissioningStoreIDs {
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_DECOMMISSIONING
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_DEPRECATED_DECOMMISSIONING
detail := storePool.getStoreDetailLocked(storeID)
detail.desc = &roachpb.StoreDescriptor{
StoreID: storeID,
Node: roachpb.NodeDescriptor{NodeID: roachpb.NodeID(storeID)},
}
}
for _, storeID := range decommissionedStoreIDs {
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_DECOMMISSIONED
liveNodeSet[roachpb.NodeID(storeID)] = kvserverpb.NodeLivenessStatus_DEPRECATED_DECOMMISSIONED
detail := storePool.getStoreDetailLocked(storeID)
detail.desc = &roachpb.StoreDescriptor{
StoreID: storeID,
@@ -378,7 +378,7 @@ func mockStorePool(
if status, ok := liveNodeSet[nodeID]; ok {
return status
}
return kvserverpb.NodeLivenessStatus_UNAVAILABLE
return kvserverpb.NodeLivenessStatus_DEPRECATED_UNAVAILABLE
}
}

@@ -1230,7 +1230,7 @@ func TestAllocatorTransferLeaseTargetDraining(t *testing.T) {
stopper, g, _, storePool, nl := createTestStorePool(
TestTimeUntilStoreDeadOff, true, /* deterministic */
func() int { return 10 }, /* nodeCount */
kvserverpb.NodeLivenessStatus_LIVE)
kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE)
a := MakeAllocator(storePool, func(string) (time.Duration, bool) {
return 0, true
})
@@ -1250,7 +1250,7 @@ func TestAllocatorTransferLeaseTargetDraining(t *testing.T) {
sg.GossipStores(stores, t)

// UNAVAILABLE is the node liveness status used for a node that's draining.
nl.setNodeStatus(1, kvserverpb.NodeLivenessStatus_UNAVAILABLE)
nl.setNodeStatus(1, kvserverpb.NodeLivenessStatus_DEPRECATED_UNAVAILABLE)

existing := []roachpb.ReplicaDescriptor{
{StoreID: 1},
@@ -1625,7 +1625,7 @@ func TestAllocatorShouldTransferLeaseDraining(t *testing.T) {
stopper, g, _, storePool, nl := createTestStorePool(
TestTimeUntilStoreDeadOff, true, /* deterministic */
func() int { return 10 }, /* nodeCount */
kvserverpb.NodeLivenessStatus_LIVE)
kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE)
a := MakeAllocator(storePool, func(string) (time.Duration, bool) {
return 0, true
})
@@ -1645,7 +1645,7 @@ func TestAllocatorShouldTransferLeaseDraining(t *testing.T) {
sg.GossipStores(stores, t)

// UNAVAILABLE is the node liveness status used for a node that's draining.
nl.setNodeStatus(1, kvserverpb.NodeLivenessStatus_UNAVAILABLE)
nl.setNodeStatus(1, kvserverpb.NodeLivenessStatus_DEPRECATED_UNAVAILABLE)

testCases := []struct {
leaseholder roachpb.StoreID
@@ -3608,7 +3608,7 @@ func TestAllocatorTransferLeaseTargetLoadBased(t *testing.T) {
stopper, g, _, storePool, _ := createTestStorePool(
TestTimeUntilStoreDeadOff, true, /* deterministic */
func() int { return 10 }, /* nodeCount */
kvserverpb.NodeLivenessStatus_LIVE)
kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE)
defer stopper.Stop(context.Background())

// 3 stores where the lease count for each store is equal to 10x the store ID.
@@ -4971,7 +4971,7 @@ func TestAllocatorComputeActionDynamicNumReplicas(t *testing.T) {
stopper, _, _, sp, _ := createTestStorePool(
TestTimeUntilStoreDeadOff, false, /* deterministic */
func() int { return numNodes },
kvserverpb.NodeLivenessStatus_LIVE)
kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE)
a := MakeAllocator(sp, func(string) (time.Duration, bool) {
return 0, true
})
@@ -5586,7 +5586,7 @@ func TestAllocatorFullDisks(t *testing.T) {
const capacity = (1 << 30) + 1
const rangeSize = 16 << 20

mockNodeLiveness := newMockNodeLiveness(kvserverpb.NodeLivenessStatus_LIVE)
mockNodeLiveness := newMockNodeLiveness(kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE)
sp := NewStorePool(
log.AmbientContext{Tracer: st.Tracer},
st,
@@ -5628,7 +5628,7 @@ func TestAllocatorFullDisks(t *testing.T) {
for i := 0; i < generations; i++ {
// First loop through test stores and randomly add data.
for j := 0; j < len(testStores); j++ {
if mockNodeLiveness.nodeLivenessFunc(roachpb.NodeID(j), time.Time{}, 0) == kvserverpb.NodeLivenessStatus_DEAD {
if mockNodeLiveness.nodeLivenessFunc(roachpb.NodeID(j), time.Time{}, 0) == kvserverpb.NodeLivenessStatus_DEPRECATED_DEAD {
continue
}
ts := &testStores[j]
@@ -5643,7 +5643,7 @@ func TestAllocatorFullDisks(t *testing.T) {
if ts.Capacity.Available <= 0 {
t.Errorf("testStore %d ran out of space during generation %d (rangesAdded=%d/%d): %+v",
j, i, rangesAdded, rangesToAdd, ts.Capacity)
mockNodeLiveness.setNodeStatus(roachpb.NodeID(j), kvserverpb.NodeLivenessStatus_DEAD)
mockNodeLiveness.setNodeStatus(roachpb.NodeID(j), kvserverpb.NodeLivenessStatus_DEPRECATED_DEAD)
}
wg.Add(1)
if err := g.AddInfoProto(gossip.MakeStoreKey(roachpb.StoreID(j)), &ts.StoreDescriptor, 0); err != nil {
@@ -5655,7 +5655,7 @@ func TestAllocatorFullDisks(t *testing.T) {
// Loop through each store a number of times and maybe rebalance.
for j := 0; j < 10; j++ {
for k := 0; k < len(testStores); k++ {
if mockNodeLiveness.nodeLivenessFunc(roachpb.NodeID(k), time.Time{}, 0) == kvserverpb.NodeLivenessStatus_DEAD {
if mockNodeLiveness.nodeLivenessFunc(roachpb.NodeID(k), time.Time{}, 0) == kvserverpb.NodeLivenessStatus_DEPRECATED_DEAD {
continue
}
ts := &testStores[k]
@@ -5689,11 +5689,11 @@ func TestAllocatorFullDisks(t *testing.T) {

// Simulate rocksdb compactions freeing up disk space.
for j := 0; j < len(testStores); j++ {
if mockNodeLiveness.nodeLivenessFunc(roachpb.NodeID(j), time.Time{}, 0) != kvserverpb.NodeLivenessStatus_DEAD {
if mockNodeLiveness.nodeLivenessFunc(roachpb.NodeID(j), time.Time{}, 0) != kvserverpb.NodeLivenessStatus_DEPRECATED_DEAD {
ts := &testStores[j]
if ts.Capacity.Available <= 0 {
t.Errorf("testStore %d ran out of space during generation %d: %+v", j, i, ts.Capacity)
mockNodeLiveness.setNodeStatus(roachpb.NodeID(j), kvserverpb.NodeLivenessStatus_DEAD)
mockNodeLiveness.setNodeStatus(roachpb.NodeID(j), kvserverpb.NodeLivenessStatus_DEPRECATED_DEAD)
} else {
ts.compact()
}
@@ -5737,7 +5737,7 @@ func Example_rebalancing() {
func() int {
return nodes
},
newMockNodeLiveness(kvserverpb.NodeLivenessStatus_LIVE).nodeLivenessFunc,
newMockNodeLiveness(kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE).nodeLivenessFunc,
/* deterministic */ true,
)
alloc := MakeAllocator(sp, func(string) (time.Duration, bool) {
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/helpers_test.go
@@ -239,7 +239,7 @@ func NewTestStorePool(cfg StoreConfig) *StorePool {
return 1
},
func(roachpb.NodeID, time.Time, time.Duration) kvserverpb.NodeLivenessStatus {
return kvserverpb.NodeLivenessStatus_LIVE
return kvserverpb.NodeLivenessStatus_DEPRECATED_LIVE
},
/* deterministic */ false,
)
14 changes: 8 additions & 6 deletions pkg/kv/kvserver/kvserverpb/liveness.go
@@ -58,12 +58,14 @@ func (l *Liveness) String() string {
// v20.2 we introduced a dedicated enum to be able to disambiguate between the
// two. That being said, v20.2 nodes need to be able to operate in mixed
// clusters with v20.1 nodes, which only know how to interpret the boolean
// representation. EnsureCompatible is able to reconcile across both
// representations by mutating the receiver such that it's understood by both
// v20.1 and v20.2 nodes (See AssertValid for what this entails).
// If the receiver object is clearly one generated from a v20.1 node, we
// consider the deprecated boolean representation as the authoritative one. We
// consider the enum state authoritative if not.
// representation.
//
// EnsureCompatible is able to reconcile across both representations by mutating
// the receiver such that it's understood by both v20.1 and v20.2 nodes (See
// AssertValid for what this entails). If the receiver object is clearly one
// generated from a v20.1 node, we consider the deprecated boolean
// representation as the authoritative one. We consider the enum state
// authoritative if not.
//
// TODO(irfansharif): Remove this once v20.2 is cut.
func (l *Liveness) EnsureCompatible() {
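A minimal sketch of the reconciliation the EnsureCompatible comment describes, assuming the liveness record carries a deprecated boolean alongside the newer enum (the field, type, and constant names here are invented stand-ins for the actual kvserverpb definitions):

```go
package main

import "fmt"

// commissionStatus is a hypothetical stand-in for the kvserverpb enum.
type commissionStatus int

const (
	statusUnknown commissionStatus = iota
	statusActive
	statusDecommissioning
	statusDecommissioned
)

// liveness carries both representations during the v20.1/v20.2 transition:
// the deprecated boolean understood by v20.1, and the richer enum.
type liveness struct {
	DeprecatedDecommissioning bool
	CommissionStatus          commissionStatus
}

// ensureCompatible mirrors the doc comment above: a record clearly written
// by a v20.1 node (enum unset) treats the boolean as authoritative and
// backfills the enum from it; otherwise the enum wins, and the boolean is
// rewritten so v20.1 readers still interpret the record correctly.
func (l *liveness) ensureCompatible() {
	if l.CommissionStatus == statusUnknown {
		// v20.1-written record: promote the boolean into the enum.
		if l.DeprecatedDecommissioning {
			l.CommissionStatus = statusDecommissioning
		} else {
			l.CommissionStatus = statusActive
		}
	}
	// Keep the boolean in sync for v20.1 readers, for whom decommissioning
	// and decommissioned collapse into a single flag.
	l.DeprecatedDecommissioning = l.CommissionStatus == statusDecommissioning ||
		l.CommissionStatus == statusDecommissioned
}

func main() {
	l := liveness{DeprecatedDecommissioning: true} // as written by a v20.1 node
	l.ensureCompatible()
	fmt.Printf("%+v\n", l)
}
```

Collapsing decommissioning and decommissioned into one boolean for v20.1 readers loses information, which is exactly why the record has to be round-tripped carefully in mixed clusters.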