Skip to content
Permalink
Browse files
Merge pull request #1392 from kgreav/12510
Test decommission failure when N <= replication factor
  • Loading branch information
pauloricardomg committed Dec 23, 2016
2 parents 928f224 + dbad2f7 commit 58c2d7fbe6d5c7e98f8e2fe8d78934e8786fc57f
Showing 3 changed files with 45 additions and 0 deletions.
@@ -535,10 +535,16 @@ def decommissioned_wiped_node_can_gossip_to_single_seed_test(self):
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)

node1 = cluster.nodelist()[0]
# Add a new node, bootstrap=True ensures that it is not a seed
node2 = new_node(cluster, bootstrap=True)
node2.start(wait_for_binary_proto=True, wait_other_notice=True)

session = self.patient_cql_connection(node1)
# reduce system_distributed RF to 2 so we don't require forceful decommission
session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")
session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")

# Decommision the new node and kill it
debug("Decommissioning & stopping node2")
node2.decommission()
@@ -235,6 +235,11 @@ def add_and_remove_node_test(self):
waiter.wait_for_notifications(timeout=30, num_notifications=2)
waiter.clear_notifications()

session = self.patient_cql_connection(node1)
# reduce system_distributed RF to 2 so we don't require forceful decommission
session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")
session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};")

debug("Adding second node...")
node2 = Node('node2', self.cluster, True, None, ('127.0.0.2', 7000), '7200', '0', None, binary_interface=('127.0.0.2', 9042))
self.cluster.add(node2, False)
@@ -5,6 +5,7 @@

from cassandra import ConsistencyLevel
from ccmlib.node import TimeoutError, ToolError
from nose.plugins.attrib import attr

from dtest import Tester, debug, create_ks, create_cf
from tools.assertions import assert_almost_equal
@@ -40,6 +41,10 @@ def simple_decommission_test(self):
cluster.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.size_recorder_interval=1"])
node1, node2, node3 = cluster.nodelist()

session = self.patient_cql_connection(node1)
# reduce system_distributed RF to 2 so we don't require forceful decommission
session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")

# write some data
node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])

@@ -104,6 +109,8 @@ def resumable_decommission_test(self):
node1, node2, node3 = cluster.nodelist()

session = self.patient_cql_connection(node2)
# reduce system_distributed RF to 2 so we don't require forceful decommission
session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
create_ks(session, 'ks', 2)
create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)
@@ -336,6 +343,33 @@ def crash_during_decommission_test(self):
out = self.show_status(node2)
self.assertFalse(null_status_pattern.search(out))

@since('3.12')
@attr('resource-intensive')
def stop_decommission_too_few_replicas_multi_dc_test(self):
    """
    Decommission should fail when it would result in the number of live replicas being less than
    the replication factor. --force should bypass this requirement.
    @jira_ticket CASSANDRA-12510
    @expected_errors ToolError when # nodes will drop below configured replicas in NTS/SimpleStrategy
    """
    cluster = self.cluster
    # Two datacenters with two nodes each: node1/node2 in dc1, node3/node4 in dc2.
    cluster.populate([2, 2]).start(wait_for_binary_proto=True)
    node1, node2, node3, node4 = self.cluster.nodelist()
    session = self.patient_cql_connection(node2)
    # Lower system_distributed RF to 2 so that keyspace itself does not force a
    # forceful decommission; the failures below should come from 'ks'/'ks2' only.
    session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
    # NetworkTopologyStrategy case: RF 2 per DC, but dc2 only has 2 nodes, so
    # removing node4 would leave dc2 with fewer live nodes than its RF.
    create_ks(session, 'ks', {'dc1': 2, 'dc2': 2})
    with self.assertRaises(ToolError):
        node4.nodetool('decommission')

    # SimpleStrategy case: RF 4 on a 4-node cluster — decommission would drop
    # the cluster below the replication factor, so it must also be refused.
    session.execute('DROP KEYSPACE ks')
    create_ks(session, 'ks2', 4)
    with self.assertRaises(ToolError):
        node4.nodetool('decommission')

    # --force bypasses the replica-count safety check and must succeed.
    node4.nodetool('decommission --force')
    decommissioned = node4.watch_log_for("DECOMMISSIONED", timeout=120)
    self.assertTrue(decommissioned, "Node failed to decommission when passed --force")

def show_status(self, node):
out, _, _ = node.nodetool('status')
debug("Status as reported by node {}".format(node.address()))

0 comments on commit 58c2d7f

Please sign in to comment.