Skip to content
Permalink
Browse files
remove redundant params wait_for_binary_proto=True and wait_other_notice=True from Cluster.start calls

Since riptano/ccm#561 ccm Cluster.start() defaults to:
wait_for_binary_proto=True
wait_other_notice=True

Since their presence could suggest that these are non-default values, we
clean up the code by removing them.

patch by Christopher Lambert; reviewed by Mick Semb Wever
  • Loading branch information
Christopher Lambert authored and michaelsembwever committed Sep 23, 2020
1 parent 3f34166 commit 4acae58e0c0bbe665f5a1c38aa24a7b4f4ad06d1
Show file tree
Hide file tree
Showing 61 changed files with 236 additions and 244 deletions.
@@ -101,7 +101,6 @@ JAVA7_HOME and JAVA8_HOME, respectively.
Writing Tests
-------------

- Most of the time when you start a cluster with `cluster.start()`, you'll want to pass in `wait_for_binary_proto=True` so the call blocks until the cluster is ready to accept CQL connections. We tried setting this to `True` by default once, but the problems caused there (e.g. when it waited the full timeout time on a node that was deliberately down) were more unpleasant and more difficult to debug than the problems caused by having it `False` by default.
- If you're using JMX via [the `tools.jmxutils` module](tools/jmxutils.py), make sure to call `remove_perf_disable_shared_mem` on the node or nodes you want to query with JMX _before starting the nodes_. `remove_perf_disable_shared_mem` disables a JVM option that's incompatible with JMX (see [this JMX ticket](https://github.com/rhuss/jolokia/issues/198)). It works by performing a string replacement in the node's Cassandra startup script, so changes will only propagate to the node at startup time.

If you'd like to know what to expect during a code review, please see the included [CONTRIBUTING file](CONTRIBUTING.md).
@@ -24,7 +24,7 @@ def test_archiving(self):
'audit_logs_dir': log_dir,
'roll_cycle': 'TEST_SECONDLY',
'archive_command':'%s %%path'%(move_script)}})
cluster.populate(1).start(wait_for_binary_proto=True)
cluster.populate(1).start()
node = cluster.nodelist()[0]
node.stress(['write', 'n=100k', "no-warmup", "cl=ONE", "-rate", "threads=300"])
node.nodetool("disableauditlog")
@@ -38,7 +38,7 @@ def test_fql_nodetool_options(self):
moved_log_dir, move_script = self._create_script()
cluster.set_configuration_options(values={'full_query_logging_options': {'log_dir': log_dir,
'archive_command': 'conf should not be used'}})
cluster.populate(1).start(wait_for_binary_proto=True)
cluster.populate(1).start()
node = cluster.nodelist()[0]
node.nodetool("enablefullquerylog --archive-command \"%s %%path\" --roll-cycle=TEST_SECONDLY"%move_script)
node.stress(['write', 'n=100k', "no-warmup", "cl=ONE", "-rate", "threads=300"])
@@ -58,7 +58,7 @@ def test_archiving_fql(self):
cluster.set_configuration_options(values={'full_query_logging_options': {'log_dir': log_dir,
'roll_cycle': 'TEST_SECONDLY',
'archive_command':'%s %%path'%(move_script)}})
cluster.populate(1).start(wait_for_binary_proto=True)
cluster.populate(1).start()
node = cluster.nodelist()[0]
node.nodetool("enablefullquerylog")
node.stress(['write', 'n=100k', "no-warmup", "cl=ONE", "-rate", "threads=300"])
@@ -87,7 +87,7 @@ def test_archive_on_startup(self):
cluster.set_configuration_options(values={'full_query_logging_options': {'log_dir': log_dir,
'roll_cycle': 'TEST_SECONDLY',
'archive_command':'%s %%path'%(move_script)}})
cluster.populate(1).start(wait_for_binary_proto=True)
cluster.populate(1).start()
node = cluster.nodelist()[0]
node.nodetool("enablefullquerylog")

@@ -104,7 +104,7 @@ def test_archive_on_shutdown(self):
cluster.set_configuration_options(values={'full_query_logging_options': {'log_dir': log_dir,
'roll_cycle': 'TEST_SECONDLY',
'archive_command':'%s %%path'%(move_script)}})
cluster.populate(1).start(wait_for_binary_proto=True)
cluster.populate(1).start()
node = cluster.nodelist()[0]
node.nodetool("enablefullquerylog")

@@ -208,7 +208,7 @@ def prepare(self, nodes=1, roles_expiry=0):
'permissions_validity_in_ms': 0,
'roles_validity_in_ms': roles_expiry}
self.cluster.set_configuration_options(values=config)
self.cluster.populate(nodes).start(wait_for_binary_proto=True)
self.cluster.populate(nodes).start()

self.cluster.wait_for_any_log('Created default superuser', 25)

@@ -76,7 +76,7 @@ def test_system_auth_ks_is_alterable(self):
logger.debug("Stopping cluster..")
self.cluster.stop()
logger.debug("Restarting cluster..")
self.cluster.start(wait_other_notice=True)
self.cluster.start()

# check each node directly
for i in range(3):
@@ -1026,15 +1026,15 @@ def test_restart_node_doesnt_lose_auth_data(self):
if self.dtest_config.cassandra_version_from_build >= '4.0':
config['network_authorizer'] = 'org.apache.cassandra.auth.AllowAllNetworkAuthorizer'
self.cluster.set_configuration_options(values=config)
self.cluster.start(wait_for_binary_proto=True)
self.cluster.start()

self.cluster.stop()
config = {'authenticator': 'org.apache.cassandra.auth.PasswordAuthenticator',
'authorizer': 'org.apache.cassandra.auth.CassandraAuthorizer'}
if self.dtest_config.cassandra_version_from_build >= '4.0':
config['network_authorizer'] = 'org.apache.cassandra.auth.CassandraNetworkAuthorizer'
self.cluster.set_configuration_options(values=config)
self.cluster.start(wait_for_binary_proto=True)
self.cluster.start()

philip = self.get_session(user='philip', password='strongpass')
cathy = self.get_session(user='cathy', password='12345')
@@ -1057,7 +1057,7 @@ def test_auth_metrics(self):
cluster.populate(1)
[node] = cluster.nodelist()
remove_perf_disable_shared_mem(node)
cluster.start(wait_for_binary_proto=True)
cluster.start()

with JolokiaAgent(node) as jmx:
success = jmx.read_attribute(
@@ -1173,7 +1173,7 @@ def fixture_setup_auth(self, fixture_dtest_setup):
'roles_validity_in_ms': 0,
'num_tokens': 1
})
fixture_dtest_setup.cluster.populate(1, debug=True).start(wait_for_binary_proto=True, jvm_args=['-XX:-PerfDisableSharedMem'])
fixture_dtest_setup.cluster.populate(1, debug=True).start(jvm_args=['-XX:-PerfDisableSharedMem'])
nodes = fixture_dtest_setup.cluster.nodelist()
fixture_dtest_setup.superuser = fixture_dtest_setup.patient_exclusive_cql_connection(nodes[0], user='cassandra', password='cassandra')

@@ -2695,7 +2695,7 @@ def prepare(self, nodes=1, roles_expiry=0):
config['network_authorizer'] = 'org.apache.cassandra.auth.CassandraNetworkAuthorizer'

self.cluster.set_configuration_options(values=config)
self.cluster.populate(nodes).start(wait_for_binary_proto=True)
self.cluster.populate(nodes).start()

self.cluster.wait_for_any_log('Created default superuser', 25)

@@ -3040,7 +3040,7 @@ def fixture_setup_auth(self, fixture_dtest_setup):
'network_authorizer': 'org.apache.cassandra.auth.CassandraNetworkAuthorizer',
'num_tokens': 1
})
fixture_dtest_setup.cluster.populate([1, 1], debug=True).start(wait_for_binary_proto=True, jvm_args=['-XX:-PerfDisableSharedMem'])
fixture_dtest_setup.cluster.populate([1, 1], debug=True).start(jvm_args=['-XX:-PerfDisableSharedMem'])
fixture_dtest_setup.dc1_node, fixture_dtest_setup.dc2_node = fixture_dtest_setup.cluster.nodelist()
fixture_dtest_setup.superuser = fixture_dtest_setup.patient_exclusive_cql_connection(fixture_dtest_setup.dc1_node, user='cassandra', password='cassandra')

@@ -442,7 +442,7 @@ def prepare(self, nodes=1, compression=True, version=None, protocol_version=None
for n in self.cluster.nodelist():
remove_perf_disable_shared_mem(n)

self.cluster.start(wait_other_notice=True)
self.cluster.start()

node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1, protocol_version=protocol_version)
@@ -75,7 +75,7 @@ def default_bootstrap(cluster, token):
logger.debug("starting source node on version {}".format(bootstrap_from_version))
node1.set_install_dir(version=bootstrap_from_version)
node1.set_configuration_options(values={'initial_token': tokens[0]})
cluster.start(wait_other_notice=True)
cluster.start()

session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 1)
@@ -177,7 +177,7 @@ def test_simple_bootstrap_small_keepalive_period(self):
node1.byteman_port = '8100'
node1.import_config_files()

cluster.start(wait_other_notice=True)
cluster.start()

# Create more than one sstable larger than 1MB
node1.stress(['write', 'n=1K', '-rate', 'threads=8', '-schema',
@@ -208,7 +208,7 @@ def test_simple_bootstrap_nodata(self):
cluster = self.cluster
# Create a two-node cluster
cluster.populate(2)
cluster.start(wait_other_notice=True)
cluster.start()

# Bootstrapping a new node
node3 = new_node(cluster)
@@ -348,7 +348,7 @@ def test_resumable_bootstrap(self):
node1.byteman_port = '8100'
node1.import_config_files()

cluster.start(wait_other_notice=True)
cluster.start()
# kill stream to node3 in the middle of streaming to let it fail
if cluster.version() < '4.0':
node1.byteman_submit([self.byteman_submit_path_pre_4_0])
@@ -384,7 +384,7 @@ def test_bootstrap_with_reset_bootstrap_state(self):
"""Test bootstrap with resetting bootstrap progress"""
cluster = self.cluster
cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
cluster.populate(2).start(wait_other_notice=True)
cluster.populate(2).start()

node1 = cluster.nodes['node1']
node1.stress(['write', 'n=100K', '-schema', 'replication(factor=2)'])
@@ -422,7 +422,7 @@ def test_manual_bootstrap(self):
@jira_ticket CASSANDRA-9022
"""
cluster = self.cluster
cluster.populate(2).start(wait_other_notice=True)
cluster.populate(2).start()
(node1, node2) = cluster.nodelist()

node1.stress(['write', 'n=1K', 'no-warmup', '-schema', 'replication(factor=2)',
@@ -515,7 +515,7 @@ def _wiped_node_cannot_join_test(self, gently):
"""
cluster = self.cluster
cluster.populate(3)
cluster.start(wait_for_binary_proto=True)
cluster.start()

stress_table = 'keyspace1.standard1'

@@ -548,7 +548,7 @@ def test_decommissioned_wiped_node_can_join(self):
"""
cluster = self.cluster
cluster.populate(3)
cluster.start(wait_for_binary_proto=True)
cluster.start()

stress_table = 'keyspace1.standard1'

@@ -584,7 +584,7 @@ def test_decommissioned_wiped_node_can_gossip_to_single_seed(self):
"""
cluster = self.cluster
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
cluster.start()

node1 = cluster.nodelist()[0]
# Add a new node, bootstrap=True ensures that it is not a seed
@@ -627,7 +627,7 @@ def test_failed_bootstrap_wiped_node_can_join(self):
cluster = self.cluster
cluster.populate(1)
cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
cluster.start(wait_for_binary_proto=True)
cluster.start()

stress_table = 'keyspace1.standard1'

@@ -672,7 +672,7 @@ def test_node_cannot_join_as_hibernating_node_without_replace_address(self):
}]
})

cluster.start(wait_for_binary_proto=True)
cluster.start()

node1 = cluster.nodelist()[0]
node2 = cluster.nodelist()[1]
@@ -758,7 +758,7 @@ def test_simultaneous_bootstrap(self):

cluster = self.cluster
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
cluster.start()

node1, = cluster.nodelist()

@@ -795,7 +795,7 @@ def test_cleanup(self):
cluster = self.cluster
cluster.set_configuration_options(values={'concurrent_compactors': 4})
cluster.populate(1)
cluster.start(wait_for_binary_proto=True)
cluster.start()
node1, = cluster.nodelist()
for x in range(0, 5):
node1.stress(['write', 'n=100k', 'no-warmup', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', 'replication(factor=1)', '-rate', 'threads=10'])
@@ -854,7 +854,7 @@ def test_bootstrap_binary_disabled(self):
node1.byteman_port = '8100'
node1.import_config_files()

cluster.start(wait_other_notice=True)
cluster.start()
# kill stream to node2 in the middle of streaming to let it fail
if cluster.version() < '4.0':
node1.byteman_submit([self.byteman_submit_path_pre_4_0])
@@ -265,7 +265,7 @@ def prepare(self, ks_name,
configuration_overrides = {}
self.cluster.populate(1)
self.cluster.set_configuration_options(dict(config_defaults, **configuration_overrides))
self.cluster.start(wait_for_binary_proto=True)
self.cluster.start()
node = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node)
create_ks(session, ks_name, rf=1)
@@ -14,7 +14,7 @@ def test_cfid(self):
"""
cluster = self.cluster

cluster.populate(1).start(wait_other_notice=True)
cluster.populate(1).start()
[node1] = cluster.nodelist()

session = self.patient_cql_connection(node1)

0 comments on commit 4acae58

Please sign in to comment.