fix dtests for 15108/java11
bdeggleston committed May 10, 2019
Parent: 9dc0052 · Commit: b1167bef169d657dbd7d1eb09c4d4a6fa8ecf6a9
Showing 9 changed files with 41 additions and 29 deletions.
@@ -451,17 +451,21 @@ def test_local_quorum_bootstrap(self):
         with tempfile.NamedTemporaryFile(mode='w+') as stress_config:
             stress_config.write(yaml_config)
             stress_config.flush()
-            node1.stress(['user', 'profile=' + stress_config.name, 'n=2M', 'no-warmup',
-                          'ops(insert=1)', '-rate', 'threads=50'])
+            node1.stress(['user', 'profile=' + stress_config.name, 'n=200K', 'no-warmup',
+                          'ops(insert=1)', '-rate', 'threads=10'])

             node3 = new_node(cluster, data_center='dc2')
             node3.start(no_wait=True)
             time.sleep(3)

+            ntout = node1.nodetool('status').stdout
+            assert re.search(r'UJ\s+' + node3.ip_addr, ntout), ntout
             out, err, _ = node1.stress(['user', 'profile=' + stress_config.name, 'ops(insert=1)',
-                                        'n=500K', 'no-warmup', 'cl=LOCAL_QUORUM',
-                                        '-rate', 'threads=5',
+                                        'n=10k', 'no-warmup', 'cl=LOCAL_QUORUM',
+                                        '-rate', 'threads=10',
                                         '-errors', 'retries=2'])
+            ntout = node1.nodetool('status').stdout
+            assert re.search(r'UJ\s+' + node3.ip_addr, ntout), ntout

             logger.debug(out)
             assert_stderr_clean(err)
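Aside: the new assertions check that nodetool status still reports node3 in the Up/Joining (UJ) state both before and after the LOCAL_QUORUM stress run. A minimal, self-contained sketch of the same check, using a made-up status line rather than real nodetool output:

    import re

    # hypothetical one-line excerpt of `nodetool status` while a node bootstraps;
    # only the leading state column and the address matter to the assertion
    ntout = "UJ  127.0.0.3  138.4 KiB  256  ?  rack1"
    node3_ip = "127.0.0.3"
    assert re.search(r'UJ\s+' + node3_ip, ntout), ntout  # holds while node3 is joining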
@@ -0,0 +1,13 @@
+#
+# Sleep 60s during validation compaction
+#
+RULE sleep 60s on validation
+CLASS org.apache.cassandra.repair.ValidationManager
+METHOD doValidation
+AT ENTRY
+# set flag to only run this rule once.
+IF NOT flagged("done")
+DO
+   flag("done");
+   Thread.sleep(60000)
+ENDRULE
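Note: flag and flagged are Byteman rule-language built-ins, so this rule stalls only the first call to ValidationManager.doValidation for 60 seconds, long enough for the test to kill a node mid-validation. A minimal sketch of how a dtest would push the script to a running node via ccm's byteman_submit, mirroring the version-based path selection this commit adds to the repair test below:

    # illustrative only: choose the script matching the cluster's major version,
    # as the repair test later in this commit does
    script = 'repair_validation_sleep.btm'
    prefix = '4.0' if cluster.version() >= '4.0' else 'pre4.0'
    node_to_kill.byteman_submit(['./byteman/{}/{}'.format(prefix, script)])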
File renamed without changes.
@@ -47,6 +47,7 @@ def test_disk_balance_stress(self):
         for node in cluster.nodelist():
             self.assert_balanced(node)

+    @pytest.mark.resource_intensive
     def test_disk_balance_bootstrap(self):
         cluster = self.cluster
         if self.dtest_config.use_vnodes:
@@ -328,7 +328,7 @@ def test_require_client_auth(self):
             self.assert_insecure_connection_rejected(node)

         # specifying only the truststore containing the server cert should fail
-        with pytest.raises(ToolError, match=".*SSLHandshakeException.*"):
+        with pytest.raises(ToolError):
             node.nodetool("info --ssl -Djavax.net.ssl.trustStore={ts} -Djavax.net.ssl.trustStorePassword={ts_pwd}"
                           .format(ts=self.truststore(), ts_pwd=self.truststore_password))

@@ -413,15 +413,15 @@ def fixture_set_cluster_settings(self, fixture_dtest_setup):
         cluster = fixture_dtest_setup.cluster
         cluster.set_configuration_options(values={'hinted_handoff_enabled': False,
                                                   'dynamic_snitch': False,
-                                                  'write_request_timeout_in_ms': 500,
-                                                  'read_request_timeout_in_ms': 500})
+                                                  'write_request_timeout_in_ms': 1000,
+                                                  'read_request_timeout_in_ms': 1000})
         cluster.populate(3, install_byteman=True, debug=True)
         byteman_validate(cluster.nodelist()[0], './byteman/read_repair/sorted_live_endpoints.btm', verbose=True)
         cluster.start(wait_for_binary_proto=True, jvm_args=['-XX:-PerfDisableSharedMem'])
         session = fixture_dtest_setup.patient_exclusive_cql_connection(cluster.nodelist()[0], timeout=2)

         session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}")
-        session.execute("CREATE TABLE ks.tbl (k int, c int, v int, primary key (k, c)) WITH speculative_retry = '250ms';")
+        session.execute("CREATE TABLE ks.tbl (k int, c int, v int, primary key (k, c)) WITH speculative_retry = '400ms';")

     def get_cql_connection(self, node, **kwargs):
         return self.patient_exclusive_cql_connection(node, retry_policy=None, **kwargs)
@@ -1186,7 +1186,8 @@ def _test_failure_during_repair(self, phase, initiator=False):
         node_to_kill.import_config_files()

         logger.debug("Starting cluster..")
-        cluster.start(wait_other_notice=True)
+        cluster.start(wait_other_notice=True, jvm_args=['-Djdk.attach.allowAttachSelf=true'])
+        # cluster.start(wait_other_notice=True)

         logger.debug("stopping node3")
         node3.stop(gently=False, wait_other_notice=True)
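For context, JDK 9 and later disable a JVM attaching to itself by default; the added -Djdk.attach.allowAttachSelf=true re-enables that, which is presumably what these Byteman-driven repair tests need once the nodes run under Java 11. The commit passes the flag unconditionally; a hedged sketch of a version-gated alternative (the gate on the 4.0 line is an assumption for illustration, not what the commit does):

    # illustrative sketch only: pass the self-attach flag just to clusters
    # that can run on Java 11 (assumed here to be the 4.0 line and later)
    jvm_args = []
    if cluster.version() >= '4.0':
        jvm_args.append('-Djdk.attach.allowAttachSelf=true')
    cluster.start(wait_other_notice=True, jvm_args=jvm_args)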
@@ -1201,7 +1202,14 @@ def _test_failure_during_repair(self, phase, initiator=False):
logger.debug("bring back node3")
node3.start(wait_other_notice=True, wait_for_binary_proto=True)

script = 'stream_sleep.btm' if phase == 'sync' else 'repair_{}_sleep.btm'.format(phase)
if phase == 'sync':
script = 'stream_sleep.btm'
else:
script = 'repair_{}_sleep.btm'.format(phase)
if phase == 'validation':
prefix = '4.0' if cluster.version() >= '4.0' else 'pre4.0'
script = prefix + '/' + script

logger.debug("Submitting byteman script to {}".format(node_to_kill.name))
# Sleep on anticompaction/stream so there will be time for node to be killed
node_to_kill.byteman_submit(['./byteman/{}'.format(script)])
@@ -20,9 +20,7 @@ def test_startup_no_live_seeds(self):
"""

self.fixture_dtest_setup.allow_log_errors = True
n1 = self.cluster.create_node('node1', True, None, ('127.0.0.1', 7000), '7100',
None, None, binary_interface=('127.0.0.1', 9042))
self.cluster.add(n1, False)
self.cluster.populate(1)
node1 = self.cluster.nodelist()[0]
self.cluster.set_configuration_options({
'seed_provider': [{'class_name': 'org.apache.cassandra.locator.SimpleSeedProvider',
@@ -52,15 +50,7 @@ def test_startup_non_seed_with_peers(self):

         self.fixture_dtest_setup.allow_log_errors = True

-        n1 = self.cluster.create_node('node1', True, None, ('127.0.0.1', 7000), '7100',
-                                      None, None, binary_interface=('127.0.0.1', 9042))
-        n2 = self.cluster.create_node('node2', True, None, ('127.0.0.2', 7000), '7101',
-                                      None, None, binary_interface=('127.0.0.2', 9042))
-        n3 = self.cluster.create_node('node3', True, None, ('127.0.0.3', 7000), '7102',
-                                      None, None, binary_interface=('127.0.0.3', 9042))
-        self.cluster.add(n1, True)
-        self.cluster.add(n2, True)
-        self.cluster.add(n3, True)
+        self.cluster.populate(3)

         node1, node2, node3 = self.cluster.nodelist()

@@ -93,15 +83,10 @@ def test_startup_after_ring_delay(self):
"""
RING_DELAY = 15000 # ms
self.fixture_dtest_setup.allow_log_errors = True
n1 = self.cluster.create_node('node1', True, None, ('127.0.0.1', 7000), '7100',
None, None, binary_interface=('127.0.0.1', 9042))
n2 = self.cluster.create_node('node2', True, None, ('127.0.0.2', 7000), '7101',
None, None, binary_interface=('127.0.0.2', 9042))
self.cluster.add(n1, True)
self.cluster.add(n2, False)
self.cluster.populate(2)
node1, node2 = self.cluster.nodelist()

node2.start(wait_other_notice=False, jvm_args=['-Dcassandra.ring_delay_ms={}'.format(RING_DELAY)])
node2.start(wait_other_notice=False, jvm_args=['-Dcassandra.ring_delay_ms={}'.format(RING_DELAY)], verbose=True)
node2.watch_log_for('Starting shadow gossip round to check for endpoint collision', filename='debug.log')
sleep(RING_DELAY / 1000)
# Start seed, ensure node2 joins before it exits shadow round.
@@ -618,6 +618,7 @@ def replication_factor(self):
     def tokens(self):
         return [0, 1, 2, 3, 4]

+    @pytest.mark.resource_intensive
     @pytest.mark.no_vnodes
     def test_transient_full_merge_read(self):
         """ When reading, transient replica should serve a missing read """
