Fix test expectations for Memtable API (CEP-11)
patch by Branimir Lambov; reviewed by Andrés de la Peña and Caleb Rackliffe for CASSANDRA-17034
blambov committed Apr 29, 2022
1 parent 24e6d2b commit bd5e29c7ca8e0d6987ba9d180d97766cb30eb0fa
Showing 2 changed files with 88 additions and 11 deletions.
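
For context: CEP-11 makes the memtable implementation pluggable and, starting with Cassandra 4.1, DESCRIBE output includes a memtable = 'default' table option, which is why the expected CQL strings below gain a new 4.1 branch ahead of the existing 4.0 one. A minimal sketch of that version-gating pattern, in the dtest's own style (the helper name expected_table_options and its option list are illustrative, not taken from the test):

    from distutils.version import LooseVersion

    def expected_table_options(cluster_version):
        # Options expected in DESCRIBE TABLE output; only a couple are listed here.
        options = ["additional_write_policy = '99p'",
                   "compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}"]
        if LooseVersion(cluster_version) >= LooseVersion('4.1'):
            # CEP-11: 4.1+ clusters also report the configured memtable implementation.
            options.append("memtable = 'default'")
        return options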
@@ -1130,7 +1130,28 @@ def get_test_table_output(self, has_val=True, has_val_idx=True):
PRIMARY KEY (id, col)
"""

if self.cluster.version() >= LooseVersion('4.0'):
if self.cluster.version() >= LooseVersion('4.1'):
create_table += """
) WITH CLUSTERING ORDER BY (col ASC)
AND additional_write_policy = '99p'
AND bloom_filter_fp_chance = 0.01
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
AND cdc = false
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND memtable = 'default'
AND crc_check_chance = 1.0
AND default_time_to_live = 0
AND extensions = {}
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair = 'BLOCKING'
AND speculative_retry = '99p';
"""
elif self.cluster.version() >= LooseVersion('4.0'):
create_table += """
) WITH CLUSTERING ORDER BY (col ASC)
AND additional_write_policy = '99p'
@@ -1215,7 +1236,32 @@ def get_users_table_output(self):
myindex_output = self.get_index_output('myindex', 'test', 'users', 'age')
create_table = None

if self.cluster.version() >= LooseVersion('4.0'):
if self.cluster.version() >= LooseVersion('4.1'):
create_table = """
CREATE TABLE test.users (
userid text PRIMARY KEY,
age int,
firstname text,
lastname text
) WITH additional_write_policy = '99p'
AND bloom_filter_fp_chance = 0.01
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
AND cdc = false
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND memtable = 'default'
AND crc_check_chance = 1.0
AND default_time_to_live = 0
AND extensions = {}
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair = 'BLOCKING'
AND speculative_retry = '99p';
"""
elif self.cluster.version() >= LooseVersion('4.0'):
create_table = """
CREATE TABLE test.users (
userid text PRIMARY KEY,
@@ -1320,7 +1366,32 @@ def get_index_output(self, index, ks, table, col):
return "CREATE INDEX {} ON {}.{} ({});".format(index, ks, table, col)

def get_users_by_state_mv_output(self):
if self.cluster.version() >= LooseVersion('4.0'):
if self.cluster.version() >= LooseVersion('4.1'):
return """
CREATE MATERIALIZED VIEW test.users_by_state AS
SELECT *
FROM test.users
WHERE state IS NOT NULL AND username IS NOT NULL
PRIMARY KEY (state, username)
WITH CLUSTERING ORDER BY (username ASC)
AND additional_write_policy = '99p'
AND bloom_filter_fp_chance = 0.01
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
AND cdc = false
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND memtable = 'default'
AND crc_check_chance = 1.0
AND extensions = {}
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair = 'BLOCKING'
AND speculative_retry = '99p';
"""
elif self.cluster.version() >= LooseVersion('4.0'):
return """
CREATE MATERIALIZED VIEW test.users_by_state AS
SELECT *
@@ -244,7 +244,7 @@ def test_archive_commitlog_with_active_commitlog(self):
"""
Copy the active commitlogs to the archive directory before restoration
"""
self.run_archive_commitlog(restore_point_in_time=False, archive_active_commitlogs=True)
self.run_archive_commitlog(restore_point_in_time=False)

def test_dont_archive_commitlog(self):
"""
@@ -258,19 +258,20 @@ def test_archive_commitlog_point_in_time(self):
"""
self.run_archive_commitlog(restore_point_in_time=True)

def test_archive_commitlog_point_in_time_with_active_commitlog(self):
def test_archive_commitlog_point_in_time_ln(self):
"""
Test archive commit log with restore_point_in_time setting
"""
self.run_archive_commitlog(restore_point_in_time=True, archive_active_commitlogs=True)
self.run_archive_commitlog(restore_point_in_time=True, archive_command='ln')

def test_archive_commitlog_point_in_time_with_active_commitlog_ln(self):
@since('4.1')
def test_archive_commitlog_restore_skip_by_position(self):
"""
Test archive commit log with restore_point_in_time setting
Test archive commit log not restored because of specified snapshot commit log position
"""
self.run_archive_commitlog(restore_point_in_time=True, archive_active_commitlogs=True, archive_command='ln')
self.run_archive_commitlog(restore_point_in_time=True, specify_commitlog_position=True, archive_command='ln')

def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, archive_active_commitlogs=False, archive_command='cp'):
def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_commitlog=True, specify_commitlog_position=False, archive_command='cp'):
"""
Run archive commit log restoration test
"""
@@ -430,6 +431,11 @@ def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_co
replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
[(r'^restore_point_in_time=.*$', 'restore_point_in_time={restore_time}'.format(restore_time=restore_time))])

if specify_commitlog_position:
# specify a high commit log position to skip replaying any commit log data
replace_in_file(os.path.join(node1.get_path(), 'conf', 'commitlog_archiving.properties'),
[(r'^snapshot_commitlog_position=.*$', 'snapshot_commitlog_position={cl_position}'.format(cl_position="9223372036854775807, 0"))])

logger.debug("Restarting node1..")
node1.stop()
node1.start(wait_for_binary_proto=True)
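
The specify_commitlog_position branch above points snapshot_commitlog_position in conf/commitlog_archiving.properties at '9223372036854775807, 0' (Long.MAX_VALUE), a position higher than anything in the archived segments, so the node skips commit log replay on restart. A self-contained sketch of that property rewrite using plain re instead of the dtest's replace_in_file helper (only the property name, file name, and value come from the diff; the function itself is illustrative):

    import re

    def set_snapshot_commitlog_position(properties_path, position='9223372036854775807, 0'):
        # Rewrite the snapshot_commitlog_position line in commitlog_archiving.properties in place.
        with open(properties_path) as f:
            lines = f.readlines()
        with open(properties_path, 'w') as f:
            for line in lines:
                f.write(re.sub(r'^snapshot_commitlog_position=.*$',
                               'snapshot_commitlog_position={}'.format(position), line))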
@@ -441,7 +447,7 @@ def run_archive_commitlog(self, restore_point_in_time=False, restore_archived_co
rows = session.execute('SELECT count(*) from ks.cf')
# Now we should have 30000 rows from the snapshot + 30000 rows
# from the commitlog backups:
if not restore_archived_commitlog:
if not restore_archived_commitlog or specify_commitlog_position:
assert rows[0][0] == 30000
elif restore_point_in_time:
assert rows[0][0] == 60000
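
The assertion change above follows from the test's arithmetic: the snapshot provides 30000 rows and replaying the archived commit logs provides another 30000, so skipping replay (no restore, or a snapshot commit log position past every archived segment) leaves 30000 rows, while a point-in-time restore yields 60000. A condensed sketch of the branches visible in this hunk (the helper name is illustrative, and the test's remaining branches are not shown here):

    def expected_row_count(restore_archived_commitlog, specify_commitlog_position, restore_point_in_time):
        if not restore_archived_commitlog or specify_commitlog_position:
            # Nothing replayed from the archive: only the 30000 snapshot rows remain.
            return 30000
        if restore_point_in_time:
            # 30000 snapshot rows plus 30000 rows replayed from the archived commit logs.
            return 60000
        # Other cases fall outside the lines shown in this hunk.
        raise NotImplementedError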
