Simplify parameterization to plain python
mkleen committed Nov 2, 2020
1 parent 1b7902c commit e6487b7
Showing 2 changed files with 44 additions and 21 deletions.
setup.py (3 changes: 1 addition & 2 deletions)
@@ -34,8 +34,7 @@ def read(filename):
         'Cython',
         'asyncpg>=0.18.2, < 0.20',
         'pyodbc',
-        'psycopg2-binary>=2.7.5',
-        'parameterized'
+        'psycopg2-binary>=2.7.5'
     ],
     python_requires='>=3.6',
     classifiers=[
tests/bwc/test_recovery.py (62 changes: 43 additions & 19 deletions)
@@ -2,15 +2,14 @@
 import unittest
 
 from cr8.run_crate import get_crate, _extract_version
-from parameterized import parameterized
 from crate.client import connect
-import random
+from random import sample
 
 from crate.qa.tests import NodeProvider, insert_data, UpgradePath
 
-UPGRADE_PATHS = [(UpgradePath('4.2.x', '4.3.x'),), (UpgradePath('4.3.x', 'latest-nightly'),)]
-UPGRADE_PATHS_FROM_43 = [(UpgradePath('4.3.x', 'latest-nightly'),)]
+UPGRADE_PATHS = [UpgradePath('4.2.x', '4.3.x'), UpgradePath('4.3.x', 'latest-nightly')]
+UPGRADE_PATHS_FROM_43 = [UpgradePath('4.3.x', 'latest-nightly')]
 
 
 class RecoveryTest(NodeProvider, unittest.TestCase):
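
Aside on the list change above (illustration only, not part of the commit): parameterized.expand unpacks each list entry into the test's positional arguments, so a test taking a single UpgradePath had to receive it wrapped in a 1-tuple; plain iteration passes the values directly, which is why the wrapping disappears. A tiny sketch of the two calling conventions, with stand-in values:

    # expand-style: each entry is an argument tuple, applied as test(*entry)
    old_style = [('4.2.x -> 4.3.x',), ('4.3.x -> latest-nightly',)]
    # plain-python style: each entry is the value itself
    new_style = ['4.2.x -> 4.3.x', '4.3.x -> latest-nightly']


    def check(path):
        print('running for', path)


    for entry in old_style:
        check(*entry)   # the 1-tuple is splatted back into a single argument
    for path in new_style:
        check(path)     # no wrapping needed
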
@@ -83,12 +82,23 @@ def _upgrade_cluster(self, cluster, version: str, nodes: int) -> None:
             new_node = self.upgrade_node(node, version)
             cluster[i] = new_node
 
-    @parameterized.expand(UPGRADE_PATHS)
-    def test_recovery_with_concurrent_indexing(self, path):
+    def _run_upgrade_paths(self, test, paths):
+        for p in paths:
+            try:
+                self.setUp()
+                test(p)
+            finally:
+                self.tearDown()
+
+    def test_recovery_with_concurrent_indexing(self):
+        self._run_upgrade_paths(self._test_recovery_with_concurrent_indexing, UPGRADE_PATHS)
+
+    def _test_recovery_with_concurrent_indexing(self, path):
         """
         This test creates a new table and insert data at every stage of the
         rolling upgrade.
         """
+
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()
 
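For readers skimming the diff: a minimal, self-contained sketch of the pattern this hunk introduces (illustrative names only, not part of the commit). Each public test_* method hands a private, path-taking body to _run_upgrade_paths, which wraps every parameter in its own setUp()/tearDown() cycle so that each upgrade path starts from fresh fixtures:

    import unittest


    class PlainParamsTest(unittest.TestCase):

        def setUp(self):
            self.fixture = []  # stand-in for a per-run fixture, e.g. a cluster

        def _run_upgrade_paths(self, test, paths):
            # One fresh setUp()/tearDown() cycle per parameter. unittest also
            # runs setUp()/tearDown() around the outer test_* call, so the
            # fixture code must tolerate the extra cycle.
            for p in paths:
                try:
                    self.setUp()
                    test(p)
                finally:
                    self.tearDown()

        def test_doubling(self):
            self._run_upgrade_paths(self._test_doubling, [1, 2, 3])

        def _test_doubling(self, n):
            self.fixture.append(n)
            self.assertEqual(n + n, 2 * n)


    if __name__ == '__main__':
        unittest.main()

One trade-off versus parameterized.expand: each path no longer shows up as a separate test case in the runner's output, and the loop aborts at the first failing path. The standard-library alternative, self.subTest(p), would restore per-parameter reporting but shares one setUp()/tearDown() across all parameters, which does not suit tests that need a fresh cluster per upgrade path.
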
@@ -143,8 +153,10 @@ def test_recovery_with_concurrent_indexing(self, path):
             for node_id in node_ids:
                 self.assert_busy(lambda: self._assert_num_docs_by_node_id(conn, 'doc', 'test', node_id[0], 105))
 
-    @parameterized.expand(UPGRADE_PATHS)
-    def test_relocation_with_concurrent_indexing(self, path):
+    def test_relocation_with_concurrent_indexing(self):
+        self._run_upgrade_paths(self._test_relocation_with_concurrent_indexing, UPGRADE_PATHS)
+
+    def _test_relocation_with_concurrent_indexing(self, path):
         cluster = self._new_cluster(path.from_version, self.NUMBER_OF_NODES)
         cluster.start()
 
@@ -216,8 +228,10 @@ def _assert_shard_state(self, conn, schema, table_name, node_id, state):
         self.assertTrue(current_state)
         self.assertEqual(current_state[0], state)
 
-    @parameterized.expand(UPGRADE_PATHS)
-    def test_recovery(self, path):
+    def test_recovery(self):
+        self._run_upgrade_paths(self._test_recovery, UPGRADE_PATHS)
+
+    def _test_recovery(self, path):
         """
         This test creates a new table, insert data and asserts the state at every stage of the
         rolling upgrade.
@@ -253,8 +267,10 @@ def test_recovery(self, path):
 
         self.assert_busy(lambda: self._assert_is_green(conn, 'doc', 'test'))
 
-    @parameterized.expand(UPGRADE_PATHS)
-    def test_recovery_closed_index(self, path):
+    def test_recovery_closed_index(self):
+        self._run_upgrade_paths(self._test_recovery_closed_index, UPGRADE_PATHS)
+
+    def _test_recovery_closed_index(self, path):
         """
         This test creates a table in the non upgraded cluster and closes it. It then
         checks that the table is effectively closed and potentially replicated.
@@ -284,8 +300,10 @@ def test_recovery_closed_index(self, path):
 
         self._assert_is_closed(conn, 'doc', 'test')
 
-    @parameterized.expand(UPGRADE_PATHS)
-    def test_closed_index_during_rolling_upgrade(self, path):
+    def test_closed_index_during_rolling_upgrade(self):
+        self._run_upgrade_paths(self._test_closed_index_during_rolling_upgrade, UPGRADE_PATHS)
+
+    def _test_closed_index_during_rolling_upgrade(self, path):
         """
         This test creates and closes a new table at every stage of the rolling
         upgrade. It then checks that the table is effectively closed and
@@ -334,8 +352,10 @@ def test_closed_index_during_rolling_upgrade(self, path):
 
         self._assert_is_closed(conn, 'doc', 'upgraded_cluster')
 
-    @parameterized.expand(UPGRADE_PATHS)
-    def test_update_docs(self, path):
+    def test_update_docs(self):
+        self._run_upgrade_paths(self._test_update_docs, UPGRADE_PATHS)
+
+    def _test_update_docs(self, path):
         """
         This test creates a new table, insert data and updates data at every state at every stage of the
         rolling upgrade.
@@ -384,8 +404,10 @@ def test_update_docs(self, path):
             for result in res:
                 self.assertEqual(result['rowcount'], 1)
 
-    @parameterized.expand(UPGRADE_PATHS_FROM_43)
-    def test_operation_based_recovery(self, path):
+    def test_operation_based_recovery(self):
+        self._run_upgrade_paths(self._test_operation_based_recovery, UPGRADE_PATHS_FROM_43)
+
+    def _test_operation_based_recovery(self, path):
         """
         Tests that we should perform an operation-based recovery if there were
         some but not too many uncommitted documents (i.e., less than 10% of
@@ -435,8 +457,10 @@ def test_operation_based_recovery(self, path):
 
         self._assert_ensure_checkpoints_are_synced(conn, 'doc', 'test')
 
-    @parameterized.expand(UPGRADE_PATHS_FROM_43)
-    def test_turnoff_translog_retention_after_upgraded(self, path):
+    def test_turnoff_translog_retention_after_upgraded(self):
+        self._run_upgrade_paths(self._test_turnoff_translog_retention_after_upgraded, UPGRADE_PATHS_FROM_43)
+
+    def _test_turnoff_translog_retention_after_upgraded(self, path):
         """
         Verifies that once all shard copies on the new version, we should turn
         off the translog retention for indices with soft-deletes.
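With the parameterized dependency gone, the suite runs under plain unittest; a likely invocation (assuming the repository's package layout makes the module importable) would be:

    python -m unittest tests.bwc.test_recovery -v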
