CBQE-2336:: add additional tests for views+graceful failover, MB-11706

Change-Id: I5009315e4f23a49530ff770c3df50680e4db2f88
Reviewed-on: http://review.couchbase.org/39588
Tested-by: buildbot <build@couchbase.com>
Reviewed-by: Parag Agarwal <agarwal.parag@gmail.com>
Tested-by: Parag Agarwal <agarwal.parag@gmail.com>
1 parent 5159dd3 · commit 85f92ca3fe1c343fc24608a7b94e91439c7f3b6f · Parag Agarwal (committed with paragagarwal) · Jul 19, 2014
@@ -1,6 +1,8 @@
failover.failovertests.FailoverTests:
test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,failoverMaster=True,GROUP=P0
- test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,doc_ops=update:create:delete,withQueries=True,numViews=5,runViews=True,GROUP=P0
+ test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,failoverMaster=True,GROUP=P0
+ test_failover_firewall,replicas=2,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,failoverMaster=True,GROUP=P1
+ test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,doc_ops=update:create:delete,withQueries=True,numViews=5,withViewsOps=True,GROUP=P0
test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,GROUP=P0
test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,items=40000,sasl_buckets=1,GROUP=P1
test_failover_firewall,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,bidirectional=True,GROUP=P0
@@ -10,7 +12,7 @@ failover.failovertests.FailoverTests:
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,failoverMaster=True,GROUP=P0
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,dgm_run=True,failoverMaster=True,GROUP=P0
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,GROUP=P0
- test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,withQueries=True,numViews=5,runViews=True,GROUP=P0
+ test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,withQueries=True,numViews=5,withViewsOps=True,GROUP=P0
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=40000,standard_buckets=1,GROUP=P0;
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,GROUP=P1
test_failover_normal,replicas=2,graceful=False,num_failed_nodes=2,items=20000,GROUP=P1
@@ -19,9 +21,11 @@ failover.failovertests.FailoverTests:
test_failover_normal,items=100000,graceful=False,during_ops=change_password,GROUP=P1;WINDOWS
test_failover_normal,items=100000,graceful=False,during_ops=change_port,failoverMaster=True,GROUP=P1;WINDOWS
#
+ test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,items=100000,dgm_run=True,failoverMaster=True,GROUP=P0
+ test_failover_stop_server,replicas=2,graceful=False,num_failed_nodes=1,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,items=100000,dgm_run=True,failoverMaster=True,GROUP=P1
test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,failoverMaster=True,GROUP=P0
test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,GROUP=P0
- test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,withQueries=True,numViews=5,runViews=True,GROUP=P0
+ test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,withQueries=True,numViews=5,withViewsOps=True,GROUP=P0
test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=10,GROUP=P1
test_failover_stop_server,replicas=1,graceful=False,num_failed_nodes=1,load_ratio=1,GROUP=P2
test_failover_stop_server,replicas=2,graceful=False,num_failed_nodes=2,items=20000,GROUP=P0
@@ -30,7 +34,9 @@ failover.failovertests.FailoverTests:
test_failover_stop_server,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=1,failoverMaster=True,GROUP=P1
# Graceful Failover and or Delta Recovery
- test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,dgm_run=True,,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL
+ test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,dgm_run=True,withMutationOps=True,doc_ops=create:update:delete,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL
+ test_failover_normal,replicas=2,num_failed_nodes=1,items=100000,dgm_run=True,withMutationOps=True,doc_ops=create:update:delete,failoverMaster=True,graceful=True,GROUP=P1;GRACEFUL
+ test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,dgm_run=True,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL
test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,dgm_run=True,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL
test_failover_normal,replicas=1,num_failed_nodes=1,items=200000,vbuckets=1024,stopGracefulFailover=True,dgm_run=True,,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL
test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,dgm_run=True,graceful=True,GROUP=P0;GRACEFUL
@@ -42,8 +48,11 @@ failover.failovertests.FailoverTests:
test_failover_then_add_back,replicas=2,num_failed_nodes=2,items=100000,standard_buckets=1,recoveryType=delta:delta,deltaRecoveryBuckets=default,graceful=False,GROUP=P1;GRACEFUL
test_failover_then_add_back,replicas=2,num_failed_nodes=1,items=100000,standard_buckets=1,recoveryType=full,deltaRecoveryBuckets=default,graceful=True,GROUP=P1;GRACEFUL
test_failover_then_add_back,replicas=2,num_failed_nodes=1,items=100000,standard_buckets=1,recoveryType=delta,deltaRecoveryBuckets=default:standard_buckets0,graceful=True,GROUP=P1;GRACEFUL
+ test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,withMutationOps=True,doc_ops=create:update:delete,upr_check=False,recoveryType=full,graceful=True,GROUP=P0;GRACEFUL
+ test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,withMutationOps=True,doc_ops=create:update:delete,upr_check=False,recoveryType=delta,graceful=True,GROUP=P0;GRACEFUL
+ test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,sasl_buckets=1,upr_check=False,recoveryType=full,graceful=True,GROUP=P0;GRACEFUL
+ test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,numViews=5,withViewsOps=True,createIndexesDuringFailover=True,sasl_buckets=1,upr_check=False,recoveryType=delta,graceful=True,GROUP=P0;GRACEFUL
test_failover_then_add_back,replicas=1,num_failed_nodes=1,items=100000,sasl_buckets=1,upr_check=False,recoveryType=full,graceful=True,GROUP=P0;GRACEFUL
- test_failover_then_add_back,replicas=2,num_failed_nodes=1,items=100000,recoveryType=delta,standard_buckets=1,upr_check=False,withQueries=True,numViews=5,runViews=True,graceful=True,GROUP=P0;GRACEFUL
test_failover_normal,replicas=1,graceful=True,check_verify_failover_type=True,num_failed_nodes=1,items=100,dgm_run=True,,failoverMaster=True,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL
test_failover_normal,replicas=2,graceful=True,check_verify_failover_type=True,num_failed_nodes=3,items=100,dgm_run=True,,failoverMaster=True,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL
test_failover_normal,replicas=3,graceful=True,check_verify_failover_type=True,num_failed_nodes=4,items=100,dgm_run=True,,failoverMaster=True,graceful=True,runRebalanceAfterFailover=False,GROUP=P1;GRACEFUL
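For orientation: each entry above is a test method from failover.failovertests.FailoverTests followed by comma-separated key=value parameters (GROUP selects the run group). A minimal sketch of how such a line decomposes into a test name plus a parameter dict; the parse_conf_line helper below is illustrative only, not part of testrunner:

    def parse_conf_line(line):
        # Split "test_name,key1=val1,key2=val2,..." into the method name and params;
        # the "if p" filter tolerates the stray double commas seen in a few lines.
        parts = [p for p in line.strip().split(",") if p]
        name, params = parts[0], {}
        for kv in parts[1:]:
            key, _, value = kv.partition("=")
            params[key] = value
        return name, params

    name, params = parse_conf_line(
        "test_failover_firewall,replicas=1,graceful=False,numViews=5,"
        "withViewsOps=True,createIndexesDuringFailover=True,GROUP=P0")
    # name -> "test_failover_firewall"; params["createIndexesDuringFailover"] -> "True"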
@@ -52,6 +52,20 @@ def async_create_sasl_bucket(self, server, name, password, size, replicas, enabl
self.task_manager.schedule(_task)
return _task
+ def async_failover(self, servers=[], failover_nodes=[], graceful=True):
+ """Asynchronously fails over a set of nodes
+
+ Parameters:
+ servers - servers used for connection. (TestInputServer)
+ failover_nodes - the set of servers that will undergo failover. (TestInputServer)
+ graceful - True for graceful failover, False for hard failover. (Boolean)
+
+ Returns:
+ FailoverTask - A task future that is a handle to the scheduled task."""
+ _task = FailoverTask(servers, to_failover=failover_nodes, graceful=graceful)
+ self.task_manager.schedule(_task)
+ return _task
+
def async_create_standard_bucket(self, server, name, port, size, replicas, enable_replica_index=1,
eviction_policy='valueOnly', bucket_priority=None):
"""Asynchronously creates a standard bucket
@@ -691,30 +705,18 @@ def compact_view(self, server, design_doc_name, bucket="default", timeout=None,
_task = self.async_compact_view(server, design_doc_name, bucket, with_rebalance)
return _task.result(timeout)
- def async_failover(self, servers, to_failover):
- """Asyncronously fails over nodes
-
- Parameters:
- servers - All servers participating in the failover ([TestInputServers])
- to_failover - All servers being failed over ([TestInputServers])
-
- Returns:
- FailoverTask - A task future that is a handle to the scheduled task"""
- _task = FailoverTask(servers, to_failover)
- self.task_manager.schedule(_task)
- return _task
-
- def failover(self, servers, to_failover, timeout=None):
- """Syncronously fails over nodes
+ def failover(self, servers=[], failover_nodes=[], graceful=True):
+ """Synchronously fails over a set of nodes
Parameters:
- servers - All servers participating in the failover ([TestInputServers])
- to_failover - All servers being failed over ([TestInputServers])
+ servers - servers used for connection. (TestInputServer)
+ failover_nodes - the servers to be failed over. (TestInputServer)
+ graceful - True for graceful failover, False for hard failover. (Boolean)
Returns:
- boolean - Whether or not the failover was successful"""
- _task = self.async_failover(servers, to_failover)
- return _task.result(timeout)
+ boolean - Whether or not the failover was successful."""
+ _task = self.async_failover(servers, failover_nodes, graceful)
+ return _task.result()
def async_bucket_flush(self, server, bucket='default'):
"""Asynchronously flushes a bucket
@@ -741,6 +743,8 @@ def bucket_flush(self, server, bucket='default', timeout=None):
_task = self.async_bucket_flush(server, bucket)
return _task.result(timeout)
+
+
def async_monitor_db_fragmentation(self, server, fragmentation, bucket, get_view_frag=False):
"""Asyncronously monitor db fragmentation
@@ -2219,10 +2219,11 @@ def _is_compacting(self):
'''task class for failover. This task will only failover nodes but doesn't
rebalance as there is already a task to do that'''
class FailoverTask(Task):
- def __init__(self, servers, to_failover=[], wait_for_pending=20):
+ def __init__(self, servers, to_failover=[], wait_for_pending=0, graceful=True):
Task.__init__(self, "failover_task")
self.servers = servers
self.to_failover = to_failover
+ self.graceful = graceful
self.wait_for_pending = wait_for_pending
def execute(self, task_manager):
@@ -2249,7 +2250,7 @@ def _failover_nodes(self, task_manager):
for node in rest.node_statuses():
if server.ip == node.ip and int(server.port) == int(node.port):
self.log.info("Failing over {0}:{1}".format(node.ip, node.port))
- rest.fail_over(node.id)
+ rest.fail_over(node.id, self.graceful)
class GenerateExpectedViewResultsTask(Task):
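Passing self.graceful down to rest.fail_over lets one task drive both failover flavours. For reference, a hedged sketch of how such a call could map onto Couchbase Server's REST endpoints (hard failover posts to /controller/failOver, graceful failover to /controller/startGracefulFailover); this is not the actual RestConnection implementation, and the use of the requests library here is purely illustrative:

    import requests  # illustrative only; testrunner ships its own HTTP helpers

    def fail_over(base_url, auth, otp_node, graceful=False):
        # Graceful failover drains active vbuckets from the node first;
        # hard failover removes it immediately.
        endpoint = "controller/startGracefulFailover" if graceful else "controller/failOver"
        resp = requests.post(base_url.rstrip("/") + "/" + endpoint,
                             auth=auth, data={"otpNode": otp_node})
        return resp.status_code == 200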
@@ -627,7 +627,7 @@ def _load_doc_data_all_buckets(self, data_op="create", batch_size=1000, gen_load
return gen_load
def verify_cluster_stats(self, servers=None, master=None, max_verify=None, timeout=None, check_items=True,
- only_store_hash=True, replica_to_read=None, batch_size=1000):
+ only_store_hash=True, replica_to_read=None, batch_size=1000, check_bucket_stats=True):
if servers is None:
servers = self.servers
if master is None:
@@ -645,7 +645,8 @@ def verify_cluster_stats(self, servers=None, master=None, max_verify=None, timeo
# get/verify stats if 'ValueError: Not able to get values for following keys' was gotten
self._verify_stats_all_buckets(servers, timeout=(timeout or 120))
raise e
- self._verify_stats_all_buckets(servers, timeout=(timeout or 120))
+ if check_bucket_stats:
+ self._verify_stats_all_buckets(servers, timeout=(timeout or 120))
# verify that curr_items_tot corresponds to sum of curr_items from all nodes
verified = True
for bucket in self.buckets:
@@ -654,6 +655,7 @@ def verify_cluster_stats(self, servers=None, master=None, max_verify=None, timeo
else:
self.log.warn("verification of items was omitted")
+
def _stats_befor_warmup(self, bucket_name):
self.pre_warmup_stats[bucket_name] = {}
self.stats_monitor = self.input.param("stats_monitor", "")
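The new check_bucket_stats flag lets a caller keep the item-count verification while skipping the per-bucket stats pass, which may not settle immediately after a failover plus rebalance. An illustrative call from a test that inherits this base class (the remaining_servers name is hypothetical):

    # Verify items on the nodes still in the cluster, but skip the bucket-stats pass.
    self.verify_cluster_stats(servers=remaining_servers, check_bucket_stats=False)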
@@ -19,8 +19,9 @@ def setUp(self):
self.failoverMaster = self.input.param("failoverMaster", False)
self.total_vbuckets = self.input.param("total_vbuckets", 1024)
self.std_vbucket_dist = self.input.param("std_vbucket_dist", None)
- self.withOps = self.input.param("withOps", False)
- self.runViews = self.input.param("runViews", False)
+ self.withMutationOps = self.input.param("withMutationOps", False)
+ self.withViewsOps = self.input.param("withViewsOps", False)
+ self.createIndexesDuringFailover = self.input.param("createIndexesDuringFailover", False)
self.upr_check = self.input.param("upr_check", True)
self.withQueries = self.input.param("withQueries", False)
self.numberViews = self.input.param("numberViews", False)
@@ -34,7 +35,6 @@ def setUp(self):
self._value_size = self.input.param("value_size", 256)
self.doc_ops = self.input.param("doc_ops", [])
self.deltaRecoveryBuckets = self.input.param("deltaRecoveryBuckets", None)
- self.runViewsDuringFailover = self.input.param("runViewsDuringFailover", False)
if self.doc_ops:
self.doc_ops = self.doc_ops.split(":")
self.num_failed_nodes = self.input.param("num_failed_nodes", 0)
@@ -52,6 +52,9 @@ def setUp(self):
self.gen_create = BlobGenerator('failover', 'failover', self.value_size, start=self.num_items + 1 , end=self.num_items * 1.5)
self.gen_update = BlobGenerator('failover', 'failover', self.value_size, start=self.num_items / 2, end=self.num_items)
self.gen_delete = BlobGenerator('failover', 'failover', self.value_size, start=self.num_items / 4, end=self.num_items / 2 - 1)
+ self.afterfailover_gen_create = BlobGenerator('failover', 'failover', self.value_size, start=self.num_items * 1.6, end=self.num_items * 2)
+ self.afterfailover_gen_update = BlobGenerator('failover', 'failover', self.value_size, start=1, end=self.num_items / 4)
+ self.afterfailover_gen_delete = BlobGenerator('failover', 'failover', self.value_size, start=self.num_items * 0.5, end=self.num_items * 0.75)
self.log.info("============== FailoverBaseTest setup was finished for test #{0} {1} =============="\
.format(self.case_number, self._testMethodName))
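The three afterfailover_* generators cover key ranges separate from the gen_create/gen_update/gen_delete ranges used around the failover itself, so post-failover mutations create fresh keys and touch slices left alone earlier. A hedged sketch of how a test might apply them, assuming the base class's _load_all_buckets(server, gen, op_type, exp) helper:

    # Apply the post-failover mutation load (sketch; exp=0 means no expiration).
    if self.withMutationOps:
        self._load_all_buckets(self.master, self.afterfailover_gen_create, "create", 0)
        self._load_all_buckets(self.master, self.afterfailover_gen_update, "update", 0)
        self._load_all_buckets(self.master, self.afterfailover_gen_delete, "delete", 0)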