Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

CBQE-2327: Implement bucket priority in basetestcase class and add tests to rebalance and failover

Change-Id: Ib23a17ff916e68ac4b79adb73b48996f69df2424
Reviewed-on: http://review.couchbase.org/39422
Tested-by: buildbot <build@couchbase.com>
Reviewed-by: Parag Agarwal <agarwal.parag@gmail.com>
Tested-by: Parag Agarwal <agarwal.parag@gmail.com>
  • Loading branch information...
commit 491278dc219404d478b10b2aea4a730e10f65ce6 1 parent 7535c6b
@paragagarwal paragagarwal authored karma2ns committed
View
2  conf/py-newfailover.conf
@@ -8,6 +8,7 @@ failover.failovertests.FailoverTests:
test_failover_firewall,replicas=3,graceful=False,num_failed_nodes=3,load_ratio=10,GROUP=P1
#
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,failoverMaster=True,GROUP=P0
+ test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,dgm_run=True,failoverMaster=True,GROUP=P0
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,GROUP=P0
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=100000,dgm_run=True,withQueries=True,numViews=5,runViews=True,GROUP=P0
test_failover_normal,replicas=1,graceful=False,num_failed_nodes=1,items=40000,standard_buckets=1,GROUP=P0;
@@ -30,6 +31,7 @@ failover.failovertests.FailoverTests:
# Graceful Failover and or Delta Recovery
test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,dgm_run=True,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL
+ test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,dgm_run=True,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL
test_failover_normal,replicas=1,num_failed_nodes=1,items=200000,vbuckets=1024,stopGracefulFailover=True,dgm_run=True,failoverMaster=True,graceful=True,GROUP=P0;GRACEFUL
test_failover_normal,replicas=1,num_failed_nodes=1,items=100000,dgm_run=True,graceful=True,GROUP=P0;GRACEFUL
test_failover_normal,replicas=1,num_failed_nodes=1,load_ratio=20,sasl_buckets=1,graceful=True,GROUP=P1;GRACEFUL
View
2  conf/rebalance/py-rebalancein.conf
@@ -8,6 +8,7 @@ rebalance.rebalancein.RebalanceInTests:
rebalance_in_with_ops,nodes_in=4,replicas=3,doc_ops=create,GROUP=IN;P2
rebalance_in_with_ops,nodes_in=5,replicas=2,items=50000,doc_ops=create:update:delete,GROUP=IN;P2
rebalance_in_with_ops,nodes_in=3,replicas=1,items=50000,doc_ops=create:update:delete,value_size=1024,GROUP=IN;P0
+ rebalance_in_with_ops,nodes_in=3,replicas=1,items=500000,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,doc_ops=create:update:delete,value_size=1024,GROUP=IN;P1
rebalance_in_get_random_key,nodes_init=3,nodes_in=2,items=100000,value_size=256,GROUP=IN;P0;FROM_2_0
rebalance_in_get_random_key,nodes_init=2,nodes_in=2,items=500000,max_verify=100000,GROUP=IN;P1;FROM_2_0
incremental_rebalance_in_with_ops,replicas=2,items=0,GROUP=IN;P1
@@ -17,6 +18,7 @@ rebalance.rebalancein.RebalanceInTests:
incremental_rebalance_in_with_ops,replicas=3,items=100000,doc_ops=create,max_verify=100000,GROUP=IN;P2
incremental_rebalance_in_with_ops,items=500000,value_size=512,max_verify=100000,GROUP=IN;P1
rebalance_in_with_queries,nodes_in=2,blob_generator=False,value_size=1024,GROUP=IN;BASIC;P0;FROM_2_0
+ rebalance_in_with_queries,nodes_in=2,blob_generator=False,value_size=1024,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,GROUP=IN;BASIC;P0;FROM_2_0
rebalance_in_with_queries,nodes_in=3,replicas=3,blob_generator=False,GROUP=IN;P1;FROM_2_0
rebalance_in_with_queries,nodes_in=5,replicas=2,blob_generator=False,GROUP=IN;P2;FROM_2_0
rebalance_in_with_queries,nodes_in=1,replicas=0,num_views=2,is_dev_ddoc=False,reproducer=True,max_verify=10000,nodes_init=3,disabled_consistent_view=True,items=100000,GROUP=IN;P2
View
4 conf/rebalance/py-rebalanceinout.conf
@@ -5,7 +5,8 @@ rebalance.rebalanceinout.RebalanceInOutTests:
#incremental_rebalance_in_out_with_mutation_and_deletion,items=500000,value_size=256,max_verify=100000,GROUP=IN_OUT;P0
#incremental_rebalance_in_out_with_mutation_and_expiration,items=500000,value_size=512,max_verify=100000,GROUP=IN_OUT;P0
incremental_rebalance_out_in_with_mutation,replicas=2,value_size=2048,GROUP=IN_OUT;P1
- incremental_rebalance_out_in_with_mutation,init_num_nodes=3,items=0,GROUP=IN_OUT;P0
+ incremental_rebalance_out_in_with_mutation,init_num_nodes=3,items=400000,GROUP=IN_OUT;P0
+ incremental_rebalance_out_in_with_mutation,init_num_nodes=3,items=400000,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,GROUP=IN_OUT;P0
incremental_rebalance_out_in_with_mutation,replicas=3,init_num_nodes=3,GROUP=IN_OUT;P2
start_stop_rebalance_in_out,nodes_init=1,nodes_in=2,nodes_out=0,extra_nodes_in=1,extra_nodes_out=0,items=100000,max_verify=10000,value_size=1024,GROUP=IN_OUT;P0
start_stop_rebalance_in_out,nodes_init=1,nodes_in=2,nodes_out=0,extra_nodes_in=3,extra_nodes_out=0,items=100000,GROUP=IN_OUT;P1
@@ -13,6 +14,7 @@ rebalance.rebalanceinout.RebalanceInOutTests:
start_stop_rebalance_in_out,nodes_init=4,nodes_in=0,nodes_out=1,extra_nodes_in=2,extra_nodes_out=2,items=100000,replicas=3,GROUP=IN_OUT;P2
start_stop_rebalance_in_out,nodes_init=2,nodes_in=4,nodes_out=1,items=100000,GROUP=IN_OUT;P1
start_stop_rebalance_in_out,nodes_init=4,nodes_in=2,nodes_out=2,num_replicas=1,items=100000,GROUP=IN_OUT;BASIC;P0
+ start_stop_rebalance_in_out,nodes_init=4,nodes_in=2,nodes_out=2,num_replicas=1,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,items=100000,GROUP=IN_OUT;BASIC;P0
start_stop_rebalance_in_out,nodes_init=4,nodes_in=2,nodes_out=2,num_replicas=2,GROUP=IN_OUT;P1
start_stop_rebalance_in_out,nodes_init=5,nodes_in=2,nodes_out=2,num_replicas=3,items=100000,GROUP=IN_OUT;P2
start_stop_rebalance_in_out,nodes_init=4,nodes_in=1,nodes_out=2,num_replicas=2,GROUP=IN_OUT;P1
View
4 conf/rebalance/py-rebalanceout.conf
@@ -16,11 +16,13 @@ rebalance.rebalanceout.RebalanceOutTests:
rebalance_out_get_random_key,nodes_out=1,items=100000,value_size=256,GROUP=OUT;BASIC;P0;FROM_2_0
rebalance_out_get_random_key,nodes_out=3,items=500000,max_verify=100000,GROUP=OUT;P2;FROM_2_0
incremental_rebalance_out_with_ops,replicas=2,items=0,value_size=1024,GROUP=OUT;P0
+ incremental_rebalance_out_with_ops,replicas=2,items=0,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,value_size=1024,GROUP=OUT;P0
incremental_rebalance_out_with_ops,replicas=3,items=100000,doc_ops=delete,GROUP=OUT;P2
incremental_rebalance_out_with_ops,replicas=1,items=100000,max_verify=100000,doc_ops=create,GROUP=OUT;P0
incremental_rebalance_out_with_ops,replicas=3,items=100000,max_verify=100000,doc_ops=create,GROUP=OUT;P2
incremental_rebalance_out_with_ops,items=500000,max_verify=100000,value_size=512,GROUP=OUT;P1
rebalance_out_with_queries,nodes_out=1,blob_generator=False,value_size=1024,GROUP=OUT;BASIC;P0;FROM_2_0
+ rebalance_out_with_queries,nodes_out=1,blob_generator=False,value_size=1024,standard_buckets=2,sasl_buckets=2,standard_bucket_priority=low:high,sasl_bucket_priority=low:high,GROUP=OUT;BASIC;P0;FROM_2_0
rebalance_out_with_queries,nodes_out=2,replicas=2,blob_generator=False,GROUP=OUT;P1;FROM_2_0
rebalance_out_with_queries,nodes_out=3,replicas=3,blob_generator=False,GROUP=OUT;P2;FROM_2_0
incremental_rebalance_out_with_queries,blob_generator=False,items=100000,GROUP=OUT;P0;FROM_2_0
@@ -32,4 +34,4 @@ rebalance.rebalanceout.RebalanceOutTests:
rebalance_out_with_warming_up,items=1000000,max_verify=100000,GROUP=OUT;P1,BUGS=MB-7660_curr_items_tot_vb_replica_curr_items_mismatch_mixed_cluster_with_warming_up
rebalance_out_with_warming_up,nodes_out=3,items=500000,replicas=1,max_verify=100000,value_size=1024,GROUP=OUT;P0,BUGS=MB-7660_curr_items_tot_vb_replica_curr_items_mismatch_mixed_cluster_with_warming_up
rebalance_out_with_warming_up,nodes_out=3,items=100000,replicas=3,GROUP=OUT;P2,BUGS=MB-7660_curr_items_tot_vb_replica_curr_items_mismatch_mixed_cluster_with_warming_up
- rebalance_out_with_warming_up,nodes_out=2,items=1000000,replicas=2,master_restart=True,max_verify=100000,GROUP=OUT;P1,BUGS=MB-7660_curr_items_tot_vb_replica_curr_items_mismatch_mixed_cluster_with_warming_up
+ rebalance_out_with_warming_up,nodes_out=2,items=1000000,replicas=2,master_restart=True,max_verify=100000,GROUP=OUT;P1,BUGS=MB-7660_curr_items_tot_vb_replica_curr_items_mismatch_mixed_cluster_with_warming_up
View
10 lib/couchbase/cluster.py
@@ -17,7 +17,7 @@ def __init__(self):
self.task_manager = TaskManager("Cluster_Thread")
self.task_manager.start()
- def async_create_default_bucket(self, server, size, replicas=1, enable_replica_index=1, eviction_policy='valueOnly'):
+ def async_create_default_bucket(self, server, size, replicas=1, enable_replica_index=1, eviction_policy='valueOnly', bucket_priority = None):
"""Asynchronously creates the default bucket
Parameters:
@@ -29,7 +29,7 @@ def async_create_default_bucket(self, server, size, replicas=1, enable_replica_i
BucketCreateTask - A task future that is a handle to the scheduled task."""
_task = BucketCreateTask(server, 'default', replicas, size,
- enable_replica_index=enable_replica_index, eviction_policy=eviction_policy)
+ enable_replica_index=enable_replica_index, eviction_policy=eviction_policy,bucket_priority=bucket_priority)
self.task_manager.schedule(_task)
return _task
@@ -221,7 +221,7 @@ def async_wait_for_xdcr_stat(self, servers, bucket, param, stat, comparison, val
return _task
def create_default_bucket(self, server, size, replicas=1, timeout=600,
- enable_replica_index=1, eviction_policy='valueOnly'):
+ enable_replica_index=1, eviction_policy='valueOnly', bucket_priority = None):
"""Synchronously creates the default bucket
Parameters:
@@ -234,7 +234,7 @@ def create_default_bucket(self, server, size, replicas=1, timeout=600,
_task = self.async_create_default_bucket(server, size, replicas,
enable_replica_index=enable_replica_index,
- eviction_policy=eviction_policy)
+ eviction_policy=eviction_policy, bucket_priority = bucket_priority)
return _task.result(timeout)
def create_sasl_bucket(self, server, name, password, size, replicas, timeout=None, bucket_priority=None):
@@ -249,7 +249,7 @@ def create_sasl_bucket(self, server, name, password, size, replicas, timeout=Non
Returns:
boolean - Whether or not the bucket was created."""
- _task = self.async_create_sasl_bucket(server, name, password, replicas, size, bucket_priority=bucket_priority)
+ _task = self.async_create_sasl_bucket(server, name, password, replicas, size, bucket_priority = bucket_priority)
self.task_manager.schedule(_task)
return _task.result(timeout)
View
26 pytests/basetestcase.py
@@ -98,6 +98,12 @@ def setUp(self):
self.eviction_policy = self.input.param("eviction_policy", 'valueOnly') # or 'fullEviction'
self.absolute_path = self.input.param("absolute_path", True)
self.test_timeout = self.input.param("test_timeout", 3600) # kill hang test and jump to next one.
+ self.sasl_bucket_priority = self.input.param("sasl_bucket_priority", None)
+ self.standard_bucket_priority = self.input.param("standard_bucket_priority", None)
+ if self.sasl_bucket_priority != None:
+ self.sasl_bucket_priority = self.sasl_bucket_priority.split(":")
+ if self.standard_bucket_priority != None:
+ self.standard_bucket_priority = self.standard_bucket_priority.split(":")
self.log.info("============== basetestcase setup was started for test #{0} {1}=============="\
.format(self.case_number, self._testMethodName))
@@ -306,12 +312,16 @@ def _create_sasl_buckets(self, server, num_buckets, server_id=None, bucket_size=
bucket_tasks = []
for i in range(num_buckets):
name = 'bucket' + str(i)
+ bucket_priority = None
+ if self.sasl_bucket_priority != None:
+ bucket_priority = self.get_bucket_priority(self.sasl_bucket_priority[i])
bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
password,
bucket_size,
self.num_replicas,
enable_replica_index=self.enable_replica_index,
- eviction_policy=self.eviction_policy))
+ eviction_policy=self.eviction_policy,
+ bucket_priority = bucket_priority))
self.buckets.append(Bucket(name=name, authType="sasl", saslPassword=password,
num_replicas=self.num_replicas, bucket_size=bucket_size,
master_id=server_id, eviction_policy=self.eviction_policy));
@@ -328,12 +338,16 @@ def _create_standard_buckets(self, server, num_buckets, server_id=None, bucket_s
bucket_tasks = []
for i in range(num_buckets):
name = 'standard_bucket' + str(i)
+ bucket_priority = None
+ if self.standard_bucket_priority != None:
+ bucket_priority = self.get_bucket_priority(self.standard_bucket_priority[i])
bucket_tasks.append(self.cluster.async_create_standard_bucket(server, name,
STANDARD_BUCKET_PORT + i,
bucket_size,
self.num_replicas,
enable_replica_index=self.enable_replica_index,
- eviction_policy=self.eviction_policy))
+ eviction_policy=self.eviction_policy,
+ bucket_priority = bucket_priority))
self.buckets.append(Bucket(name=name, authType=None, saslPassword=None,
num_replicas=self.num_replicas,
bucket_size=bucket_size,
@@ -1273,6 +1287,14 @@ def compare_failovers_logs(self, prev_failovers_stats, servers, buckets, perNode
self.log.info(" End Verification for Failovers logs comparison ")
return new_failovers_stats
+ def get_bucket_priority(self,priority):
+ if priority == None:
+ return None
+ if priority == 'low':
+ return None
+ else:
+ return priority
+
def expire_pager(self, servers, val=10):
for bucket in self.buckets:
for server in servers:
Please sign in to comment.
Something went wrong with that request. Please try again.