CBQE-3669 add a 7-second sleep after bucket creation. I picked 7 seconds to
make sure the bucket is fully up after it is created.

Change-Id: Ie5882393ce47ac269076f8982a6aacad095942fd
Reviewed-on: http://review.couchbase.org/70689
Tested-by: Thuan Nguyen <soccon@gmail.com>
Reviewed-by: Eric Cooper <ericcouchbase@gmail.com>
saigon committed Dec 6, 2016
1 parent 60ca996 commit 7cc73b0
Showing 4 changed files with 23 additions and 1 deletion.
8 changes: 7 additions & 1 deletion lib/membase/api/rest_client.py
@@ -10,7 +10,7 @@
 from threading import Thread
 from TestInput import TestInputSingleton
 from testconstants import MIN_KV_QUOTA, INDEX_QUOTA, FTS_QUOTA
-from testconstants import COUCHBASE_FROM_VERSION_4
+from testconstants import COUCHBASE_FROM_VERSION_4, COUCHBASE_FROM_4DOT6
 
 import httplib2
 import logger
@@ -119,6 +119,12 @@ def vbucket_map_ready(self, bucket, timeout_in_seconds=360):
         while time.time() <= end_time:
             vBuckets = self.rest.get_vbuckets(bucket)
             if vBuckets:
+                """ Temporary workaround: remove once the root cause of the slow bucket bring-up is found """
+                cb_version = self.rest.get_nodes_self().version[:5]
+                if cb_version in COUCHBASE_FROM_4DOT6:
+                    log.info("sleep 7 seconds to make sure the bucket is ready")
+                    time.sleep(7)
+                """ End of temporary workaround """
                 return True
             else:
                 time.sleep(0.5)
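Read in isolation, the hunk above amounts to a readiness wait: poll for the bucket's vbucket map and, on 4.6.x builds only, pad the wait with a fixed 7-second pause. A minimal, self-contained sketch of that pattern follows; COUCHBASE_FROM_4DOT6 is assumed here to be a collection of 5-character version prefixes (as the membership test above suggests), and get_vbuckets/get_version are hypothetical callables standing in for the REST client methods, not the framework's actual API.

import logging
import time

log = logging.getLogger(__name__)

# Assumption: 5-character version prefixes, mirroring the membership test in the hunk above.
COUCHBASE_FROM_4DOT6 = ("4.6.0", "4.6.1")


def wait_for_vbucket_map(get_vbuckets, get_version, timeout_in_seconds=360):
    """Poll until the bucket exposes a vbucket map; on 4.6.x clusters
    add a fixed 7-second pause so the bucket is fully up before use."""
    end_time = time.time() + timeout_in_seconds
    while time.time() <= end_time:
        if get_vbuckets():
            if get_version()[:5] in COUCHBASE_FROM_4DOT6:
                log.info("sleep 7 seconds to make sure the bucket is ready")
                time.sleep(7)
            return True
        time.sleep(0.5)
    return False

A readiness probe that polls a cheaper health check would avoid the fixed delay, but per the commit message the 7-second pause is kept until the underlying slowness is diagnosed.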
2 changes: 2 additions & 0 deletions lib/remote/remote_util.py
@@ -3099,6 +3099,7 @@ def get_data_map_using_cbtransfer(self, buckets, data_path=None, userId="Adminis
             path = temp_path + genFileName
             dest_path = "/tmp/" + fileName
             destination = "csv:" + csv_path
+            log.info("Run cbtransfer to get data map")
             self.execute_cbtransfer(source, destination, options)
             file_existed = self.file_exists(temp_path, genFileName)
             if file_existed:
@@ -3126,6 +3127,7 @@ def execute_cbtransfer(self, source, destination, command_options=''):
             command = "cmd /c \"%s\" \"%s\" \"%s\" %s" % (transfer_command, source, destination, command_options)
         output, error = self.execute_command(command, use_channel=True)
         self.log_command_output(output, error)
+        log.info("done execute cbtransfer")
         return output
 
     def execute_cbdocloader(self, username, password, bucket, memory_quota, file):
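The two log lines added in this file only bracket an existing "build a command string, run it, log the output" flow. As a rough, framework-free illustration of that flow (standard library only; this is not the repository's RemoteMachineShellConnection API, and the cbtransfer path in the usage comment is a placeholder), one could write:

import logging
import subprocess

log = logging.getLogger(__name__)


def run_and_log(command):
    """Run a shell command, log its stdout/stderr, and return the stdout lines."""
    log.info("Run command: %s", command)
    proc = subprocess.run(command, shell=True, capture_output=True, text=True)
    for line in proc.stdout.splitlines():
        log.info(line)
    for line in proc.stderr.splitlines():
        log.warning(line)
    log.info("done executing: %s", command)
    return proc.stdout.splitlines()

# Hypothetical usage shaped like execute_cbtransfer (placeholder paths and options):
# run_and_log("/opt/couchbase/bin/cbtransfer http://127.0.0.1:8091 csv:/tmp/out.csv -b default")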
4 changes: 4 additions & 0 deletions pytests/ent_backup_restore/enterprise_backup_restore_base.py
@@ -173,6 +173,9 @@ def backup_create(self):
             args += " --disable-data"
         remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
         command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
+
+        self.log.info("Remove any old dir before creating a new one")
+        remote_client.execute_command("rm -rf %s" % self.backupset.directory)
         output, error = remote_client.execute_command(command)
         remote_client.log_command_output(output, error)
         return output, error
@@ -326,6 +329,7 @@ def backup_restore_validate(self, compare_uuid=False, seqno_compare_function="==
         self.log.info("Finished restoring backup")
 
         current_vseqno = self.get_vbucket_seqnos(self.cluster_to_restore, self.buckets, self.skip_consistency, self.per_node)
+        self.log.info("*** Start to validate the restore")
         status, msg = self.validation_helper.validate_restore(self.backupset.end, self.vbucket_seqno, current_vseqno,
                                                               compare_uuid=compare_uuid, compare=seqno_compare_function,
                                                               get_replica=replicas, mode=mode)
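The backup_create() hunk above wipes any stale archive directory before running cbbackupmgr, so leftovers from a previous run cannot interfere with the new backup repository. A minimal local sketch of that ordering, standard library only (the paths and cbbackupmgr arguments are placeholders for illustration, not the framework's actual values):

import shutil
import subprocess

# Placeholder values for illustration only.
backup_dir = "/tmp/entbackup"
create_cmd = ["/opt/couchbase/bin/cbbackupmgr", "config",
              "--archive", backup_dir, "--repo", "example_repo"]

# Remove any old archive directory before creating a new one,
# mirroring the rm -rf added in backup_create().
shutil.rmtree(backup_dir, ignore_errors=True)

# Then create the backup repository and surface the tool's output.
result = subprocess.run(create_cmd, capture_output=True, text=True)
print(result.stdout)
print(result.stderr)

In the diff itself the cleanup runs on the backup host over SSH rather than locally; the sketch only shows the ordering of the two steps.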
10 changes: 10 additions & 0 deletions pytests/ent_backup_restore/enterprise_backup_restore_test.py
@@ -64,7 +64,9 @@ def test_backup_restore_sanity(self):
         3. Perform restores for the same number of times with random start and end values
         """
         gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
+        self.log.info("*** start to load items to all buckets")
         self._load_all_buckets(self.master, gen, "create", self.expires)
+        self.log.info("*** done loading items to all buckets")
         self.ops_type = self.input.param("ops-type", "update")
         self.expected_error = self.input.param("expected_error", None)
         if self.auto_failover:
@@ -74,20 +76,27 @@
         self.backup_create_validate()
         for i in range(1, self.backupset.number_of_backups + 1):
             if self.ops_type == "update":
+                self.log.info("*** start to update items in all buckets")
                 self._load_all_buckets(self.master, gen, "update", self.expires)
+                self.log.info("*** done updating items in all buckets")
             elif self.ops_type == "delete":
+                self.log.info("*** start to delete items in all buckets")
                 self._load_all_buckets(self.master, gen, "delete", self.expires)
+                self.log.info("*** done deleting items in all buckets")
                 self.sleep(10)
+            self.log.info("*** start to validate backup cluster")
             self.backup_cluster_validate()
         self.targetMaster = True
         start = randrange(1, self.backupset.number_of_backups + 1)
         if start == self.backupset.number_of_backups:
             end = start
         else:
             end = randrange(start, self.backupset.number_of_backups + 1)
+        self.log.info("*** start to restore cluster")
         restored = {"{0}/{1}".format(start, end): ""}
         for i in range(1, self.backupset.number_of_backups + 1):
             if self.reset_restore_cluster:
+                self.log.info("*** start to reset cluster")
                 self.backup_reset_clusters(self.cluster_to_restore)
                 if self.same_cluster:
                     self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])
@@ -97,6 +106,7 @@
                 self.sleep(10)
             self.backupset.start = start
             self.backupset.end = end
+            self.log.info("*** start restore validation")
             self.backup_restore_validate(compare_uuid=False,
                                          seqno_compare_function=">=", expected_error=self.expected_error)
             if self.backupset.number_of_backups == 1:
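For readers following the sanity test's flow, the restore range it logs and validates is chosen by drawing a random start index first and, unless that index is already the newest backup, drawing the end index from [start, number_of_backups]. A standalone sketch of that selection (the function name is mine, not the test's):

from random import randrange


def pick_restore_range(number_of_backups):
    """Pick a random 1-based (start, end) backup range with end >= start,
    matching the selection logic in test_backup_restore_sanity."""
    start = randrange(1, number_of_backups + 1)
    if start == number_of_backups:
        end = start
    else:
        end = randrange(start, number_of_backups + 1)
    return start, end


if __name__ == "__main__":
    print(pick_restore_range(5))  # e.g. (2, 4)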
