CBQE-2304 add cancel tests to cluster wide collectinfo tests

Change-Id: I11a14462208acf9fb14a9de39fb81d1bf5ff6626
Reviewed-on: http://review.couchbase.org/39586
Reviewed-by: Thuan Nguyen <soccon@gmail.com>
Tested-by: Thuan Nguyen <soccon@gmail.com>
commit 2bb6711a1087d62350007f498979425b616b6360 (1 parent: 73ebae8)
authored and committed by @saigon
conf/py-cwc.conf (6 changed lines)
@@ -1,10 +1,14 @@
-### test without upload ###
+### test without upload. Test with upload put in jenkins job ###
 cwc.cwctests.CWCTests:
     test_start_collect_log
+    test_start_collect_log,cancel_collect=true
     test_start_collect_log,sasl_buckets=1
+    test_start_collect_log,sasl_buckets=1,cancel_collect=true
     test_start_collect_log,sasl_buckets=1,standard_buckets=1
     test_start_collect_log,nodes_init=4
+    test_start_collect_log,nodes_init=4,cancel_collect=true
     test_start_collect_log,nodes_init=4,sasl_buckets=1
+    test_start_collect_log,nodes_init=4,sasl_buckets=1,cancel_collect=true
     test_start_collect_log,nodes_init=4,sasl_buckets=1,standard_buckets=1
     test_start_collect_log,nodes_init=4,collect_nodes=3
     test_start_collect_log,nodes_init=4,sasl_buckets=1,collect_nodes=3
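Each entry under cwc.cwctests.CWCTests: names a test method followed by comma-separated key=value parameters, so cancel_collect=true simply rides along as one more parameter handed to the test. A minimal sketch of how such a line splits into a test name plus a parameter dict (parse_conf_line is a hypothetical helper for illustration only; testrunner has its own conf parser):

    # Hypothetical helper for illustration; not testrunner's actual conf parser.
    def parse_conf_line(line):
        """Split 'test_start_collect_log,nodes_init=4,cancel_collect=true'
        into the test name and a dict of string parameters."""
        parts = line.strip().split(",")
        name, params = parts[0], {}
        for pair in parts[1:]:
            key, _, value = pair.partition("=")
            params[key] = value
        return name, params

    print(parse_conf_line("test_start_collect_log,nodes_init=4,cancel_collect=true"))
    # -> ('test_start_collect_log', {'nodes_init': '4', 'cancel_collect': 'true'})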
pytests/cwc/cwc_base.py (1 changed line)
@@ -20,6 +20,7 @@ def setUp(self):
         self.customer = self.input.param("customer", "")
         self.ticket = self.input.param("ticket", "")
         self.collect_nodes = self.input.param("collect_nodes", "*")
+        self.cancel_collect = self.input.param("cancel_collect", False)
         self.shutdown_nodes = self.input.param("shutdown_nodes", None)
         if self.doc_ops is not None:
             self.doc_ops = self.doc_ops.split(";")
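The new setUp line follows the existing pattern: self.input.param("cancel_collect", False) returns the value supplied by the conf entry, or the default False when the parameter is absent. A rough stand-in for that behaviour (FakeInput is invented for illustration; testrunner's real TestInput class does the actual conf-value parsing and type conversion):

    # FakeInput is an invented stand-in, not testrunner's TestInput.
    class FakeInput(object):
        def __init__(self, test_params):
            self.test_params = test_params

        def param(self, name, default):
            if name not in self.test_params:
                return default
            value = self.test_params[name]
            # Treat "true"/"false" strings from the conf file as booleans.
            if isinstance(value, str) and value.lower() in ("true", "false"):
                return value.lower() == "true"
            return value

    params = FakeInput({"cancel_collect": "true"})
    print(params.param("cancel_collect", False))   # True  (from the conf entry)
    print(params.param("shutdown_nodes", None))    # None  (falls back to the default)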
pytests/cwc/cwctests.py (31 changed lines)
@@ -34,11 +34,17 @@ def test_start_collect_log(self):
                             upload=self.upload, uploadHost=self.uploadHost, \
                             customer=self.customer, ticket=self.ticket)
         if status:
-            collected, uploaded = self._monitor_collecting_log(rest, timeout=1200)
+            collected, uploaded, cancel_collect = \
+                self._monitor_collecting_log(rest, timeout=1200)
             if collected:
                 self._verify_log_file(rest)
             if self.upload and uploaded:
                 self._verify_log_uploaded(rest)
+            if self.cancel_collect:
+                if cancel_collect:
+                    self.log.info("Logs collection were cancelled")
+                else:
+                    self.fail("Failed to cancel log collection")
             shell.disconnect()
         else:
             self.fail("ERROR: {0}".format(content))
@@ -48,10 +54,22 @@ def _monitor_collecting_log(self, rest, timeout):
         end_time = start_time + timeout
         collected = False
         uploaded = False
+        cancel_collect = False
         progress = 0
         progress, stt, perNode = rest.get_cluster_logs_collection_status()
         while (progress != 100 or stt == "running") and time.time() <= end_time :
             progress, stt, perNode = rest.get_cluster_logs_collection_status()
+            if stt is not None and self.cancel_collect:
+                count = 0
+                if "running" in stt and count == 0:
+                    self.log.info("Start to cancel collect logs ")
+                    status, content = rest.cancel_cluster_logs_collection()
+                    count += 1
+                if "cancelled" in stt:
+                    cancel_collect = True
+                    break
+                elif count == 2:
+                    self.fail("Failed to cancel log collection")
             self.log.info("Cluster-wide collectinfo progress: {0}".format(progress))
             if perNode is not None:
                 for node in perNode:
@@ -63,12 +81,17 @@
                         uploaded = True
             self.sleep(10)
         if time.time() > end_time:
-            self.log.error("log could not collect after {0} seconds ".format(timeout))
-            return collected, uploaded
+            if self.cancel_collect:
+                self.log.error("Could not cancel log collection after {0} seconds ".format(timeout))
+            elif self.upload:
+                self.log.error("Log could not upload after {0} seconds ".format(timeout))
+            else:
+                self.log.error("Log could not collect after {0} seconds ".format(timeout))
+            return collected, uploaded, cancel_collect
         else:
             duration = time.time() - start_time
             self.log.info("log collection took {0} seconds ".format(duration))
-            return collected, uploaded
+            return collected, uploaded, cancel_collect
     def _verify_log_file(self, rest):
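In outline, _monitor_collecting_log polls rest.get_cluster_logs_collection_status() until progress reaches 100 or the timeout expires; when cancel_collect is set, it fires rest.cancel_cluster_logs_collection() while the status is still "running" and then watches for a "cancelled" status. The standalone sketch below restates that loop under those assumptions; it is illustrative only, not the test's implementation, and monitor_with_cancel and poll_interval are invented names:

    import time

    def monitor_with_cancel(rest, timeout, cancel_collect=False, poll_interval=10):
        """Illustrative polling loop: stop when collection finishes, is
        cancelled, or the timeout expires. 'rest' is assumed to expose the two
        calls used in the diff above: get_cluster_logs_collection_status(),
        returning (progress, status, per_node), and
        cancel_cluster_logs_collection()."""
        end_time = time.time() + timeout
        cancel_sent = False
        while time.time() <= end_time:
            progress, status, per_node = rest.get_cluster_logs_collection_status()
            if cancel_collect and status is not None:
                if "running" in status and not cancel_sent:
                    rest.cancel_cluster_logs_collection()
                    cancel_sent = True
                if "cancelled" in status:
                    return "cancelled"
            if progress == 100 and status != "running":
                return "collected"
            time.sleep(poll_interval)
        return "timed out"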