Permalink
Browse files

CBQE-144: Removed unused variables/imports from pytest/*.py

Change-Id: I00cce74a7738fdaf7d4a6becceec0e33e67309f8
Reviewed-on: http://review.couchbase.org/16592
Reviewed-by: Tommie McAfee <tommie@couchbase.com>
Tested-by: Michael Wiederhold <mike@couchbase.com>
  • Loading branch information...
1 parent e935be9 commit 26cc6c4e661bb8b2827c157d3c679368ab004261 @mikewied mikewied committed with mikewied May 31, 2012
@@ -1,15 +1,12 @@
import unittest
-import uuid
import logger
-import os
-import mc_bin_client
import random
import time
from testconstants import MIN_COMPACTION_THRESHOLD
from testconstants import MAX_COMPACTION_THRESHOLD
from TestInput import TestInputSingleton
-from membase.api.rest_client import RestConnection, RestHelper
+from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
from remote.remote_util import RemoteMachineShellConnection
from memcached.helper.data_helper import MemcachedClientHelper
@@ -64,7 +61,7 @@ def _database_fragmentation(self, percent_threshold):
items = (int(available_ram*1000)/2)/item_size
rest.create_bucket(bucket= bucket_name, ramQuotaMB=int(available_ram), authType='sasl',
saslPassword='password', replicaNumber=1, proxyPort=11211)
- ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
+ BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
BucketOperationHelper.wait_for_vbuckets_ready_state(serverInfo, bucket_name)
self.log.info("start to load {0}K keys with {1} bytes/key".format(items, item_size))
@@ -107,4 +104,3 @@ def _viewFragmentationThreshold(self):
self.log.info(serverInfo)
rest = RestConnection(serverInfo)
rest.reset_auto_compaction()
- parallelDBAndView = "false"
@@ -3,10 +3,9 @@
import time
import unittest
-from membase.api.rest_client import RestConnection, RestHelper
+from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
-from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper
from remote.remote_util import RemoteMachineShellConnection
from remote.remote_util import RemoteUtilHelper
@@ -45,8 +44,6 @@ def common_tearDown(servers, testcase):
@staticmethod
def wait_for_failover_or_assert(master, autofailover_count, timeout, testcase):
- log = logger.Logger.get_logger()
-
time_start = time.time()
time_max_end = time_start + timeout + 60
failover_count = 0
View
@@ -276,8 +276,6 @@ def _test_cluster_topology_change_body(self):
self.assertTrue(ready, "wait_for_memcached failed")
self.add_nodes_and_rebalance()
- rest = RestConnection(self.master)
-
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
View
@@ -19,7 +19,7 @@
ACTIVE="active"
REPLICA1="replica1"
REPLICA2="replica2"
-Replica3="replica3"
+REPLICA3="replica3"
class CheckpointTests(unittest.TestCase):
@@ -122,7 +122,6 @@ def checkpoint_collapse(self):
tasks = []
chk_pnt = str(int(m_stats[m_stats.keys()[0]]) + (num_items / chk_size))
- chk_items = num_items - (chk_size * 2)
tasks.append(self.cluster.async_wait_for_stats([master], self.bucket, param, stat_key,
'==', chk_pnt))
tasks.append(self.cluster.async_wait_for_stats([slave1], self.bucket, param, stat_key,
View
@@ -7,6 +7,7 @@
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
+from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper
from remote.remote_util import RemoteMachineShellConnection
@@ -3,6 +3,7 @@
from TestInput import TestInputSingleton
import logger
import time
+import datetime
from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
from remote.remote_util import RemoteMachineShellConnection
@@ -3,6 +3,7 @@
import time
from TestInput import TestInputSingleton
import logger
+import datetime
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
View
@@ -7,7 +7,7 @@
import uuid
import logger
import time
-import crc32
+import datetime
from membase.api.rest_client import RestConnection
from membase.api.tap import TapConnection
from membase.helper.bucket_helper import BucketOperationHelper
View
@@ -1,4 +1,3 @@
-import json
import os
import random
from threading import Thread
@@ -8,6 +7,7 @@
import time
import uuid
import logger
+import datetime
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.rebalance_helper import RebalanceHelper
from membase.helper.bucket_helper import BucketOperationHelper
@@ -246,11 +246,6 @@ def setUp(self):
#set an item for 5 seconds
#getl for 15 seconds and verify that setting the item again
#throws Data exists
- def setUp(self):
- self.memcapableTestBase = MemcapableTestBase()
- self.memcapableTestBase.setUp_bucket('default', 11211, 'membase', self)
-
-
def _getl_body(self, prefix, getl_timeout, expiration):
node = self.memcapableTestBase.master
mc = MemcachedClientHelper.direct_client(node, "default")
@@ -742,7 +737,6 @@ def _delete_items(self, item_count, prefix):
self.log.info("deleted {0} items in {1} seconds".format(item_count, time.time() - time_start))
def _eject_items(self, item_count, prefix):
- flags = 0
client = MemcachedClientHelper.proxy_client(self.master, self.bucket_name)
time_start = time.time()
for i in range(item_count):
@@ -1,9 +1,7 @@
-from random import shuffle
import hashlib
import time
import uuid
import unittest
-import uuid
import json
import sys
from TestInput import TestInputSingleton
@@ -13,7 +11,7 @@
from mc_bin_client import MemcachedError
from membase.helper.cluster_helper import ClusterOperationHelper as ClusterHelper, ClusterOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
-from memcached.helper.data_helper import MemcachedClientHelper, MutationThread, VBucketAwareMemcached
+from memcached.helper.data_helper import VBucketAwareMemcached
from threading import Thread
from memcached.helper.data_helper import DocumentGenerator
from memcached.helper.old_kvstore import ClientKeyValueStore
@@ -133,7 +131,6 @@ def test_rebalance_out(self):
num_of_docs = TestInputSingleton.input.param("num_of_docs",100000)
replica = TestInputSingleton.input.param("replica",100000)
add_items_count = TestInputSingleton.input.param("num_of_creates",30000)
- rebalance_in = TestInputSingleton.input.param("rebalance_in",1)
size = TestInputSingleton.input.param("item_size",256)
params = {"sizes": [size], "count": num_of_docs, "seed": str(uuid.uuid4())[:7]}
rest = RestConnection(master)
@@ -315,15 +312,15 @@ def do_verification(kv_store, rest, bucket):
try:
smart.memcached(k).get(k)
validation_failures[k] = ["expired key"]
- except MemcachedError as e:
+ except MemcachedError:
pass
else:
try:
x, y, value = smart.memcached(k).get(k)
actualmd5 = hashlib.md5(value).digest()
if actualmd5 != expected["value"]:
validation_failures[k] = ["value mismatch"]
- except MemcachedError as e:
+ except MemcachedError:
validation_failures[k] = ["key not found"]
return validation_failures
@@ -2,6 +2,7 @@
import uuid
import TestInput
import logger
+import datetime
from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
@@ -7,7 +7,7 @@
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
-from memcached.helper.data_helper import MemcachedClientHelper, VBucketAwareMemcached
+from memcached.helper.data_helper import MemcachedClientHelper
from rebalancetests import RebalanceBaseTest
log = logger.Logger.get_logger()
@@ -115,7 +115,6 @@ def _update_keys(self, version):
rejected_keys = []
#quit after updating max 100,000 keys
self.updated_keys = []
- rest = RestConnection(self.servers[0])
moxi = MemcachedClientHelper.proxy_client(self.servers[0], self.bucket_name)
for key in self.keys:
if len(self.updated_keys) > 10000:
@@ -130,9 +129,6 @@ def _update_keys(self, version):
rejected_keys.append(key)
if len(rejected_keys) > 0:
self.log.error("unable to update {0} keys".format(len(rejected_keys)))
- vbaware.done()
-
- #verify
def _verify_minimum_requirement(self, number_of_replicas):
# we should at least have
@@ -165,7 +161,6 @@ def _cleanup_cluster(self):
def _verify_data(self, version):
#verify all the keys
#let's use vbucketaware
- rest = RestConnection(self.servers[0])
moxi = MemcachedClientHelper.proxy_client(self.servers[0], self.bucket_name)
index = 0
all_verified = True
View
@@ -3,6 +3,7 @@
import mc_bin_client
import uuid
import logger
+import datetime
from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
@@ -19,7 +19,6 @@ def tearDown(self):
def test_spatial_compaction(self):
self.log.info(
"description : test manual compaction for spatial indexes")
- rest = self.helper.rest
prefix = str(uuid.uuid4())[:7]
design_name = "dev_test_spatial_compaction"
@@ -28,7 +27,7 @@ def test_spatial_compaction(self):
# Insert (resp. update, as they have the same prefix) and query
# the spatial index several time so that the compaction makes sense
for i in range(0, 8):
- doc_names = self.helper.insert_docs(2000, prefix)
+ self.helper.insert_docs(2000, prefix)
self.helper.get_results(design_name)
# Get the index size prior to compaction
@@ -26,7 +26,7 @@ def test_spatial_info(self):
self.helper.create_index_fun(design_name, prefix)
# Fill the database and add an index
- doc_names = self.helper.insert_docs(2000, prefix)
+ self.helper.insert_docs(2000, prefix)
self.helper.get_results(design_name)
status, info = self.helper.info(design_name)
disk_size = info["spatial_index"]["disk_size"]
@@ -46,7 +46,7 @@ def test_spatial_info(self):
# Insert a lot new documents, and return after starting to
# build up (not waiting until it's done) the index to test
# if the updater fields are set correctly
- doc_names = self.helper.insert_docs(50000, prefix)
+ self.helper.insert_docs(50000, prefix)
self.helper.get_results(design_name,
extra_params={"stale": "update_after"})
# Somehow stale=update_after doesn't really return immediately,
@@ -237,7 +237,7 @@ def run(self):
try:
self._run_queries()
- except Exception as ex:
+ except Exception:
self.log.error("Last query result:\n\n{0}\n\n"\
.format(json.dumps(self._last_results,
sort_keys=True)))
@@ -294,7 +294,7 @@ def _run_query(self, query_params, expected_num_docs=None):
self.helper.testcase.assertEquals(num_keys,
expected_num_docs,
error)
- except Exception as ex:
+ except Exception:
self.log.error(error)
raise
else:
View
@@ -6,8 +6,7 @@
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper as ClusterHelper, ClusterOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
-from memcached.helper.data_helper import MemcachedClientHelper, MutationThread, VBucketAwareMemcached, LoadWithMcsoda
-from membase.helper.failover_helper import FailoverHelper
+from memcached.helper.data_helper import LoadWithMcsoda
from threading import Thread
class SwapRebalanceBase(unittest.TestCase):
@@ -95,10 +94,10 @@ def _create_multiple_buckets(self, replica=1):
def items_verification(master, test):
rest = RestConnection(master)
#Verify items count across all node
- time = 600
+ timeout = 600
for bucket in rest.get_buckets():
- verified = RebalanceHelper.wait_till_total_numbers_match(master, bucket.name, timeout_in_seconds=time)
- test.assertTrue(verified, "Lost items!!.. failing test in {0} secs".format(time))
+ verified = RebalanceHelper.wait_till_total_numbers_match(master, bucket.name, timeout_in_seconds=timeout)
+ test.assertTrue(verified, "Lost items!!.. failing test in {0} secs".format(timeout))
@staticmethod
def start_load_phase(self, master):
@@ -301,7 +300,7 @@ def _common_test_body_failed_swap_rebalance(self):
for i in [1, 2, 3]:
expected_progress = 20*i
self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(expected_progress))
- reached = RestHelper(rest).rebalance_reached(expected_progress)
+ RestHelper(rest).rebalance_reached(expected_progress)
command = "[erlang:exit(element(2, X), kill) || X <- supervisor:which_children(ns_port_sup)]."
memcached_restarted = rest.diag_eval(command)
self.assertTrue(memcached_restarted, "unable to restart memcached/moxi process through diag/eval")
@@ -3,7 +3,6 @@
from threading import Thread
import unittest
import uuid
-import time
from TestInput import TestInputSingleton
import logger
from mc_bin_client import MemcachedError
@@ -2,8 +2,6 @@
import re
import time
import unittest
-import os
-import sys
from TestInput import TestInputSingleton
from builds.build_query import BuildQuery
from membase.api.rest_client import RestConnection, RestHelper
@@ -77,7 +75,6 @@ def _install_and_upgrade(self, initial_version='1.6.5.3',
rest_settings = input.membase_settings
servers = input.servers
server = servers[0]
- save_upgrade_config = False
is_amazon = False
if input.test_params.get('amazon',False):
is_amazon = True
@@ -311,10 +308,6 @@ def test_multiple_version_upgrade_start_all_1(self):
upgrade_path = ['1.7.0', '1.7.1.1']
self._install_and_upgrade('1.6.5.4', True, True, False, 10, False, upgrade_path)
- def test_multiple_version_upgrade_start_one_2(self):
- upgrade_path = ['1.7.1.1']
- self._install_and_upgrade('1.6.5.4', True, True, True, 10, False, upgrade_path)
-
def test_multiple_version_upgrade_start_all_2(self):
upgrade_path = ['1.7.1.1']
self._install_and_upgrade('1.6.5.4', True, True, False, 10, False, upgrade_path)
@@ -417,7 +410,6 @@ def _install_and_upgrade(self, initial_version='1.6.5.3',
input_version = input.test_params['version']
rest_settings = input.membase_settings
servers = input.servers
- save_upgrade_config = False
is_amazon = False
if input.test_params.get('amazon',False):
is_amazon = True
Oops, something went wrong.

0 comments on commit 26cc6c4

Please sign in to comment.