Skip to content
Browse files

CBQE-361 - Change message in ServerJoinException in exception.py

Renamed ServerJoinException to ServerSelfJoinException to handle the specific case of a node being added to itself
Added a new exception, AddNodeException, for generic failures when adding a node to the cluster

Change-Id: Ifa128ef69b6dfa5087e23fca3a29a51cbebe5a22
Reviewed-on: http://review.couchbase.org/18724
Reviewed-by: Andrei Baranouski <andrei.baranouski@gmail.com>
Tested-by: Deepkaran Salooja <deepkaran.salooja@globallogic.com>
  • Loading branch information...
1 parent 103a049 commit fe1238b501fa34a146158dde56e5785b4ad1ab34 deepkaran committed Jul 23, 2012
Showing with 51 additions and 42 deletions.
  1. +19 −11 lib/membase/api/exception.py
  2. +21 −20 lib/membase/api/rest_client.py
  3. +11 −11 pytests/addnodestests.py
View
30 lib/membase/api/exception.py
@@ -75,7 +75,7 @@ def __init__(self, api, parameters):
format(self.api, self.parameters)
-class ServerJoinException(MembaseHttpException):
+class ServerSelfJoinException(MembaseHttpException):
def __init__(self, nodeIp='', remoteIp=''):
self._message = 'node: {0} already added to this cluster:{1}'.\
format(remoteIp, nodeIp)
@@ -115,29 +115,37 @@ def __init__(self, string=''):
class DesignDocCreationException(MembaseHttpException):
- def __init__(self, design_doc_name, reason = ''):
+ def __init__(self, design_doc_name, reason=''):
self._message = 'Error occured design document %s: %s' % (design_doc_name, reason)
class QueryViewException(MembaseHttpException):
- def __init__(self, view_name, reason = ''):
+ def __init__(self, view_name, reason=''):
self._message = 'Error occured querying view %s: %s' % (view_name, reason)
class ReadDocumentException(MembaseHttpException):
- def __init__(self, doc_id, reason = ''):
+ def __init__(self, doc_id, reason=''):
self._message = 'Error occured looking up document %s: %s' % (doc_id, reason)
class CompactViewFailed(MembaseHttpException):
- def __init__(self, design_doc_name, reason = ''):
- self._message = 'Error occured triggering compaction for design_doc %s: %s' %\
+ def __init__(self, design_doc_name, reason=''):
+ self._message = 'Error occured triggering compaction for design_doc %s: %s' % \
(design_doc_name, reason)
class SetViewInfoNotFound(MembaseHttpException):
- def __init__(self, design_doc_name, reason = ''):
- self._message = 'Error occured reading set_view _info of ddoc %s: %s' %\
+ def __init__(self, design_doc_name, reason=''):
+ self._message = 'Error occured reading set_view _info of ddoc %s: %s' % \
(design_doc_name, reason)
class GetBucketInfoFailed(MembaseHttpException):
- def __init__(self, bucket, reason = ''):
- self._message = 'Error occured getting bucket information %s: %s' %\
- (bucket, reason)
+ def __init__(self, bucket, reason=''):
+ self._message = 'Error occured getting bucket information %s: %s' % \
+ (bucket, reason)
+
+class AddNodeException(MembaseHttpException):
+ def __init__(self, nodeIp='', remoteIp='', reason=''):
+ self._message = 'Error adding node: {0} to the cluster:{1} - {2}'.\
+ format(remoteIp, nodeIp, reason)
+ self.parameters = dict()
+ self.parameters['nodeIp'] = nodeIp
+ self.parameters['remoteIp'] = remoteIp
View
41 lib/membase/api/rest_client.py
@@ -7,9 +7,9 @@
import logger
from couchbase.document import DesignDocument, View
from exception import ServerAlreadyJoinedException, ServerUnavailableException, InvalidArgumentException
-from membase.api.exception import BucketCreationException, ServerJoinException, ClusterRemoteException, \
+from membase.api.exception import BucketCreationException, ServerSelfJoinException, ClusterRemoteException, \
RebalanceFailedException, FailoverFailedException, DesignDocCreationException, QueryViewException, \
- ReadDocumentException, GetBucketInfoFailed, CompactViewFailed, SetViewInfoNotFound
+ ReadDocumentException, GetBucketInfoFailed, CompactViewFailed, SetViewInfoNotFound, AddNodeException
log = logger.Logger.get_logger()
#helper library methods built on top of RestConnection interface
@@ -287,7 +287,7 @@ def get_ddoc(self, bucket, ddoc_name):
return json
- def run_view(self,bucket,view,name):
+ def run_view(self, bucket, view, name):
api = self.baseUrl + 'couchBase/{0}/_design/{1}/_view/{2}'.format(bucket, view, name)
status, content = self._http_request(api, headers=self._create_capi_headers())
@@ -300,7 +300,7 @@ def run_view(self,bucket,view,name):
return json_parsed
- def delete_view(self,bucket,view):
+ def delete_view(self, bucket, view):
status, json = self._delete_design_doc(bucket, view)
if not status:
@@ -393,7 +393,7 @@ def all_docs(self, bucket, params={}, limit=None, timeout=120):
return json.loads(content)
- def get_couch_doc(self, doc_id, bucket = "default", timeout=120):
+ def get_couch_doc(self, doc_id, bucket="default", timeout=120):
""" use couchBase uri to retrieve document from a bucket """
api = self.baseUrl + 'couchBase/%s/%s' % (bucket, doc_id)
@@ -490,7 +490,7 @@ def _create_headers(self):
def _http_request(self, api, method='GET', params='', headers=None, timeout=120):
if not headers:
- headers=self._create_headers()
+ headers = self._create_headers()
end_time = time.time() + timeout
while True:
@@ -654,7 +654,7 @@ def stop_replication(self, database, rep_id):
#can't add the node to itself ( TODO )
#server already added
#returns otpNode
- def add_node(self, user='', password='', remoteIp='', port='8091' ):
+ def add_node(self, user='', password='', remoteIp='', port='8091'):
otpNode = None
log.info('adding remote node : {0} to this cluster @ : {1}'\
.format(remoteIp, self.ip))
@@ -676,12 +676,13 @@ def add_node(self, user='', password='', remoteIp='', port='8091' ):
raise ServerAlreadyJoinedException(nodeIp=self.ip,
remoteIp=remoteIp)
elif content.find('Prepare join failed. Joining node to itself is not allowed') >= 0:
- raise ServerJoinException(nodeIp=self.ip,
+ raise ServerSelfJoinException(nodeIp=self.ip,
remoteIp=remoteIp)
else:
log.error('add_node error : {0}'.format(content))
- raise ServerJoinException(nodeIp=self.ip,
- remoteIp=remoteIp)
+ raise AddNodeException(nodeIp=self.ip,
+ remoteIp=remoteIp,
+ reason=content)
return otpNode
@@ -709,7 +710,7 @@ def eject_node(self, user='', password='', otpNode=None):
log.error('eject_node error {0}'.format(content))
return True
- def fail_over(self, otpNode=None ):
+ def fail_over(self, otpNode=None):
if otpNode is None:
log.error('otpNode parameter required')
return False
@@ -752,7 +753,7 @@ def rebalance(self, otpNodes, ejectedNodes):
return status
- def diag_eval(self,code):
+ def diag_eval(self, code):
api = '{0}{1}'.format(self.baseUrl, 'diag/eval/')
status, content = self._http_request(api, "POST", code)
log.info("/diag/eval : status : {0} content : {1}".format(status, content))
@@ -763,8 +764,8 @@ def monitorRebalance(self, stop_if_loop=False):
start = time.time()
progress = 0
retry = 0
- same_progress_count=0
- previous_progress=0
+ same_progress_count = 0
+ previous_progress = 0
while progress != -1 and (progress != 100 or self._rebalance_progress_status() == 'running') and retry < 20:
#-1 is error , -100 means could not retrieve progress
progress = self._rebalance_progress()
@@ -832,7 +833,7 @@ def _rebalance_progress(self):
count += 1
total_percentage += percentage
if count:
- avg_percentage = (total_percentage/count)
+ avg_percentage = (total_percentage / count)
else:
avg_percentage = 0
log.info('rebalance percentage : {0} %' .format(avg_percentage))
@@ -1246,7 +1247,7 @@ def get_database_disk_size(self, bucket='default'):
disk_size = (json_parsed[0]["basicStats"]["diskUsed"]) / (1024 * 1024)
return status, disk_size
- def ddoc_compaction(self, design_doc_id, bucket = "default"):
+ def ddoc_compaction(self, design_doc_id, bucket="default"):
api = self.baseUrl + "pools/default/buckets/%s/ddocs/%s/controller/compactView" % \
(bucket, design_doc_id)
status, content = self._http_request(api, 'POST')
@@ -1282,7 +1283,7 @@ def set_auto_compaction(self, parallelDBAndVC="false",
allowedTimePeriodToHour=None,
allowedTimePeriodToMin=None,
allowedTimePeriodAbort=None,
- bucket = None):
+ bucket=None):
"""Reset compaction values to default, try with old fields (dp4 build)
and then try with newer fields"""
params = {}
@@ -1297,10 +1298,10 @@ def set_auto_compaction(self, parallelDBAndVC="false",
params["autoCompactionDefined"] = "true"
# reuse current ram quota in mb per node
num_nodes = len(self.node_statuses())
- quota = self.get_bucket_json(bucket)["quota"]["ram"] /(1048576 * num_nodes)
+ quota = self.get_bucket_json(bucket)["quota"]["ram"] / (1048576 * num_nodes)
params["ramQuotaMB"] = quota
- params["parallelDBAndViewCompaction"] = parallelDBAndVC
+ params["parallelDBAndViewCompaction"] = parallelDBAndVC
# Need to verify None because the value could be = 0
if dbFragmentThreshold is not None:
params["databaseFragmentationThreshold[size]"] = dbFragmentThreshold
@@ -1441,7 +1442,7 @@ def __init__(self):
self.availableStorage = []
self.storage = []
self.memoryQuota = 0
- self.moxi =11211
+ self.moxi = 11211
self.memcached = 11210
self.id = ""
self.ip = ""
View
22 pytests/addnodestests.py
@@ -1,7 +1,7 @@
import unittest
from TestInput import TestInputSingleton
import logger
-from membase.api.exception import ServerJoinException, MembaseHttpExceptionTypes, ServerAlreadyJoinedException
+from membase.api.exception import ServerSelfJoinException, MembaseHttpExceptionTypes, ServerAlreadyJoinedException
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
@@ -20,18 +20,18 @@ def setUp(self):
self.servers = TestInputSingleton.input.servers
self.membase = TestInputSingleton.input.membase_settings
- def common_setUp(self,with_buckets):
+ def common_setUp(self, with_buckets):
ClusterOperationHelper.cleanup_cluster(self.servers)
server = self.servers[0]
if with_buckets:
- BucketOperationHelper.delete_all_buckets_or_assert(self.servers,test_case=self)
+ BucketOperationHelper.delete_all_buckets_or_assert(self.servers, test_case=self)
ok = BucketOperationHelper.create_multiple_buckets(server, 1)
if not ok:
self.fail("unable to create multiple buckets on this node : {0}".format(server))
def tearDown(self):
- BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers,test_case=self)
+ BucketOperationHelper.delete_all_buckets_or_assert(servers=self.servers, test_case=self)
#wait for all ns_servers
for server in self.servers:
self.assertTrue(RestHelper(RestConnection(server)).is_ns_server_running(timeout_in_seconds=480),
@@ -44,10 +44,10 @@ def tearDown(self):
def _add_1_node_body(self):
master = self.servers[0]
master_rest = RestConnection(master)
- for i in range(1,len(self.servers)):
+ for i in range(1, len(self.servers)):
ip = self.servers[i].ip
port = self.servers[i].port
- self.log.info('adding node : {0}:{1} to the cluster'.format(ip,port))
+ self.log.info('adding node : {0}:{1} to the cluster'.format(ip, port))
otpNode = master_rest.add_node(user=self.membase.rest_username,
password=self.membase.rest_password,
remoteIp=ip, port=port)
@@ -69,7 +69,7 @@ def _add_all_node_body(self):
master = self.servers[0]
master_rest = RestConnection(master)
added_otps = []
- for i in range(1,len(self.servers)):
+ for i in range(1, len(self.servers)):
ip = self.servers[i].ip
port = self.servers[i].port
self.log.info('adding node : {0} to the cluster'.format(ip))
@@ -101,14 +101,14 @@ def _add_node_itself_body(self):
password=self.membase.rest_password,
remoteIp=master.ip, port=master.port)
self.fail("server did not raise any exception while adding the node to itself")
- except ServerJoinException as ex:
- self.assertEquals(ex.type,MembaseHttpExceptionTypes.NODE_CANT_ADD_TO_ITSELF)
+ except ServerSelfJoinException as ex:
+ self.assertEquals(ex.type, MembaseHttpExceptionTypes.NODE_CANT_ADD_TO_ITSELF)
def _add_node_already_added_body(self):
self.common_setUp(False)
master = self.servers[0]
master_rest = RestConnection(master)
- for i in range(1,len(self.servers)):
+ for i in range(1, len(self.servers)):
ip = self.servers[i].ip
self.log.info('adding node : {0} to the cluster'.format(ip))
otpNode = master_rest.add_node(user=self.membase.rest_username,
@@ -120,7 +120,7 @@ def _add_node_already_added_body(self):
try:
readd_otpNode = master_rest.add_node(user=self.membase.rest_username,
password=self.membase.rest_password,
- remoteIp=ip,port=self.servers[i].port)
+ remoteIp=ip, port=self.servers[i].port)
if readd_otpNode:
self.fail("server did not raise any exception when calling add_node on an already added node")
except ServerAlreadyJoinedException:

0 comments on commit fe1238b

Please sign in to comment.
Something went wrong with that request. Please try again.