Working on migrating connector servers

commit 748ade1aff3c33f108cd4fa1def041ada988890f (parent: 4b67451)
Dariusz Suchojad committed Apr 26, 2012
@@ -103,7 +103,7 @@ def __init__(self, host=None, port=None, zmq_context=None, crypto_manager=None,
soap11_content_type=None, soap12_content_type=None,
plain_xml_content_type=None, json_content_type=None,
internal_service_modules=None, service_modules=None, base_dir=None,
- work_dir=None, pickup=None, fs_server_config=None):
+ work_dir=None, pickup=None, fs_server_config=None, conn_srv_grace_time=None):
self.host = host
self.port = port
self.zmq_context = zmq_context or zmq.Context()
@@ -128,6 +128,7 @@ def __init__(self, host=None, port=None, zmq_context=None, crypto_manager=None,
self.work_dir = work_dir
self.pickup = pickup
self.fs_server_config = fs_server_config
+ self.conn_srv_grace_time = conn_srv_grace_time
# The main config store
self.config = ConfigStore()
@@ -183,17 +184,17 @@ def _after_init_accepted(self, server):
# Let's see if we can become the connector server, the one that starts
# all the connectors, and starts them only once throughout the whole cluster.
connector_server_keep_alive_job_time = int(self.fs_server_config['singleton']['connector_server_keep_alive_job_time'])
- grace_time = int(self.fs_server_config['singleton']['grace_time_multiplier']) * connector_server_keep_alive_job_time
+ self.conn_srv_grace_time = int(self.fs_server_config['singleton']['grace_time_multiplier']) * connector_server_keep_alive_job_time
base_conn_srv_job_data = Bunch({
'weeks': None, 'days': None,
'hours': None, 'minutes': None,
- 'seconds': connector_server_keep_alive_job_time,
+ 'seconds': 3, #connector_server_keep_alive_job_time,
'repeats': None,
'extra': 'server_id:{};cluster_id:{}'.format(server.id, server.cluster_id),
})
- if self.odb.become_connector_server(grace_time):
+ if self.odb.become_connector_server(self.conn_srv_grace_time):
self.singleton_server.is_connector_server = True
self._init_connectors(server)
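
The election above reduces to simple arithmetic: the grace time is the keep-alive interval multiplied by a configured factor, so the connector-server role only changes hands after several missed heartbeats. Note that this hunk temporarily hardcodes the job interval to 3 seconds, leaving the configured value in a comment, in line with the work-in-progress commit message. A quick illustration with made-up config values:

    # Illustrative values only; the real ones come from fs_server_config.
    connector_server_keep_alive_job_time = 30  # seconds between keep-alive pings
    grace_time_multiplier = 3                  # how many pings may be missed

    conn_srv_grace_time = grace_time_multiplier * connector_server_keep_alive_job_time
    assert conn_srv_grace_time == 90  # the incumbent is declared dead after 90s of silence
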
@@ -210,7 +211,7 @@ def _after_init_accepted(self, server):
# server is alive or not
conn_srv_job_data = Bunch(base_conn_srv_job_data.copy())
conn_srv_job_data.start_date = datetime.now() + timedelta(seconds=10) # Let's give the other server some time to warm up
- conn_srv_job_data.name = 'zato.CheckConnectorServer'
+ conn_srv_job_data.name = 'zato.EnsureConnectorServer'
conn_srv_job_data.service = 'zato.server.service.internal.EnsureConnectorServer'
self.singleton_server.scheduler.create_interval_based(conn_srv_job_data)
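
Building the job from Bunch(base_conn_srv_job_data.copy()) is a small but deliberate pattern: copy() yields an independent top-level mapping and re-wrapping it in Bunch restores attribute-style access, so each job can be customised without touching the shared template. A minimal sketch using the same library:

    from bunch import Bunch  # the library this commit also imports below

    base = Bunch({'seconds': 3, 'repeats': None})

    # copy() gives an independent top-level mapping; wrapping it in Bunch
    # again restores attribute access on the new object.
    job = Bunch(base.copy())
    job.name = 'zato.EnsureConnectorServer'

    assert 'name' not in base  # the shared template is not mutated
    assert job.seconds == 3    # the defaults are inherited
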
@@ -272,6 +272,7 @@ def become_connector_server(self, grace_time):
elif self.conn_server_past_grace_time(cluster, grace_time):
return self._become_connector_server(cluster)
else:
+ self._session.rollback()
msg = ('Server id:[{}], name:[{}] will not be a connector server for '
'cluster id:[{}], name:[{}], cluster.cn_srv_id:[{}], cluster.cn_srv_keep_alive_dt:[{}]').format(
self.server.id, self.server.name, cluster.id, cluster.name, cluster.cn_srv_id, cluster.cn_srv_keep_alive_dt)
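
The added rollback covers the branch where the session reads the cluster row but writes nothing. Assuming standard SQLAlchemy semantics (the query that loads the row sits outside this hunk), that read opens a transaction which would otherwise stay open until the session is next used; ending it explicitly also releases a row lock if the query took one. A self-contained illustration of the idea, using an in-memory SQLite database and a stand-in Cluster model rather than Zato's actual one:

    from sqlalchemy import Column, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Cluster(Base):
        __tablename__ = 'cluster'
        id = Column(Integer, primary_key=True)
        cn_srv_id = Column(Integer, nullable=True)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    session.add(Cluster(id=1, cn_srv_id=17))
    session.commit()

    cluster = session.query(Cluster).one()  # implicitly begins a transaction
    if cluster.cn_srv_id:
        # Read-only outcome - nothing to commit, so end the transaction
        # explicitly instead of leaving it open on the session.
        session.rollback()
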
@@ -26,6 +26,9 @@
from traceback import format_exc
from urlparse import parse_qs
+# Bunch
+from bunch import Bunch
+
# Zato
from zato.common import ZatoException, ZATO_OK
from zato.common.broker_message import MESSAGE_TYPE
@@ -122,3 +125,11 @@ def handle(self):
raise ZatoException(self.cid,
'Could not set the connector server keep alive timestamp, current server_id:[{}] != cluster.cn_srv_id:[{}]'.format(
server_id, cluster.cn_srv_id))
+
+class EnsureConnectorServer(AdminService):
+    """ Checks whether the server that manages the connectors is still alive
+    and takes over its role if it is not.
+    """
+    def handle(self):
+        if self.server.odb.become_connector_server(2):
+            self.server.singleton_server.scheduler.delete(Bunch(name='zato.EnsureConnectorServer'))
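
Run on the interval scheduled earlier, the new service keeps retrying the same election; once become_connector_server() succeeds, meaning the incumbent missed its keep-alive window, the job deletes itself because this server now owns the role and no longer needs to watch for a dead incumbent. The grace time of 2 seconds here, like the 3-second interval above, reads like a temporary development value. A toy, self-contained version of the election this relies on; all names are stand-ins and only the control flow mirrors the diff:

    from datetime import datetime, timedelta

    class ToyODB(object):
        """ Stand-in for the ODB-backed election shown in this diff. """
        def __init__(self):
            self.cn_srv_id = None
            self.cn_srv_keep_alive_dt = None

        def become_connector_server(self, server_id, grace_time):
            cutoff = datetime.utcnow() - timedelta(seconds=grace_time)
            past_grace = (self.cn_srv_keep_alive_dt is None
                or self.cn_srv_keep_alive_dt < cutoff)
            if self.cn_srv_id is None or past_grace:
                self.cn_srv_id = server_id
                self.cn_srv_keep_alive_dt = datetime.utcnow()
                return True
            return False

    odb = ToyODB()
    print(odb.become_connector_server(1, 2))  # True - no incumbent, server 1 wins
    print(odb.become_connector_server(2, 2))  # False - server 1's keep-alive is fresh
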
