diff --git a/hazelcast/proxy/__init__.py b/hazelcast/proxy/__init__.py index 8de49f104b..2f509b8644 100644 --- a/hazelcast/proxy/__init__.py +++ b/hazelcast/proxy/__init__.py @@ -39,7 +39,7 @@ SET_SERVICE: Set, TOPIC_SERVICE: Topic, PN_COUNTER_SERVICE: PNCounter, - FLAKE_ID_GENERATOR_SERVICE: FlakeIdGenerator + FLAKE_ID_GENERATOR_SERVICE: FlakeIdGenerator, } diff --git a/hazelcast/proxy/base.py b/hazelcast/proxy/base.py index 8264294c18..dc00acff77 100644 --- a/hazelcast/proxy/base.py +++ b/hazelcast/proxy/base.py @@ -4,7 +4,7 @@ from hazelcast import six from hazelcast.util import get_attr_name -MAX_SIZE = float('inf') +MAX_SIZE = float("inf") def _no_op_response_handler(_): @@ -55,12 +55,16 @@ def _invoke_on_target(self, request, uuid, response_handler=_no_op_response_hand def _invoke_on_key(self, request, key_data, response_handler=_no_op_response_handler): partition_id = self._partition_service.get_partition_id(key_data) - invocation = Invocation(request, partition_id=partition_id, response_handler=response_handler) + invocation = Invocation( + request, partition_id=partition_id, response_handler=response_handler + ) self._invocation_service.invoke(invocation) return invocation.future def _invoke_on_partition(self, request, partition_id, response_handler=_no_op_response_handler): - invocation = Invocation(request, partition_id=partition_id, response_handler=response_handler) + invocation = Invocation( + request, partition_id=partition_id, response_handler=response_handler + ) self._invocation_service.invoke(invocation) return invocation.future @@ -78,7 +82,9 @@ def __init__(self, service_name, name, context): self._partition_id = context.partition_service.get_partition_id(partition_key) def _invoke(self, request, response_handler=_no_op_response_handler): - invocation = Invocation(request, partition_id=self._partition_id, response_handler=response_handler) + invocation = Invocation( + request, partition_id=self._partition_id, response_handler=response_handler + ) self._invocation_service.invoke(invocation) return invocation.future @@ -95,7 +101,9 @@ def __init__(self, name, transaction, context): self._to_data = serialization_service.to_data def _invoke(self, request, response_handler=_no_op_response_handler): - invocation = Invocation(request, connection=self.transaction.connection, response_handler=response_handler) + invocation = Invocation( + request, connection=self.transaction.connection, response_handler=response_handler + ) self._invocation_service.invoke(invocation) return invocation.future @@ -202,8 +210,17 @@ class EntryEvent(object): number_of_affected_entries (int): Number of affected entries by this event. 
""" - def __init__(self, to_object, key, value, old_value, merging_value, event_type, uuid, - number_of_affected_entries): + def __init__( + self, + to_object, + key, + value, + old_value, + merging_value, + event_type, + uuid, + number_of_affected_entries, + ): self._to_object = to_object self._key_data = key self._value_data = value @@ -234,10 +251,19 @@ def merging_value(self): return self._to_object(self._merging_value_data) def __repr__(self): - return "EntryEvent(key=%s, value=%s, old_value=%s, merging_value=%s, event_type=%s, uuid=%s, " \ - "number_of_affected_entries=%s)" % (self.key, self.value, self.old_value, self.merging_value, - get_attr_name(EntryEventType, self.event_type), self.uuid, - self.number_of_affected_entries) + return ( + "EntryEvent(key=%s, value=%s, old_value=%s, merging_value=%s, event_type=%s, uuid=%s, " + "number_of_affected_entries=%s)" + % ( + self.key, + self.value, + self.old_value, + self.merging_value, + get_attr_name(EntryEventType, self.event_type), + self.uuid, + self.number_of_affected_entries, + ) + ) class TopicMessage(object): diff --git a/hazelcast/proxy/cp/__init__.py b/hazelcast/proxy/cp/__init__.py index 3e0960d056..e9c1c6d67a 100644 --- a/hazelcast/proxy/cp/__init__.py +++ b/hazelcast/proxy/cp/__init__.py @@ -37,7 +37,9 @@ def _invoke(self, request, response_handler=_no_op_response_handler): class SessionAwareCPProxy(BaseCPProxy): def __init__(self, context, group_id, service_name, proxy_name, object_name): - super(SessionAwareCPProxy, self).__init__(context, group_id, service_name, proxy_name, object_name) + super(SessionAwareCPProxy, self).__init__( + context, group_id, service_name, proxy_name, object_name + ) self._session_manager = context.proxy_session_manager def get_group_id(self): diff --git a/hazelcast/proxy/cp/atomic_long.py b/hazelcast/proxy/cp/atomic_long.py index 5e4496bbdc..5717be427b 100644 --- a/hazelcast/proxy/cp/atomic_long.py +++ b/hazelcast/proxy/cp/atomic_long.py @@ -1,6 +1,12 @@ -from hazelcast.protocol.codec import atomic_long_add_and_get_codec, atomic_long_compare_and_set_codec, \ - atomic_long_get_codec, atomic_long_get_and_add_codec, atomic_long_get_and_set_codec, atomic_long_alter_codec, \ - atomic_long_apply_codec +from hazelcast.protocol.codec import ( + atomic_long_add_and_get_codec, + atomic_long_compare_and_set_codec, + atomic_long_get_codec, + atomic_long_get_and_add_codec, + atomic_long_get_and_set_codec, + atomic_long_alter_codec, + atomic_long_apply_codec, +) from hazelcast.proxy.cp import BaseCPProxy from hazelcast.util import check_not_none, check_is_int diff --git a/hazelcast/proxy/cp/atomic_reference.py b/hazelcast/proxy/cp/atomic_reference.py index d538f1fde6..db5477356e 100644 --- a/hazelcast/proxy/cp/atomic_reference.py +++ b/hazelcast/proxy/cp/atomic_reference.py @@ -1,5 +1,10 @@ -from hazelcast.protocol.codec import atomic_ref_compare_and_set_codec, atomic_ref_get_codec, atomic_ref_set_codec, \ - atomic_ref_contains_codec, atomic_ref_apply_codec +from hazelcast.protocol.codec import ( + atomic_ref_compare_and_set_codec, + atomic_ref_get_codec, + atomic_ref_set_codec, + atomic_ref_contains_codec, + atomic_ref_apply_codec, +) from hazelcast.proxy.cp import BaseCPProxy from hazelcast.util import check_true, check_not_none diff --git a/hazelcast/proxy/cp/count_down_latch.py b/hazelcast/proxy/cp/count_down_latch.py index 0a25a7ff2b..ac6a810d5e 100644 --- a/hazelcast/proxy/cp/count_down_latch.py +++ b/hazelcast/proxy/cp/count_down_latch.py @@ -1,8 +1,13 @@ import uuid from hazelcast.errors import 
OperationTimeoutError -from hazelcast.protocol.codec import count_down_latch_await_codec, count_down_latch_get_round_codec, \ - count_down_latch_count_down_codec, count_down_latch_get_count_codec, count_down_latch_try_set_count_codec +from hazelcast.protocol.codec import ( + count_down_latch_await_codec, + count_down_latch_get_round_codec, + count_down_latch_count_down_codec, + count_down_latch_get_count_codec, + count_down_latch_try_set_count_codec, +) from hazelcast.proxy.cp import BaseCPProxy from hazelcast.util import to_millis, check_true, check_is_number, check_is_int @@ -64,7 +69,9 @@ def await_latch(self, timeout): timeout = max(0, timeout) invocation_uuid = uuid.uuid4() codec = count_down_latch_await_codec - request = codec.encode_request(self._group_id, self._object_name, invocation_uuid, to_millis(timeout)) + request = codec.encode_request( + self._group_id, self._object_name, invocation_uuid, to_millis(timeout) + ) return self._invoke(request, codec.decode_response) def count_down(self): @@ -136,5 +143,7 @@ def _get_round(self): def _request_count_down(self, expected_round, invocation_uuid): codec = count_down_latch_count_down_codec - request = codec.encode_request(self._group_id, self._object_name, invocation_uuid, expected_round) + request = codec.encode_request( + self._group_id, self._object_name, invocation_uuid, expected_round + ) return self._invoke(request) diff --git a/hazelcast/proxy/cp/fenced_lock.py b/hazelcast/proxy/cp/fenced_lock.py index 03b3660b98..e2889ca1fd 100644 --- a/hazelcast/proxy/cp/fenced_lock.py +++ b/hazelcast/proxy/cp/fenced_lock.py @@ -1,11 +1,20 @@ import time import uuid -from hazelcast.errors import LockOwnershipLostError, LockAcquireLimitReachedError, SessionExpiredError, \ - WaitKeyCancelledError, IllegalMonitorStateError +from hazelcast.errors import ( + LockOwnershipLostError, + LockAcquireLimitReachedError, + SessionExpiredError, + WaitKeyCancelledError, + IllegalMonitorStateError, +) from hazelcast.future import ImmediateExceptionFuture -from hazelcast.protocol.codec import fenced_lock_lock_codec, fenced_lock_try_lock_codec, fenced_lock_unlock_codec, \ - fenced_lock_get_lock_ownership_codec +from hazelcast.protocol.codec import ( + fenced_lock_lock_codec, + fenced_lock_try_lock_codec, + fenced_lock_unlock_codec, + fenced_lock_get_lock_ownership_codec, +) from hazelcast.proxy.cp import SessionAwareCPProxy from hazelcast.util import thread_id, to_millis @@ -202,7 +211,9 @@ def check_response(f): self._lock_session_ids.pop(current_thread_id, None) raise e - return self._request_unlock(session_id, current_thread_id, uuid.uuid4()).continue_with(check_response) + return self._request_unlock(session_id, current_thread_id, uuid.uuid4()).continue_with( + check_response + ) def is_locked(self): """Returns whether this lock is locked or not. @@ -314,9 +325,11 @@ def check_fence(fence): return self._do_lock(current_thread_id, invocation_uuid) except WaitKeyCancelledError: self._release_session(session_id) - error = IllegalMonitorStateError("Lock(%s) not acquired because the lock call on the CP group " - "is cancelled, possibly because of another indeterminate call " - "from the same thread." % self._object_name) + error = IllegalMonitorStateError( + "Lock(%s) not acquired because the lock call on the CP group " + "is cancelled, possibly because of another indeterminate call " + "from the same thread." 
% self._object_name + ) raise error except Exception as e: self._release_session(session_id) @@ -327,11 +340,14 @@ def check_fence(fence): return fence self._release_session(session_id) - error = LockAcquireLimitReachedError("Lock(%s) reentrant lock limit is already reached!" - % self._object_name) + error = LockAcquireLimitReachedError( + "Lock(%s) reentrant lock limit is already reached!" % self._object_name + ) raise error - return self._request_lock(session_id, current_thread_id, invocation_uuid).continue_with(check_fence) + return self._request_lock(session_id, current_thread_id, invocation_uuid).continue_with( + check_fence + ) return self._acquire_session().continue_with(do_lock_once) @@ -367,8 +383,9 @@ def check_fence(fence): return fence - return self._request_try_lock(session_id, current_thread_id, invocation_uuid, timeout).continue_with( - check_fence) + return self._request_try_lock( + session_id, current_thread_id, invocation_uuid, timeout + ).continue_with(check_fence) return self._acquire_session().continue_with(do_try_lock_once) @@ -387,30 +404,42 @@ def _verify_no_locked_session_id_present(self, current_thread_id): raise self._new_lock_ownership_lost_error(lock_session_id) def _new_lock_ownership_lost_error(self, lock_session_id): - error = LockOwnershipLostError("Current thread is not the owner of the Lock(%s) because its " - "Session(%s) is closed by the server." % (self._proxy_name, lock_session_id)) + error = LockOwnershipLostError( + "Current thread is not the owner of the Lock(%s) because its " + "Session(%s) is closed by the server." % (self._proxy_name, lock_session_id) + ) return error def _new_illegal_monitor_state_error(self): - error = IllegalMonitorStateError("Current thread is not the owner of the Lock(%s)" % self._proxy_name) + error = IllegalMonitorStateError( + "Current thread is not the owner of the Lock(%s)" % self._proxy_name + ) return error def _request_lock(self, session_id, current_thread_id, invocation_uuid): codec = fenced_lock_lock_codec - request = codec.encode_request(self._group_id, self._object_name, session_id, current_thread_id, - invocation_uuid) + request = codec.encode_request( + self._group_id, self._object_name, session_id, current_thread_id, invocation_uuid + ) return self._invoke(request, codec.decode_response) def _request_try_lock(self, session_id, current_thread_id, invocation_uuid, timeout): codec = fenced_lock_try_lock_codec - request = codec.encode_request(self._group_id, self._object_name, session_id, current_thread_id, - invocation_uuid, to_millis(timeout)) + request = codec.encode_request( + self._group_id, + self._object_name, + session_id, + current_thread_id, + invocation_uuid, + to_millis(timeout), + ) return self._invoke(request, codec.decode_response) def _request_unlock(self, session_id, current_thread_id, invocation_uuid): codec = fenced_lock_unlock_codec - request = codec.encode_request(self._group_id, self._object_name, session_id, current_thread_id, - invocation_uuid) + request = codec.encode_request( + self._group_id, self._object_name, session_id, current_thread_id, invocation_uuid + ) return self._invoke(request, codec.decode_response) def _request_get_lock_ownership_state(self): @@ -430,4 +459,8 @@ def is_locked(self): return self.fence != FencedLock.INVALID_FENCE def is_locked_by(self, session_id, current_thread_id): - return self.is_locked() and self.session_id == session_id and self.thread_id == current_thread_id + return ( + self.is_locked() + and self.session_id == session_id + and self.thread_id == 
current_thread_id + ) diff --git a/hazelcast/proxy/cp/semaphore.py b/hazelcast/proxy/cp/semaphore.py index 1fa54c3759..946b86848f 100644 --- a/hazelcast/proxy/cp/semaphore.py +++ b/hazelcast/proxy/cp/semaphore.py @@ -3,8 +3,14 @@ from hazelcast.errors import SessionExpiredError, WaitKeyCancelledError, IllegalStateError from hazelcast.future import ImmediateFuture, ImmediateExceptionFuture -from hazelcast.protocol.codec import semaphore_init_codec, semaphore_acquire_codec, semaphore_available_permits_codec, \ - semaphore_drain_codec, semaphore_change_codec, semaphore_release_codec +from hazelcast.protocol.codec import ( + semaphore_init_codec, + semaphore_acquire_codec, + semaphore_available_permits_codec, + semaphore_drain_codec, + semaphore_change_codec, + semaphore_release_codec, +) from hazelcast.proxy.cp import SessionAwareCPProxy, BaseCPProxy from hazelcast.util import check_not_negative, check_true, thread_id, to_millis @@ -307,8 +313,9 @@ def check_response(response): finally: self._release_session(session_id, permits) - return self._request_release(session_id, current_thread_id, invocation_uuid, permits).continue_with( - check_response) + return self._request_release( + session_id, current_thread_id, invocation_uuid, permits + ).continue_with(check_response) def try_acquire(self, permits=1, timeout=0): check_true(permits > 0, "Permits must be positive") @@ -329,16 +336,19 @@ def check_response(response): return self._do_acquire(current_thread_id, invocation_uuid, permits) except WaitKeyCancelledError: self._release_session(session_id, permits) - error = IllegalStateError("Semaphore(\"%s\") not acquired because the acquire call on the CP " - "group is cancelled, possibly because of another indeterminate call " - "from the same thread." % self._object_name) + error = IllegalStateError( + 'Semaphore("%s") not acquired because the acquire call on the CP ' + "group is cancelled, possibly because of another indeterminate call " + "from the same thread." 
% self._object_name + ) raise error except Exception as e: self._release_session(session_id, permits) raise e - return self._request_acquire(session_id, current_thread_id, invocation_uuid, permits, -1).continue_with( - check_response) + return self._request_acquire( + session_id, current_thread_id, invocation_uuid, permits, -1 + ).continue_with(check_response) return self._acquire_session(permits).continue_with(do_acquire_once) @@ -358,7 +368,9 @@ def check_count(count): self._release_session(session_id, _DRAIN_SESSION_ACQ_COUNT) raise e - return self._request_drain(session_id, current_thread_id, invocation_uuid).continue_with(check_count) + return self._request_drain( + session_id, current_thread_id, invocation_uuid + ).continue_with(check_count) return self._acquire_session(_DRAIN_SESSION_ACQ_COUNT).continue_with(do_drain_once) @@ -378,8 +390,9 @@ def check_response(response): finally: self._release_session(session_id) - return self._request_change(session_id, current_thread_id, invocation_uuid, delta).continue_with( - check_response) + return self._request_change( + session_id, current_thread_id, invocation_uuid, delta + ).continue_with(check_response) return self._acquire_session().continue_with(do_change_permits_once) @@ -400,7 +413,9 @@ def check_response(response): remaining_timeout = timeout - (time.time() - start) if remaining_timeout <= 0: return False - return self._do_try_acquire(current_thread_id, invocation_uuid, permits, remaining_timeout) + return self._do_try_acquire( + current_thread_id, invocation_uuid, permits, remaining_timeout + ) except WaitKeyCancelledError: self._release_session(session_id, permits) return False @@ -408,13 +423,16 @@ def check_response(response): self._release_session(session_id, permits) raise e - return self._request_acquire(session_id, current_thread_id, invocation_uuid, permits, - timeout).continue_with(check_response) + return self._request_acquire( + session_id, current_thread_id, invocation_uuid, permits, timeout + ).continue_with(check_response) return self._acquire_session(permits).continue_with(do_try_acquire_once) def _new_illegal_state_error(self, cause=None): - error = IllegalStateError("Semaphore[\"%s\"] has no valid session!" % self._object_name, cause) + error = IllegalStateError( + 'Semaphore["%s"] has no valid session!' 
% self._object_name, cause + ) return error def _request_acquire(self, session_id, current_thread_id, invocation_uuid, permits, timeout): @@ -422,32 +440,49 @@ def _request_acquire(self, session_id, current_thread_id, invocation_uuid, permi if timeout > 0: timeout = to_millis(timeout) - request = codec.encode_request(self._group_id, self._object_name, session_id, current_thread_id, - invocation_uuid, permits, timeout) + request = codec.encode_request( + self._group_id, + self._object_name, + session_id, + current_thread_id, + invocation_uuid, + permits, + timeout, + ) return self._invoke(request, codec.decode_response) def _request_drain(self, session_id, current_thread_id, invocation_uuid): codec = semaphore_drain_codec - request = codec.encode_request(self._group_id, self._object_name, session_id, current_thread_id, - invocation_uuid) + request = codec.encode_request( + self._group_id, self._object_name, session_id, current_thread_id, invocation_uuid + ) return self._invoke(request, codec.decode_response) def _request_change(self, session_id, current_thread_id, invocation_uuid, delta): codec = semaphore_change_codec - request = codec.encode_request(self._group_id, self._object_name, session_id, current_thread_id, - invocation_uuid, delta) + request = codec.encode_request( + self._group_id, self._object_name, session_id, current_thread_id, invocation_uuid, delta + ) return self._invoke(request) def _request_release(self, session_id, current_thread_id, invocation_uuid, permits): codec = semaphore_release_codec - request = codec.encode_request(self._group_id, self._object_name, session_id, current_thread_id, - invocation_uuid, permits) + request = codec.encode_request( + self._group_id, + self._object_name, + session_id, + current_thread_id, + invocation_uuid, + permits, + ) return self._invoke(request) class SessionlessSemaphore(Semaphore): def __init__(self, context, group_id, service_name, proxy_name, object_name): - super(SessionlessSemaphore, self).__init__(context, group_id, service_name, proxy_name, object_name) + super(SessionlessSemaphore, self).__init__( + context, group_id, service_name, proxy_name, object_name + ) self._session_manager = context.proxy_session_manager def acquire(self, permits=1): @@ -457,7 +492,11 @@ def handler(f): f.result() return None - return self._get_thread_id().continue_with(self._do_try_acquire, permits, -1).continue_with(handler) + return ( + self._get_thread_id() + .continue_with(self._do_try_acquire, permits, -1) + .continue_with(handler) + ) def drain_permits(self): return self._get_thread_id().continue_with(self._do_drain_permits) @@ -475,15 +514,17 @@ def try_acquire(self, permits=1, timeout=0): def _do_try_acquire(self, global_thread_id, permits, timeout): global_thread_id = global_thread_id.result() invocation_uuid = uuid.uuid4() - return self._request_acquire(global_thread_id, invocation_uuid, permits, timeout).continue_with( - self._check_acquire_response) + return self._request_acquire( + global_thread_id, invocation_uuid, permits, timeout + ).continue_with(self._check_acquire_response) def _do_drain_permits(self, global_thread_id): global_thread_id = global_thread_id.result() invocation_uuid = uuid.uuid4() codec = semaphore_drain_codec - request = codec.encode_request(self._group_id, self._object_name, _NO_SESSION_ID, global_thread_id, - invocation_uuid) + request = codec.encode_request( + self._group_id, self._object_name, _NO_SESSION_ID, global_thread_id, invocation_uuid + ) return self._invoke(request, codec.decode_response) def 
_do_change_permits(self, permits): @@ -495,31 +536,52 @@ def _request_acquire(self, global_thread_id, invocation_uuid, permits, timeout): if timeout > 0: timeout = to_millis(timeout) - request = codec.encode_request(self._group_id, self._object_name, _NO_SESSION_ID, global_thread_id, - invocation_uuid, permits, timeout) + request = codec.encode_request( + self._group_id, + self._object_name, + _NO_SESSION_ID, + global_thread_id, + invocation_uuid, + permits, + timeout, + ) return self._invoke(request, codec.decode_response) def _request_change(self, global_thread_id, invocation_uuid, permits): global_thread_id = global_thread_id.result() codec = semaphore_change_codec - request = codec.encode_request(self._group_id, self._object_name, _NO_SESSION_ID, global_thread_id, - invocation_uuid, permits) + request = codec.encode_request( + self._group_id, + self._object_name, + _NO_SESSION_ID, + global_thread_id, + invocation_uuid, + permits, + ) return self._invoke(request) def _request_release(self, global_thread_id, invocation_uuid, permits): global_thread_id = global_thread_id.result() codec = semaphore_release_codec - request = codec.encode_request(self._group_id, self._object_name, _NO_SESSION_ID, global_thread_id, - invocation_uuid, permits) + request = codec.encode_request( + self._group_id, + self._object_name, + _NO_SESSION_ID, + global_thread_id, + invocation_uuid, + permits, + ) return self._invoke(request) def _check_acquire_response(self, response): try: return response.result() except WaitKeyCancelledError: - error = IllegalStateError("Semaphore(\"%s\") not acquired because the acquire call on the " - "CP group is cancelled, possibly because of another indeterminate " - "call from the same thread." % self._object_name) + error = IllegalStateError( + 'Semaphore("%s") not acquired because the acquire call on the ' + "CP group is cancelled, possibly because of another indeterminate " + "call from the same thread." 
% self._object_name + ) raise error def _get_thread_id(self): diff --git a/hazelcast/proxy/executor.py b/hazelcast/proxy/executor.py index df28427b61..10fc44813b 100644 --- a/hazelcast/proxy/executor.py +++ b/hazelcast/proxy/executor.py @@ -1,8 +1,11 @@ from uuid import uuid4 from hazelcast import future -from hazelcast.protocol.codec import executor_service_shutdown_codec, \ - executor_service_is_shutdown_codec, \ - executor_service_submit_to_partition_codec, executor_service_submit_to_member_codec +from hazelcast.protocol.codec import ( + executor_service_shutdown_codec, + executor_service_is_shutdown_codec, + executor_service_submit_to_partition_codec, + executor_service_submit_to_member_codec, +) from hazelcast.proxy.base import Proxy from hazelcast.util import check_not_none @@ -24,14 +27,18 @@ def execute_on_key_owner(self, key, task): check_not_none(task, "task can't be None") def handler(message): - return self._to_object(executor_service_submit_to_partition_codec.decode_response(message)) + return self._to_object( + executor_service_submit_to_partition_codec.decode_response(message) + ) key_data = self._to_data(key) task_data = self._to_data(task) partition_id = self._context.partition_service.get_partition_id(key_data) uuid = uuid4() - request = executor_service_submit_to_partition_codec.encode_request(self.name, uuid, task_data) + request = executor_service_submit_to_partition_codec.encode_request( + self.name, uuid, task_data + ) return self._invoke_on_partition(request, partition_id, handler) def execute_on_member(self, member, task): @@ -102,5 +109,7 @@ def _execute_on_member(self, uuid, task_data, member_uuid): def handler(message): return self._to_object(executor_service_submit_to_member_codec.decode_response(message)) - request = executor_service_submit_to_member_codec.encode_request(self.name, uuid, task_data, member_uuid) + request = executor_service_submit_to_member_codec.encode_request( + self.name, uuid, task_data, member_uuid + ) return self._invoke_on_target(request, member_uuid, handler) diff --git a/hazelcast/proxy/flake_id_generator.py b/hazelcast/proxy/flake_id_generator.py index 0138d88307..f96d05beeb 100644 --- a/hazelcast/proxy/flake_id_generator.py +++ b/hazelcast/proxy/flake_id_generator.py @@ -12,11 +12,11 @@ class FlakeIdGenerator(Proxy): """A cluster-wide unique ID generator. Generated IDs are int (long in case of the Python 2 on 32 bit architectures) values and are k-ordered (roughly ordered). IDs are in the range from 0 to 2^63 - 1. - + The IDs contain timestamp component and a node ID component, which is assigned when the member joins the cluster. This allows the IDs to be ordered and unique without any coordination between members, which makes the generator safe even in split-brain scenario. - + Timestamp component is in milliseconds since 1.1.2018, 0:00 UTC and has 41 bits. This caps the useful lifespan of the generator to little less than 70 years (until ~2088). The sequence component is 6 bits. If more than 64 IDs are requested in single millisecond, IDs will gracefully overflow to the next @@ -24,13 +24,14 @@ class FlakeIdGenerator(Proxy): by more than 15 seconds, if IDs are requested at higher rate, the call will block. Note, however, that clients are able to generate even faster because each call goes to a different (random) member and the 64 IDs/ms limit is for single member. - + Node ID overflow: It is possible to generate IDs on any member or client as long as there is at least one member with join version smaller than 2^16 in the cluster. 
The remedy is to restart the cluster: nodeId will be assigned from zero again. Uniqueness after the restart will be preserved thanks to the timestamp component. """ + _BITS_NODE_ID = 16 _BITS_SEQUENCE = 6 @@ -41,14 +42,16 @@ def __init__(self, service_name, name, context): if config is None: config = _FlakeIdGeneratorConfig() - self._auto_batcher = _AutoBatcher(config.prefetch_count, config.prefetch_validity, self._new_id_batch) + self._auto_batcher = _AutoBatcher( + config.prefetch_count, config.prefetch_validity, self._new_id_batch + ) def new_id(self): """Generates and returns a cluster-wide unique ID. - + This method goes to a random member and gets a batch of IDs, which will then be returned locally for limited time. The pre-fetch size and the validity time can be configured. - + Note: Values returned from this method may not be strictly ordered. diff --git a/hazelcast/proxy/list.py b/hazelcast/proxy/list.py index e741582ba2..ba473808b7 100644 --- a/hazelcast/proxy/list.py +++ b/hazelcast/proxy/list.py @@ -1,33 +1,35 @@ -from hazelcast.protocol.codec import list_add_all_codec, \ - list_add_all_with_index_codec, \ - list_add_codec, \ - list_add_listener_codec, \ - list_add_with_index_codec, \ - list_clear_codec, \ - list_compare_and_remove_all_codec, \ - list_compare_and_retain_all_codec, \ - list_contains_all_codec, \ - list_contains_codec, \ - list_get_all_codec, \ - list_get_codec, \ - list_index_of_codec, \ - list_is_empty_codec, \ - list_iterator_codec, \ - list_last_index_of_codec, \ - list_list_iterator_codec, \ - list_remove_codec, \ - list_remove_listener_codec, \ - list_remove_with_index_codec, \ - list_set_codec, \ - list_size_codec, \ - list_sub_codec +from hazelcast.protocol.codec import ( + list_add_all_codec, + list_add_all_with_index_codec, + list_add_codec, + list_add_listener_codec, + list_add_with_index_codec, + list_clear_codec, + list_compare_and_remove_all_codec, + list_compare_and_retain_all_codec, + list_contains_all_codec, + list_contains_codec, + list_get_all_codec, + list_get_codec, + list_index_of_codec, + list_is_empty_codec, + list_iterator_codec, + list_last_index_of_codec, + list_list_iterator_codec, + list_remove_codec, + list_remove_listener_codec, + list_remove_with_index_codec, + list_set_codec, + list_size_codec, + list_sub_codec, +) from hazelcast.proxy.base import PartitionSpecificProxy, ItemEvent, ItemEventType from hazelcast.util import check_not_none, ImmutableLazyDataList class List(PartitionSpecificProxy): """Concurrent, distributed implementation of List. - + The Hazelcast List is not a partitioned data-structure. So all the content of the List is stored in a single machine (and in the backup). So the List will not scale by adding more members in the cluster. """ @@ -130,15 +132,18 @@ def handle_event_item(item, uuid, event_type): if item_removed_func: item_removed_func(item_event) - return self._register_listener(request, lambda r: list_add_listener_codec.decode_response(r), - lambda reg_id: list_remove_listener_codec.encode_request(self.name, reg_id), - lambda m: list_add_listener_codec.handle(m, handle_event_item)) + return self._register_listener( + request, + lambda r: list_add_listener_codec.decode_response(r), + lambda reg_id: list_remove_listener_codec.encode_request(self.name, reg_id), + lambda m: list_add_listener_codec.handle(m, handle_event_item), + ) def clear(self): - """Clears the list. - + """Clears the list. + List will be empty with this call. 
- + Returns: hazelcast.future.Future[None]: """ @@ -188,6 +193,7 @@ def get(self, index): Returns: hazelcast.future.Future[any]: the item in the specified position in this list. """ + def handler(message): return self._to_object(list_get_codec.decode_response(message)) @@ -200,8 +206,11 @@ def get_all(self): Returns: hazelcast.future.Future[list]: All of the items in this list. """ + def handler(message): - return ImmutableLazyDataList(list_get_all_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + list_get_all_codec.decode_response(message), self._to_object + ) request = list_get_all_codec.encode_request(self.name) return self._invoke(request, handler) @@ -212,15 +221,18 @@ def iterator(self): Returns: hazelcast.future.Future[list]: All of the items in this list. """ + def handler(message): - return ImmutableLazyDataList(list_iterator_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + list_iterator_codec.decode_response(message), self._to_object + ) request = list_iterator_codec.encode_request(self.name) return self._invoke(request, handler) def index_of(self, item): - """Returns the first index of specified items's occurrences in this list. - + """Returns the first index of specified items's occurrences in this list. + If specified item is not present in this list, returns -1. Args: @@ -265,8 +277,8 @@ def last_index_of(self, item): return self._invoke(request, list_last_index_of_codec.decode_response) def list_iterator(self, index=0): - """Returns a list iterator of the elements in this list. - + """Returns a list iterator of the elements in this list. + If an index is provided, iterator starts from this index. Args: @@ -275,8 +287,11 @@ def list_iterator(self, index=0): Returns: hazelcast.future.Future[list]: List of the elements in this list. """ + def handler(message): - return ImmutableLazyDataList(list_list_iterator_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + list_list_iterator_codec.decode_response(message), self._to_object + ) request = list_list_iterator_codec.encode_request(self.name, index) return self._invoke(request, handler) @@ -299,7 +314,7 @@ def remove(self, item): def remove_at(self, index): """Removes the item at the specified position in this list. - + Element in this position and following elements are shifted to the left, if any. Args: @@ -308,6 +323,7 @@ def remove_at(self, index): Returns: hazelcast.future.Future[any]: The item previously at the specified index. """ + def handler(message): return self._to_object(list_remove_with_index_codec.decode_response(message)) @@ -333,8 +349,8 @@ def remove_all(self, items): return self._invoke(request, list_compare_and_remove_all_codec.decode_response) def remove_listener(self, registration_id): - """Removes the specified item listener. - + """Removes the specified item listener. + Returns silently if the specified listener was not added before. Args: @@ -346,8 +362,8 @@ def remove_listener(self, registration_id): return self._deregister_listener(registration_id) def retain_all(self, items): - """Retains only the items that are contained in the specified collection. - + """Retains only the items that are contained in the specified collection. + It means, items which are not present in the specified collection are removed from this list. Args: @@ -367,7 +383,7 @@ def retain_all(self, items): def size(self): """Returns the number of elements in this list. 
- + Returns: hazelcast.future.Future[int]: Number of elements in this list. """ @@ -382,7 +398,7 @@ def set_at(self, index, item): item: Item to be stored. Returns: - hazelcast.future.Future[any]: the previous item in the specified index. + hazelcast.future.Future[any]: the previous item in the specified index. """ check_not_none(item, "Value can't be None") element_data = self._to_data(item) @@ -395,7 +411,7 @@ def handler(message): def sub_list(self, from_index, to_index): """Returns a sublist from this list, from from_index(inclusive) to to_index(exclusive). - + The returned list is backed by this list, so non-structural changes in the returned list are reflected in this list, and vice-versa. @@ -406,6 +422,7 @@ def sub_list(self, from_index, to_index): Returns: hazelcast.future.Future[list]: A view of the specified range within this list. """ + def handler(message): return ImmutableLazyDataList(list_sub_codec.decode_response(message), self._to_object) diff --git a/hazelcast/proxy/map.py b/hazelcast/proxy/map.py index 43dea9a45f..d9ae305630 100644 --- a/hazelcast/proxy/map.py +++ b/hazelcast/proxy/map.py @@ -4,30 +4,85 @@ from hazelcast.future import combine_futures, ImmediateFuture from hazelcast.invocation import Invocation from hazelcast.protocol import PagingPredicateHolder -from hazelcast.protocol.codec import map_add_entry_listener_codec, map_add_entry_listener_to_key_codec, \ - map_add_entry_listener_with_predicate_codec, map_add_entry_listener_to_key_with_predicate_codec, \ - map_clear_codec, map_contains_key_codec, map_contains_value_codec, map_delete_codec, \ - map_entry_set_codec, map_entries_with_predicate_codec, map_evict_codec, map_evict_all_codec, map_flush_codec, \ - map_force_unlock_codec, map_get_codec, map_get_all_codec, map_get_entry_view_codec, map_is_empty_codec, \ - map_is_locked_codec, map_key_set_codec, map_key_set_with_predicate_codec, map_load_all_codec, \ - map_load_given_keys_codec, map_lock_codec, map_put_codec, map_put_all_codec, map_put_if_absent_codec, \ - map_put_transient_codec, map_size_codec, map_remove_codec, map_remove_if_same_codec, \ - map_remove_entry_listener_codec, map_replace_codec, map_replace_if_same_codec, map_set_codec, map_try_lock_codec, \ - map_try_put_codec, map_try_remove_codec, map_unlock_codec, map_values_codec, map_values_with_predicate_codec, \ - map_add_interceptor_codec, map_execute_on_all_keys_codec, map_execute_on_key_codec, map_execute_on_keys_codec, \ - map_execute_with_predicate_codec, map_add_near_cache_invalidation_listener_codec, map_add_index_codec, \ - map_set_ttl_codec, map_entries_with_paging_predicate_codec, map_key_set_with_paging_predicate_codec, \ - map_values_with_paging_predicate_codec, map_put_with_max_idle_codec, map_put_if_absent_with_max_idle_codec, \ - map_put_transient_with_max_idle_codec, map_set_with_max_idle_codec -from hazelcast.proxy.base import Proxy, EntryEvent, EntryEventType, get_entry_listener_flags, MAX_SIZE +from hazelcast.protocol.codec import ( + map_add_entry_listener_codec, + map_add_entry_listener_to_key_codec, + map_add_entry_listener_with_predicate_codec, + map_add_entry_listener_to_key_with_predicate_codec, + map_clear_codec, + map_contains_key_codec, + map_contains_value_codec, + map_delete_codec, + map_entry_set_codec, + map_entries_with_predicate_codec, + map_evict_codec, + map_evict_all_codec, + map_flush_codec, + map_force_unlock_codec, + map_get_codec, + map_get_all_codec, + map_get_entry_view_codec, + map_is_empty_codec, + map_is_locked_codec, + map_key_set_codec, + 
map_key_set_with_predicate_codec, + map_load_all_codec, + map_load_given_keys_codec, + map_lock_codec, + map_put_codec, + map_put_all_codec, + map_put_if_absent_codec, + map_put_transient_codec, + map_size_codec, + map_remove_codec, + map_remove_if_same_codec, + map_remove_entry_listener_codec, + map_replace_codec, + map_replace_if_same_codec, + map_set_codec, + map_try_lock_codec, + map_try_put_codec, + map_try_remove_codec, + map_unlock_codec, + map_values_codec, + map_values_with_predicate_codec, + map_add_interceptor_codec, + map_execute_on_all_keys_codec, + map_execute_on_key_codec, + map_execute_on_keys_codec, + map_execute_with_predicate_codec, + map_add_near_cache_invalidation_listener_codec, + map_add_index_codec, + map_set_ttl_codec, + map_entries_with_paging_predicate_codec, + map_key_set_with_paging_predicate_codec, + map_values_with_paging_predicate_codec, + map_put_with_max_idle_codec, + map_put_if_absent_with_max_idle_codec, + map_put_transient_with_max_idle_codec, + map_set_with_max_idle_codec, +) +from hazelcast.proxy.base import ( + Proxy, + EntryEvent, + EntryEventType, + get_entry_listener_flags, + MAX_SIZE, +) from hazelcast.predicate import PagingPredicate -from hazelcast.util import check_not_none, thread_id, to_millis, ImmutableLazyDataList, IterationType +from hazelcast.util import ( + check_not_none, + thread_id, + to_millis, + ImmutableLazyDataList, + IterationType, +) from hazelcast import six class Map(Proxy): """Hazelcast Map client proxy to access the map on the cluster. - + Concurrent, distributed, observable and queryable map. This map can work both async(non-blocking) or sync(blocking). Blocking calls return the value of the call and block the execution until return value is calculated. @@ -35,7 +90,7 @@ class Map(Proxy): Result of the ``hazelcast.future.Future`` can be used whenever ready. A ``hazelcast.future.Future``'s result can be obtained with blocking the execution by calling ``future.result()``. - + Example: >>> my_map = client.get_map("my_map").blocking() # sync map, all operations are blocking >>> print("map.put", my_map.put("key", "value")) @@ -54,7 +109,7 @@ class Map(Proxy): >>> def contains_key_callback(f): >>> print("map.contains_key", f.result()) >>> my_map.contains_key("key").add_done_callback(contains_key_callback) - + This class does not allow ``None`` to be used as a key or value. """ @@ -62,9 +117,21 @@ def __init__(self, service_name, name, context): super(Map, self).__init__(service_name, name, context) self._reference_id_generator = context.lock_reference_id_generator - def add_entry_listener(self, include_value=False, key=None, predicate=None, added_func=None, removed_func=None, - updated_func=None, evicted_func=None, evict_all_func=None, clear_all_func=None, - merged_func=None, expired_func=None, loaded_func=None): + def add_entry_listener( + self, + include_value=False, + key=None, + predicate=None, + added_func=None, + removed_func=None, + updated_func=None, + evicted_func=None, + evict_all_func=None, + clear_all_func=None, + merged_func=None, + expired_func=None, + loaded_func=None, + ): """Adds a continuous entry listener for this map. Listener will get notified for map events filtered with given parameters. @@ -86,30 +153,54 @@ def add_entry_listener(self, include_value=False, key=None, predicate=None, adde Returns: hazelcast.future.Future[str]: A registration id which is used as a key to remove the listener. 
""" - flags = get_entry_listener_flags(ADDED=added_func, REMOVED=removed_func, UPDATED=updated_func, - EVICTED=evicted_func, EXPIRED=expired_func, EVICT_ALL=evict_all_func, - CLEAR_ALL=clear_all_func, MERGED=merged_func, LOADED=loaded_func) + flags = get_entry_listener_flags( + ADDED=added_func, + REMOVED=removed_func, + UPDATED=updated_func, + EVICTED=evicted_func, + EXPIRED=expired_func, + EVICT_ALL=evict_all_func, + CLEAR_ALL=clear_all_func, + MERGED=merged_func, + LOADED=loaded_func, + ) if key and predicate: codec = map_add_entry_listener_to_key_with_predicate_codec key_data = self._to_data(key) predicate_data = self._to_data(predicate) - request = codec.encode_request(self.name, key_data, predicate_data, include_value, flags, self._is_smart) + request = codec.encode_request( + self.name, key_data, predicate_data, include_value, flags, self._is_smart + ) elif key and not predicate: codec = map_add_entry_listener_to_key_codec key_data = self._to_data(key) - request = codec.encode_request(self.name, key_data, include_value, flags, self._is_smart) + request = codec.encode_request( + self.name, key_data, include_value, flags, self._is_smart + ) elif not key and predicate: codec = map_add_entry_listener_with_predicate_codec predicate = self._to_data(predicate) - request = codec.encode_request(self.name, predicate, include_value, flags, self._is_smart) + request = codec.encode_request( + self.name, predicate, include_value, flags, self._is_smart + ) else: codec = map_add_entry_listener_codec request = codec.encode_request(self.name, include_value, flags, self._is_smart) - def handle_event_entry(key_, value, old_value, merging_value, event_type, uuid, number_of_affected_entries): - event = EntryEvent(self._to_object, key_, value, old_value, merging_value, - event_type, uuid, number_of_affected_entries) + def handle_event_entry( + key_, value, old_value, merging_value, event_type, uuid, number_of_affected_entries + ): + event = EntryEvent( + self._to_object, + key_, + value, + old_value, + merging_value, + event_type, + uuid, + number_of_affected_entries, + ) if event.event_type == EntryEventType.ADDED: added_func(event) @@ -130,13 +221,18 @@ def handle_event_entry(key_, value, old_value, merging_value, event_type, uuid, elif event.event_type == EntryEventType.LOADED: loaded_func(event) - return self._register_listener(request, lambda r: codec.decode_response(r), - lambda reg_id: map_remove_entry_listener_codec.encode_request(self.name, reg_id), - lambda m: codec.handle(m, handle_event_entry)) + return self._register_listener( + request, + lambda r: codec.decode_response(r), + lambda reg_id: map_remove_entry_listener_codec.encode_request(self.name, reg_id), + lambda m: codec.handle(m, handle_event_entry), + ) - def add_index(self, attributes=None, index_type=IndexType.SORTED, name=None, bitmap_index_options=None): + def add_index( + self, attributes=None, index_type=IndexType.SORTED, name=None, bitmap_index_options=None + ): """Adds an index to this map for the specified entries so that queries can run faster. - + Example: Let's say your map values are Employee objects. @@ -147,7 +243,7 @@ def add_index(self, attributes=None, index_type=IndexType.SORTED, name=None, bit >>> #other fields >>> >>> #methods - + If you query your values mostly based on age and active fields, you should consider indexing these. 
>>> employees = client.get_map("employees") @@ -157,11 +253,11 @@ def add_index(self, attributes=None, index_type=IndexType.SORTED, name=None, bit Index attribute should either have a getter method or be public. You should also make sure to add the indexes before adding entries to this map. - + Indexing time is executed in parallel on each partition by operation threads. The Map is not blocked during this operation. The time taken in proportional to the size of the Map and the number Members. - + Until the index finishes being created, any searches for the attribute will use a full Map scan, thus avoiding using a partially built index and returning incorrect results. @@ -215,7 +311,7 @@ def add_interceptor(self, interceptor): def clear(self): """Clears the map. - + The ``MAP_CLEARED`` event is fired for any registered listeners. Returns: @@ -226,9 +322,9 @@ def clear(self): def contains_key(self, key): """Determines whether this map contains an entry with the key. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -260,16 +356,16 @@ def contains_value(self, value): def delete(self, key): """Removes the mapping for a key from this map if it is present (optional operation). - + Unlike remove(object), this operation does not return the removed value, which avoids the serialization cost of the returned value. If the removed value will not be used, a delete operation is preferred over a remove operation for better performance. - + The map will not contain a mapping for the specified key once the call returns. - + Warning: - This method breaks the contract of EntryListener. - When an entry is removed by delete(), it fires an ``EntryEvent`` with a ``None`` oldValue. + This method breaks the contract of EntryListener. + When an entry is removed by delete(), it fires an ``EntryEvent`` with a ``None`` oldValue. Also, a listener with predicates will have ``None`` values, so only the keys can be queried via predicates. @@ -285,7 +381,7 @@ def delete(self, key): def entry_set(self, predicate=None): """Returns a list clone of the mappings contained in this map. - + Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. @@ -301,7 +397,9 @@ def entry_set(self, predicate=None): def handler(message): response = codec.decode_response(message) - predicate.anchor_list = response["anchor_data_list"].as_anchor_list(self._to_object) + predicate.anchor_list = response["anchor_data_list"].as_anchor_list( + self._to_object + ) return ImmutableLazyDataList(response["response"], self._to_object) predicate.iteration_type = IterationType.ENTRY @@ -327,7 +425,7 @@ def handler(message): def evict(self, key): """Evicts the specified key from this map. - + Warning: This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. @@ -344,7 +442,7 @@ def evict(self, key): def evict_all(self): """Evicts all keys from this map except the locked ones. - + The ``EVICT_ALL`` event is fired for any registered listeners. Returns: @@ -369,15 +467,23 @@ def execute_on_entries(self, entry_processor, predicate=None): results of the entry process. 
""" if predicate: + def handler(message): - return ImmutableLazyDataList(map_execute_with_predicate_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + map_execute_with_predicate_codec.decode_response(message), self._to_object + ) entry_processor_data = self._to_data(entry_processor) predicate_data = self._to_data(predicate) - request = map_execute_with_predicate_codec.encode_request(self.name, entry_processor_data, predicate_data) + request = map_execute_with_predicate_codec.encode_request( + self.name, entry_processor_data, predicate_data + ) else: + def handler(message): - return ImmutableLazyDataList(map_execute_on_all_keys_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + map_execute_on_all_keys_codec.decode_response(message), self._to_object + ) entry_processor_data = self._to_data(entry_processor) request = map_execute_on_all_keys_codec.encode_request(self.name, entry_processor_data) @@ -414,7 +520,7 @@ def execute_on_keys(self, keys, entry_processor): actual ``com.hazelcast.map.EntryProcessor`` implementation. Returns: - hazelcast.future.Future[list]: List of map entries which includes the keys + hazelcast.future.Future[list]: List of map entries which includes the keys and the results of the entry process. """ key_list = [] @@ -426,15 +532,19 @@ def execute_on_keys(self, keys, entry_processor): return ImmediateFuture([]) def handler(message): - return ImmutableLazyDataList(map_execute_on_keys_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + map_execute_on_keys_codec.decode_response(message), self._to_object + ) entry_processor_data = self._to_data(entry_processor) - request = map_execute_on_keys_codec.encode_request(self.name, entry_processor_data, key_list) + request = map_execute_on_keys_codec.encode_request( + self.name, entry_processor_data, key_list + ) return self._invoke(request, handler) def flush(self): """Flushes all the local dirty entries. - + Returns: hazelcast.future.Future[None]: """ @@ -442,12 +552,12 @@ def flush(self): return self._invoke(request) def force_unlock(self, key): - """Releases the lock for the specified key regardless of the lock owner. - + """Releases the lock for the specified key regardless of the lock owner. + It always successfully unlocks the key, never blocks, and returns immediately. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -459,21 +569,22 @@ def force_unlock(self, key): check_not_none(key, "key can't be None") key_data = self._to_data(key) - request = map_force_unlock_codec.encode_request(self.name, key_data, - self._reference_id_generator.get_and_increment()) + request = map_force_unlock_codec.encode_request( + self.name, key_data, self._reference_id_generator.get_and_increment() + ) return self._invoke_on_key(request, key_data) def get(self, key): """Returns the value for the specified key, or ``None`` if this map does not contain this key. - + Warning: This method returns a clone of original value, modifying the returned value does not change the actual value in the map. One should put modified value back to make changes visible to all nodes. 
- + >>> value = my_map.get(key) >>> value.update_some_property() >>> my_map.put(key,value) - + Warning: This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. @@ -490,13 +601,13 @@ def get(self, key): def get_all(self, keys): """Returns the entries for the given keys. - + Warning: The returned map is NOT backed by the original map, so changes to the original map are NOT reflected in the returned map, and vice-versa. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -525,13 +636,13 @@ def get_all(self, keys): def get_entry_view(self, key): """Returns the EntryView for the specified key. - + Warning: - This method returns a clone of original mapping, modifying the returned value does not change the + This method returns a clone of original mapping, modifying the returned value does not change the actual value in the map. One should put modified value back to make changes visible to all nodes. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -558,7 +669,7 @@ def handler(message): def is_empty(self): """Returns whether this map contains no key-value mappings or not. - + Returns: hazelcast.future.Future[bool]: ``True`` if this map contains no key-value mappings, ``False`` otherwise. """ @@ -566,10 +677,10 @@ def is_empty(self): return self._invoke(request, map_is_empty_codec.decode_response) def is_locked(self, key): - """Checks the lock for the specified key. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + """Checks the lock for the specified key. + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -585,9 +696,9 @@ def is_locked(self, key): return self._invoke_on_key(request, key_data, map_is_locked_codec.decode_response) def key_set(self, predicate=None): - """Returns a List clone of the keys contained in this map or + """Returns a List clone of the keys contained in this map or the keys of the entries filtered with the predicate if provided. - + Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. @@ -603,7 +714,9 @@ def key_set(self, predicate=None): def handler(message): response = codec.decode_response(message) - predicate.anchor_list = response["anchor_data_list"].as_anchor_list(self._to_object) + predicate.anchor_list = response["anchor_data_list"].as_anchor_list( + self._to_object + ) return ImmutableLazyDataList(response["response"], self._to_object) predicate.iteration_type = IterationType.KEY @@ -632,7 +745,7 @@ def load_all(self, keys=None, replace_existing_values=True): Args: keys (list): Keys of the entry values to load. 
- replace_existing_values (bool): Whether the existing values will be replaced or not + replace_existing_values (bool): Whether the existing values will be replaced or not with those loaded from the server side MapLoader. Returns: @@ -647,22 +760,22 @@ def load_all(self, keys=None, replace_existing_values=True): def lock(self, key, lease_time=None): """Acquires the lock for the specified key infinitely or for the specified lease time if provided. - + If the lock is not available, the current thread becomes disabled for thread scheduling purposes and lies dormant until the lock has been acquired. - + You get a lock whether the value is present in the map or not. Other threads (possibly on other systems) would block on their invoke of lock() until the non-existent key is unlocked. If the lock holder introduces the key to the map, the put() operation is not blocked. If a thread not holding a lock on the non-existent key tries to introduce the key while a lock exists on the non-existent key, the put() operation blocks until it is unlocked. - + Scope of the lock is this map only. Acquired lock is only for the key in this map. - + Locks are re-entrant; so, if the key is locked N times, it should be unlocked N times before another thread can acquire it. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -675,25 +788,30 @@ def lock(self, key, lease_time=None): check_not_none(key, "key can't be None") key_data = self._to_data(key) - request = map_lock_codec.encode_request(self.name, key_data, thread_id(), to_millis(lease_time), - self._reference_id_generator.get_and_increment()) + request = map_lock_codec.encode_request( + self.name, + key_data, + thread_id(), + to_millis(lease_time), + self._reference_id_generator.get_and_increment(), + ) partition_id = self._context.partition_service.get_partition_id(key_data) invocation = Invocation(request, partition_id=partition_id, timeout=MAX_SIZE) self._invocation_service.invoke(invocation) return invocation.future def put(self, key, value, ttl=None, max_idle=None): - """Associates the specified value with the specified key in this map. - - If the map previously contained a mapping for the key, the old value is replaced by the specified value. + """Associates the specified value with the specified key in this map. + + If the map previously contained a mapping for the key, the old value is replaced by the specified value. If ttl is provided, entry will expire and get evicted after the ttl. - + Warning: This method returns a clone of the previous value, not the original (identically equal) value previously put into the map. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -709,7 +827,7 @@ def put(self, key, value, ttl=None, max_idle=None): infinite max idle time. Returns: - hazelcast.future.Future[any]: Previous value associated with key + hazelcast.future.Future[any]: Previous value associated with key or ``None`` if there was no mapping for key. 
""" check_not_none(key, "key can't be None") @@ -719,9 +837,9 @@ def put(self, key, value, ttl=None, max_idle=None): return self._put_internal(key_data, value_data, ttl, max_idle) def put_all(self, map): - """Copies all of the mappings from the specified map to this map. - - No atomicity guarantees are given. In the case of a failure, some of the key-value tuples may get written, + """Copies all of the mappings from the specified map to this map. + + No atomicity guarantees are given. In the case of a failure, some of the key-value tuples may get written, while others are not. Args: @@ -749,30 +867,32 @@ def put_all(self, map): futures = [] for partition_id, entry_list in six.iteritems(partition_map): - request = map_put_all_codec.encode_request(self.name, entry_list, False) # TODO trigger map loader + request = map_put_all_codec.encode_request( + self.name, entry_list, False + ) # TODO trigger map loader future = self._invoke_on_partition(request, partition_id) futures.append(future) return combine_futures(futures) def put_if_absent(self, key, value, ttl=None, max_idle=None): - """Associates the specified key with the given value if it is not already associated. - + """Associates the specified key with the given value if it is not already associated. + If ttl is provided, entry will expire and get evicted after the ttl. - + This is equivalent to below, except that the action is performed atomically: - + >>> if not my_map.contains_key(key): >>> return my_map.put(key,value) >>> else: >>> return my_map.get(key) - + Warning: This method returns a clone of the previous value, not the original (identically equal) value previously put into the map. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -799,9 +919,9 @@ def put_if_absent(self, key, value, ttl=None, max_idle=None): def put_transient(self, key, value, ttl=None, max_idle=None): """Same as ``put``, but MapStore defined at the server side will not be called. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -827,19 +947,19 @@ def put_transient(self, key, value, ttl=None, max_idle=None): return self._put_transient_internal(key_data, value_data, ttl, max_idle) def remove(self, key): - """Removes the mapping for a key from this map if it is present. - + """Removes the mapping for a key from this map if it is present. + The map will not contain a mapping for the specified key once the call returns. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: key: Key of the mapping to be deleted. Returns: - hazelcast.future.Future[any]: The previous value associated with key, + hazelcast.future.Future[any]: The previous value associated with key, or ``None`` if there was no mapping for key. 
""" check_not_none(key, "key can't be None") @@ -848,17 +968,17 @@ def remove(self, key): def remove_if_same(self, key, value): """Removes the entry for a key only if it is currently mapped to a given value. - + This is equivalent to below, except that the action is performed atomically: - + >>> if my_map.contains_key(key) and my_map.get(key) == value: >>> my_map.remove(key) >>> return True >>> else: >>> return False - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -876,8 +996,8 @@ def remove_if_same(self, key, value): return self._remove_if_same_internal_(key_data, value_data) def remove_entry_listener(self, registration_id): - """Removes the specified entry listener. - + """Removes the specified entry listener. + Returns silently if there is no such listener added before. Args: @@ -890,18 +1010,18 @@ def remove_entry_listener(self, registration_id): def replace(self, key, value): """Replaces the entry for a key only if it is currently mapped to some value. - - This is equivalent to below, except that the action is performed atomically: - + + This is equivalent to below, except that the action is performed atomically: + >>> if my_map.contains_key(key): >>> return my_map.put(key,value) >>> else: >>> return None - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. - + Warning: This method returns a clone of the previous value, not the original (identically equal) value previously put into the map. @@ -911,7 +1031,7 @@ def replace(self, key, value): value: The value to replace the previous value. Returns: - hazelcast.future.Future[any]: Previous value associated with key, + hazelcast.future.Future[any]: Previous value associated with key, or ``None`` if there was no mapping for key. """ check_not_none(key, "key can't be None") @@ -924,17 +1044,17 @@ def replace(self, key, value): def replace_if_same(self, key, old_value, new_value): """Replaces the entry for a key only if it is currently mapped to a given value. - + This is equivalent to below, except that the action is performed atomically: - + >>> if my_map.contains_key(key) and my_map.get(key) == old_value: >>> my_map.put(key, new_value) >>> return True >>> else: >>> return False - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -960,7 +1080,7 @@ def set(self, key, value, ttl=None, max_idle=None): Similar to the put operation except that set doesn't return the old value, which is more efficient. If ttl is provided, entry will expire and get evicted after the ttl. - + Warning: This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. 
@@ -987,10 +1107,10 @@ def set(self, key, value, ttl=None, max_idle=None): return self._set_internal(key_data, value_data, ttl, max_idle) def set_ttl(self, key, ttl): - """Updates the TTL (time to live) value of the entry specified by the given key with a new TTL value. - - New TTL value is valid starting from the time this operation is invoked, - not since the time the entry was created. If the entry does not exist or is already expired, + """Updates the TTL (time to live) value of the entry specified by the given key with a new TTL value. + + New TTL value is valid starting from the time this operation is invoked, + not since the time the entry was created. If the entry does not exist or is already expired, this call has no effect. Args: @@ -1008,7 +1128,7 @@ def set_ttl(self, key, ttl): def size(self): """Returns the number of entries in this map. - + Returns: hazelcast.future.Future[int]: Number of entries in this map. """ @@ -1016,8 +1136,8 @@ def size(self): return self._invoke(request, map_size_codec.decode_response) def try_lock(self, key, lease_time=None, timeout=0): - """Tries to acquire the lock for the specified key. - + """Tries to acquire the lock for the specified key. + When the lock is not available: - If the timeout is not provided, the current thread doesn't wait and returns ``False`` immediately. @@ -1026,7 +1146,7 @@ def try_lock(self, key, lease_time=None, timeout=0): - The lock is acquired by the current thread, or - The specified waiting time elapses. - + If the lease time is provided, lock will be released after this time elapses. Args: @@ -1040,12 +1160,21 @@ def try_lock(self, key, lease_time=None, timeout=0): check_not_none(key, "key can't be None") key_data = self._to_data(key) - request = map_try_lock_codec.encode_request(self.name, key_data, thread_id(), - to_millis(lease_time), to_millis(timeout), - self._reference_id_generator.get_and_increment()) + request = map_try_lock_codec.encode_request( + self.name, + key_data, + thread_id(), + to_millis(lease_time), + to_millis(timeout), + self._reference_id_generator.get_and_increment(), + ) partition_id = self._context.partition_service.get_partition_id(key_data) - invocation = Invocation(request, partition_id=partition_id, timeout=MAX_SIZE, - response_handler=map_try_lock_codec.decode_response) + invocation = Invocation( + request, + partition_id=partition_id, + timeout=MAX_SIZE, + response_handler=map_try_lock_codec.decode_response, + ) self._invocation_service.invoke(invocation) return invocation.future @@ -1071,8 +1200,8 @@ def try_put(self, key, value, timeout=0): return self._try_put_internal(key_data, value_data, timeout) def try_remove(self, key, timeout=0): - """Tries to remove the given key from this map and returns immediately if timeout is not provided. - + """Tries to remove the given key from this map and returns immediately if timeout is not provided. + If timeout is provided, operation waits until it is completed or timeout is reached. Args: @@ -1088,9 +1217,9 @@ def try_remove(self, key, timeout=0): return self._try_remove_internal(key_data, timeout) def unlock(self, key): - """Releases the lock for the specified key. - - It never blocks and returns immediately. If the current thread is the holder of this lock, + """Releases the lock for the specified key. + + It never blocks and returns immediately. If the current thread is the holder of this lock, then the hold count is decremented. If the hold count is zero, then the lock is released. 
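A minimal sketch of the per-key locking semantics covered by ``lock``/``try_lock``/``unlock`` in this file; the map name and values are assumptions for illustration:

import hazelcast

client = hazelcast.HazelcastClient()  # illustrative setup
accounts = client.get_map("illustrative-accounts")
accounts.put("acc-1", 100).result()

# Guard a read-modify-write with the per-key lock; unlock() drops one hold count.
accounts.lock("acc-1").result()
try:
    balance = accounts.get("acc-1").result()
    accounts.put("acc-1", balance - 10).result()
finally:
    accounts.unlock("acc-1").result()

# try_lock() resolves to a bool instead of blocking indefinitely.
if accounts.try_lock("acc-1", timeout=5).result():
    try:
        pass  # critical section
    finally:
        accounts.unlock("acc-1").result()

client.shutdown()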
Args: @@ -1102,14 +1231,15 @@ def unlock(self, key): check_not_none(key, "key can't be None") key_data = self._to_data(key) - request = map_unlock_codec.encode_request(self.name, key_data, thread_id(), - self._reference_id_generator.get_and_increment()) + request = map_unlock_codec.encode_request( + self.name, key_data, thread_id(), self._reference_id_generator.get_and_increment() + ) return self._invoke_on_key(request, key_data) def values(self, predicate=None): """Returns a list clone of the values contained in this map or values of the entries which are filtered with the predicate if provided. - + Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. @@ -1126,7 +1256,9 @@ def values(self, predicate=None): def handler(message): response = codec.decode_response(message) - predicate.anchor_list = response["anchor_data_list"].as_anchor_list(self._to_object) + predicate.anchor_list = response["anchor_data_list"].as_anchor_list( + self._to_object + ) return ImmutableLazyDataList(response["response"], self._to_object) predicate.iteration_type = IterationType.VALUE @@ -1167,7 +1299,9 @@ def _get_all_internal(self, partition_to_keys, futures=None): futures = [] def handler(message): - return ImmutableLazyDataList(map_get_all_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + map_get_all_codec.decode_response(message), self._to_object + ) for partition_id, key_dict in six.iteritems(partition_to_keys): request = map_get_all_codec.encode_request(self.name, six.itervalues(key_dict)) @@ -1187,8 +1321,12 @@ def handler(message): return self._invoke_on_key(request, key_data, handler) def _remove_if_same_internal_(self, key_data, value_data): - request = map_remove_if_same_codec.encode_request(self.name, key_data, value_data, thread_id()) - return self._invoke_on_key(request, key_data, response_handler=map_remove_if_same_codec.decode_response) + request = map_remove_if_same_codec.encode_request( + self.name, key_data, value_data, thread_id() + ) + return self._invoke_on_key( + request, key_data, response_handler=map_remove_if_same_codec.decode_response + ) def _delete_internal(self, key_data): request = map_delete_codec.encode_request(self.name, key_data, thread_id()) @@ -1199,18 +1337,24 @@ def handler(message): return self._to_object(map_put_codec.decode_response(message)) if max_idle is not None: - request = map_put_with_max_idle_codec.encode_request(self.name, key_data, value_data, thread_id(), - to_millis(ttl), to_millis(max_idle)) + request = map_put_with_max_idle_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl), to_millis(max_idle) + ) else: - request = map_put_codec.encode_request(self.name, key_data, value_data, thread_id(), to_millis(ttl)) + request = map_put_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl) + ) return self._invoke_on_key(request, key_data, handler) def _set_internal(self, key_data, value_data, ttl, max_idle): if max_idle is not None: - request = map_set_with_max_idle_codec.encode_request(self.name, key_data, value_data, thread_id(), - to_millis(ttl), to_millis(max_idle)) + request = map_set_with_max_idle_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl), to_millis(max_idle) + ) else: - request = map_set_codec.encode_request(self.name, key_data, value_data, thread_id(), to_millis(ttl)) + request = map_set_codec.encode_request( + self.name, key_data, value_data, thread_id(), 
to_millis(ttl) + ) return self._invoke_on_key(request, key_data) def _set_ttl_internal(self, key_data, ttl): @@ -1218,20 +1362,26 @@ def _set_ttl_internal(self, key_data, ttl): return self._invoke_on_key(request, key_data, map_set_ttl_codec.decode_response) def _try_remove_internal(self, key_data, timeout): - request = map_try_remove_codec.encode_request(self.name, key_data, thread_id(), to_millis(timeout)) + request = map_try_remove_codec.encode_request( + self.name, key_data, thread_id(), to_millis(timeout) + ) return self._invoke_on_key(request, key_data, map_try_remove_codec.decode_response) def _try_put_internal(self, key_data, value_data, timeout): - request = map_try_put_codec.encode_request(self.name, key_data, value_data, thread_id(), to_millis(timeout)) + request = map_try_put_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(timeout) + ) return self._invoke_on_key(request, key_data, map_try_put_codec.decode_response) def _put_transient_internal(self, key_data, value_data, ttl, max_idle): if max_idle is not None: - request = map_put_transient_with_max_idle_codec.encode_request(self.name, key_data, value_data, thread_id(), - to_millis(ttl), to_millis(max_idle)) + request = map_put_transient_with_max_idle_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl), to_millis(max_idle) + ) else: - request = map_put_transient_codec.encode_request(self.name, key_data, value_data, thread_id(), - to_millis(ttl)) + request = map_put_transient_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl) + ) return self._invoke_on_key(request, key_data) def _put_if_absent_internal(self, key_data, value_data, ttl, max_idle): @@ -1239,16 +1389,19 @@ def handler(message): return self._to_object(map_put_if_absent_codec.decode_response(message)) if max_idle is not None: - request = map_put_if_absent_with_max_idle_codec.encode_request(self.name, key_data, value_data, thread_id(), - to_millis(ttl), to_millis(max_idle)) + request = map_put_if_absent_with_max_idle_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl), to_millis(max_idle) + ) else: - request = map_put_if_absent_codec.encode_request(self.name, key_data, value_data, thread_id(), - to_millis(ttl)) + request = map_put_if_absent_codec.encode_request( + self.name, key_data, value_data, thread_id(), to_millis(ttl) + ) return self._invoke_on_key(request, key_data, handler) def _replace_if_same_internal(self, key_data, old_value_data, new_value_data): - request = map_replace_if_same_codec.encode_request(self.name, key_data, old_value_data, new_value_data, - thread_id()) + request = map_replace_if_same_codec.encode_request( + self.name, key_data, old_value_data, new_value_data, thread_id() + ) return self._invoke_on_key(request, key_data, map_replace_if_same_codec.decode_response) def _replace_internal(self, key_data, value_data): @@ -1263,7 +1416,9 @@ def _evict_internal(self, key_data): return self._invoke_on_key(request, key_data, map_evict_codec.decode_response) def _load_all_internal(self, key_data_list, replace_existing_values): - request = map_load_given_keys_codec.encode_request(self.name, key_data_list, replace_existing_values) + request = map_load_given_keys_codec.encode_request( + self.name, key_data_list, replace_existing_values + ) return self._invoke(request) def _execute_on_key_internal(self, key_data, entry_processor): @@ -1271,7 +1426,9 @@ def handler(message): return 
self._to_object(map_execute_on_key_codec.decode_response(message)) entry_processor_data = self._to_data(entry_processor) - request = map_execute_on_key_codec.encode_request(self.name, entry_processor_data, key_data, thread_id()) + request = map_execute_on_key_codec.encode_request( + self.name, entry_processor_data, key_data, thread_id() + ) return self._invoke_on_key(request, key_data, handler) @@ -1307,9 +1464,11 @@ def _add_near_cache_invalidation_listener(self): codec = map_add_near_cache_invalidation_listener_codec request = codec.encode_request(self.name, EntryEventType.INVALIDATION, self._is_smart) self._invalidation_listener_id = self._register_listener( - request, lambda r: codec.decode_response(r), + request, + lambda r: codec.decode_response(r), lambda reg_id: map_remove_entry_listener_codec.encode_request(self.name, reg_id), - lambda m: codec.handle(m, self._handle_invalidation, self._handle_batch_invalidation)).result() + lambda m: codec.handle(m, self._handle_invalidation, self._handle_batch_invalidation), + ).result() def _remove_near_cache_invalidation_listener(self): if self._invalidation_listener_id: @@ -1392,7 +1551,9 @@ def _replace_internal(self, key_data, value_data): def _replace_if_same_internal(self, key_data, old_value_data, new_value_data): self._invalidate_cache(key_data) - return super(MapFeatNearCache, self)._replace_if_same_internal(key_data, old_value_data, new_value_data) + return super(MapFeatNearCache, self)._replace_if_same_internal( + key_data, old_value_data, new_value_data + ) def _remove_internal(self, key_data): self._invalidate_cache(key_data) @@ -1404,7 +1565,9 @@ def _remove_if_same_internal_(self, key_data, value_data): def _put_transient_internal(self, key_data, value_data, ttl, max_idle): self._invalidate_cache(key_data) - return super(MapFeatNearCache, self)._put_transient_internal(key_data, value_data, ttl, max_idle) + return super(MapFeatNearCache, self)._put_transient_internal( + key_data, value_data, ttl, max_idle + ) def _put_internal(self, key_data, value_data, ttl, max_idle): self._invalidate_cache(key_data) @@ -1412,11 +1575,15 @@ def _put_internal(self, key_data, value_data, ttl, max_idle): def _put_if_absent_internal(self, key_data, value_data, ttl, max_idle): self._invalidate_cache(key_data) - return super(MapFeatNearCache, self)._put_if_absent_internal(key_data, value_data, ttl, max_idle) + return super(MapFeatNearCache, self)._put_if_absent_internal( + key_data, value_data, ttl, max_idle + ) def _load_all_internal(self, key_data_list, replace_existing_values): self._invalidate_cache_batch(key_data_list) - return super(MapFeatNearCache, self)._load_all_internal(key_data_list, replace_existing_values) + return super(MapFeatNearCache, self)._load_all_internal( + key_data_list, replace_existing_values + ) def _execute_on_key_internal(self, key_data, entry_processor): self._invalidate_cache(key_data) diff --git a/hazelcast/proxy/multi_map.py b/hazelcast/proxy/multi_map.py index 4d94dc70dc..da5e67ea49 100644 --- a/hazelcast/proxy/multi_map.py +++ b/hazelcast/proxy/multi_map.py @@ -1,9 +1,26 @@ -from hazelcast.protocol.codec import multi_map_add_entry_listener_codec, multi_map_add_entry_listener_to_key_codec, \ - multi_map_clear_codec, multi_map_contains_entry_codec, multi_map_contains_key_codec, multi_map_contains_value_codec, \ - multi_map_entry_set_codec, multi_map_force_unlock_codec, multi_map_get_codec, multi_map_is_locked_codec, \ - multi_map_key_set_codec, multi_map_lock_codec, multi_map_put_codec, multi_map_remove_codec, \ - 
multi_map_remove_entry_codec, multi_map_remove_entry_listener_codec, multi_map_size_codec, multi_map_try_lock_codec, \ - multi_map_unlock_codec, multi_map_value_count_codec, multi_map_values_codec +from hazelcast.protocol.codec import ( + multi_map_add_entry_listener_codec, + multi_map_add_entry_listener_to_key_codec, + multi_map_clear_codec, + multi_map_contains_entry_codec, + multi_map_contains_key_codec, + multi_map_contains_value_codec, + multi_map_entry_set_codec, + multi_map_force_unlock_codec, + multi_map_get_codec, + multi_map_is_locked_codec, + multi_map_key_set_codec, + multi_map_lock_codec, + multi_map_put_codec, + multi_map_remove_codec, + multi_map_remove_entry_codec, + multi_map_remove_entry_listener_codec, + multi_map_size_codec, + multi_map_try_lock_codec, + multi_map_unlock_codec, + multi_map_value_count_codec, + multi_map_values_codec, +) from hazelcast.proxy.base import Proxy, EntryEvent, EntryEventType from hazelcast.util import check_not_none, thread_id, to_millis, ImmutableLazyDataList @@ -15,9 +32,11 @@ def __init__(self, service_name, name, context): super(MultiMap, self).__init__(service_name, name, context) self._reference_id_generator = context.lock_reference_id_generator - def add_entry_listener(self, include_value=False, key=None, added_func=None, removed_func=None, clear_all_func=None): - """Adds an entry listener for this multimap. - + def add_entry_listener( + self, include_value=False, key=None, added_func=None, removed_func=None, clear_all_func=None + ): + """Adds an entry listener for this multimap. + The listener will be notified for all multimap add/remove/clear-all events. Args: @@ -38,9 +57,19 @@ def add_entry_listener(self, include_value=False, key=None, added_func=None, rem codec = multi_map_add_entry_listener_codec request = codec.encode_request(self.name, include_value, False) - def handle_event_entry(key, value, old_value, merging_value, event_type, uuid, number_of_affected_entries): - event = EntryEvent(self._to_object, key, value, old_value, merging_value, - event_type, uuid, number_of_affected_entries) + def handle_event_entry( + key, value, old_value, merging_value, event_type, uuid, number_of_affected_entries + ): + event = EntryEvent( + self._to_object, + key, + value, + old_value, + merging_value, + event_type, + uuid, + number_of_affected_entries, + ) if event.event_type == EntryEventType.ADDED and added_func: added_func(event) elif event.event_type == EntryEventType.REMOVED and removed_func: @@ -49,15 +78,17 @@ def handle_event_entry(key, value, old_value, merging_value, event_type, uuid, n clear_all_func(event) return self._register_listener( - request, lambda r: codec.decode_response(r), + request, + lambda r: codec.decode_response(r), lambda reg_id: multi_map_remove_entry_listener_codec.encode_request(self.name, reg_id), - lambda m: codec.handle(m, handle_event_entry)) + lambda m: codec.handle(m, handle_event_entry), + ) def contains_key(self, key): """Determines whether this multimap contains an entry with the key. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. 
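For the MultiMap proxy changed in this file, a short usage sketch of the methods it exposes (``put``, ``get``, ``value_count``, ``contains_entry``); the client setup and names are illustrative assumptions:

import hazelcast

client = hazelcast.HazelcastClient()  # illustrative setup
tags = client.get_multi_map("illustrative-tags")

# A MultiMap key may hold several values; put() resolves to a bool.
tags.put("article-1", "python").result()
tags.put("article-1", "hazelcast").result()

print(tags.get("article-1").result())                       # list clone of the values for the key
print(tags.value_count("article-1").result())               # 2
print(tags.contains_entry("article-1", "python").result())  # True

client.shutdown()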
Args: @@ -69,7 +100,7 @@ def contains_key(self, key): """ check_not_none(key, "key can't be None") key_data = self._to_data(key) - + request = multi_map_contains_key_codec.encode_request(self.name, key_data, thread_id()) return self._invoke_on_key(request, key_data, multi_map_contains_key_codec.decode_response) @@ -102,13 +133,17 @@ def contains_entry(self, key, value): check_not_none(value, "value can't be None") key_data = self._to_data(key) value_data = self._to_data(value) - - request = multi_map_contains_entry_codec.encode_request(self.name, key_data, value_data, thread_id()) - return self._invoke_on_key(request, key_data, multi_map_contains_entry_codec.decode_response) + + request = multi_map_contains_entry_codec.encode_request( + self.name, key_data, value_data, thread_id() + ) + return self._invoke_on_key( + request, key_data, multi_map_contains_entry_codec.decode_response + ) def clear(self): """Clears the multimap. Removes all key-value tuples. - + Returns: hazelcast.future.Future[None]: """ @@ -117,26 +152,29 @@ def clear(self): def entry_set(self): """Returns the list of key-value tuples in the multimap. - + Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. - + Returns: hazelcast.future.Future[list]: The list of key-value tuples in the multimap. """ + def handler(message): - return ImmutableLazyDataList(multi_map_entry_set_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + multi_map_entry_set_codec.decode_response(message), self._to_object + ) request = multi_map_entry_set_codec.encode_request(self.name) return self._invoke(request, handler) def get(self, key): """Returns the list of values associated with the key. ``None`` if this map does not contain this key. - + Warning: - This method uses ``__hash__`` and ``__eq__`` of the binary form of the key, not the + This method uses ``__hash__`` and ``__eq__`` of the binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in the key's class. - + Warning: The list is NOT backed by the multimap, so changes to the map are list reflected in the collection, and vice-versa. @@ -150,7 +188,9 @@ def get(self, key): check_not_none(key, "key can't be None") def handler(message): - return ImmutableLazyDataList(multi_map_get_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + multi_map_get_codec.decode_response(message), self._to_object + ) key_data = self._to_data(key) request = multi_map_get_codec.encode_request(self.name, key_data, thread_id()) @@ -158,9 +198,9 @@ def handler(message): def is_locked(self, key): """Checks the lock for the specified key. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -176,12 +216,12 @@ def is_locked(self, key): return self._invoke_on_key(request, key_data, multi_map_is_locked_codec.decode_response) def force_unlock(self, key): - """Releases the lock for the specified key regardless of the lock owner. - + """Releases the lock for the specified key regardless of the lock owner. + It always successfully unlocks the key, never blocks, and returns immediately. 
- - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -192,38 +232,42 @@ def force_unlock(self, key): """ check_not_none(key, "key can't be None") key_data = self._to_data(key) - request = multi_map_force_unlock_codec.encode_request(self.name, key_data, - self._reference_id_generator.get_and_increment()) + request = multi_map_force_unlock_codec.encode_request( + self.name, key_data, self._reference_id_generator.get_and_increment() + ) return self._invoke_on_key(request, key_data) def key_set(self): """Returns the list of keys in the multimap. - + Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. - + Returns: hazelcast.future.Future[list]: A list of the clone of the keys. """ + def handler(message): - return ImmutableLazyDataList(multi_map_key_set_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + multi_map_key_set_codec.decode_response(message), self._to_object + ) request = multi_map_key_set_codec.encode_request(self.name) return self._invoke(request, handler) def lock(self, key, lease_time=None): """Acquires the lock for the specified key infinitely or for the specified lease time if provided. - + If the lock is not available, the current thread becomes disabled for thread scheduling purposes and lies dormant until the lock has been acquired. - + Scope of the lock is this map only. Acquired lock is only for the key in this map. - + Locks are re-entrant; so, if the key is locked N times, it should be unlocked N times before another thread can acquire it. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -235,13 +279,18 @@ def lock(self, key, lease_time=None): """ check_not_none(key, "key can't be None") key_data = self._to_data(key) - request = multi_map_lock_codec.encode_request(self.name, key_data, thread_id(), to_millis(lease_time), - self._reference_id_generator.get_and_increment()) + request = multi_map_lock_codec.encode_request( + self.name, + key_data, + thread_id(), + to_millis(lease_time), + self._reference_id_generator.get_and_increment(), + ) return self._invoke_on_key(request, key_data) def remove(self, key, value): """Removes the given key-value tuple from the multimap. - + Warning: This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. @@ -258,16 +307,18 @@ def remove(self, key, value): check_not_none(key, "value can't be None") key_data = self._to_data(key) value_data = self._to_data(value) - request = multi_map_remove_entry_codec.encode_request(self.name, key_data, value_data, thread_id()) + request = multi_map_remove_entry_codec.encode_request( + self.name, key_data, value_data, thread_id() + ) return self._invoke_on_key(request, key_data, multi_map_remove_entry_codec.decode_response) def remove_all(self, key): """Removes all the entries with the given key and returns the value list associated with this key. 
- - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. - + Warning: The returned list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. @@ -281,7 +332,9 @@ def remove_all(self, key): check_not_none(key, "key can't be None") def handler(message): - return ImmutableLazyDataList(multi_map_remove_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + multi_map_remove_codec.decode_response(message), self._to_object + ) key_data = self._to_data(key) request = multi_map_remove_codec.encode_request(self.name, key_data, thread_id()) @@ -289,9 +342,9 @@ def handler(message): def put(self, key, value): """Stores a key-value tuple in the multimap. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -310,8 +363,8 @@ def put(self, key, value): return self._invoke_on_key(request, key_data, multi_map_put_codec.decode_response) def remove_entry_listener(self, registration_id): - """Removes the specified entry listener. - + """Removes the specified entry listener. + Returns silently if there is no such listener added before. Args: @@ -324,7 +377,7 @@ def remove_entry_listener(self, registration_id): def size(self): """Returns the number of entries in this multimap. - + Returns: hazelcast.future.Future[int]: Number of entries in this multimap. """ @@ -333,9 +386,9 @@ def size(self): def value_count(self, key): """Returns the number of values that match the given key in the multimap. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -351,7 +404,7 @@ def value_count(self, key): def values(self): """Returns the list of values in the multimap. - + Warning: The returned list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. @@ -359,15 +412,18 @@ def values(self): Returns: hazelcast.future.Future[list]: The list of values in the multimap. """ + def handler(message): - return ImmutableLazyDataList(multi_map_values_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + multi_map_values_codec.decode_response(message), self._to_object + ) request = multi_map_values_codec.encode_request(self.name) return self._invoke(request, handler) def try_lock(self, key, lease_time=None, timeout=0): - """Tries to acquire the lock for the specified key. - + """Tries to acquire the lock for the specified key. + When the lock is not available: - If the timeout is not provided, the current thread doesn't wait and returns ``False`` immediately. @@ -376,7 +432,7 @@ def try_lock(self, key, lease_time=None, timeout=0): - The lock is acquired by the current thread, or - The specified waiting time elapses. - + If the lease time is provided, lock will be released after this time elapses. 
Args: @@ -389,16 +445,21 @@ def try_lock(self, key, lease_time=None, timeout=0): """ check_not_none(key, "key can't be None") key_data = self._to_data(key) - request = multi_map_try_lock_codec.encode_request(self.name, key_data, thread_id(), - to_millis(lease_time), to_millis(timeout), - self._reference_id_generator.get_and_increment()) + request = multi_map_try_lock_codec.encode_request( + self.name, + key_data, + thread_id(), + to_millis(lease_time), + to_millis(timeout), + self._reference_id_generator.get_and_increment(), + ) return self._invoke_on_key(request, key_data, multi_map_try_lock_codec.decode_response) def unlock(self, key): """Releases the lock for the specified key. It never blocks and returns immediately. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -409,6 +470,7 @@ def unlock(self, key): """ check_not_none(key, "key can't be None") key_data = self._to_data(key) - request = multi_map_unlock_codec.encode_request(self.name, key_data, thread_id(), - self._reference_id_generator.get_and_increment()) - return self._invoke_on_key(request, key_data) \ No newline at end of file + request = multi_map_unlock_codec.encode_request( + self.name, key_data, thread_id(), self._reference_id_generator.get_and_increment() + ) + return self._invoke_on_key(request, key_data) diff --git a/hazelcast/proxy/pn_counter.py b/hazelcast/proxy/pn_counter.py index 10310500e0..23e0364217 100644 --- a/hazelcast/proxy/pn_counter.py +++ b/hazelcast/proxy/pn_counter.py @@ -5,8 +5,11 @@ from hazelcast.future import Future from hazelcast.proxy.base import Proxy from hazelcast.cluster import VectorClock -from hazelcast.protocol.codec import pn_counter_add_codec, pn_counter_get_codec, \ - pn_counter_get_configured_replica_count_codec +from hazelcast.protocol.codec import ( + pn_counter_add_codec, + pn_counter_get_codec, + pn_counter_get_configured_replica_count_codec, +) from hazelcast.errors import NoDataMemberInClusterError from hazelcast.six.moves import range @@ -15,7 +18,7 @@ class PNCounter(Proxy): """PN (Positive-Negative) CRDT counter. - + The counter supports adding and subtracting values as well as retrieving the current counter value. Each replica of this counter can perform operations locally without @@ -25,19 +28,19 @@ class PNCounter(Proxy): identical, and any conflicting updates are merged automatically. If no new updates are made to the shared state, all nodes that can communicate will eventually have the same data. - + When invoking updates from the client, the invocation is remote. This may lead to indeterminate state - the update may be applied but the response has not been received. In this case, the caller will be notified with a TargetDisconnectedError. - + The read and write methods provide monotonic read and RYW (read-your-write) guarantees. These guarantees are session guarantees which means that if no replica with the previously observed state is reachable, the session guarantees are lost and the method invocation will throw a ConsistencyLostError. This does not mean that an update is lost. All of the updates are part of some replica and - will be eventually reflected in the state of all other replicas. This + will be eventually reflected in the state of all other replicas. 
This exception just means that you cannot observe your own writes because all replicas that contain your updates are currently unreachable. After you have received a ConsistencyLostError, you can either @@ -134,7 +137,9 @@ def subtract_and_get(self, delta): ConsistencyLostError: if the session guarantees have been lost. """ - return self._invoke_internal(pn_counter_add_codec, delta=-1 * delta, get_before_update=False) + return self._invoke_internal( + pn_counter_add_codec, delta=-1 * delta, get_before_update=False + ) def get_and_decrement(self): """Decrements the counter value by one and returns the previous value. @@ -200,35 +205,58 @@ def reset(self): def _invoke_internal(self, codec, **kwargs): delegated_future = Future() - self._set_result_or_error(delegated_future, PNCounter._EMPTY_ADDRESS_LIST, None, codec, **kwargs) + self._set_result_or_error( + delegated_future, PNCounter._EMPTY_ADDRESS_LIST, None, codec, **kwargs + ) return delegated_future - def _set_result_or_error(self, delegated_future, excluded_addresses, last_error, codec, **kwargs): + def _set_result_or_error( + self, delegated_future, excluded_addresses, last_error, codec, **kwargs + ): target = self._get_crdt_operation_target(excluded_addresses) if not target: if last_error: delegated_future.set_exception(last_error) return - delegated_future.set_exception(NoDataMemberInClusterError("Cannot invoke operations on a CRDT because " - "the cluster does not contain any data members")) + delegated_future.set_exception( + NoDataMemberInClusterError( + "Cannot invoke operations on a CRDT because " + "the cluster does not contain any data members" + ) + ) return - request = codec.encode_request(name=self.name, replica_timestamps=self._observed_clock.entry_set(), - target_replica_uuid=target.uuid, **kwargs) + request = codec.encode_request( + name=self.name, + replica_timestamps=self._observed_clock.entry_set(), + target_replica_uuid=target.uuid, + **kwargs + ) future = self._invoke_on_target(request, target.uuid, codec.decode_response) - checker_func = functools.partial(self._check_invocation_result, delegated_future=delegated_future, - excluded_addresses=excluded_addresses, target=target, codec=codec, **kwargs) + checker_func = functools.partial( + self._check_invocation_result, + delegated_future=delegated_future, + excluded_addresses=excluded_addresses, + target=target, + codec=codec, + **kwargs + ) future.add_done_callback(checker_func) - def _check_invocation_result(self, future, delegated_future, excluded_addresses, target, codec, **kwargs): + def _check_invocation_result( + self, future, delegated_future, excluded_addresses, target, codec, **kwargs + ): try: result = future.result() self._update_observed_replica_timestamp(result["replica_timestamps"]) delegated_future.set_result(result["value"]) except Exception as ex: - _logger.exception("Exception occurred while invoking operation on target %s, " - "choosing different target", target) + _logger.exception( + "Exception occurred while invoking operation on target %s, " + "choosing different target", + target, + ) if excluded_addresses == PNCounter._EMPTY_ADDRESS_LIST: excluded_addresses = [] @@ -236,8 +264,10 @@ def _check_invocation_result(self, future, delegated_future, excluded_addresses, self._set_result_or_error(delegated_future, excluded_addresses, ex, codec, **kwargs) def _get_crdt_operation_target(self, excluded_addresses): - if self._current_target_replica_address and \ - self._current_target_replica_address not in excluded_addresses: + if ( + 
self._current_target_replica_address + and self._current_target_replica_address not in excluded_addresses + ): return self._current_target_replica_address self._current_target_replica_address = self._choose_target_replica(excluded_addresses) @@ -253,7 +283,9 @@ def _choose_target_replica(self, excluded_addresses): return replica_addresses[random_replica_index] def _get_replica_addresses(self, excluded_addresses): - data_members = self._context.cluster_service.get_members(lambda member: not member.lite_member) + data_members = self._context.cluster_service.get_members( + lambda member: not member.lite_member + ) replica_count = self._get_max_configured_replica_count() current_count = min(replica_count, len(data_members)) @@ -271,7 +303,9 @@ def _get_max_configured_replica_count(self): return self._max_replica_count request = pn_counter_get_configured_replica_count_codec.encode_request(self.name) - count = self._invoke(request, pn_counter_get_configured_replica_count_codec.decode_response).result() + count = self._invoke( + request, pn_counter_get_configured_replica_count_codec.decode_response + ).result() self._max_replica_count = count return self._max_replica_count diff --git a/hazelcast/proxy/queue.py b/hazelcast/proxy/queue.py index a5435fbf25..e9dbcd7ae9 100644 --- a/hazelcast/proxy/queue.py +++ b/hazelcast/proxy/queue.py @@ -1,34 +1,36 @@ from hazelcast.errors import IllegalStateError -from hazelcast.protocol.codec import \ - queue_add_all_codec, \ - queue_add_listener_codec, \ - queue_clear_codec, \ - queue_compare_and_remove_all_codec, \ - queue_compare_and_retain_all_codec, \ - queue_contains_all_codec, \ - queue_contains_codec, \ - queue_drain_to_max_size_codec, \ - queue_is_empty_codec, \ - queue_iterator_codec, \ - queue_offer_codec, \ - queue_peek_codec, \ - queue_poll_codec, \ - queue_put_codec, \ - queue_remaining_capacity_codec, \ - queue_remove_codec, \ - queue_remove_listener_codec, \ - queue_size_codec, \ - queue_take_codec +from hazelcast.protocol.codec import ( + queue_add_all_codec, + queue_add_listener_codec, + queue_clear_codec, + queue_compare_and_remove_all_codec, + queue_compare_and_retain_all_codec, + queue_contains_all_codec, + queue_contains_codec, + queue_drain_to_max_size_codec, + queue_is_empty_codec, + queue_iterator_codec, + queue_offer_codec, + queue_peek_codec, + queue_poll_codec, + queue_put_codec, + queue_remaining_capacity_codec, + queue_remove_codec, + queue_remove_listener_codec, + queue_size_codec, + queue_take_codec, +) from hazelcast.proxy.base import PartitionSpecificProxy, ItemEvent, ItemEventType from hazelcast.util import check_not_none, to_millis, ImmutableLazyDataList class Queue(PartitionSpecificProxy): - """Concurrent, blocking, distributed, observable queue. - - Queue is not a partitioned data-structure. All of the Queue content is stored in + """Concurrent, blocking, distributed, observable queue. + + Queue is not a partitioned data-structure. All of the Queue content is stored in a single machine (and in the backup). Queue will not scale by adding more members in the cluster. """ + def add(self, item): """Adds the specified item to this queue if there is available space. @@ -38,6 +40,7 @@ def add(self, item): Returns: hazelcast.future.Future[bool]: ``True`` if element is successfully added, ``False`` otherwise. 
""" + def result_fnc(f): if f.result(): return True @@ -89,13 +92,16 @@ def handle_event_item(item, uuid, event_type): if item_removed_func: item_removed_func(item_event) - return self._register_listener(request, lambda r: codec.decode_response(r), - lambda reg_id: queue_remove_listener_codec.encode_request(self.name, reg_id), - lambda m: codec.handle(m, handle_event_item)) + return self._register_listener( + request, + lambda r: codec.decode_response(r), + lambda reg_id: queue_remove_listener_codec.encode_request(self.name, reg_id), + lambda m: codec.handle(m, handle_event_item), + ) def clear(self): """Clears this queue. Queue will be empty after this call. - + Returns: hazelcast.future.Future[None]: """ @@ -136,11 +142,11 @@ def contains_all(self, items): return self._invoke(request, queue_contains_all_codec.decode_response) def drain_to(self, target_list, max_size=-1): - """Transfers all available items to the given `target_list` and removes these items from this queue. - - If a max_size is specified, it transfers at most the given number of items. + """Transfers all available items to the given `target_list` and removes these items from this queue. + + If a max_size is specified, it transfers at most the given number of items. In case of a failure, an item can exist in both collections or none of them. - + This operation may be more efficient than polling elements repeatedly and putting into collection. Args: @@ -150,29 +156,33 @@ def drain_to(self, target_list, max_size=-1): Returns: hazelcast.future.Future[int]: Number of transferred items. """ + def handler(message): response = queue_drain_to_max_size_codec.decode_response(message) target_list.extend(map(self._to_object, response)) return len(response) - + request = queue_drain_to_max_size_codec.encode_request(self.name, max_size) return self._invoke(request, handler) def iterator(self): """Returns all of the items in this queue. - + Returns: list: Collection of items in this queue. """ + def handler(message): - return ImmutableLazyDataList(queue_iterator_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + queue_iterator_codec.decode_response(message), self._to_object + ) request = queue_iterator_codec.encode_request(self.name) return self._invoke(request, handler) def is_empty(self): """Determines whether this set is empty or not. - + Returns: hazelcast.future.Future[bool]: ``True`` if this queue is empty, ``False`` otherwise. """ @@ -180,9 +190,9 @@ def is_empty(self): return self._invoke(request, queue_is_empty_codec.decode_response) def offer(self, item, timeout=0): - """Inserts the specified element into this queue if it is possible to do so immediately - without violating capacity restrictions. - + """Inserts the specified element into this queue if it is possible to do so immediately + without violating capacity restrictions. + If there is no space currently available: - If the timeout is provided, it waits until this timeout elapses and returns the result. @@ -201,11 +211,12 @@ def offer(self, item, timeout=0): return self._invoke(request, queue_offer_codec.decode_response) def peek(self): - """Retrieves the head of queue without removing it from the queue. - + """Retrieves the head of queue without removing it from the queue. + Returns: hazelcast.future.Future[any]: the head of this queue, or ``None`` if this queue is empty. 
""" + def handler(message): return self._to_object(queue_peek_codec.decode_response(message)) @@ -214,7 +225,7 @@ def handler(message): def poll(self, timeout=0): """Retrieves and removes the head of this queue. - + If this queue is empty: - If the timeout is provided, it waits until this timeout elapses and returns the result. @@ -224,9 +235,10 @@ def poll(self, timeout=0): timeout (int): Maximum time in seconds to wait for addition. Returns: - hazelcast.future.Future[any]: The head of this queue, or ``None`` if this queue is empty + hazelcast.future.Future[any]: The head of this queue, or ``None`` if this queue is empty or specified timeout elapses before an item is added to the queue. """ + def handler(message): return self._to_object(queue_poll_codec.decode_response(message)) @@ -234,8 +246,8 @@ def handler(message): return self._invoke(request, handler) def put(self, item): - """Adds the specified element into this queue. - + """Adds the specified element into this queue. + If there is no space, it waits until necessary space becomes available. Args: @@ -251,7 +263,7 @@ def put(self, item): def remaining_capacity(self): """Returns the remaining capacity of this queue. - + Returns: hazelcast.future.Future[int]: Remaining capacity of this queue. """ @@ -326,7 +338,7 @@ def size(self): """Returns the number of elements in this collection. If the size is greater than ``2**31 - 1``, it returns ``2**31 - 1`` - + Returns: hazelcast.future.Future[int]: Size of the queue. """ @@ -335,10 +347,11 @@ def size(self): def take(self): """Retrieves and removes the head of this queue, if necessary, waits until an item becomes available. - + Returns: hazelcast.future.Future[any]: The head of this queue. """ + def handler(message): return self._to_object(queue_take_codec.decode_response(message)) diff --git a/hazelcast/proxy/replicated_map.py b/hazelcast/proxy/replicated_map.py index b88cd9ed22..581728bea3 100644 --- a/hazelcast/proxy/replicated_map.py +++ b/hazelcast/proxy/replicated_map.py @@ -1,11 +1,23 @@ from random import randint -from hazelcast.protocol.codec import replicated_map_clear_codec, replicated_map_add_entry_listener_codec, \ - replicated_map_add_entry_listener_to_key_codec, replicated_map_add_entry_listener_to_key_with_predicate_codec, \ - replicated_map_add_entry_listener_with_predicate_codec, replicated_map_contains_key_codec, \ - replicated_map_contains_value_codec, replicated_map_entry_set_codec, replicated_map_get_codec, \ - replicated_map_is_empty_codec, replicated_map_key_set_codec, replicated_map_put_all_codec, replicated_map_put_codec, \ - replicated_map_remove_codec, replicated_map_remove_entry_listener_codec, replicated_map_size_codec, \ - replicated_map_values_codec +from hazelcast.protocol.codec import ( + replicated_map_clear_codec, + replicated_map_add_entry_listener_codec, + replicated_map_add_entry_listener_to_key_codec, + replicated_map_add_entry_listener_to_key_with_predicate_codec, + replicated_map_add_entry_listener_with_predicate_codec, + replicated_map_contains_key_codec, + replicated_map_contains_value_codec, + replicated_map_entry_set_codec, + replicated_map_get_codec, + replicated_map_is_empty_codec, + replicated_map_key_set_codec, + replicated_map_put_all_codec, + replicated_map_put_codec, + replicated_map_remove_codec, + replicated_map_remove_entry_listener_codec, + replicated_map_size_codec, + replicated_map_values_codec, +) from hazelcast.proxy.base import Proxy, EntryEvent, EntryEventType from hazelcast.util import to_millis, check_not_none, 
ImmutableLazyDataList from hazelcast import six @@ -14,22 +26,31 @@ class ReplicatedMap(Proxy): """A ReplicatedMap is a map-like data structure with weak consistency and values locally stored on every node of the cluster. - + Whenever a value is written asynchronously, the new value will be internally distributed to all existing cluster members, and eventually every node will have the new value. - + When a new node joins the cluster, the new node initially will request existing values from older nodes and replicate them locally. """ + def __init__(self, service_name, name, context): super(ReplicatedMap, self).__init__(service_name, name, context) partition_service = context.partition_service self._partition_id = randint(0, partition_service.partition_count - 1) - def add_entry_listener(self, key=None, predicate=None, added_func=None, removed_func=None, updated_func=None, - evicted_func=None, clear_all_func=None): - """Adds a continuous entry listener for this map. - + def add_entry_listener( + self, + key=None, + predicate=None, + added_func=None, + removed_func=None, + updated_func=None, + evicted_func=None, + clear_all_func=None, + ): + """Adds a continuous entry listener for this map. + Listener will get notified for map events filtered with given parameters. Args: @@ -56,15 +77,24 @@ def add_entry_listener(self, key=None, predicate=None, added_func=None, removed_ elif not key and predicate: codec = replicated_map_add_entry_listener_with_predicate_codec predicate = self._to_data(predicate) - request = codec.encode_request( - self.name, predicate, self._is_smart) + request = codec.encode_request(self.name, predicate, self._is_smart) else: codec = replicated_map_add_entry_listener_codec request = codec.encode_request(self.name, self._is_smart) - def handle_event_entry(key, value, old_value, merging_value, event_type, uuid, number_of_affected_entries): - event = EntryEvent(self._to_object, key, value, old_value, merging_value, - event_type, uuid, number_of_affected_entries) + def handle_event_entry( + key, value, old_value, merging_value, event_type, uuid, number_of_affected_entries + ): + event = EntryEvent( + self._to_object, + key, + value, + old_value, + merging_value, + event_type, + uuid, + number_of_affected_entries, + ) if event.event_type == EntryEventType.ADDED and added_func: added_func(event) elif event.event_type == EntryEventType.REMOVED and removed_func: @@ -77,13 +107,17 @@ def handle_event_entry(key, value, old_value, merging_value, event_type, uuid, n clear_all_func(event) return self._register_listener( - request, lambda r: codec.decode_response(r), - lambda reg_id: replicated_map_remove_entry_listener_codec.encode_request(self.name, reg_id), - lambda m: codec.handle(m, handle_event_entry)) + request, + lambda r: codec.decode_response(r), + lambda reg_id: replicated_map_remove_entry_listener_codec.encode_request( + self.name, reg_id + ), + lambda m: codec.handle(m, handle_event_entry), + ) def clear(self): """Wipes data out of the replicated map. - + Returns: hazelcast.future.Future[None]: """ @@ -92,22 +126,24 @@ def clear(self): def contains_key(self, key): """Determines whether this map contains an entry with the key. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: key: The specified key. 
Returns: - hazelcast.future.Future[bool]: ``True`` if this map contains an entry for the specified key, + hazelcast.future.Future[bool]: ``True`` if this map contains an entry for the specified key, ``False`` otherwise. """ check_not_none(key, "key can't be None") key_data = self._to_data(key) request = replicated_map_contains_key_codec.encode_request(self.name, key_data) - return self._invoke_on_key(request, key_data, replicated_map_contains_key_codec.decode_response) + return self._invoke_on_key( + request, key_data, replicated_map_contains_key_codec.decode_response + ) def contains_value(self, value): """Determines whether this map contains one or more keys for the specified value. @@ -122,29 +158,33 @@ def contains_value(self, value): check_not_none(value, "value can't be None") value_data = self._to_data(value) request = replicated_map_contains_value_codec.encode_request(self.name, value_data) - return self._invoke_on_partition(request, self._partition_id, - replicated_map_contains_value_codec.decode_response) + return self._invoke_on_partition( + request, self._partition_id, replicated_map_contains_value_codec.decode_response + ) def entry_set(self): """Returns a List clone of the mappings contained in this map. - + Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. Returns: hazelcast.future.Future[list[tuple]]: The list of key-value tuples in the map. """ + def handler(message): - return ImmutableLazyDataList(replicated_map_entry_set_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + replicated_map_entry_set_codec.decode_response(message), self._to_object + ) request = replicated_map_entry_set_codec.encode_request(self.name) return self._invoke_on_partition(request, self._partition_id, handler) def get(self, key): """Returns the value for the specified key, or ``None`` if this map does not contain this key. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: @@ -164,42 +204,47 @@ def handler(message): def is_empty(self): """Returns ``True`` if this map contains no key-value mappings. - + Returns: hazelcast.future.Future[bool]: ``True`` if this map contains no key-value mappings. """ request = replicated_map_is_empty_codec.encode_request(self.name) - return self._invoke_on_partition(request, self._partition_id, replicated_map_is_empty_codec.decode_response) + return self._invoke_on_partition( + request, self._partition_id, replicated_map_is_empty_codec.decode_response + ) def key_set(self): """Returns the list of keys in the ReplicatedMap. - + Warning: The list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. - + Returns: hazelcast.future.Future[list]: A list of the clone of the keys. """ + def handler(message): - return ImmutableLazyDataList(replicated_map_key_set_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + replicated_map_key_set_codec.decode_response(message), self._to_object + ) request = replicated_map_key_set_codec.encode_request(self.name) return self._invoke_on_partition(request, self._partition_id, handler) def put(self, key, value, ttl=0): - """Associates the specified value with the specified key in this map. 
- - If the map previously contained a mapping for the key, the old value is replaced by the specified value. + """Associates the specified value with the specified key in this map. + + If the map previously contained a mapping for the key, the old value is replaced by the specified value. If ttl is provided, entry will expire and get evicted after the ttl. Args: key: The specified key. value: The value to associate with the key. - ttl (int): Maximum time in seconds for this entry to stay, if not provided, the value configured + ttl (int): Maximum time in seconds for this entry to stay, if not provided, the value configured on server side configuration will be used. Returns: - hazelcast.future.Future[any]: Previous value associated with key or ``None`` + hazelcast.future.Future[any]: Previous value associated with key or ``None`` if there was no mapping for key. """ check_not_none(key, "key can't be None") @@ -210,13 +255,15 @@ def handler(message): key_data = self._to_data(key) value_data = self._to_data(value) - request = replicated_map_put_codec.encode_request(self.name, key_data, value_data, to_millis(ttl)) + request = replicated_map_put_codec.encode_request( + self.name, key_data, value_data, to_millis(ttl) + ) return self._invoke_on_key(request, key_data, handler) def put_all(self, source): - """Copies all of the mappings from the specified map to this map. - - No atomicity guarantees are given. In the case of a failure, + """Copies all of the mappings from the specified map to this map. + + No atomicity guarantees are given. In the case of a failure, some of the key-value tuples may get written, while others are not. Args: @@ -235,19 +282,19 @@ def put_all(self, source): return self._invoke(request) def remove(self, key): - """Removes the mapping for a key from this map if it is present. - + """Removes the mapping for a key from this map if it is present. + The map will not contain a mapping for the specified key once the call returns. - - Warning: - This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the + + Warning: + This method uses ``__hash__`` and ``__eq__`` methods of binary form of the key, not the actual implementations of ``__hash__`` and ``__eq__`` defined in key's class. Args: key: Key of the mapping to be deleted. Returns: - hazelcast.future.Future[any]: The previous value associated with key, or ``None`` + hazelcast.future.Future[any]: The previous value associated with key, or ``None`` if there was no mapping for key. """ check_not_none(key, "key can't be None") @@ -260,8 +307,8 @@ def handler(message): return self._invoke_on_key(request, key_data, handler) def remove_entry_listener(self, registration_id): - """Removes the specified entry listener. - + """Removes the specified entry listener. + Returns silently if there is no such listener added before. Args: @@ -274,25 +321,30 @@ def remove_entry_listener(self, registration_id): def size(self): """Returns the number of entries in this multimap. - + Returns: hazelcast.future.Future[int]: Number of entries in this multimap. """ request = replicated_map_size_codec.encode_request(self.name) - return self._invoke_on_partition(request, self._partition_id, replicated_map_size_codec.decode_response) + return self._invoke_on_partition( + request, self._partition_id, replicated_map_size_codec.decode_response + ) def values(self): """Returns the list of values in the map. - + Warning: The returned list is NOT backed by the map, so changes to the map are NOT reflected in the list, and vice-versa. 
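# Illustrative aside — a minimal, hedged sketch of the put/get/values calls documented in
# this file, reusing the assumed `rmap` proxy from the earlier sketch; `.result()` blocks
# on the returned futures.
previous = rmap.put("greeting", "hello", ttl=30).result()   # None if the key was absent
value = rmap.get("greeting").result()                        # "hello"
snapshot = rmap.values().result()                            # a detached clone, not a live view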
- + Returns: hazelcast.future.Future[list]: The list of values in the map. """ + def handler(message): - return ImmutableLazyDataList(replicated_map_values_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + replicated_map_values_codec.decode_response(message), self._to_object + ) request = replicated_map_values_codec.encode_request(self.name) - return self._invoke_on_partition(request, self._partition_id, handler) \ No newline at end of file + return self._invoke_on_partition(request, self._partition_id, handler) diff --git a/hazelcast/proxy/ringbuffer.py b/hazelcast/proxy/ringbuffer.py index 95da00784f..0f83cea12b 100644 --- a/hazelcast/proxy/ringbuffer.py +++ b/hazelcast/proxy/ringbuffer.py @@ -1,9 +1,23 @@ from hazelcast.future import ImmediateFuture, Future -from hazelcast.protocol.codec import ringbuffer_add_all_codec, ringbuffer_add_codec, ringbuffer_capacity_codec, \ - ringbuffer_head_sequence_codec, ringbuffer_read_many_codec, ringbuffer_read_one_codec, \ - ringbuffer_remaining_capacity_codec, ringbuffer_size_codec, ringbuffer_tail_sequence_codec +from hazelcast.protocol.codec import ( + ringbuffer_add_all_codec, + ringbuffer_add_codec, + ringbuffer_capacity_codec, + ringbuffer_head_sequence_codec, + ringbuffer_read_many_codec, + ringbuffer_read_one_codec, + ringbuffer_remaining_capacity_codec, + ringbuffer_size_codec, + ringbuffer_tail_sequence_codec, +) from hazelcast.proxy.base import PartitionSpecificProxy -from hazelcast.util import check_not_negative, check_not_none, check_not_empty, check_true, ImmutableLazyDataList +from hazelcast.util import ( + check_not_negative, + check_not_none, + check_not_empty, + check_true, + ImmutableLazyDataList, +) OVERFLOW_POLICY_OVERWRITE = 0 """ @@ -34,36 +48,38 @@ class Ringbuffer(PartitionSpecificProxy): - """A Ringbuffer is a data-structure where the content is stored in a ring like structure. - - A Ringbuffer has a capacity so it won't grow beyond that capacity and endanger the stability of the system. - If that capacity is exceeded, than the oldest item in the Ringbuffer is overwritten. + """A Ringbuffer is a data-structure where the content is stored in a ring like structure. + + A Ringbuffer has a capacity so it won't grow beyond that capacity and endanger the stability of the system. + If that capacity is exceeded, than the oldest item in the Ringbuffer is overwritten. The Ringbuffer has 2 always incrementing sequences: - Tail_sequence: This is the side where the youngest item is found. So the tail is the side of the Ringbuffer where items are added to. - Head_sequence: This is the side where the oldest items are found. So the head is the side where items gets discarded. - + The items in the Ringbuffer can be found by a sequence that is in between (inclusive) the head and tail sequence. - + A Ringbuffer currently is not a distributed data-structure. So all data is stored in a single partition; comparable to the IQueue implementation. But we'll provide an option to partition the data in the near future. A Ringbuffer can be used in a similar way as a queue, but one of the key differences is that a queue.take is destructive, meaning that only 1 thread is able to take an item. A Ringbuffer.read is not destructive, so you can have multiple threads reading the same item multiple times. """ + def __init__(self, service_name, name, context): super(Ringbuffer, self).__init__(service_name, name, context) self._capacity = None def capacity(self): """Returns the capacity of this Ringbuffer. 
- + Returns: hazelcast.future.Future[int]: The capacity of Ringbuffer. """ if not self._capacity: + def handler(message): self._capacity = ringbuffer_capacity_codec.decode_response(message) return self._capacity @@ -74,7 +90,7 @@ def handler(message): def size(self): """Returns number of items in the Ringbuffer. - + Returns: hazelcast.future.Future[int]: The size of Ringbuffer. """ @@ -82,8 +98,8 @@ def size(self): return self._invoke(request, ringbuffer_size_codec.decode_response) def tail_sequence(self): - """Returns the sequence of the tail. - + """Returns the sequence of the tail. + The tail is the side of the Ringbuffer where the items are added to. The initial value of the tail is -1. Returns: @@ -93,12 +109,12 @@ def tail_sequence(self): return self._invoke(request, ringbuffer_tail_sequence_codec.decode_response) def head_sequence(self): - """Returns the sequence of the head. - - The head is the side of the Ringbuffer where the oldest items in the Ringbuffer are found. + """Returns the sequence of the head. + + The head is the side of the Ringbuffer where the oldest items in the Ringbuffer are found. If the Ringbuffer is empty, the head will be one more than the tail. The initial value of the head is 0 (1 more than tail). - + Returns: hazelcast.future.Future[int]: The sequence of the head. """ @@ -107,7 +123,7 @@ def head_sequence(self): def remaining_capacity(self): """Returns the remaining capacity of the Ringbuffer. - + Returns: hazelcast.future.Future[int]: The remaining capacity of Ringbuffer. """ @@ -115,9 +131,9 @@ def remaining_capacity(self): return self._invoke(request, ringbuffer_remaining_capacity_codec.decode_response) def add(self, item, overflow_policy=OVERFLOW_POLICY_OVERWRITE): - """Adds the specified item to the tail of the Ringbuffer. - - If there is no space in the Ringbuffer, the action is determined by overflow policy + """Adds the specified item to the tail of the Ringbuffer. + + If there is no space in the Ringbuffer, the action is determined by overflow policy as ``OVERFLOW_POLICY_OVERWRITE`` or ``OVERFLOW_POLICY_FAIL``. Args: @@ -132,12 +148,12 @@ def add(self, item, overflow_policy=OVERFLOW_POLICY_OVERWRITE): return self._invoke(request, ringbuffer_add_codec.decode_response) def add_all(self, items, overflow_policy=OVERFLOW_POLICY_OVERWRITE): - """Adds all of the item in the specified collection to the tail of the Ringbuffer. - - An add_all is likely to outperform multiple calls to add(object) due to better io utilization + """Adds all of the item in the specified collection to the tail of the Ringbuffer. + + An add_all is likely to outperform multiple calls to add(object) due to better io utilization and a reduced number of executed operations. The items are added in the order of the Iterator of the collection. - - If there is no space in the Ringbuffer, the action is determined by overflow policy + + If there is no space in the Ringbuffer, the action is determined by overflow policy as ``OVERFLOW_POLICY_OVERWRITE`` or ``OVERFLOW_POLICY_FAIL``. Args: @@ -145,7 +161,7 @@ def add_all(self, items, overflow_policy=OVERFLOW_POLICY_OVERWRITE): overflow_policy (int): The OverflowPolicy to be used when there is no space. Returns: - hazelcast.future.Future[int]: The sequenceId of the last written item, or ``-1`` + hazelcast.future.Future[int]: The sequenceId of the last written item, or ``-1`` of the last write is failed. 
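# Illustrative aside — a hedged sketch of the Ringbuffer operations and overflow policies
# described above, reusing the assumed connected `client`; the buffer name is made up.
from hazelcast.proxy.ringbuffer import OVERFLOW_POLICY_FAIL

rb = client.get_ringbuffer("events")
seq = rb.add("first-event").result()                           # sequence id of the written item
last = rb.add_all(["a", "b"], OVERFLOW_POLICY_FAIL).result()   # -1 if there was no space left
head = rb.head_sequence().result()                             # oldest readable sequence
item = rb.read_one(head).result()                              # blocks if sequence is one past the tail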
""" check_not_empty(items, "items can't be empty") @@ -157,13 +173,15 @@ def add_all(self, items, overflow_policy=OVERFLOW_POLICY_OVERWRITE): check_not_none(item, "item can't be None") item_data_list.append(self._to_data(item)) - request = ringbuffer_add_all_codec.encode_request(self.name, item_data_list, overflow_policy) + request = ringbuffer_add_all_codec.encode_request( + self.name, item_data_list, overflow_policy + ) return self._invoke(request, ringbuffer_add_all_codec.decode_response) def read_one(self, sequence): - """Reads one item from the Ringbuffer. - - If the sequence is one beyond the current tail, this call blocks until an item is added. + """Reads one item from the Ringbuffer. + + If the sequence is one beyond the current tail, this call blocks until an item is added. Currently it isn't possible to control how long this call is going to block. Args: @@ -181,10 +199,10 @@ def handler(message): return self._invoke(request, handler) def read_many(self, start_sequence, min_count, max_count): - """Reads a batch of items from the Ringbuffer. - - If the number of available items after the first read item is smaller than the max_count, - these items are returned. So it could be the number of items read is smaller than the max_count. + """Reads a batch of items from the Ringbuffer. + + If the number of available items after the first read item is smaller than the max_count, + these items are returned. So it could be the number of items read is smaller than the max_count. If there are less items available than min_count, then this call blocks. Reading a batch of items is likely to perform better because less overhead is involved. @@ -198,19 +216,28 @@ def read_many(self, start_sequence, min_count, max_count): """ check_not_negative(start_sequence, "sequence can't be smaller than 0") check_true(max_count >= min_count, "max count should be greater or equal to min count") - check_true(max_count < MAX_BATCH_SIZE, "max count can't be greater than %d" % MAX_BATCH_SIZE) + check_true( + max_count < MAX_BATCH_SIZE, "max count can't be greater than %d" % MAX_BATCH_SIZE + ) future = Future() - request = ringbuffer_read_many_codec.encode_request(self.name, start_sequence, min_count, max_count, None) + request = ringbuffer_read_many_codec.encode_request( + self.name, start_sequence, min_count, max_count, None + ) def handler(message): - return ImmutableLazyDataList(ringbuffer_read_many_codec.decode_response(message)["items"], self._to_object) + return ImmutableLazyDataList( + ringbuffer_read_many_codec.decode_response(message)["items"], self._to_object + ) def check_capacity(capacity): try: capacity = capacity.result() - check_true(min_count <= capacity, "min count: %d should be smaller or equal to capacity: %d" - % (min_count, capacity)) + check_true( + min_count <= capacity, + "min count: %d should be smaller or equal to capacity: %d" + % (min_count, capacity), + ) f = self._invoke(request, handler) f.add_done_callback(set_result) except Exception as e: diff --git a/hazelcast/proxy/set.py b/hazelcast/proxy/set.py index 522a98df1d..adf72a115d 100644 --- a/hazelcast/proxy/set.py +++ b/hazelcast/proxy/set.py @@ -1,17 +1,18 @@ -from hazelcast.protocol.codec import \ - set_add_all_codec, \ - set_add_codec, \ - set_add_listener_codec, \ - set_clear_codec, \ - set_compare_and_remove_all_codec, \ - set_compare_and_retain_all_codec, \ - set_contains_all_codec, \ - set_contains_codec, \ - set_get_all_codec, \ - set_is_empty_codec, \ - set_remove_codec, \ - set_remove_listener_codec, \ - set_size_codec 
+from hazelcast.protocol.codec import ( + set_add_all_codec, + set_add_codec, + set_add_listener_codec, + set_clear_codec, + set_compare_and_remove_all_codec, + set_compare_and_retain_all_codec, + set_contains_all_codec, + set_contains_codec, + set_get_all_codec, + set_is_empty_codec, + set_remove_codec, + set_remove_listener_codec, + set_size_codec, +) from hazelcast.proxy.base import PartitionSpecificProxy, ItemEvent, ItemEventType from hazelcast.util import check_not_none, ImmutableLazyDataList @@ -19,7 +20,7 @@ class Set(PartitionSpecificProxy): """Concurrent, distributed implementation of Set""" - + def add(self, item): """Adds the specified item if it is not exists in this set. @@ -53,8 +54,8 @@ def add_all(self, items): return self._invoke(request, set_add_all_codec.decode_response) def add_listener(self, include_value=False, item_added_func=None, item_removed_func=None): - """Adds an item listener for this container. - + """Adds an item listener for this container. + Listener will be notified for all container add/remove events. Args: @@ -79,13 +80,16 @@ def handle_event_item(item, uuid, event_type): if item_removed_func: item_removed_func(item_event) - return self._register_listener(request, lambda r: set_add_listener_codec.decode_response(r), - lambda reg_id: set_remove_listener_codec.encode_request(self.name, reg_id), - lambda m: set_add_listener_codec.handle(m, handle_event_item)) + return self._register_listener( + request, + lambda r: set_add_listener_codec.decode_response(r), + lambda reg_id: set_remove_listener_codec.encode_request(self.name, reg_id), + lambda m: set_add_listener_codec.handle(m, handle_event_item), + ) def clear(self): """Clears the set. Set will be empty with this call. - + Returns: hazelcast.future.Future[None]: """ @@ -113,7 +117,7 @@ def contains_all(self, items): items (list): The specified collection which includes the items to be searched. Returns: - hazelcast.future.Future[bool]: ``True`` if all of the items in the specified collection exist in this set, + hazelcast.future.Future[bool]: ``True`` if all of the items in the specified collection exist in this set, ``False`` otherwise. """ check_not_none(items, "Value can't be None") @@ -127,19 +131,22 @@ def contains_all(self, items): def get_all(self): """Returns all of the items in the set. - + Returns: hazelcast.future.Future[list]: List of the items in this set. """ + def handler(message): - return ImmutableLazyDataList(set_get_all_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + set_get_all_codec.decode_response(message), self._to_object + ) request = set_get_all_codec.encode_request(self.name) return self._invoke(request, handler) def is_empty(self): """Determines whether this set is empty or not. - + Returns: hazelcast.future.Future[bool]: ``True`` if this set is empty, ``False`` otherwise. """ @@ -179,8 +186,8 @@ def remove_all(self, items): return self._invoke(request, set_compare_and_remove_all_codec.decode_response) def remove_listener(self, registration_id): - """Removes the specified item listener. - + """Removes the specified item listener. + Returns silently if the specified listener was not added before. Args: @@ -192,8 +199,8 @@ def remove_listener(self, registration_id): return self._deregister_listener(registration_id) def retain_all(self, items): - """Removes the items which are not contained in the specified collection. - + """Removes the items which are not contained in the specified collection. 
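# Illustrative aside — a hedged sketch of the Set proxy calls reformatted in this file,
# again assuming the connected `client`; the listener signature mirrors add_listener above.
distributed_set = client.get_set("dedup")

def on_item_added(item_event):
    print("item added:", item_event.item)

reg_id = distributed_set.add_listener(include_value=True, item_added_func=on_item_added).result()
distributed_set.add_all(["a", "b", "c"]).result()
has_both = distributed_set.contains_all(["a", "b"]).result()   # True
distributed_set.remove_listener(reg_id).result()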
+ In other words, only the items that are contained in the specified collection will be retained. Args: @@ -213,7 +220,7 @@ def retain_all(self, items): def size(self): """Returns the number of items in this set. - + Returns: hazelcast.future.Future[int]: Number of items in this set. """ diff --git a/hazelcast/proxy/topic.py b/hazelcast/proxy/topic.py index f683ae01d4..eb7f28f3b0 100644 --- a/hazelcast/proxy/topic.py +++ b/hazelcast/proxy/topic.py @@ -1,24 +1,26 @@ -from hazelcast.protocol.codec import \ - topic_add_message_listener_codec, \ - topic_publish_codec, \ - topic_remove_message_listener_codec +from hazelcast.protocol.codec import ( + topic_add_message_listener_codec, + topic_publish_codec, + topic_remove_message_listener_codec, +) from hazelcast.proxy.base import PartitionSpecificProxy, TopicMessage class Topic(PartitionSpecificProxy): - """Hazelcast provides distribution mechanism for publishing messages that are delivered to multiple subscribers, - which is also known as a publish/subscribe (pub/sub) messaging model. - - Publish and subscriptions are cluster-wide. When a member subscribes for a topic, - it is actually registering for messages published by any member in the cluster, + """Hazelcast provides distribution mechanism for publishing messages that are delivered to multiple subscribers, + which is also known as a publish/subscribe (pub/sub) messaging model. + + Publish and subscriptions are cluster-wide. When a member subscribes for a topic, + it is actually registering for messages published by any member in the cluster, including the new members joined after you added the listener. - + Messages are ordered, meaning that listeners(subscribers) will process the messages in the order they are actually published. """ + def add_listener(self, on_message=None): - """Subscribes to this topic. - + """Subscribes to this topic. + When someone publishes a message on this topic, ``on_message`` function is called if provided. Args: @@ -32,13 +34,17 @@ def add_listener(self, on_message=None): def handle(item, publish_time, uuid): member = self._context.cluster_service.get_member(uuid) - item_event = TopicMessage(self.name, item, publish_time / 1000.0, member, self._to_object) + item_event = TopicMessage( + self.name, item, publish_time / 1000.0, member, self._to_object + ) on_message(item_event) return self._register_listener( - request, lambda r: codec.decode_response(r), + request, + lambda r: codec.decode_response(r), lambda reg_id: topic_remove_message_listener_codec.encode_request(self.name, reg_id), - lambda m: codec.handle(m, handle)) + lambda m: codec.handle(m, handle), + ) def publish(self, message): """Publishes the message to all subscribers of this topic diff --git a/hazelcast/proxy/transactional_list.py b/hazelcast/proxy/transactional_list.py index 96b3bfd10f..5798cde1cc 100644 --- a/hazelcast/proxy/transactional_list.py +++ b/hazelcast/proxy/transactional_list.py @@ -1,5 +1,8 @@ -from hazelcast.protocol.codec import transactional_list_add_codec, transactional_list_remove_codec, \ - transactional_list_size_codec +from hazelcast.protocol.codec import ( + transactional_list_add_codec, + transactional_list_remove_codec, + transactional_list_size_codec, +) from hazelcast.proxy.base import TransactionalProxy from hazelcast.util import check_not_none, thread_id @@ -8,6 +11,7 @@ class TransactionalList(TransactionalProxy): """ Transactional implementation of :class:`~hazelcast.proxy.list.List`. 
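# Illustrative aside — a hedged sketch of the publish/subscribe flow the Topic docstring
# above describes; `client` is assumed as before, and handlers receive TopicMessage objects.
topic = client.get_topic("announcements")

def on_message(topic_message):
    # a TopicMessage carries the payload, the publish time, and the publishing member
    print("received:", topic_message.message)

topic.add_listener(on_message=on_message).result()
topic.publish("cluster maintenance at 02:00 UTC").result()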
""" + def add(self, item): """Transactional implementation of :func:`List.add(item) ` @@ -19,7 +23,9 @@ def add(self, item): """ check_not_none(item, "item can't be none") item_data = self._to_data(item) - request = transactional_list_add_codec.encode_request(self.name, self.transaction.id, thread_id(), item_data) + request = transactional_list_add_codec.encode_request( + self.name, self.transaction.id, thread_id(), item_data + ) return self._invoke(request, transactional_list_add_codec.decode_response) def remove(self, item): @@ -33,14 +39,18 @@ def remove(self, item): """ check_not_none(item, "item can't be none") item_data = self._to_data(item) - request = transactional_list_remove_codec.encode_request(self.name, self.transaction.id, thread_id(), item_data) + request = transactional_list_remove_codec.encode_request( + self.name, self.transaction.id, thread_id(), item_data + ) return self._invoke(request, transactional_list_remove_codec.decode_response) def size(self): """Transactional implementation of :func:`List.size() ` - + Returns: hazelcast.future.Future[int]: The size of the list. """ - request = transactional_list_size_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_list_size_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, transactional_list_size_codec.decode_response) diff --git a/hazelcast/proxy/transactional_map.py b/hazelcast/proxy/transactional_map.py index 2ee86efa72..8d2911faf2 100644 --- a/hazelcast/proxy/transactional_map.py +++ b/hazelcast/proxy/transactional_map.py @@ -1,9 +1,22 @@ -from hazelcast.protocol.codec import transactional_map_contains_key_codec, transactional_map_delete_codec, \ - transactional_map_get_codec, transactional_map_get_for_update_codec, transactional_map_is_empty_codec, \ - transactional_map_key_set_codec, transactional_map_key_set_with_predicate_codec, transactional_map_put_codec, \ - transactional_map_put_if_absent_codec, transactional_map_remove_codec, transactional_map_remove_if_same_codec, \ - transactional_map_replace_codec, transactional_map_replace_if_same_codec, transactional_map_set_codec, \ - transactional_map_size_codec, transactional_map_values_codec, transactional_map_values_with_predicate_codec +from hazelcast.protocol.codec import ( + transactional_map_contains_key_codec, + transactional_map_delete_codec, + transactional_map_get_codec, + transactional_map_get_for_update_codec, + transactional_map_is_empty_codec, + transactional_map_key_set_codec, + transactional_map_key_set_with_predicate_codec, + transactional_map_put_codec, + transactional_map_put_if_absent_codec, + transactional_map_remove_codec, + transactional_map_remove_if_same_codec, + transactional_map_replace_codec, + transactional_map_replace_if_same_codec, + transactional_map_set_codec, + transactional_map_size_codec, + transactional_map_values_codec, + transactional_map_values_with_predicate_codec, +) from hazelcast.proxy.base import TransactionalProxy from hazelcast.util import check_not_none, to_millis, thread_id, ImmutableLazyDataList @@ -18,13 +31,14 @@ def contains_key(self, key): key: The specified key. Returns: - hazelcast.future.Future[bool]: ``True`` if this map contains an entry for the specified key, + hazelcast.future.Future[bool]: ``True`` if this map contains an entry for the specified key, ``False`` otherwise. 
""" check_not_none(key, "key can't be none") key_data = self._to_data(key) - request = transactional_map_contains_key_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data) + request = transactional_map_contains_key_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data + ) return self._invoke(request, transactional_map_contains_key_codec.decode_response) def get(self, key): @@ -42,12 +56,14 @@ def handler(message): return self._to_object(transactional_map_get_codec.decode_response(message)) key_data = self._to_data(key) - request = transactional_map_get_codec.encode_request(self.name, self.transaction.id, thread_id(), key_data) + request = transactional_map_get_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data + ) return self._invoke(request, handler) def get_for_update(self, key): - """Locks the key and then gets and returns the value to which the specified key is mapped. - + """Locks the key and then gets and returns the value to which the specified key is mapped. + Lock will be released at the end of the transaction (either commit or rollback). Args: @@ -55,7 +71,7 @@ def get_for_update(self, key): Returns: hazelcast.future.Future[any]: The value for the specified key. - + See Also: :func:`Map.get(key) ` """ @@ -65,8 +81,9 @@ def handler(message): return self._to_object(transactional_map_get_for_update_codec.decode_response(message)) key_data = self._to_data(key) - request = transactional_map_get_for_update_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data) + request = transactional_map_get_for_update_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data + ) return self._invoke(request, handler) def size(self): @@ -75,7 +92,9 @@ def size(self): Returns: hazelcast.future.Future[int]: Number of entries in this map. """ - request = transactional_map_size_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_map_size_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, transactional_map_size_codec.decode_response) def is_empty(self): @@ -84,12 +103,14 @@ def is_empty(self): Returns: hazelcast.future.Future[bool]: ``True`` if this map contains no key-value mappings, ``False`` otherwise. """ - request = transactional_map_is_empty_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_map_is_empty_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, transactional_map_is_empty_codec.decode_response) def put(self, key, value, ttl=None): """Transactional implementation of :func:`Map.put(key, value, ttl) ` - + The object to be put will be accessible only in the current transaction context till the transaction is committed. @@ -99,7 +120,7 @@ def put(self, key, value, ttl=None): ttl (int): Maximum time in seconds for this entry to stay. Returns: - hazelcast.future.Future[any]: Previous value associated with key or ``None`` + hazelcast.future.Future[any]: Previous value associated with key or ``None`` if there was no mapping for key. 
""" check_not_none(key, "key can't be none") @@ -110,13 +131,14 @@ def handler(message): key_data = self._to_data(key) value_data = self._to_data(value) - request = transactional_map_put_codec.encode_request(self.name, self.transaction.id, thread_id(), key_data, - value_data, to_millis(ttl)) + request = transactional_map_put_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data, value_data, to_millis(ttl) + ) return self._invoke(request, handler) def put_if_absent(self, key, value): """Transactional implementation of :func:`Map.put_if_absent(key, value) ` - + The object to be put will be accessible only in the current transaction context till the transaction is committed. @@ -135,13 +157,14 @@ def handler(message): key_data = self._to_data(key) value_data = self._to_data(value) - request = transactional_map_put_if_absent_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data, value_data) + request = transactional_map_put_if_absent_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data, value_data + ) return self._invoke(request, handler) def set(self, key, value): """Transactional implementation of :func:`Map.set(key, value) ` - + The object to be set will be accessible only in the current transaction context till the transaction is committed. @@ -157,13 +180,14 @@ def set(self, key, value): key_data = self._to_data(key) value_data = self._to_data(value) - request = transactional_map_set_codec.encode_request(self.name, self.transaction.id, - thread_id(), key_data, value_data) + request = transactional_map_set_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data, value_data + ) return self._invoke(request) def replace(self, key, value): """Transactional implementation of :func:`Map.replace(key, value) ` - + The object to be replaced will be accessible only in the current transaction context till the transaction is committed. @@ -172,7 +196,7 @@ def replace(self, key, value): value: The value to replace the previous value. Returns: - hazelcast.future.Future[any]: Previous value associated with key, or ``None`` + hazelcast.future.Future[any]: Previous value associated with key, or ``None`` if there was no mapping for key. """ check_not_none(key, "key can't be none") @@ -183,14 +207,15 @@ def handler(message): key_data = self._to_data(key) value_data = self._to_data(value) - request = transactional_map_replace_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data, value_data) + request = transactional_map_replace_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data, value_data + ) return self._invoke(request, handler) def replace_if_same(self, key, old_value, new_value): """Transactional implementation of :func:`Map.replace_if_same(key, old_value, new_value) ` - + The object to be replaced will be accessible only in the current transaction context till the transaction is committed. 
@@ -209,13 +234,14 @@ def replace_if_same(self, key, old_value, new_value): key_data = self._to_data(key) old_value_data = self._to_data(old_value) new_value_data = self._to_data(new_value) - request = transactional_map_replace_if_same_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data, old_value_data, new_value_data) + request = transactional_map_replace_if_same_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data, old_value_data, new_value_data + ) return self._invoke(request, transactional_map_replace_if_same_codec.decode_response) def remove(self, key): """Transactional implementation of :func:`Map.remove(key) ` - + The object to be removed will be removed from only the current transaction context until the transaction is committed. @@ -223,7 +249,7 @@ def remove(self, key): key: Key of the mapping to be deleted. Returns: - hazelcast.future.Future[any]: The previous value associated with key, or ``None`` + hazelcast.future.Future[any]: The previous value associated with key, or ``None`` if there was no mapping for key. """ check_not_none(key, "key can't be none") @@ -232,13 +258,15 @@ def handler(message): return self._to_object(transactional_map_remove_codec.decode_response(message)) key_data = self._to_data(key) - request = transactional_map_remove_codec.encode_request(self.name, self.transaction.id, thread_id(), key_data) + request = transactional_map_remove_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data + ) return self._invoke(request, handler) def remove_if_same(self, key, value): """Transactional implementation of :func:`Map.remove_if_same(key, value) ` - + The object to be removed will be removed from only the current transaction context until the transaction is committed. @@ -254,13 +282,14 @@ def remove_if_same(self, key, value): key_data = self._to_data(key) value_data = self._to_data(value) - request = transactional_map_remove_if_same_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data, value_data) + request = transactional_map_remove_if_same_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data, value_data + ) return self._invoke(request, transactional_map_remove_if_same_codec.decode_response) def delete(self, key): """Transactional implementation of :func:`Map.delete(key) ` - + The object to be deleted will be removed from only the current transaction context until the transaction is committed. @@ -273,7 +302,9 @@ def delete(self, key): check_not_none(key, "key can't be none") key_data = self._to_data(key) - request = transactional_map_delete_codec.encode_request(self.name, self.transaction.id, thread_id(), key_data) + request = transactional_map_delete_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data + ) return self._invoke(request) def key_set(self, predicate=None): @@ -286,18 +317,27 @@ def key_set(self, predicate=None): hazelcast.future.Future[list]: A list of the clone of the keys. 
""" if predicate: + def handler(message): - return ImmutableLazyDataList(transactional_map_key_set_with_predicate_codec.decode_response(message), - self._to_object) + return ImmutableLazyDataList( + transactional_map_key_set_with_predicate_codec.decode_response(message), + self._to_object, + ) predicate_data = self._to_data(predicate) - request = transactional_map_key_set_with_predicate_codec.encode_request(self.name, self.transaction.id, - thread_id(), predicate_data) + request = transactional_map_key_set_with_predicate_codec.encode_request( + self.name, self.transaction.id, thread_id(), predicate_data + ) else: + def handler(message): - return ImmutableLazyDataList(transactional_map_key_set_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + transactional_map_key_set_codec.decode_response(message), self._to_object + ) - request = transactional_map_key_set_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_map_key_set_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, handler) @@ -311,17 +351,26 @@ def values(self, predicate=None): hazelcast.future.Future[list]: A list of clone of the values contained in this map. """ if predicate: + def handler(message): - return ImmutableLazyDataList(transactional_map_values_with_predicate_codec.decode_response(message), - self._to_object) + return ImmutableLazyDataList( + transactional_map_values_with_predicate_codec.decode_response(message), + self._to_object, + ) predicate_data = self._to_data(predicate) - request = transactional_map_values_with_predicate_codec.encode_request(self.name, self.transaction.id, - thread_id(), predicate_data) + request = transactional_map_values_with_predicate_codec.encode_request( + self.name, self.transaction.id, thread_id(), predicate_data + ) else: + def handler(message): - return ImmutableLazyDataList(transactional_map_values_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + transactional_map_values_codec.decode_response(message), self._to_object + ) - request = transactional_map_values_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_map_values_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, handler) diff --git a/hazelcast/proxy/transactional_multi_map.py b/hazelcast/proxy/transactional_multi_map.py index 193ccd1cbd..4d6e4dc653 100644 --- a/hazelcast/proxy/transactional_multi_map.py +++ b/hazelcast/proxy/transactional_multi_map.py @@ -1,6 +1,11 @@ -from hazelcast.protocol.codec import transactional_multi_map_get_codec, transactional_multi_map_put_codec, \ - transactional_multi_map_remove_codec, transactional_multi_map_remove_entry_codec, \ - transactional_multi_map_size_codec, transactional_multi_map_value_count_codec +from hazelcast.protocol.codec import ( + transactional_multi_map_get_codec, + transactional_multi_map_put_codec, + transactional_multi_map_remove_codec, + transactional_multi_map_remove_entry_codec, + transactional_multi_map_size_codec, + transactional_multi_map_value_count_codec, +) from hazelcast.proxy.base import TransactionalProxy from hazelcast.util import check_not_none, thread_id, ImmutableLazyDataList @@ -24,8 +29,9 @@ def put(self, key, value): key_data = self._to_data(key) value_data = self._to_data(value) - request = transactional_multi_map_put_codec.encode_request(self.name, self.transaction.id, - thread_id(), key_data, value_data) + request = 
transactional_multi_map_put_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data, value_data + ) return self._invoke(request, transactional_multi_map_put_codec.decode_response) def get(self, key): @@ -40,11 +46,14 @@ def get(self, key): check_not_none(key, "key can't be none") def handler(message): - return ImmutableLazyDataList(transactional_multi_map_get_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + transactional_multi_map_get_codec.decode_response(message), self._to_object + ) key_data = self._to_data(key) - request = transactional_multi_map_get_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data) + request = transactional_multi_map_get_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data + ) return self._invoke(request, handler) def remove(self, key, value): @@ -63,8 +72,9 @@ def remove(self, key, value): key_data = self._to_data(key) value_data = self._to_data(value) - request = transactional_multi_map_remove_entry_codec.encode_request(self.name, self.transaction.id, - thread_id(), key_data, value_data) + request = transactional_multi_map_remove_entry_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data, value_data + ) return self._invoke(request, transactional_multi_map_remove_entry_codec.decode_response) def remove_all(self, key): @@ -80,11 +90,14 @@ def remove_all(self, key): check_not_none(key, "key can't be none") def handler(message): - return ImmutableLazyDataList(transactional_multi_map_remove_codec.decode_response(message), self._to_object) + return ImmutableLazyDataList( + transactional_multi_map_remove_codec.decode_response(message), self._to_object + ) key_data = self._to_data(key) - request = transactional_multi_map_remove_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data) + request = transactional_multi_map_remove_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data + ) return self._invoke(request, handler) def value_count(self, key): @@ -100,15 +113,18 @@ def value_count(self, key): check_not_none(key, "key can't be none") key_data = self._to_data(key) - request = transactional_multi_map_value_count_codec.encode_request(self.name, self.transaction.id, thread_id(), - key_data) + request = transactional_multi_map_value_count_codec.encode_request( + self.name, self.transaction.id, thread_id(), key_data + ) return self._invoke(request, transactional_multi_map_value_count_codec.decode_response) def size(self): """Transactional implementation of :func:`MultiMap.size() ` - + Returns: hazelcast.future.Future[int]: the number of key-value tuples in the multimap. 
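# Illustrative aside — a hedged sketch of the TransactionalMultiMap calls above, assuming an
# active `transaction` object as in the earlier transactional sketch (names are illustrative).
tx_multi_map = transaction.get_multi_map("tags")
tx_multi_map.put("article-7", "python").result()
tx_multi_map.put("article-7", "hazelcast").result()
tag_count = tx_multi_map.value_count("article-7").result()   # 2
all_tags = tx_multi_map.get("article-7").result()            # lazily deserialized list of values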
""" - request = transactional_multi_map_size_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_multi_map_size_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, transactional_multi_map_size_codec.decode_response) diff --git a/hazelcast/proxy/transactional_queue.py b/hazelcast/proxy/transactional_queue.py index dfedfd6731..3600b0db1f 100644 --- a/hazelcast/proxy/transactional_queue.py +++ b/hazelcast/proxy/transactional_queue.py @@ -1,11 +1,17 @@ -from hazelcast.protocol.codec import transactional_queue_offer_codec, transactional_queue_peek_codec, \ - transactional_queue_poll_codec, transactional_queue_size_codec, transactional_queue_take_codec +from hazelcast.protocol.codec import ( + transactional_queue_offer_codec, + transactional_queue_peek_codec, + transactional_queue_poll_codec, + transactional_queue_size_codec, + transactional_queue_take_codec, +) from hazelcast.proxy.base import TransactionalProxy from hazelcast.util import check_not_none, to_millis, thread_id class TransactionalQueue(TransactionalProxy): """Transactional implementation of :class:`~hazelcast.proxy.queue.Queue`.""" + def offer(self, item, timeout=0): """Transactional implementation of :func:`Queue.offer(item, timeout) ` @@ -19,20 +25,24 @@ def offer(self, item, timeout=0): check_not_none(item, "item can't be none") item_data = self._to_data(item) - request = transactional_queue_offer_codec.encode_request(self.name, self.transaction.id, thread_id(), - item_data, to_millis(timeout)) + request = transactional_queue_offer_codec.encode_request( + self.name, self.transaction.id, thread_id(), item_data, to_millis(timeout) + ) return self._invoke(request, transactional_queue_offer_codec.decode_response) def take(self): """Transactional implementation of :func:`Queue.take() ` - + Returns: hazelcast.future.Future[any]: The head of this queue. """ + def handler(message): return self._to_object(transactional_queue_take_codec.decode_response(message)) - request = transactional_queue_take_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_queue_take_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, handler) def poll(self, timeout=0): @@ -42,14 +52,16 @@ def poll(self, timeout=0): timeout (int): Maximum time in seconds to wait for addition. Returns: - hazelcast.future.Future[any]: The head of this queue, or ``None`` if this queue is empty + hazelcast.future.Future[any]: The head of this queue, or ``None`` if this queue is empty or specified timeout elapses before an item is added to the queue. """ + def handler(message): return self._to_object(transactional_queue_poll_codec.decode_response(message)) - request = transactional_queue_poll_codec.encode_request(self.name, self.transaction.id, thread_id(), - to_millis(timeout)) + request = transactional_queue_poll_codec.encode_request( + self.name, self.transaction.id, thread_id(), to_millis(timeout) + ) return self._invoke(request, handler) def peek(self, timeout=0): @@ -59,21 +71,25 @@ def peek(self, timeout=0): timeout (int): Maximum time in seconds to wait for addition. Returns: - hazelcast.future.Future[any]: The head of this queue, or ``None`` if this queue is empty + hazelcast.future.Future[any]: The head of this queue, or ``None`` if this queue is empty or specified timeout elapses before an item is added to the queue. 
""" + def handler(message): return self._to_object(transactional_queue_peek_codec.decode_response(message)) - request = transactional_queue_peek_codec.encode_request(self.name, self.transaction.id, thread_id(), - to_millis(timeout)) + request = transactional_queue_peek_codec.encode_request( + self.name, self.transaction.id, thread_id(), to_millis(timeout) + ) return self._invoke(request, handler) def size(self): """Transactional implementation of :func:`Queue.size() ` - + Returns: hazelcast.future.Future[int]: Size of the queue. """ - request = transactional_queue_size_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_queue_size_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, transactional_queue_size_codec.decode_response) diff --git a/hazelcast/proxy/transactional_set.py b/hazelcast/proxy/transactional_set.py index 49f71ebd07..62f0047605 100644 --- a/hazelcast/proxy/transactional_set.py +++ b/hazelcast/proxy/transactional_set.py @@ -1,12 +1,15 @@ -from hazelcast.protocol.codec import transactional_set_add_codec, transactional_set_remove_codec, \ - transactional_set_size_codec +from hazelcast.protocol.codec import ( + transactional_set_add_codec, + transactional_set_remove_codec, + transactional_set_size_codec, +) from hazelcast.proxy.base import TransactionalProxy from hazelcast.util import check_not_none, thread_id class TransactionalSet(TransactionalProxy): """Transactional implementation of :class:`~hazelcast.proxy.set.Set`.""" - + def add(self, item): """Transactional implementation of :func:`Set.add(item) ` @@ -18,7 +21,9 @@ def add(self, item): """ check_not_none(item, "item can't be none") item_data = self._to_data(item) - request = transactional_set_add_codec.encode_request(self.name, self.transaction.id, thread_id(), item_data) + request = transactional_set_add_codec.encode_request( + self.name, self.transaction.id, thread_id(), item_data + ) return self._invoke(request, transactional_set_add_codec.decode_response) def remove(self, item): @@ -32,14 +37,18 @@ def remove(self, item): """ check_not_none(item, "item can't be none") item_data = self._to_data(item) - request = transactional_set_remove_codec.encode_request(self.name, self.transaction.id, thread_id(), item_data) + request = transactional_set_remove_codec.encode_request( + self.name, self.transaction.id, thread_id(), item_data + ) return self._invoke(request, transactional_set_remove_codec.decode_response) def size(self): """Transactional implementation of :func:`Set.size() ` - + Returns: hazelcast.future.Future[int]: Size of the set. """ - request = transactional_set_size_codec.encode_request(self.name, self.transaction.id, thread_id()) + request = transactional_set_size_codec.encode_request( + self.name, self.transaction.id, thread_id() + ) return self._invoke(request, transactional_set_size_codec.decode_response) diff --git a/pyproject.toml b/pyproject.toml index 506eceba31..aff3d3c4c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,6 @@ exclude = ''' | benchmarks | hazelcast/protocol/codec - | hazelcast/proxy | hazelcast/serialization | tests )/