Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP

Loading…

Handle pool.size and pool.max_size values safely #11

Merged
merged 9 commits into from

2 participants

eli Benoit Chesneau
eli

Currently pool.size is not threadsafe. This may not be a problem, but add code to make it safe regardless, using a BoundedSemaphore. A BoundedSemaphore has identical semantics across the current backend modules, so it seemed a reasonable choice as the synchronization primitive.

In addition to more safely handling pool.size, add support for an upper limit on pool members based on the pool.max_size value. In the case where the max_size limit is reached and retries are exhausted, raise the connection pool exception.

Finally, provide a unified reap method to reduce duplication of member connection close semantics.

related to issue #10

eli

Hmm. Not quite right apparently. If the queue is drained and no connections are released, the while loop just spins, never attempting to fetch from the queue again, until max tries is hit.

Will work on a fix for that.

cactus added some commits
eli cactus refactor connection get loop to include pool drain
move the pool match attempt inside the looping construct, so that if a
'try' run fails, we will check the pool again on the chance that another
connection has been released back into it.
29db141
eli cactus add some jiggle/splay value to avoid excessive simultaneous expiry 038986d
eli cactus remove the first part of the or clause 94e8043
eli cactus fix for expensive while loop
connection reaper, when it ran and the queue was non-empty, would put
items into the same queue it was reading from. This effectively became
an expensive while loop.
b604066
eli

added a fix for #6

Benoit Chesneau benoitc merged commit b604066 into from
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Commits on Apr 30, 2012
  1. eli
  2. eli

    refactor out duplication

    cactus authored
  3. eli
  4. eli
  5. eli
  6. eli

    refactor connection get loop to include pool drain

    cactus authored
    move the pool match attempt inside the looping construct, so that if a
    'try' run fails, we will check the pool again on the chance that another
    connection has been released back into it.
Commits on May 1, 2012
  1. eli
  2. eli
  3. eli

    fix for expensive while loop

    cactus authored
    connection reaper, when it ran and the queue was non-empty, would put
    items into the same queue it was reading from. This effectively became
    an expensive while loop.
This page is out of date. Refresh to see the latest.
1  socketpool/backend_eventlet.py
View
@@ -13,6 +13,7 @@
sleep = eventlet.sleep
Socket = socket.socket
Select = select.select
+Semaphore = eventlet.semaphore.BoundedSemaphore
class PriorityQueue(queue.PriorityQueue):
2  socketpool/backend_gevent.py
View
@@ -7,10 +7,12 @@
from gevent import select
from gevent import socket
from gevent import queue
+from gevent import coros
from socketpool.pool import ConnectionPool
sleep = gevent.sleep
+Semaphore = gevent.coros.BoundedSemaphore
Socket = socket.socket
Select = select.select
1  socketpool/backend_thread.py
View
@@ -16,6 +16,7 @@
Select = select.select
Socket = socket.socket
sleep = time.sleep
+Semaphore = threading.BoundedSemaphore
class PriorityQueue(queue.PriorityQueue):
6 socketpool/conn.py
View
@@ -6,6 +6,7 @@
import select
import socket
import time
+import random
class Connector(object):
def matches(self, **match_options):
@@ -33,7 +34,10 @@ def __init__(self, host, port, backend_mod, pool=None):
self.port = port
self.backend_mod = backend_mod
self._connected = True
- self._life = time.time()
+ # use a 'jiggle' value to make sure there is some
+ # randomization to expiry, to avoid many conns expiring very
+ # closely together.
+ self._life = time.time() - random.randint(0, 10)
self._pool = pool
def __del__(self):
104 socketpool/pool.py
View
@@ -39,6 +39,9 @@ def __init__(self, factory,
self.options["backend_mod"] = self.backend_mod
self.options["pool"] = self
+ # bounded semaphore to make self.size 'safe'
+ self._sem = self.backend_mod.Semaphore(1)
+
self._reaper = None
if reap_connections:
self.start_reaper()
@@ -47,21 +50,32 @@ def too_old(self, conn):
return time.time() - conn.get_lifetime() > self.max_lifetime
def murder_connections(self):
- pool = self.pool
- if pool.qsize():
- for priority, candidate in pool:
+ current_pool_size = self.pool.qsize()
+ if current_pool_size > 0:
+ for priority, candidate in self.pool:
+ current_pool_size -= 1
if not self.too_old(candidate):
- pool.put((priority, candidate))
+ self.pool.put((priority, candidate))
+ else:
+ self._reap_connection(candidate)
+ if current_pool_size <= 0:
+ break
def start_reaper(self):
self._reaper = self.backend_mod.ConnectionReaper(self,
delay=self.max_lifetime)
self._reaper.ensure_started()
+ def _reap_connection(self, conn):
+ if conn.is_connected():
+ conn.invalidate()
+ with self._sem:
+ self.size -= 1
+
def release_all(self):
if self.pool.qsize():
for priority, conn in self.pool:
- conn.invalidate()
+ self._reap_connection(conn)
def release_connection(self, conn):
if self._reaper is not None:
@@ -71,53 +85,59 @@ def release_connection(self, conn):
if connected and not self.too_old(conn):
self.pool.put((conn.get_lifetime(), conn))
else:
- conn.invalidate()
+ self._reap_connection(conn)
def get(self, **options):
options.update(self.options)
- # first let's try to find a matching one
found = None
i = self.pool.qsize()
- if self.size >= self.max_size or self.pool.qsize():
- for priority, candidate in self.pool:
- i -= 1
- if self.too_old(candidate):
- # let's drop it
- continue
-
- matches = candidate.matches(**options)
- if not matches:
- # let's put it back
- self.pool.put((priority, candidate))
- else:
- if candidate.is_connected():
- found = candidate
- break
-
- if i <= 0:
- break
-
- # we got one.. we use it
- if found is not None:
- return found
-
-
- # we build a new one and send it back
tries = 0
last_error = None
while tries < self.retry_max:
- self.size += 1
- try:
- new_item = self.factory(**options)
- except Exception, e:
- self.size -= 1
- last_error = e
- else:
- # we should be connected now
- if new_item.is_connected():
- return new_item
+ # first let's try to find a matching one from pool
+ if self.pool.qsize():
+ for priority, candidate in self.pool:
+ i -= 1
+ if self.too_old(candidate):
+ # let's drop it
+ self._reap_connection(candidate)
+ continue
+
+ matches = candidate.matches(**options)
+ if not matches:
+ # let's put it back
+ self.pool.put((priority, candidate))
+ else:
+ if candidate.is_connected():
+ found = candidate
+ break
+ else:
+ # conn is dead for some reason.
+ # reap it.
+ self._reap_connection(candidate)
+
+ if i <= 0:
+ break
+
+ # we got one.. we use it
+ if found is not None:
+ return found
+
+ # didn't get one.
+ # see if we have room to make a new one
+ if self.size < self.max_size:
+ try:
+ new_item = self.factory(**options)
+ except Exception, e:
+ last_error = e
+ else:
+ # we should be connected now
+ if new_item.is_connected():
+ with self._sem:
+ self.size += 1
+ return new_item
tries += 1
self.backend_mod.sleep(self.retry_delay)
Something went wrong with that request. Please try again.