Give the tcp-pool API a protocol identifier, so we don't accidentally
recycle connections between protocols.
bsdphk committed Oct 5, 2017
1 parent 43a7282 commit f6b9c11a2ed6234a64792e5756215c76df3f6c55
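
The change in a nutshell: a tcp_pool is now keyed by the backend address and an opaque identifier pointer, so connections cached by one protocol driver are never recycled into another. The identifier is only compared as a pointer, never dereferenced. Below is a minimal caller-side sketch, not part of the commit, assuming the varnishd-internal headers; the second identifier constant and the example function are hypothetical.

/* Sketch only -- varnishd-internal headers, include paths assumed. */
#include "cache.h"		/* struct worker, assert macros */
#include "cache_tcp_pool.h"	/* VTP_Ref(), VTP_Rel() */

extern const char *vbe_proto_ident;			/* "HTTP Backend", added by this commit */
static const char *my_h2_proto_ident = "H2 Backend";	/* hypothetical second protocol */

static void
example_two_pools(const struct suckaddr *ip4, const struct suckaddr *ip6)
{
	struct tcp_pool *tp_http1, *tp_h2;

	/* Same address, different id: the lookup loop in VTP_Ref() skips
	 * pools whose id differs, so two distinct pools come back and a
	 * cached HTTP/1 connection can never reach the other driver. */
	tp_http1 = VTP_Ref(ip4, ip6, vbe_proto_ident);
	tp_h2 = VTP_Ref(ip4, ip6, my_h2_proto_ident);
	assert(tp_http1 != tp_h2);

	VTP_Rel(&tp_h2);
	VTP_Rel(&tp_http1);
}
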
@@ -163,14 +163,14 @@ vbe_dir_finish(const struct director *d, struct worker *wrk,
if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
VSLb(bo->vsl, SLT_BackendClose, "%d %s", vtp->fd,
bp->display_name);
- VTP_Close(bp->tcp_pool, &vtp);
+ VTP_Close(&vtp);
Lck_Lock(&bp->mtx);
} else {
VSLb(bo->vsl, SLT_BackendReuse, "%d %s", vtp->fd,
bp->display_name);
Lck_Lock(&bp->mtx);
VSC_C_main->backend_recycle++;
- VTP_Recycle(wrk, bp->tcp_pool, &vtp);
+ VTP_Recycle(wrk, &vtp);
}
assert(bp->n_conn > 0);
bp->n_conn--;
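
On the caller side the pool argument disappears from both branches above: VTP_Close() and VTP_Recycle() now recover the pool from the connection itself (vtp->tcp_pool, see the cache_tcp_pool.c hunks further down). A hedged sketch of the new call shapes only; the function name and the reuse flag are illustrative, and the real decision is the doclose/proxy_header test shown above.

/* Sketch, not the committed vbe_dir_finish(): only the new call shapes,
 * assuming the same internal headers as the first sketch above. */
static void
example_finish(struct worker *wrk, struct vtp **vtpp, int reuse)
{
	if (!reuse)
		VTP_Close(vtpp);	/* pool found via (*vtpp)->tcp_pool */
	else
		VTP_Recycle(wrk, vtpp);	/* ditto; connection parked for reuse */
	AZ(*vtpp);			/* both calls NULL the caller's pointer */
}
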
@@ -50,6 +50,8 @@
#include "VSC_vbe.h"
+ const char *vbe_proto_ident = "HTTP Backend";
static VTAILQ_HEAD(, backend) backends = VTAILQ_HEAD_INITIALIZER(backends);
static VTAILQ_HEAD(, backend) cool_backends =
VTAILQ_HEAD_INITIALIZER(cool_backends);
@@ -115,7 +117,8 @@ VRT_new_backend(VRT_CTX, const struct vrt_backend *vrt)
Lck_Lock(&backends_mtx);
VTAILQ_INSERT_TAIL(&backends, b, list);
VSC_C_main->n_backend++;
- b->tcp_pool = VTP_Ref(vrt->ipv4_suckaddr, vrt->ipv6_suckaddr);
+ b->tcp_pool = VTP_Ref(vrt->ipv4_suckaddr, vrt->ipv6_suckaddr,
+     vbe_proto_ident);
Lck_Unlock(&backends_mtx);
VBE_fill_director(b);
@@ -48,7 +48,7 @@ struct tcp_pool {
unsigned magic;
#define TCP_POOL_MAGIC 0x28b0e42a
- char *name;
+ const void *id;
struct suckaddr *ip4;
struct suckaddr *ip6;
@@ -63,7 +63,6 @@ struct tcp_pool {
int n_kill;
int n_used;
};
static struct lock tcp_pools_mtx;
@@ -119,14 +118,16 @@ tcp_handle(struct waited *w, enum wait_event ev, double now)
*/
struct tcp_pool *
- VTP_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6)
+ VTP_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6, const void *id)
{
struct tcp_pool *tp;
assert(ip4 != NULL || ip6 != NULL);
Lck_Lock(&tcp_pools_mtx);
VTAILQ_FOREACH(tp, &tcp_pools, list) {
assert(tp->refcnt > 0);
+ if (tp->id != id)
+     continue;
if (ip4 == NULL) {
if (tp->ip4 != NULL)
continue;
@@ -158,6 +159,7 @@ VTP_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6)
if (ip6 != NULL)
tp->ip6 = VSA_Clone(ip6);
tp->refcnt = 1;
+ tp->id = id;
Lck_New(&tp->mtx, lck_tcp_pool);
VTAILQ_INIT(&tp->connlist);
VTAILQ_INIT(&tp->killlist);
@@ -206,7 +208,6 @@ VTP_Rel(struct tcp_pool **tpp)
VTAILQ_REMOVE(&tcp_pools, tp, list);
Lck_Unlock(&tcp_pools_mtx);
- free(tp->name);
free(tp->ip4);
free(tp->ip6);
Lck_Lock(&tp->mtx);
@@ -254,7 +255,9 @@ VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
}
*sa = tp->ip4;
s = VTCP_connect(tp->ip4, msec);
- if (s < 0 && !cache_param->prefer_ipv6) {
+ if (s >= 0)
+     return (s);
+ if (!cache_param->prefer_ipv6) {
*sa = tp->ip6;
s = VTCP_connect(tp->ip6, msec);
}
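
The VTP_Open() hunk also straightens out the family fallback: a successful IPv4 connect now returns at once, and IPv6 is attempted afterwards only when prefer_ipv6 is off (when it is on, IPv6 was presumably tried before this excerpt). A sketch of a caller that wants to know which address actually got used, via the sa out-parameter; the timeout value and the logging step are illustrative, assuming the same internal headers plus vtcp.h.

/* Sketch: open a connection from a pool and report the endpoint used. */
static int
example_open(const struct tcp_pool *tp)
{
	const struct suckaddr *sa;
	char abuf[VTCP_ADDRBUFSIZE];
	char pbuf[VTCP_PORTBUFSIZE];
	int fd;

	fd = VTP_Open(tp, 3.5, &sa);	/* sa is set to the address attempted */
	if (fd < 0)
		return (fd);		/* both families failed (or were absent) */
	VTCP_name(sa, abuf, sizeof abuf, pbuf, sizeof pbuf);
	/* ... log abuf:pbuf, hand fd to the protocol driver ... */
	return (fd);
}
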
@@ -266,16 +269,18 @@ VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa)
*/
void
- VTP_Recycle(const struct worker *wrk, struct tcp_pool *tp, struct vtp **vtpp)
+ VTP_Recycle(const struct worker *wrk, struct vtp **vtpp)
{
struct vtp *vtp;
+ struct tcp_pool *tp;
int i = 0;
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
- CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
vtp = *vtpp;
*vtpp = NULL;
CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
+ tp = vtp->tcp_pool;
+ CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
assert(vtp->state == VTP_STATE_USED);
assert(vtp->fd > 0);
@@ -327,14 +332,16 @@ VTP_Recycle(const struct worker *wrk, struct tcp_pool *tp, struct vtp **vtpp)
*/
void
- VTP_Close(struct tcp_pool *tp, struct vtp **vtpp)
+ VTP_Close(struct vtp **vtpp)
{
struct vtp *vtp;
+ struct tcp_pool *tp;
- CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
vtp = *vtpp;
*vtpp = NULL;
CHECK_OBJ_NOTNULL(vtp, VTP_MAGIC);
+ tp = vtp->tcp_pool;
+ CHECK_OBJ_NOTNULL(tp, TCP_POOL_MAGIC);
assert(vtp->state == VTP_STATE_USED);
assert(vtp->fd > 0);
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * TCP connection pools
+ * Outgoing TCP connection pools
*
*/
@@ -53,12 +53,45 @@ struct vtp {
* Prototypes
*/
/* cache_tcp_pool.c */
- struct tcp_pool *VTP_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6);
+ struct tcp_pool *VTP_Ref(const struct suckaddr *ip4, const struct suckaddr *ip6, const void *id);
+ /*
+  * Get a reference to a TCP pool. Either ip4 or ip6 arg must be non-NULL.
+  * If recycling is to be used, the id pointer distinguishes the pool per protocol.
+  */
void VTP_AddRef(struct tcp_pool *);
- void VTP_Rel(struct tcp_pool **tpp);
- int VTP_Open(const struct tcp_pool *tp, double tmo, const struct suckaddr **sa);
- void VTP_Recycle(const struct worker *, struct tcp_pool *, struct vtp **);
- void VTP_Close(struct tcp_pool *tp, struct vtp **);
+ /*
+  * Get another reference to an already referenced TCP pool.
+  */
+ void VTP_Rel(struct tcp_pool **);
+ /*
+  * Release reference to a TCP pool. When last reference is released
+  * the pool is destroyed and all cached connections closed.
+  */
+ int VTP_Open(const struct tcp_pool *, double tmo, const struct suckaddr **);
+ /*
+  * Open a new connection and return the address used.
+  */
+ void VTP_Close(struct vtp **);
+ /*
+  * Close a connection.
+  */
+ void VTP_Recycle(const struct worker *, struct vtp **);
+ /*
+  * Recycle an open connection.
+  */
struct vtp *VTP_Get(struct tcp_pool *, double tmo, struct worker *);
+ /*
+  * Get a (possibly) recycled connection.
+  */
void VTP_Wait(struct worker *, struct vtp *);
+ /*
+  * If the connection was recycled (state != VTP_STATE_USED) call this
+  * function before attempting to receive on the connection.
+  */
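
Read together, the commented prototypes describe the full lifecycle a protocol driver walks through. A hedged end-to-end sketch under the same assumptions as the earlier sketches; the identifier constant, function name and timeout are hypothetical, and error handling is reduced to the minimum.

/* Lifecycle sketch for a hypothetical protocol driver. */
static const char *myproto_ident = "MYPROTO Backend";

static void
myproto_roundtrip(struct worker *wrk, const struct suckaddr *ip4,
    const struct suckaddr *ip6)
{
	struct tcp_pool *tp;
	struct vtp *vtp;
	int keep = 1;				/* placeholder for a real "can we reuse?" test */

	tp = VTP_Ref(ip4, ip6, myproto_ident);	/* per-address, per-protocol pool */

	vtp = VTP_Get(tp, 3.5, wrk);		/* recycled connection if one is parked */
	if (vtp == NULL) {			/* assuming NULL signals failure to connect */
		VTP_Rel(&tp);
		return;
	}
	if (vtp->state != VTP_STATE_USED)
		VTP_Wait(wrk, vtp);		/* required before receiving on a recycled conn */

	/* ... speak the protocol on vtp->fd ... */

	if (keep)
		VTP_Recycle(wrk, &vtp);		/* park it; no pool argument anymore */
	else
		VTP_Close(&vtp);

	/* A real driver keeps its pool reference for the backend's lifetime;
	 * dropping the last reference destroys the pool and closes any
	 * connections cached in it, per VTP_Rel()'s contract above. */
	VTP_Rel(&tp);
}
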
