diff --git a/bin/varnishd/cache/cache_vrt.c b/bin/varnishd/cache/cache_vrt.c
index e4c0261f87..b0f5619558 100644
--- a/bin/varnishd/cache/cache_vrt.c
+++ b/bin/varnishd/cache/cache_vrt.c
@@ -650,8 +650,7 @@ VRT_VSM_Cluster_Destroy(VRT_CTX, struct vsmw_cluster **vcp)
 
 	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
 	AN(vcp);
-	AN(*vcp);
-	*vcp = NULL;
+	VSMW_DestroyCluster(heritage.proc_vsmw, vcp);
 }
 
 /*--------------------------------------------------------------------
diff --git a/bin/varnishd/common/common_vsmw.c b/bin/varnishd/common/common_vsmw.c
index 8e55aebe25..beb6fbd3b5 100644
--- a/bin/varnishd/common/common_vsmw.c
+++ b/bin/varnishd/common/common_vsmw.c
@@ -72,6 +72,7 @@ struct vsmw_cluster {
 #define VSMW_CLUSTER_MAGIC	0x28b74c00
 
 	VTAILQ_ENTRY(vsmw_cluster)	list;
+	struct vsmwseg		*cseg;
 	char			*fn;
 	size_t			len;
 	void			*ptr;
@@ -182,9 +183,6 @@ vsmw_delseg(struct vsmw *vsmw, struct vsmwseg *seg, int fixidx)
 	CHECK_OBJ_NOTNULL(vsmw, VSMW_MAGIC);
 	CHECK_OBJ_NOTNULL(seg, VSMWSEG_MAGIC);
 
-	if (!--seg->cluster->refs)
-		VSMW_DestroyCluster(vsmw, &seg->cluster);
-
 	VTAILQ_REMOVE(&vsmw->segs, seg, list);
 	REPLACE(seg->class, NULL);
 	REPLACE(seg->id, NULL);
@@ -208,8 +206,8 @@ vsmw_delseg(struct vsmw *vsmw, struct vsmwseg *seg, int fixidx)
 
 /*--------------------------------------------------------------------*/
 
-struct vsmw_cluster *
-VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
+static struct vsmw_cluster *
+vsmw_newcluster(struct vsmw *vsmw, size_t len, const char *pfx)
 {
 	struct vsmw_cluster *vc;
 	int fd;
@@ -245,6 +243,26 @@ VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
 	return (vc);
 }
 
+struct vsmw_cluster *
+VSMW_NewCluster(struct vsmw *vsmw, size_t len, const char *pfx)
+{
+	struct vsmw_cluster *vc;
+	struct vsmwseg *seg;
+
+	vc = vsmw_newcluster(vsmw, len, pfx);
+
+	ALLOC_OBJ(seg, VSMWSEG_MAGIC);
+	AN(seg);
+	vc->cseg = seg;
+	seg->len = len;
+	seg->cluster = vc;
+	REPLACE(seg->class, "");
+	REPLACE(seg->id, "");
+	vsmw_addseg(vsmw, seg);
+
+	return (vc);
+}
+
 void
 VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vcp)
 {
@@ -255,10 +273,22 @@ VSMW_DestroyCluster(struct vsmw *vsmw, struct vsmw_cluster **vcp)
 	vc = *vcp;
 	*vcp = NULL;
 	CHECK_OBJ_NOTNULL(vc, VSMW_CLUSTER_MAGIC);
-	AZ(vc->refs);
+	if (vc->cseg != NULL) {
+		/*
+		 * Backends go on the cool list, so the VGC cluster is
+		 * destroyed before they are. Solve this by turning the
+		 * cluster into an anonymous cluster which dies with the
+		 * refcount on it.
+		 */
+		vsmw_delseg(vsmw, vc->cseg, 1);
+		vc->cseg = NULL;
+		if (vc->refs > 0)
+			return;
+	}
 
 	AZ(munmap(vc->ptr, vc->len));
+	AZ(vc->refs);
 
 	VTAILQ_REMOVE(&vsmw->clusters, vc, list);
 	if (unlinkat(vsmw->vdirfd, vc->fn, 0))
 		assert (errno == ENOENT);
@@ -289,7 +319,7 @@ VSMW_Allocv(struct vsmw *vsmw, struct vsmw_cluster *vc,
 	REPLACE(seg->id, VSB_data(vsmw->vsb));
 
 	if (vc == NULL)
-		vc = VSMW_NewCluster(vsmw, seg->len, class);
+		vc = vsmw_newcluster(vsmw, seg->len, class);
 	AN(vc);
 	vc->refs++;
 
@@ -334,6 +364,10 @@ VSMW_Free(struct vsmw *vsmw, void **pp)
 		if (seg->ptr == p)
 			break;
 	AN(seg);
+
+	if (!--seg->cluster->refs && seg->cluster->cseg == NULL)
+		VSMW_DestroyCluster(vsmw, &seg->cluster);
+
 	vsmw_delseg(vsmw, seg, 1);
 }
diff --git a/bin/varnishd/flint.lnt b/bin/varnishd/flint.lnt
index e31a0d58f8..923b7028e4 100644
--- a/bin/varnishd/flint.lnt
+++ b/bin/varnishd/flint.lnt
@@ -96,6 +96,7 @@
 
 +libh mgt_event.h
 
+-sem(vsmw_addseg, custodial(2))
 -sem(BAN_Free, custodial(1))
 -sem(EXP_Inject, custodial(1))
 -sem(HSH_Insert, custodial(3))
diff --git a/lib/libvarnishapi/vsm.c b/lib/libvarnishapi/vsm.c
index be15a14ec1..0766e539e6 100644
--- a/lib/libvarnishapi/vsm.c
+++ b/lib/libvarnishapi/vsm.c
@@ -418,9 +418,7 @@ vsm_refresh_set2(struct vsm *vd, struct vsm_set *vs, struct vsb *vsb)
 		return (retval|VSM_NUKE_ALL);
 
 	/*
-	 * Examine the ident line
-	 * XXX: for now ignore that one of the ID's is a pid which could
-	 * XXX: be kill(pid,0)'ed for more rapid abandonment detection.
+	 * First line is ident comment
 	 */
 	i = sscanf(VSB_data(vsb), "# %ju %ju\n%n", &id1, &id2, &ac);
 	if (i != 2) {
@@ -445,7 +443,7 @@ vsm_refresh_set2(struct vsm *vd, struct vsm_set *vs, struct vsb *vsb)
 		vg->markscan = 0;
 
 	/*
-	 * Efficient comparison walking the two lists side-by-side is ok because
+	 * Efficient comparison by walking the two lists side-by-side because
 	 * segment inserts always happen at the tail (VSMW_Allocv()). So, as
 	 * soon as vg is exhausted, we only insert.
 	 *
@@ -465,13 +463,17 @@ vsm_refresh_set2(struct vsm *vd, struct vsm_set *vs, struct vsb *vsb)
 		av = VAV_Parse(p, &ac, 0);
 		p = e + 1;
 
-		if (av[0] != NULL || ac < 5 || ac > 6) {
+		if (av[0] != NULL || ac < 4 || ac > 6) {
 			(void)(vsm_diag(vd,
 			    "vsm_refresh_set2: bad index (%d/%s)",
 			    ac, av[0]));
 			VAV_Free(av);
 			break;
 		}
 
+		if (ac == 4) {
+			VAV_Free(av);
+			continue;
+		}
 		if (vg == NULL) {
 			ALLOC_OBJ(vg2, VSM_SEG_MAGIC);
diff --git a/lib/libvcc/vcc_backend.c b/lib/libvcc/vcc_backend.c
index 43f9eea577..a5a433f327 100644
--- a/lib/libvcc/vcc_backend.c
+++ b/lib/libvcc/vcc_backend.c
@@ -506,5 +506,5 @@ vcc_Backend_Init(struct vcc *tl)
 	VSB_printf(ifp->ini, "\tvsc_cluster = VRT_VSM_Cluster_New(ctx,\n"
 	    "\t    ndirector * VRT_backend_vsm_need(ctx));\n");
 	VSB_printf(ifp->ini, "\tif (vsc_cluster == 0)\n\t\treturn(1);");
-	VSB_printf(ifp->fin, "\tVRT_VSM_Cluster_Destroy(ctx, &vsc_cluster);");
+	VSB_printf(ifp->fin, "\t\tVRT_VSM_Cluster_Destroy(ctx, &vsc_cluster);");
 }
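
The core of the patch is an ownership handoff: VSMW_NewCluster() now gives each named cluster a hidden segment of its own (cseg), VSMW_DestroyCluster() drops that segment and, if backend segments still hold references (e.g. on the cool list), leaves the cluster behind anonymously, and VSMW_Free() tears the anonymous cluster down when the last reference goes. The following is a minimal, self-contained C sketch of that lifecycle under simplified assumptions; the types and helpers (cluster, seg, cluster_destroy(), seg_free()) are illustrative stand-ins, not the real VSMW API.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct cluster {
	int	refs;		/* live segments in the cluster */
	int	has_cseg;	/* cluster's own "hidden" segment */
};

struct seg {
	struct cluster	*cluster;
};

static void
cluster_free(struct cluster *c)
{
	assert(c->refs == 0);	/* models AZ(vc->refs) */
	printf("cluster freed\n");
	free(c);
}

/*
 * Models VSMW_DestroyCluster(): drop the hidden segment; if segments
 * still hold references, the cluster lingers anonymously and dies with
 * the refcount instead of being freed here.
 */
static void
cluster_destroy(struct cluster **cp)
{
	struct cluster *c = *cp;

	*cp = NULL;
	if (c->has_cseg) {
		c->has_cseg = 0;
		if (c->refs > 0) {
			printf("cluster now anonymous, %d refs left\n",
			    c->refs);
			return;
		}
	}
	cluster_free(c);
}

/*
 * Models VSMW_Free(): the last segment out of an anonymous cluster
 * tears the cluster down.
 */
static void
seg_free(struct seg **sp)
{
	struct seg *s = *sp;
	struct cluster *c = s->cluster;

	*sp = NULL;
	free(s);
	if (--c->refs == 0 && !c->has_cseg)
		cluster_free(c);
}

int
main(void)
{
	struct cluster *c = calloc(1, sizeof *c);
	struct seg *s = calloc(1, sizeof *s);

	assert(c != NULL && s != NULL);
	c->has_cseg = 1;	/* as created by VSMW_NewCluster() */
	s->cluster = c;
	c->refs = 1;		/* one backend segment allocated */

	cluster_destroy(&c);	/* VCL discarded first: cluster lingers */
	seg_free(&s);		/* cooled backend freed: cluster follows */
	return (0);
}

Note that vsmw_newcluster() (the internal path used by VSMW_Allocv() for on-demand clusters) would correspond to creating the cluster with has_cseg left at zero, which is why VSMW_Free() only destroys clusters whose cseg is already gone.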