Skip to content
Permalink
Browse files

Speed up vsm_findseg

This speeds up vsm_findseg by storing, in the vsm_fantom, a direct
pointer to the struct vsm_seg it refers to.

To prevent stale vsm_fantoms in an application from causing segfaults, the
last serial number seen is also kept on the fantom. If this serial number
does not match, the slow path of iterating over all segs to find the
right one is taken, and the fantom is then updated so that subsequent
calls on the same fantom take the fast path.

With this patch the fantoms store two serial numbers and a direct pointer
to the seg. To fit this in struct vsm_fantom without breaking the ABI, the
unused chunk pointer value has been repurposed, giving us two uintptr_t
priv variables to work with. The direct segment pointer occupies one, and
the two serial numbers occupy one half each of the other. This limits each
serial to only 16 bits on 32-bit systems, but since only exact matches are
accepted, it is unlikely that a stale fantom would happen to hold exactly
the right value and so be treated as valid.
  • Loading branch information...
mbgrydeland committed Aug 30, 2019
1 parent e9436f6 commit 1e7b50b9fbd865aafc19e344f90679d9255f8d15
Showing with 57 additions and 16 deletions.
  1. +1 −1 include/vapi/vsm.h
  2. +56 −15 lib/libvarnishapi/vsm.c
@@ -44,7 +44,7 @@ struct vsm;

struct vsm_fantom {
uintptr_t priv; /* VSM private */
struct VSM_chunk *chunk;
uintptr_t priv2; /* VSM private */
void *b; /* first byte of payload */
void *e; /* first byte past payload */
char *class;
@@ -72,6 +72,17 @@ const struct vsm_valid VSM_valid[1] = {{"valid"}};

static vlu_f vsm_vlu_func;

#define VSM_PRIV_SHIFT \
(sizeof (uintptr_t) * 4)
#define VSM_PRIV_MASK \
(~((uintptr_t)UINTPTR_MAX << VSM_PRIV_SHIFT))
#define VSM_PRIV_LOW(u) \
((uintptr_t)(u) & VSM_PRIV_MASK)
#define VSM_PRIV_HIGH(u) \
(((uintptr_t)(u) >> VSM_PRIV_SHIFT) & VSM_PRIV_MASK)
#define VSM_PRIV_MERGE(low, high) \
(VSM_PRIV_LOW(low) | (VSM_PRIV_LOW(high) << VSM_PRIV_SHIFT))

/*--------------------------------------------------------------------*/

struct vsm_set;
@@ -518,7 +529,8 @@ vsm_vlu_plus(struct vsm *vd, struct vsm_set *vs, const char *line)
vg->av = av;
vg->set = vs;
vg->flags = VSM_FLAG_MARKSCAN;
vg->serial = ++vd->serial;
vg->serial = vd->serial;

VTAILQ_INSERT_TAIL(&vs->segs, vg, list);
if (ac == 4) {
vg->flags |= VSM_FLAG_CLUSTER;
@@ -576,6 +588,11 @@ vsm_vlu_func(void *priv, const char *line)
CHECK_OBJ_NOTNULL(vd, VSM_MAGIC);
AN(line);

/* Up the serial counter. This wraps at UINTPTR_MAX/2
* because thats the highest value we can store in struct
* vsm_fantom. */
vd->serial = VSM_PRIV_LOW(vd->serial + 1);

switch (line[0]) {
case '#':
i = vsm_vlu_hash(vd, vs, line);
@@ -602,6 +619,7 @@ vsm_readlines(struct vsm_set *vs)
int i;

do {
assert(vs->fd >= 0);
i = VLU_Fd(vs->vlu, vs->fd);
} while (!i);
assert(i == -2);
@@ -771,22 +789,44 @@ vsm_findseg(const struct vsm *vd, const struct vsm_fantom *vf)
struct vsm_seg *vg;
uintptr_t x;

x = vf->priv;
x = VSM_PRIV_HIGH(vf->priv);
if (x == vd->serial) {
vg = (struct vsm_seg *)vf->priv2;
if (!VALID_OBJ(vg, VSM_SEG_MAGIC) ||
vg->serial != VSM_PRIV_LOW(vf->priv))
WRONG("Corrupt fantom");
return (vg);
}

x = VSM_PRIV_LOW(vf->priv);
vs = vd->mgt;
VTAILQ_FOREACH(vg, &vs->segs, list)
if (vg->serial == x)
return (vg);
VTAILQ_FOREACH(vg, &vs->stale, list)
if (vg->serial == x)
return (vg);
break;
if (vg == NULL) {
VTAILQ_FOREACH(vg, &vs->stale, list)
if (vg->serial == x)
break;
}
vs = vd->child;
VTAILQ_FOREACH(vg, &vs->segs, list)
if (vg->serial == x)
return (vg);
VTAILQ_FOREACH(vg, &vs->stale, list)
if (vg->serial == x)
return (vg);
return (NULL);
if (vg == NULL) {
VTAILQ_FOREACH(vg, &vs->segs, list)
if (vg->serial == x)
break;
}
if (vg == NULL) {
VTAILQ_FOREACH(vg, &vs->stale, list)
if (vg->serial == x)
break;
}
if (vg == NULL)
return (NULL);

/* Update the fantom with the new priv so that lookups will be
* fast on the next call. Note that this casts away the const. */
((struct vsm_fantom *)TRUST_ME(vf))->priv =
VSM_PRIV_MERGE(vg->serial, vd->serial);
return (vg);
}

/*--------------------------------------------------------------------*/
@@ -831,7 +871,8 @@ VSM__itern(struct vsm *vd, struct vsm_fantom *vf)
}
}
memset(vf, 0, sizeof *vf);
vf->priv = vg->serial;
vf->priv = VSM_PRIV_MERGE(vg->serial, vd->serial);
vf->priv2 = (uintptr_t)vg;
vf->class = vg->av[4];
vf->ident = vg->av[5];
AN(vf->class);
@@ -854,7 +895,7 @@ VSM_Map(struct vsm *vd, struct vsm_fantom *vf)
if (vg == NULL)
return (vsm_diag(vd, "VSM_Map: bad fantom"));

assert(vg->serial == vf->priv);
assert(vg->serial == VSM_PRIV_LOW(vf->priv));
assert(vg->av[4] == vf->class);
assert(vg->av[5] == vf->ident);

0 comments on commit 1e7b50b

Please sign in to comment.
You can’t perform that action at this time.