gc vcl argument of VCL_Task*

It is unclear to me what the original intention was, but at any rate,
the privs are not vcl-specific and the vcl argument was not used.

The motivation for this change is the follow-up commit
"cleanup implicit rollback for return(vcl(...))": for that commit, it
would be unclear which vcl to roll back with, but as it actually does
not matter, I think this change is warranted.
nigoroll committed Nov 13, 2019
1 parent 2d4c2a5 commit 78c65ef9cc50f5b449bed1ac040322556c0183c7
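After this change the task lifecycle functions take only the privs container; no vcl is needed on either side. As a rough, self-contained model of what the Enter/Leave pair does (illustrative only: the struct and helper names below are hypothetical, and the real implementation keeps its entries in a tree and finishes them via VRT_priv_fini()):

    /* Minimal model of task-scoped privs, for illustration only. */
    #include <stdlib.h>

    struct vrt_priv_model {
            void *priv;
            void (*fini)(void *);           /* per-priv destructor */
            struct vrt_priv_model *next;
    };

    struct vrt_privs_model {
            struct vrt_priv_model *head;    /* task-local priv entries */
    };

    /* Enter: start the task with an empty set of privs. */
    static void
    task_enter(struct vrt_privs_model *privs)
    {
            privs->head = NULL;
    }

    /* Leave: finish every priv and clear the whole set in one pass. */
    static void
    task_leave(struct vrt_privs_model *privs)
    {
            struct vrt_priv_model *vp, *vp1;

            for (vp = privs->head; vp != NULL; vp = vp1) {
                    vp1 = vp->next;
                    if (vp->fini != NULL)
                            vp->fini(vp->priv);
                    free(vp);
            }
            privs->head = NULL;
    }

Neither function needs to know which vcl the task runs under, which is exactly the point of the commit.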
@@ -186,7 +186,7 @@ ved_include(struct req *preq, const char *src, const char *host,
     req->transport_priv = ecx;

     CNT_Embark(wrk, req);
-    VCL_TaskEnter(req->vcl, req->privs);
+    VCL_TaskEnter(req->privs);

     while (1) {
         ecx->woken = 0;
@@ -105,8 +105,8 @@ void Bereq_Rollback(struct busyobj *bo)
     CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);

     vbf_cleanup(bo);
-    VCL_TaskLeave(bo->vcl, bo->privs);
-    VCL_TaskEnter(bo->vcl, bo->privs);
+    VCL_TaskLeave(bo->privs);
+    VCL_TaskEnter(bo->privs);
     HTTP_Clone(bo->bereq, bo->bereq0);
     WS_Reset(bo->bereq->ws, bo->ws_bo);
     WS_Reset(bo->ws, bo->ws_bo);
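Bereq_Rollback above shows the idiom the rest of the diff leans on: leaving and immediately re-entering the task finishes all task-local VMOD privs and opens a fresh, empty scope. In terms of the illustrative model sketched earlier (not Varnish's actual code):

    /* Rollback of task-local VMOD state: finish and drop the current
     * privs, then start over with an empty scope. */
    static void
    task_rollback(struct vrt_privs_model *privs)
    {
            task_leave(privs);
            task_enter(privs);
    }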
@@ -907,7 +907,7 @@ vbf_fetch_thread(struct worker *wrk, void *priv)
     }
 #endif

-    VCL_TaskEnter(bo->vcl, bo->privs);
+    VCL_TaskEnter(bo->privs);
     while (stp != F_STP_DONE) {
         CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
         assert(bo->fetch_objcore->boc->refcount >= 1);
@@ -928,7 +928,7 @@ vbf_fetch_thread(struct worker *wrk, void *priv)

     assert(bo->director_state == DIR_S_NULL);

-    VCL_TaskLeave(bo->vcl, bo->privs);
+    VCL_TaskLeave(bo->privs);
     http_Teardown(bo->bereq);
     http_Teardown(bo->beresp);

@@ -190,11 +190,11 @@ void
 Req_Rollback(struct req *req)
 {
     if (IS_TOPREQ(req))
-        VCL_TaskLeave(req->vcl, req->top->privs);
-    VCL_TaskLeave(req->vcl, req->privs);
-    VCL_TaskEnter(req->vcl, req->privs);
+        VCL_TaskLeave(req->top->privs);
+    VCL_TaskLeave(req->privs);
+    VCL_TaskEnter(req->privs);
     if (IS_TOPREQ(req))
-        VCL_TaskEnter(req->vcl, req->top->privs);
+        VCL_TaskEnter(req->top->privs);
     HTTP_Clone(req->http, req->http0);
     if (WS_Overflowed(req->ws))
         req->wrk->stats->ws_client_overflow++;
@@ -1105,11 +1105,11 @@ CNT_Request(struct req *req)
     wrk->vsl = NULL;
     if (nxt == REQ_FSM_DONE) {
         if (IS_TOPREQ(req)) {
-            VCL_TaskLeave(req->vcl, req->top->privs);
+            VCL_TaskLeave(req->top->privs);
             if (req->top->vcl0 != NULL)
                 VCL_Rel(&req->top->vcl0);
         }
-        VCL_TaskLeave(req->vcl, req->privs);
+        VCL_TaskLeave(req->privs);
         AN(req->vsl->wid);
         VRB_Free(req);
         req->wrk = NULL;
@@ -420,8 +420,8 @@ void VCL_Ref(struct vcl *);
 void VCL_Refresh(struct vcl **);
 void VCL_Recache(struct worker *, struct vcl **);
 void VCL_Rel(struct vcl **);
-void VCL_TaskEnter(const struct vcl *, struct vrt_privs *);
-void VCL_TaskLeave(const struct vcl *, struct vrt_privs *);
+void VCL_TaskEnter(struct vrt_privs *);
+void VCL_TaskLeave(struct vrt_privs *);
 const char *VCL_Return_Name(unsigned);
 const char *VCL_Method_Name(unsigned);
 void VCL_Bo2Ctx(struct vrt_ctx *, struct busyobj *);
@@ -176,9 +176,9 @@ vcl_send_event(VRT_CTX, enum vcl_event_e ev)
     if (ev == VCL_EVENT_LOAD || ev == VCL_EVENT_WARM)
         AN(ctx->msg);

-    VCL_TaskEnter(ctx->vcl, cli_task_privs);
+    VCL_TaskEnter(cli_task_privs);
     r = ctx->vcl->conf->event_vcl(ctx, ev);
-    VCL_TaskLeave(ctx->vcl, cli_task_privs);
+    VCL_TaskLeave(cli_task_privs);

     if (r && (ev == VCL_EVENT_COLD || ev == VCL_EVENT_DISCARD))
         WRONG("A VMOD cannot fail COLD or DISCARD events");
@@ -100,8 +100,8 @@ VPI_vcl_select(VRT_CTX, VCL_VCL vcl)
      * from FSM for VCL_RET_VCL. Keeping them here to ensure there are no
      * tasks during calls to VCL_Rel / vcl_get
      */
-    VCL_TaskLeave(req->vcl, req->top->privs);
-    VCL_TaskLeave(req->vcl, req->privs);
+    VCL_TaskLeave(req->top->privs);
+    VCL_TaskLeave(req->privs);
     if (IS_TOPREQ(req)) {
         AN(req->top);
         AZ(req->top->vcl0);
@@ -113,6 +113,6 @@ VPI_vcl_select(VRT_CTX, VCL_VCL vcl)
     vcl_get(&req->vcl, vcl);
     VSLb(ctx->req->vsl, SLT_VCL_use, "%s via %s",
         req->vcl->loaded_name, vcl->loaded_name);
-    VCL_TaskEnter(req->vcl, req->privs);
-    VCL_TaskEnter(req->vcl, req->top->privs);
+    VCL_TaskEnter(req->privs);
+    VCL_TaskEnter(req->top->privs);
 }
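The comment kept in VPI_vcl_select states the invariant these hunks preserve: no task may be live while VCL_Rel() / vcl_get() run, so both priv scopes are closed before the vcl reference changes hands and reopened afterwards. A hedged sketch of that ordering in the illustrative model (swap_vcl is a hypothetical stand-in for the VCL_Rel()/vcl_get() pair):

    /* No task-local privs may be live while the vcl reference is swapped. */
    static void
    select_vcl_model(struct vrt_privs_model *privs,
        struct vrt_privs_model *top_privs, void (*swap_vcl)(void))
    {
            task_leave(top_privs);
            task_leave(privs);
            swap_vcl();             /* VCL_Rel() / vcl_get() happen here */
            task_enter(privs);
            task_enter(top_privs);
    }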
@@ -194,19 +194,17 @@ VRT_priv_fini(const struct vmod_priv *p)
 /*--------------------------------------------------------------------*/

 void
-VCL_TaskEnter(const struct vcl *vcl, struct vrt_privs *privs)
+VCL_TaskEnter(struct vrt_privs *privs)
 {

-    AN(vcl);
     VRTPRIV_init(privs);
 }

 void
-VCL_TaskLeave(const struct vcl *vcl, struct vrt_privs *privs)
+VCL_TaskLeave(struct vrt_privs *privs)
 {
     struct vrt_priv *vp, *vp1;

-    AN(vcl);
     /*
      * NB: We don't bother removing entries as we finish them because it's
      * a costly operation. Instead we safely walk the whole tree and clear
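The comment retained above describes the cleanup strategy that survives the signature change: entries are never removed one by one as they finish, because that would be costly; the whole set is walked and cleared once in VCL_TaskLeave(). In the illustrative model, the append side that makes this trade-off cheap might look like (task_priv_add is hypothetical):

    /* O(1) insert during the task; nothing is removed until task_leave(). */
    static int
    task_priv_add(struct vrt_privs_model *privs, void *p, void (*fini)(void *))
    {
            struct vrt_priv_model *vp;

            vp = malloc(sizeof *vp);
            if (vp == NULL)
                    return (-1);
            vp->priv = p;
            vp->fini = fini;
            vp->next = privs->head;
            privs->head = vp;
            return (0);
    }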
@@ -167,12 +167,14 @@ VPI_Vmod_Unload(VRT_CTX, struct vmod **hdl)
 {
     struct vmod *v;

+    (void) ctx;
+
     ASSERT_CLI();

     TAKE_OBJ_NOTNULL(v, hdl, VMOD_MAGIC);

-    VCL_TaskLeave(ctx->vcl, cli_task_privs);
-    VCL_TaskEnter(ctx->vcl, cli_task_privs);
+    VCL_TaskLeave(cli_task_privs);
+    VCL_TaskEnter(cli_task_privs);

 #ifndef DONT_DLCLOSE_VMODS
     /*
@@ -411,8 +411,8 @@ HTTP1_Session(struct worker *wrk, struct req *req)
         wrk->stats->client_req++;
         CNT_Embark(wrk, req);
         if (req->req_step == R_STP_TRANSPORT) {
-            VCL_TaskEnter(req->vcl, req->privs);
-            VCL_TaskEnter(req->vcl, req->top->privs);
+            VCL_TaskEnter(req->privs);
+            VCL_TaskEnter(req->top->privs);
         }
         if (CNT_Request(req) == REQ_FSM_DISEMBARK)
             return;
@@ -524,8 +524,8 @@ h2_do_req(struct worker *wrk, void *priv)
     THR_SetRequest(req);
     CNT_Embark(wrk, req);
     if (req->req_step == R_STP_TRANSPORT) {
-        VCL_TaskEnter(req->vcl, req->privs);
-        VCL_TaskEnter(req->vcl, req->top->privs);
+        VCL_TaskEnter(req->privs);
+        VCL_TaskEnter(req->top->privs);
     }

     wrk->stats->client_req++;
