Comparing changes

base fork: weicao/otp ... head fork: weicao/otp
  • 7 commits
  • 14 files changed
  • 0 commit comments
  • 1 contributor
Commits on Jul 09, 2012
@weicao Change pollset implementation to use one pollset for each scheduler
In our proxy project, which does massive network I/O, we found that erts_check_io()
had become the system bottleneck: a single call to erts_check_io() took about 500 microseconds,
and it was invoked about 3.5k times per second under heavy pressure
(roughly 1.75 CPU-seconds of serialized polling per second of wall clock).
As a result the average response time of a user request exceeded 2 milliseconds
and system throughput was capped, while the CPUs sat idle (starved) at the same time.

This patch allocates one pollset for each scheduler, so that each scheduler can invoke
erts_check_io() on its own pollset concurrently. To interoperate with port migration,
the fds used by each port are recorded; when a port migrates between schedulers, they are
removed from the old scheduler's pollset and added to the new scheduler's.

After applying this patch, together with binding processes and ports to schedulers,
erts_check_io() is invoked about 230k times per second (dozens of times more than before),
and throughput increases from 45k to 105k, so it works.
121afd0
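
For orientation, here is a minimal standalone sketch of the scheme this commit message describes: a table of pollsets indexed by scheduler, and a migration step that re-registers a port's fds when the port moves. This is an illustration, not the patch itself; the names NUM_SCHEDULERS, check_io() and migrate_fd() are made up for the sketch, and Linux epoll stands in for the ERTS poll abstraction (the real patch uses struct pollset_info, ERTS_POLLSET_IX() and erts_change_port_pollset(), as the diffs below show).

    /* Sketch: one pollset (epoll instance) per scheduler. */
    #include <sys/epoll.h>
    #include <assert.h>

    #define NUM_SCHEDULERS 8              /* assumed fixed for the sketch */

    static int pollset[NUM_SCHEDULERS];   /* one epoll fd per scheduler */

    static void init_pollsets(void)
    {
        for (int ix = 0; ix < NUM_SCHEDULERS; ix++)
            pollset[ix] = epoll_create1(0);
    }

    /* Each scheduler polls only its own set, so the calls run
     * concurrently instead of serializing on one global pollset. */
    static int check_io(int sched_ix, struct epoll_event *ev,
                        int max, int timeout_ms)
    {
        return epoll_wait(pollset[sched_ix], ev, max, timeout_ms);
    }

    /* When a port migrates, every fd it has selected must follow it:
     * deregister from the old scheduler's pollset, register with the new. */
    static void migrate_fd(int fd, unsigned events, int from_ix, int to_ix)
    {
        struct epoll_event ev = { .events = events, .data = { .fd = fd } };
        assert(from_ix != to_ix);
        epoll_ctl(pollset[from_ix], EPOLL_CTL_DEL, fd, NULL);
        epoll_ctl(pollset[to_ix],   EPOLL_CTL_ADD, fd, &ev);
    }

In the patch itself, the per-port fd lists (sfl_tab in erl_check_io.c) record which fds a port has selected, and erts_change_port_pollset() performs this re-registration under the fd and run-queue locks.
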
Commits on Jul 12, 2012
@weicao move erts_deliver_time out of erl_check_io 7577507
Commits on Jul 18, 2012
@weicao fix the assertion error at erts_init_check_io cab6b2c
@weicao fix various assertion failures with DEBUG build a89a910
@weicao fix 'move erts_deliver_time out of erl_check_io' commit to avoid it deadlocking the non-smp emulator
d1c5f30
Commits on Jul 19, 2012
@weicao fix compiler warnings ff209d9
Commits on Jul 25, 2012
@weicao Fix a bug which causes erts_check_io() to keep omitting some I/O events. 0ff0e78
2  erts/emulator/beam/dist.h
@@ -208,7 +208,7 @@ void erts_schedule_dist_command(Port *prt, DistEntry *dist_entry)
&dep->dist_cmd,
ERTS_PORT_TASK_DIST_CMD,
(ErlDrvEvent) -1,
- NULL);
+ NULL, -1);
}
}
4 erts/emulator/beam/erl_lock_check.c
@@ -121,6 +121,10 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "drv_ev_state_grow", NULL, },
{ "drv_ev_state", "address" },
{ "safe_hash", "address" },
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ { "sfl_tab_lock", NULL },
+ { "sfl_lock", NULL },
+#endif
{ "pollset_rm_list", NULL },
{ "removed_fd_pre_alloc_lock", "address" },
{ "state_prealloc", NULL },
125 erts/emulator/beam/erl_port_task.c
@@ -199,6 +199,37 @@ pop_port(ErtsRunQueue *runq)
return pp;
}
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+int erts_transfer_outstanding_io_tasks(Port* pp, ErtsRunQueue* from, ErtsRunQueue* to)
+{
+ ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(from));
+ if(pp) {
+ ErtsPortTaskQueue *ptqp = pp->sched.taskq;
+ if (ptqp) {
+ ErtsPortTask *ptp = ptqp->first;
+ int io_tasks = 0;
+ while (ptp) {
+ switch (ptp->type) {
+ case ERTS_PORT_TASK_INPUT:
+ case ERTS_PORT_TASK_OUTPUT:
+ case ERTS_PORT_TASK_EVENT:
+ io_tasks ++;
+ break;
+ default:
+ break;
+ }
+ ptp = ptp->next;
+ }
+ if(io_tasks) {
+ ASSERT(erts_smp_atomic_read_nob(&from->ports.outstanding_io_tasks) >= io_tasks);
+ erts_smp_atomic_add_relb(&from->ports.outstanding_io_tasks, -1*io_tasks);
+ erts_smp_atomic_add_relb(&to->ports.outstanding_io_tasks, io_tasks);
+ }
+ }
+ }
+ return 0;
+}
+#endif
#ifdef HARD_DEBUG
@@ -457,6 +488,10 @@ erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp)
case ERTS_PORT_TASK_EVENT:
ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0);
erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ ASSERT(erts_smp_atomic_read_nob(&runq->ports.outstanding_io_tasks) > 0);
+ erts_smp_atomic_dec_relb(&runq->ports.outstanding_io_tasks);
+#endif
break;
default:
break;
@@ -489,7 +524,8 @@ erts_port_task_schedule(Eterm id,
ErtsPortTaskHandle *pthp,
ErtsPortTaskType type,
ErlDrvEvent event,
- ErlDrvEventData event_data)
+ ErlDrvEventData event_data,
+ int ix)
{
ErtsRunQueue *runq;
Port *pp;
@@ -503,13 +539,6 @@ erts_port_task_schedule(Eterm id,
* tasks_lock is held.
*/
- if (pthp && erts_port_task_is_scheduled(pthp)) {
- ASSERT(0);
- erts_port_task_abort(id, pthp);
- }
-
- ptp = port_task_alloc();
-
ASSERT(is_internal_port(id));
pp = &erts_port[internal_port_index(id)];
runq = erts_port_runq(pp);
@@ -519,8 +548,21 @@ erts_port_task_schedule(Eterm id,
erts_smp_runq_unlock(runq);
return -1;
}
+
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ /* port has been migrated to another pollset */
+ if (ix >=0 && runq->ix != ix) {
+ erts_smp_runq_unlock(runq);
+ return 0;
+ }
+#endif
+
+ if (pthp && erts_port_task_is_scheduled(pthp)) {
+ ASSERT(0);
+ erts_port_task_abort(id, pthp);
+ }
- ASSERT(!erts_port_task_is_scheduled(pthp));
+ ptp = port_task_alloc();
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
@@ -530,17 +572,22 @@ erts_port_task_schedule(Eterm id,
}
#ifdef ERTS_SMP
- if (enq_port) {
- ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
- if (xrunq) {
- /* Port emigrated ... */
- erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
- erts_smp_runq_unlock(runq);
- runq = erts_port_runq(pp);
- if (!runq)
- return -1;
- }
- }
+// if (enq_port) {
+// ErtsRunQueue *xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
+// if (xrunq) {
+// /* Port emigrated ... */
+// erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
+// erts_smp_runq_unlock(runq);
+//
+//#ifdef ERTS_POLLSET_PER_SCHEDULER
+// erts_change_port_pollset(pp->id, xrunq->ix);
+//#endif
+//
+// runq = erts_port_runq(pp);
+// if (!runq)
+// return -1;
+// }
+// }
#endif
ASSERT(pp->sched.taskq);
@@ -561,6 +608,9 @@ erts_port_task_schedule(Eterm id,
case ERTS_PORT_TASK_OUTPUT:
case ERTS_PORT_TASK_EVENT:
erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ erts_smp_atomic_inc_relb(&runq->ports.outstanding_io_tasks);
+#endif
/* Fall through... */
default:
enqueue_task(pp->sched.taskq, ptp);
@@ -703,7 +753,12 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
if (!pp->sched.taskq) {
if (erts_system_profile_flags.runnable_ports)
profile_runnable_port(pp, am_inactive);
- res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+ res =
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ (erts_smp_atomic_read_nob(&runq->ports.outstanding_io_tasks)
+#else
+ (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+#endif
!= (erts_aint_t) 0);
goto done;
}
@@ -851,6 +906,10 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
>= io_tasks_executed);
erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
-1*io_tasks_executed);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ ASSERT(erts_smp_atomic_read_nob(&runq->ports.outstanding_io_tasks) >= io_tasks_executed);
+ erts_smp_atomic_add_relb(&runq->ports.outstanding_io_tasks, -1*io_tasks_executed);
+#endif
}
*curr_port_pp = NULL;
@@ -885,15 +944,23 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
}
else {
/* Port emigrated ... */
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_transfer_outstanding_io_tasks(pp, runq, xrunq);
+#endif
erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
erts_smp_runq_unlock(runq);
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_change_port_pollset(pp->id, xrunq->ix);
+#endif
+
xrunq = erts_port_runq(pp);
if (xrunq) {
enqueue_port(xrunq, pp);
ASSERT(pp->sched.exe_taskq);
pp->sched.exe_taskq = NULL;
erts_smp_runq_unlock(xrunq);
+
erts_smp_notify_inc_runq(xrunq);
}
@@ -902,7 +969,12 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
#endif
}
- res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+ res =
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ (erts_smp_atomic_read_nob(&runq->ports.outstanding_io_tasks)
+#else
+ (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+#endif
!= (erts_aint_t) 0);
ERTS_PT_CHK_PRES_PORTQ(runq, pp);
@@ -925,7 +997,12 @@ erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
erts_smp_runq_unlock(runq);
erts_port_cleanup(pp); /* Might aquire runq lock */
erts_smp_runq_lock(runq);
- res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+ res =
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ (erts_smp_atomic_read_nob(&runq->ports.outstanding_io_tasks)
+#else
+ (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
+#endif
!= (erts_aint_t) 0);
}
}
@@ -1004,6 +1081,7 @@ erts_port_is_scheduled(Port *pp)
return res;
}
+
#ifdef ERTS_SMP
void
erts_enqueue_port(ErtsRunQueue *rq, Port *pp)
@@ -1023,6 +1101,7 @@ erts_dequeue_port(ErtsRunQueue *rq)
ASSERT(!pp
|| rq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
ASSERT(!pp || pp->sched.in_runq);
+
return pp;
}
7 erts/emulator/beam/erl_port_task.h
@@ -120,10 +120,15 @@ int erts_port_task_schedule(Eterm,
ErtsPortTaskHandle *,
ErtsPortTaskType,
ErlDrvEvent,
- ErlDrvEventData);
+ ErlDrvEventData,
+ int);
void erts_port_task_free_port(Port *);
int erts_port_is_scheduled(Port *);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+int erts_transfer_outstanding_io_tasks(Port* pp, ErtsRunQueue* from, ErtsRunQueue* to);
+#endif
+
#ifdef ERTS_SMP
void erts_enqueue_port(ErtsRunQueue *rq, Port *pp);
Port *erts_dequeue_port(ErtsRunQueue *rq);
122 erts/emulator/beam/erl_process.c
@@ -1066,14 +1066,22 @@ erts_sched_finish_poke(ErtsSchedulerSleepInfo *ssi, erts_aint32_t flags)
{
switch (flags & ERTS_SSI_FLGS_SLEEP_TYPE) {
case ERTS_SSI_FLG_POLL_SLEEPING:
- erts_sys_schedule_interrupt(1);
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_sys_schedule_interrupt_rq(ssi->ix, 1);
+#else
+ erts_sys_schedule_interrupt(1);
+#endif
break;
case ERTS_SSI_FLG_POLL_SLEEPING|ERTS_SSI_FLG_TSE_SLEEPING:
/*
* Thread progress blocking while poll sleeping; need
* to signal on both...
*/
- erts_sys_schedule_interrupt(1);
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_sys_schedule_interrupt_rq(ssi->ix, 1);
+#else
+ erts_sys_schedule_interrupt(1);
+#endif
/* fall through */
case ERTS_SSI_FLG_TSE_SLEEPING:
erts_tse_set(ssi->event);
@@ -1106,7 +1114,7 @@ set_aux_work_flags_wakeup_nob(ErtsSchedulerSleepInfo *ssi,
#ifdef ERTS_SMP
erts_sched_poke(ssi);
#else
- erts_sys_schedule_interrupt(1);
+ erts_sys_schedule_interrupt(1);
#endif
}
}
@@ -1126,7 +1134,7 @@ set_aux_work_flags_wakeup_relb(ErtsSchedulerSleepInfo *ssi,
#ifdef ERTS_SMP
erts_sched_poke(ssi);
#else
- erts_sys_schedule_interrupt(1);
+ erts_sys_schedule_interrupt(1);
#endif
}
}
@@ -2075,18 +2083,33 @@ try_set_sys_scheduling(void)
return 0 == erts_smp_atomic32_cmpxchg_acqb(&doing_sys_schedule, 1, 0);
}
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+
+static ERTS_INLINE int
+runq_have_outstanding_io_tasks(ErtsRunQueue* rq)
+{
+ return 0 != erts_smp_atomic_read_acqb(&rq->ports.outstanding_io_tasks);
+}
+
+#endif
+
#endif
static ERTS_INLINE int
-prepare_for_sys_schedule(void)
+prepare_for_sys_schedule(ErtsRunQueue *rq)
{
#ifdef ERTS_SMP
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ if (!runq_have_outstanding_io_tasks(rq))
+ return 1;
+#else
while (!erts_port_task_have_outstanding_io_tasks()
&& try_set_sys_scheduling()) {
if (!erts_port_task_have_outstanding_io_tasks())
return 1;
clear_sys_scheduling();
}
+#endif
return 0;
#else
return !erts_port_task_have_outstanding_io_tasks();
@@ -2239,7 +2262,11 @@ sched_set_sleeptype(ErtsSchedulerSleepInfo *ssi, erts_aint32_t sleep_type)
erts_tse_reset(ssi->event);
else {
ASSERT(sleep_type == ERTS_SSI_FLG_POLL_SLEEPING);
- erts_sys_schedule_interrupt(0);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ erts_sys_schedule_interrupt_rq(ssi->ix, 0);
+#else
+ erts_sys_schedule_interrupt(0);
+#endif
}
while (1) {
@@ -2395,7 +2422,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* be waiting in erl_sys_schedule()
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(rq)) {
sched_waiting(esdp->no, rq);
@@ -2500,9 +2527,12 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (working)
sched_wall_time_change(esdp, working = 0);
+
+#ifndef ERTS_POLLSET_PER_SCHEDULER
ASSERT(!erts_port_task_have_outstanding_io_tasks());
+#endif
- erl_sys_schedule(1); /* Might give us something to do */
+ erl_sys_schedule(rq->ix, 1); /* Might give us something to do */
dt = erts_do_time_read_and_reset();
if (dt) erts_bump_timer(dt);
@@ -2541,13 +2571,17 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* If we got new I/O tasks we aren't allowed to
* call erl_sys_schedule() until it is handled.
*/
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ if (runq_have_outstanding_io_tasks(rq)) {
+#else
if (erts_port_task_have_outstanding_io_tasks()) {
+#endif
clear_sys_scheduling();
/*
* Got to check that we still got I/O tasks; otherwise
* we have to continue checking for I/O...
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(rq)) {
spincount *= ERTS_SCHED_TSE_SLEEP_SPINCOUNT_FACT;
goto tse_wait;
}
@@ -2562,14 +2596,18 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
* If we got new I/O tasks we aren't allowed to
* sleep in erl_sys_schedule().
*/
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ if (runq_have_outstanding_io_tasks(rq)) {
+#else
if (erts_port_task_have_outstanding_io_tasks()) {
+#endif
clear_sys_scheduling();
/*
* Got to check that we still got I/O tasks; otherwise
* we have to wait in erl_sys_schedule() after all...
*/
- if (!prepare_for_sys_schedule()) {
+ if (!prepare_for_sys_schedule(rq)) {
/*
* Not allowed to wait in erl_sys_schedule;
* do tse wait instead...
@@ -2598,7 +2636,9 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
ASSERT(!(flgs & ERTS_SSI_FLG_SLEEPING));
goto sys_woken;
}
+#ifndef ERTS_POLLSET_PER_SCHEDULER
ASSERT(!erts_port_task_have_outstanding_io_tasks());
+#endif
goto sys_poll_aux_work;
}
@@ -2616,9 +2656,11 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
erts_thr_progress_active(esdp, thr_prgr_active = 0);
#endif
+#ifndef ERTS_POLLSET_PER_SCHEDULER
ASSERT(!erts_port_task_have_outstanding_io_tasks());
+#endif
- erl_sys_schedule(0);
+ erl_sys_schedule(rq->ix, 0);
dt = erts_do_time_read_and_reset();
if (dt) erts_bump_timer(dt);
@@ -2969,7 +3011,7 @@ check_immigration_need(ErtsRunQueue *c_rq, ErtsMigrationPath *mp, int prio)
int len;
Uint32 f_flags, f_rq_flags;
ErtsRunQueue *f_rq;
-
+
f_flags = mp->prio[prio].flags;
ASSERT(ERTS_CHK_RUNQ_FLG_IMMIGRATE(mp->flags, prio));
@@ -3054,10 +3096,17 @@ immigrate(ErtsRunQueue *c_rq, ErtsMigrationPath *mp)
if (prio == ERTS_PORT_PRIO_LEVEL) {
Port *prt;
prt = erts_dequeue_port(rq);
- if (prt)
+ if (prt) {
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_transfer_outstanding_io_tasks(prt, rq, c_rq);
+#endif
RUNQ_SET_RQ(&prt->run_queue, c_rq);
+ }
erts_smp_runq_unlock(rq);
if (prt) {
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_change_port_pollset(prt->id, c_rq->ix);
+#endif
/* port might terminate while we have no lock... */
rq = erts_port_runq(prt);
if (rq) {
@@ -3226,8 +3275,14 @@ evacuate_run_queue(ErtsRunQueue *rq,
while (prt) {
ErtsRunQueue *prt_rq;
prt = erts_dequeue_port(rq);
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_transfer_outstanding_io_tasks(prt, rq, to_rq);
+#endif
RUNQ_SET_RQ(&prt->run_queue, to_rq);
erts_smp_runq_unlock(rq);
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_change_port_pollset(prt->id, to_rq->ix);
+#endif
/*
* The port might terminate while
* we have no lock on it...
@@ -3372,12 +3427,17 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
/*
* Check for a runnable port to steal...
*/
-
if (vrq->ports.start) {
ErtsRunQueue *prt_rq;
Port *prt = erts_dequeue_port(vrq);
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_transfer_outstanding_io_tasks(prt, vrq, rq);
+#endif
RUNQ_SET_RQ(&prt->run_queue, rq);
erts_smp_runq_unlock(vrq);
+#ifdef ERTS_POLLSET_PER_SCHEDULER
+ erts_change_port_pollset(prt->id, rq->ix);
+#endif
/*
* The port might terminate while
@@ -3397,7 +3457,6 @@ try_steal_task_from_victim(ErtsRunQueue *rq, int *rq_lockedp, ErtsRunQueue *vrq,
return !0;
}
}
-
erts_smp_runq_unlock(vrq);
return 0;
@@ -4637,6 +4696,10 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
rq->ports.info.reds = 0;
rq->ports.start = NULL;
rq->ports.end = NULL;
+
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ erts_smp_atomic_init_nob(&rq->ports.outstanding_io_tasks, 0);
+#endif
}
#ifdef ERTS_SMP
@@ -4672,6 +4735,7 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online)
ssi->next = NULL;
ssi->prev = NULL;
#endif
+ ssi->ix = ix-1;
erts_smp_atomic32_init_nob(&ssi->flags, 0);
ssi->event = NULL; /* initialized in sched_thread_func */
#endif
@@ -5788,8 +5852,24 @@ sched_thread_func(void *vesdp)
return NULL;
}
+#ifdef ERTS_SMP
+static void *
+deliver_time_thread(void *unused)
+{
+ while (1) {
+ erts_deliver_time();
+ erts_milli_sleep(1);
+ }
+ return NULL;
+}
+#endif
+
static ethr_tid aux_tid;
+#ifdef ERTS_SMP
+static ethr_tid deliver_time_tid;
+#endif
+
void
erts_start_schedulers(void)
{
@@ -5827,6 +5907,12 @@ erts_start_schedulers(void)
res = ethr_thr_create(&aux_tid, aux_thread, NULL, &opts);
if (res != 0)
erl_exit(1, "Failed to create aux thread\n");
+
+#ifdef ERTS_SMP
+ res = ethr_thr_create(&deliver_time_tid, deliver_time_thread, NULL, &opts);
+ if (res != 0)
+ erl_exit(1, "Failed to create deliver time thread\n");
+#endif
if (actual < 1)
erl_exit(1,
@@ -6980,7 +7066,7 @@ Process *schedule(Process *p, int calls)
goto check_activities_to_run;
}
- else if (fcalls > input_reductions && prepare_for_sys_schedule()) {
+ else if (fcalls > input_reductions && prepare_for_sys_schedule(rq)) {
/*
* Schedule system-level activities.
*/
@@ -6988,13 +7074,15 @@ Process *schedule(Process *p, int calls)
erts_smp_atomic32_set_relb(&function_calls, 0);
fcalls = 0;
+#ifndef ERTS_POLLSET_PER_SCHEDULER
ASSERT(!erts_port_task_have_outstanding_io_tasks());
+#endif
#if 0 /* Not needed since we wont wait in sys schedule */
erts_sys_schedule_interrupt(0);
#endif
erts_smp_runq_unlock(rq);
- erl_sys_schedule(1);
+ erl_sys_schedule(rq->ix, 1);
dt = erts_do_time_read_and_reset();
if (dt) erts_bump_timer(dt);
4 erts/emulator/beam/erl_process.h
@@ -291,6 +291,7 @@ struct ErtsSchedulerSleepInfo_ {
#ifdef ERTS_SMP
ErtsSchedulerSleepInfo *next;
ErtsSchedulerSleepInfo *prev;
+ int ix;
erts_smp_atomic32_t flags;
erts_tse_t *event;
#endif
@@ -403,6 +404,9 @@ struct ErtsRunQueue_ {
ErtsRunQueueInfo info;
struct port *start;
struct port *end;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ erts_smp_atomic_t outstanding_io_tasks;
+#endif
} ports;
};
3  erts/emulator/beam/global.h
@@ -187,6 +187,9 @@ struct port {
ErtsPrtSD *psd; /* Port specific data */
};
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+int erts_change_port_pollset(Eterm, int);
+#endif
ERTS_GLB_INLINE ErtsRunQueue *erts_port_runq(Port *prt);
2  erts/emulator/beam/io.c
@@ -2833,7 +2833,7 @@ static void schedule_port_timeout(Port *p)
&p->timeout_task,
ERTS_PORT_TASK_TIMEOUT,
(ErlDrvEvent) -1,
- NULL);
+ NULL, -1);
}
ErlDrvTermData driver_mk_term_nil(void)
4 erts/emulator/beam/sys.h
@@ -661,7 +661,9 @@ extern char *erts_sys_ddll_error(int code);
*/
#include "erl_time.h"
+void erts_sys_schedule_interrupt_rq(int ix, int set);
void erts_sys_schedule_interrupt(int set);
+
#ifdef ERTS_SMP
void erts_sys_schedule_interrupt_timed(int set, erts_short_time_t msec);
void erts_sys_main_thread(void);
@@ -671,7 +673,7 @@ extern void erts_sys_prepare_crash_dump(void);
extern void erts_sys_pre_init(void);
extern void erl_sys_init(void);
extern void erl_sys_args(int *argc, char **argv);
-extern void erl_sys_schedule(int);
+extern void erl_sys_schedule(int, int);
void sys_tty_reset(int);
int sys_max_files(void);
604 erts/emulator/sys/common/erl_check_io.c
@@ -78,16 +78,25 @@ typedef char EventStateFlags;
#define ERTS_CIO_POLL_INIT ERTS_POLL_EXPORT(erts_poll_init)
#define ERTS_CIO_POLL_INFO ERTS_POLL_EXPORT(erts_poll_info)
-static struct pollset_info
+struct pollset_info
{
ErtsPollSet ps;
erts_smp_atomic_t in_poll_wait; /* set while doing poll */
+};
+
+static struct pollset_info *erts_pollsets;
+static Uint erts_no_pollsets;
+
+#define ERTS_POLLSET_IX(IX) \
+ (ASSERT_EXPR(0 <= (IX) && (IX) < erts_no_pollsets), \
+ &erts_pollsets[(IX)])
+
+struct deselected_info {
#ifdef ERTS_SMP
struct removed_fd* removed_list; /* list of deselected fds */
erts_smp_spinlock_t removed_list_lock;
#endif
-}pollset;
-#define NUM_OF_POLLSETS 1
+} deselected;
typedef struct {
#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
@@ -120,6 +129,23 @@ struct removed_fd {
};
#endif
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+struct selected_fd {
+ struct selected_fd *next;
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ ErtsSysFdType fd;
+#else
+ ErtsDrvEventState *state;
+#endif
+};
+
+struct selected_fd_list { /* list of selected fds in each port */
+ struct selected_fd *head;
+ erts_smp_mtx_t lock;
+};
+
+#endif
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
static int max_fds = -1;
#endif
@@ -212,17 +238,250 @@ drvport2id(ErlDrvPort dp)
}
}
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+static struct selected_fd_list *sfl_tab;
+static erts_smp_mtx_t sfl_tab_lock;
+ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(selected_fd, struct selected_fd, 64, ERTS_ALC_T_UNDEF)
+
+static void init_sfl_tab(void)
+{
+ erts_smp_mtx_lock(&sfl_tab_lock);
+ if (sfl_tab == NULL) {
+ int i;
+ sfl_tab = (struct selected_fd_list *) erts_alloc(ERTS_ALC_T_UNDEF,
+ erts_max_ports * sizeof(struct selected_fd_list));
+ for (i = 0; i < erts_max_ports; i++) {
+ sfl_tab[i].head = NULL;
+ erts_smp_mtx_init(&sfl_tab[i].lock,
+ "sfl_lock");
+ }
+ }
+ erts_smp_mtx_unlock(&sfl_tab_lock);
+}
+
+static void add_selected_fd(ErlDrvPort drvport, ErtsDrvEventState* state)
+{
+ int ix = (int) drvport;
+ struct selected_fd *sflp;
+ struct selected_fd **insp;
+
+ ASSERT(ix >= 0 && ix < erts_max_ports);
+ if(sfl_tab == NULL) init_sfl_tab();
+
+ erts_smp_mtx_lock(&sfl_tab[ix].lock);
+ sflp = sfl_tab[ix].head;
+ insp = &sfl_tab[ix].head;
+ while (sflp) {
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (sflp->fd == state->fd)
+ break;
+#else
+ if (sflp->state == state)
+ break;
+#endif
+ insp = &sflp->next;
+ sflp = sflp->next;
+ }
+
+ if(!sflp) {
+ struct selected_fd *p = selected_fd_alloc();
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ p->fd = state->fd;
+#else
+ p->state = state;
+#endif
+ p->next = NULL;
+ *insp = p;
+ }
+ erts_smp_mtx_unlock(&sfl_tab[ix].lock);
+}
+
+static void del_selected_fd(ErlDrvPort drvport, ErtsDrvEventState* state)
+{
+ int ix = (int) drvport;
+ struct selected_fd *sflp;
+ struct selected_fd **delp;
+
+ ASSERT(ix >= 0 && ix < erts_max_ports);
+ if(sfl_tab == NULL)
+ return;
+
+ erts_smp_mtx_lock(&sfl_tab[ix].lock);
+ sflp = sfl_tab[ix].head;
+ delp = &sfl_tab[ix].head;
+ while (sflp) {
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ if (sflp->fd == state->fd)
+ break;
+#else
+ if (sflp->state == state)
+ break;
+#endif
+ delp = &sflp->next;
+ sflp = sflp->next;
+ }
+
+ if (sflp) {
+ *delp = sflp->next;
+ selected_fd_free(sflp);
+ }
+ erts_smp_mtx_unlock(&sfl_tab[ix].lock);
+}
+
+static struct selected_fd* deepcopy_sfl(struct selected_fd *sflp)
+{
+ struct selected_fd *copy, **insp;
+ insp = &copy;
+ /* deep copy */
+ while(sflp) {
+ struct selected_fd *sf = selected_fd_alloc();
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ sf->fd = sflp->fd;
+#else
+ sf->state = sflp->state;
+#endif
+ *insp = sf;
+ insp = &sf->next;
+ sflp = sflp->next;
+ }
+ *insp = NULL;
+ return copy;
+}
+
+static void free_sfl(struct selected_fd *sflp)
+{
+ struct selected_fd *tofree;
+ while(sflp) {
+ tofree = sflp;
+ sflp = sflp->next;
+ selected_fd_free(tofree);
+ }
+}
+
+#define ERTS_PORT_TASK_INVALID_PORT(P, ID) \
+ ((erts_port_status_get((P)) & ERTS_PORT_SFLGS_DEAD) || (P)->id != (ID))
+
+int
+ERTS_CIO_EXPORT(erts_check_io_change_port_pollset)(Eterm id, int to_rq_ix)
+{
+ Port *pp;
+ int ix;
+ ErtsPollSet to_ps = ERTS_POLLSET_IX(to_rq_ix)->ps;
+ struct selected_fd *copy, *sflp;
+ int do_wake;
+
+ if(sfl_tab == NULL) /* no fds recorded; safe to migrate this port */
+ return 0;
+
+ ASSERT(is_internal_port(id));
+ ix = internal_port_index(id);
+ pp = &erts_port[ix];
+
+ erts_smp_mtx_lock(&sfl_tab[ix].lock);
+ copy = sflp = deepcopy_sfl(sfl_tab[ix].head);
+ erts_smp_mtx_unlock(&sfl_tab[ix].lock);
+
+ if (!copy) return 0;
+
+ while(sflp) {
+ ErtsDrvEventState* state ;
+ ErtsSysFdType fd;
+ erts_smp_mtx_t* mtx;
+ ErtsRunQueue *runq;
+
+#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
+ fd = sflp->fd;
+ mtx = fd_mtx(fd);
+ erts_smp_mtx_lock(mtx);
+ state = &drv_ev_state[fd];
+#else
+ state = sflp->state;
+ fd = state->fd;
+ mtx = fd_mtx(fd);
+ erts_smp_mtx_lock(mtx);
+#endif
+
+ /* lock the runq after the fd mutex to avoid deadlock */
+ runq = erts_port_runq(pp);
+
+ /* the port may have been migrated to another rq, or closed */
+ if ( !runq || runq->ix != to_rq_ix || ERTS_PORT_TASK_INVALID_PORT(pp, id) ) {
+ if (runq) {
+ erts_smp_runq_unlock(runq);
+ erts_smp_mtx_unlock(mtx);
+ }
+ free_sfl(copy);
+ return -1;
+ }
+
+ switch (state->type) {
+ case ERTS_EV_TYPE_STOP_USE:
+ case ERTS_EV_TYPE_NONE:
+ break;
+ case ERTS_EV_TYPE_DRV_SEL: {
+ Eterm iid = state->driver.select->inport;
+ Eterm oid = state->driver.select->outport;
+
+ if( id == iid ) {
+ ErtsPollSet from_ps = state->driver.select->inps;
+ if( from_ps != to_ps ) {
+ state->driver.select->inps = to_ps;
+ do_wake = 0;
+ ERTS_CIO_POLL_CTL(from_ps, state->fd, ERTS_POLL_EV_IN, 0, &do_wake);
+ do_wake = 1;
+ state->events = ERTS_CIO_POLL_CTL(to_ps, state->fd, ERTS_POLL_EV_IN, 1, &do_wake);
+ }
+ }
+
+ if( id == oid ) {
+ ErtsPollSet from_ps = state->driver.select->outps;
+ if( from_ps != to_ps ) {
+ state->driver.select->outps = to_ps;
+ do_wake = 0;
+ ERTS_CIO_POLL_CTL(from_ps, state->fd, ERTS_POLL_EV_OUT, 0, &do_wake);
+ do_wake = 1;
+ state->events = ERTS_CIO_POLL_CTL(to_ps, state->fd, ERTS_POLL_EV_OUT, 1, &do_wake);
+ }
+ }
+ break;
+ }
+ case ERTS_EV_TYPE_DRV_EV: {
+ ErtsPollSet from_ps = state->driver.event->ps;
+ if (from_ps != to_ps) {
+ state->driver.event->ps = to_ps;
+ do_wake = 0;
+ ERTS_CIO_POLL_CTL(from_ps, state->fd, state->events, 0, &do_wake);
+ do_wake = 1;
+ state->events = ERTS_CIO_POLL_CTL(to_ps, state->fd, state->events, 1, &do_wake);
+ }
+ break;
+ }
+ default:
+ ASSERT(0);
+ }
+ erts_smp_runq_unlock(runq);
+ erts_smp_mtx_unlock(mtx);
+
+ sflp = sflp->next;
+ }
+
+ free_sfl(copy);
+ return 0;
+}
+
+#endif
+
#ifdef ERTS_SMP
ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(removed_fd, struct removed_fd, 64, ERTS_ALC_T_FD_LIST)
#endif
static ERTS_INLINE void
-remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
+remember_removed(ErtsDrvEventState *state, struct deselected_info* psi)
{
#ifdef ERTS_SMP
struct removed_fd *fdlp;
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd)));
- if (erts_smp_atomic_read_nob(&psi->in_poll_wait)) {
+ //if (erts_smp_atomic_read_nob(&psi->in_poll_wait)) {
state->remove_cnt++;
ASSERT(state->remove_cnt > 0);
fdlp = removed_fd_alloc();
@@ -236,7 +495,7 @@ remember_removed(ErtsDrvEventState *state, struct pollset_info* psi)
fdlp->next = psi->removed_list;
psi->removed_list = fdlp;
erts_smp_spin_unlock(&psi->removed_list_lock);
- }
+ //}
#endif
}
@@ -257,7 +516,7 @@ is_removed(ErtsDrvEventState *state)
}
static void
-forget_removed(struct pollset_info* psi)
+forget_removed(struct deselected_info* psi)
{
#ifdef ERTS_SMP
struct removed_fd* fdlp;
@@ -425,12 +684,60 @@ static void
deselect(ErtsDrvEventState *state, int mode)
{
int do_wake = 0;
- ErtsPollEvents rm_events;
ERTS_SMP_LC_ASSERT(erts_smp_lc_mtx_is_locked(fd_mtx(state->fd)));
ASSERT(state->events);
abort_tasks(state, mode);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ switch(state->type) {
+ case ERTS_EV_TYPE_DRV_SEL: {
+ Eterm iid = state->driver.select->inport;
+ Eterm oid = state->driver.select->outport;
+ if (!mode) {
+ mode = ERL_DRV_READ | ERL_DRV_WRITE;
+ }
+ if (mode & ERL_DRV_READ && is_not_nil(iid)) {
+ state->events = ERTS_CIO_POLL_CTL(state->driver.select->inps,
+ state->fd, ERTS_POLL_EV_IN, 0, &do_wake);
+ state->driver.select->inport = NIL;
+ state->driver.select->inps = NULL;
+
+ if( is_nil(oid) || iid != oid || !(mode & ERL_DRV_WRITE) ) {
+ del_selected_fd(internal_port_index(iid), state);
+ }
+ }
+ if (mode & ERL_DRV_WRITE && is_not_nil(oid)) {
+ do_wake = 0;
+ state->events = ERTS_CIO_POLL_CTL(state->driver.select->outps,
+ state->fd, ERTS_POLL_EV_OUT, 0, &do_wake);
+ state->driver.select->outport = NIL;
+ state->driver.select->outps = NULL;
+
+ if( is_nil(iid) || iid != oid || mode & ERL_DRV_READ)
+ del_selected_fd(internal_port_index(oid), state);
+ }
+
+ break;
+ }
+#if ERTS_CIO_HAVE_DRV_EVENT
+ case ERTS_EV_TYPE_DRV_EV:
+ ASSERT(!mode);
+ state->events = ERTS_CIO_POLL_CTL(state->driver.event->ps,
+ state->fd, state->events, 0, &do_wake);
+ del_selected_fd(internal_port_index(state->driver.event->port), state);
+ break;
+#endif
+ case ERTS_EV_TYPE_NONE:
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+#else
+ {
+ ErtsPollEvents rm_events;
if (!mode)
rm_events = state->events;
else {
@@ -446,7 +753,9 @@ deselect(ErtsDrvEventState *state, int mode)
}
}
- state->events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, rm_events, 0, &do_wake);
+ state->events = ERTS_CIO_POLL_CTL(ERTS_POLLSET_IX(0)->ps, state->fd, rm_events, 0, &do_wake);
+ }
+#endif
if (!(state->events)) {
switch (state->type) {
@@ -473,7 +782,7 @@ deselect(ErtsDrvEventState *state, int mode)
state->driver.select = NULL;
state->type = ERTS_EV_TYPE_NONE;
state->flags = 0;
- remember_removed(state, &pollset);
+ remember_removed(state, &deselected);
}
}
@@ -499,6 +808,9 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
ErtsDrvEventState *state;
int wake_poller;
int ret;
+ ErtsPollSet inps = NULL;
+ ErtsPollSet outps = NULL;
+
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(name, 64);
#endif
@@ -506,6 +818,10 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
&& erts_lc_is_port_locked(erts_drvport2port(ix)));
+#if !defined(ERTS_SMP) || !defined(ERTS_POLLSET_PER_SCHEDULER)
+ inps = outps = ERTS_POLLSET_IX(0)->ps;
+#endif
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
if (fd < 0) {
@@ -585,14 +901,45 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
(state->type == ERTS_EV_TYPE_NONE && !state->events));
if (!on && !(state->flags & ERTS_EV_FLAG_USED)
- && state->events && !(state->events & ~ctl_events)) {
+ && state->events && !(state->events & ~ctl_events)) {
/* Old driver removing all events. At least wake poller.
It will not make close() 100% safe but it will prevent
actions delayed by poll timeout. */
wake_poller = 1;
}
- new_events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, ctl_events, on, &wake_poller);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ new_events = state->events;
+ {
+ int wake_poller_cmd;
+ int wake_poller_res = 0;
+ if (ctl_events & ERTS_POLL_EV_IN) {
+ wake_poller_cmd = wake_poller;
+ if(state->type == ERTS_EV_TYPE_DRV_SEL && state->driver.select->inps ) {
+ inps = state->driver.select->inps;
+ } else {
+ inps = ERTS_POLLSET_IX(((ErtsRunQueue *) erts_smp_atomic_read_nob(
+ &erts_drvport2port(ix)->run_queue))->ix)->ps;
+ }
+ new_events = ERTS_CIO_POLL_CTL(inps, state->fd, ERTS_POLL_EV_IN, on, &wake_poller_cmd);
+ wake_poller_res |= wake_poller_cmd;
+ }
+ if (ctl_events & ERTS_POLL_EV_OUT) {
+ wake_poller_cmd = wake_poller;
+ if(state->type == ERTS_EV_TYPE_DRV_SEL && state->driver.select->outps ) {
+ outps = state->driver.select->outps;
+ } else {
+ outps = ERTS_POLLSET_IX(((ErtsRunQueue *) erts_smp_atomic_read_nob(
+ &erts_drvport2port(ix)->run_queue))->ix)->ps;
+ }
+ new_events = ERTS_CIO_POLL_CTL(outps, state->fd, ERTS_POLL_EV_OUT, on, &wake_poller_cmd);
+ wake_poller_res |= wake_poller_cmd;
+ }
+ wake_poller = wake_poller_res;
+ }
+#else
+ new_events = ERTS_CIO_POLL_CTL(inps, state->fd, ctl_events, on, &wake_poller);
+#endif
if (new_events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) {
if (state->type == ERTS_EV_TYPE_DRV_SEL && !state->events) {
@@ -625,15 +972,32 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
dsdsp->outport = NIL;
erts_port_task_handle_init(&dsdsp->intask);
erts_port_task_handle_init(&dsdsp->outtask);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ dsdsp->inps = NULL;
+ dsdsp->outps = NULL;
+#endif
ASSERT(state->driver.select == NULL);
state->driver.select = dsdsp;
state->type = ERTS_EV_TYPE_DRV_SEL;
}
ASSERT(state->type == ERTS_EV_TYPE_DRV_SEL);
- if (ctl_events & ERTS_POLL_EV_IN)
+ if (ctl_events & ERTS_POLL_EV_IN) {
state->driver.select->inport = id;
- if (ctl_events & ERTS_POLL_EV_OUT)
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ state->driver.select->inps = inps;
+#endif
+ }
+ if (ctl_events & ERTS_POLL_EV_OUT) {
state->driver.select->outport = id;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ state->driver.select->outps = outps;
+#endif
+ }
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ if (ctl_events & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
+ add_selected_fd(ix, state);
+ }
+#endif
if (mode & ERL_DRV_USE) {
state->flags |= ERTS_EV_FLAG_USED;
}
@@ -643,16 +1007,30 @@ ERTS_CIO_EXPORT(driver_select)(ErlDrvPort ix,
if (ctl_events & ERTS_POLL_EV_IN) {
abort_tasks(state, ERL_DRV_READ);
state->driver.select->inport = NIL;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ state->driver.select->inps = NULL;
+#endif
}
if (ctl_events & ERTS_POLL_EV_OUT) {
abort_tasks(state, ERL_DRV_WRITE);
state->driver.select->outport = NIL;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ state->driver.select->outps = NULL;
+#endif
}
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ if (ctl_events & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
+ if( state->driver.select->inport != id &&
+ state->driver.select->outport != id) {
+ del_selected_fd(ix, state);
+ }
+ }
+#endif
if (new_events == 0) {
ASSERT(!erts_port_task_is_scheduled(&state->driver.select->intask));
ASSERT(!erts_port_task_is_scheduled(&state->driver.select->outtask));
if (old_events != 0) {
- remember_removed(state, &pollset);
+ remember_removed(state, &deselected);
}
if ((mode & ERL_DRV_USE) || !(state->flags & ERTS_EV_FLAG_USED)) {
state->type = ERTS_EV_TYPE_NONE;
@@ -723,10 +1101,15 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
ErtsDrvEventState *state;
int do_wake = 0;
int ret;
+ ErtsPollSet ps;
ERTS_SMP_LC_ASSERT(erts_drvport2port(ix)
&& erts_lc_is_port_locked(erts_drvport2port(ix)));
+#if !defined(ERTS_SMP) || !defined(ERTS_POLLSET_PER_SCHEDULER)
+ ps = ERTS_POLLSET_IX(0)->ps;
+#endif
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
if ((unsigned)fd >= (unsigned)erts_smp_atomic_read_nob(&drv_ev_state_len)) {
if (fd < 0)
@@ -769,6 +1152,15 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
ASSERT(state->type == ERTS_EV_TYPE_DRV_EV
|| state->type == ERTS_EV_TYPE_NONE);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ if(state->type == ERTS_EV_TYPE_DRV_EV && state->driver.event->ps ) {
+ ps = state->driver.event->ps;
+ } else {
+ ps = ERTS_POLLSET_IX(((ErtsRunQueue *) erts_smp_atomic_read_nob(
+ &erts_drvport2port(ix)->run_queue))->ix)->ps;
+ }
+#endif
+
events = state->events;
if (!event_data) {
@@ -781,14 +1173,14 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
}
if (add_events) {
- events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, add_events, 1, &do_wake);
+ events = ERTS_CIO_POLL_CTL(ps, state->fd, add_events, 1, &do_wake);
if (events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) {
ret = -1;
goto done;
}
}
if (remove_events) {
- events = ERTS_CIO_POLL_CTL(pollset.ps, state->fd, remove_events, 0, &do_wake);
+ events = ERTS_CIO_POLL_CTL(ps, state->fd, remove_events, 0, &do_wake);
if (events & (ERTS_POLL_EV_ERR|ERTS_POLL_EV_NVAL)) {
ret = -1;
goto done;
@@ -806,7 +1198,13 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
erts_port_task_handle_init(&state->driver.event->task);
state->driver.event->port = id;
state->driver.event->removed_events = (ErtsPollEvents) 0;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ state->driver.event->ps = ps;
+#endif
state->type = ERTS_EV_TYPE_DRV_EV;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ add_selected_fd(ix, state);
+#endif
}
state->driver.event->data = event_data;
}
@@ -818,7 +1216,10 @@ ERTS_CIO_EXPORT(driver_event)(ErlDrvPort ix,
}
state->driver.select = NULL;
state->type = ERTS_EV_TYPE_NONE;
- remember_removed(state, &pollset);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ del_selected_fd(ix, state);
+#endif
+ remember_removed(state, &deselected);
}
state->events = events;
ASSERT(event_data ? events == event_data->events : events == 0);
@@ -1095,38 +1496,38 @@ event_large_fd_error(ErlDrvPort ix, ErtsSysFdType fd, ErlDrvEventData event_data
#endif
static ERTS_INLINE void
-iready(Eterm id, ErtsDrvEventState *state)
+iready(Eterm id, ErtsDrvEventState *state, int ix)
{
if (erts_port_task_schedule(id,
&state->driver.select->intask,
ERTS_PORT_TASK_INPUT,
(ErlDrvEvent) state->fd,
- NULL) != 0) {
+ NULL, ix) != 0) {
stale_drv_select(id, state, ERL_DRV_READ);
}
}
static ERTS_INLINE void
-oready(Eterm id, ErtsDrvEventState *state)
+oready(Eterm id, ErtsDrvEventState *state, int ix)
{
if (erts_port_task_schedule(id,
&state->driver.select->outtask,
ERTS_PORT_TASK_OUTPUT,
(ErlDrvEvent) state->fd,
- NULL) != 0) {
+ NULL, ix) != 0) {
stale_drv_select(id, state, ERL_DRV_WRITE);
}
}
#if ERTS_CIO_HAVE_DRV_EVENT
static ERTS_INLINE void
-eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data)
+eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data, int ix)
{
if (erts_port_task_schedule(id,
&state->driver.event->task,
ERTS_PORT_TASK_EVENT,
(ErlDrvEvent) state->fd,
- event_data) != 0) {
+ event_data, ix) != 0) {
stale_drv_select(id, state, 0);
}
}
@@ -1134,34 +1535,52 @@ eready(Eterm id, ErtsDrvEventState *state, ErlDrvEventData event_data)
static void bad_fd_in_pollset( ErtsDrvEventState *, Eterm, Eterm, ErtsPollEvents);
+
+void
+ERTS_CIO_EXPORT(erts_check_io_interrupt_rq)(int ix, int set)
+{
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ ERTS_CIO_POLL_INTR(ERTS_POLLSET_IX(ix)->ps, set);
+#else
+ ERTS_CIO_POLL_INTR(ERTS_POLLSET_IX(0)->ps, set);
+#endif
+}
+
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
void
ERTS_CIO_EXPORT(erts_check_io_async_sig_interrupt)(void)
{
- ERTS_CIO_POLL_AS_INTR(pollset.ps);
+ ERTS_CIO_POLL_AS_INTR(ERTS_POLLSET_IX(0)->ps); /* rq 0 never blocks */
}
#endif
void
ERTS_CIO_EXPORT(erts_check_io_interrupt)(int set)
{
- ERTS_CIO_POLL_INTR(pollset.ps, set);
+ ERTS_CIO_POLL_INTR(ERTS_POLLSET_IX(0)->ps, set); /* rq 0 never blocks */
}
void
ERTS_CIO_EXPORT(erts_check_io_interrupt_timed)(int set,
erts_short_time_t msec)
{
- ERTS_CIO_POLL_INTR_TMD(pollset.ps, set, msec);
+ ERTS_CIO_POLL_INTR_TMD(ERTS_POLLSET_IX(0)->ps, set, msec); /* rq 0 never blocks */
}
void
-ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
+ERTS_CIO_EXPORT(erts_check_io)(int ix, int do_wait)
{
+ struct pollset_info *psi;
ErtsPollResFd pollres[256];
int pollres_len;
SysTimeval wait_time;
int poll_ret, i;
+
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ psi = ERTS_POLLSET_IX(ix);
+#else
+ psi = ERTS_POLLSET_IX(0);
+#endif
restart:
@@ -1183,15 +1602,17 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#endif
pollres_len = sizeof(pollres)/sizeof(ErtsPollResFd);
- erts_smp_atomic_set_nob(&pollset.in_poll_wait, 1);
+ erts_smp_atomic_set_nob(&psi->in_poll_wait, 1);
- poll_ret = ERTS_CIO_POLL_WAIT(pollset.ps, pollres, &pollres_len, &wait_time);
+ poll_ret = ERTS_CIO_POLL_WAIT(psi->ps, pollres, &pollres_len, &wait_time);
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
+#ifndef ERTS_SMP
erts_deliver_time(); /* sync the machine's idea of time */
+#endif
#ifdef ERTS_BREAK_REQUESTED
if (ERTS_BREAK_REQUESTED)
@@ -1199,8 +1620,8 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#endif
if (poll_ret != 0) {
- erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
- forget_removed(&pollset);
+ erts_smp_atomic_set_nob(&psi->in_poll_wait, 0);
+ forget_removed(&deselected);
if (poll_ret == EAGAIN) {
goto restart;
}
@@ -1249,6 +1670,16 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
revent_mask |= state->events;
revents = pollres[i].events & revent_mask;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ /* port may be deselected */
+ if (revents & ERTS_POLL_EV_IN && is_nil(state->driver.select->inport)) {
+ revents &= ~ERTS_POLL_EV_IN;
+ }
+ if (revents & ERTS_POLL_EV_OUT && is_nil(state->driver.select->outport)) {
+ revents &= ~ERTS_POLL_EV_OUT;
+ }
+#endif
+
if (revents & ERTS_POLL_EV_ERR) {
/*
* Let the driver handle the error condition. Only input,
@@ -1260,15 +1691,15 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
if ((revents & ERTS_POLL_EV_IN)
|| (!(revents & ERTS_POLL_EV_OUT)
&& state->events & ERTS_POLL_EV_IN)) {
- iready(state->driver.select->inport, state);
+ iready(state->driver.select->inport, state, ix);
}
else if (state->events & ERTS_POLL_EV_OUT) {
- oready(state->driver.select->outport, state);
+ oready(state->driver.select->outport, state, ix);
}
}
else if (revents & (ERTS_POLL_EV_IN|ERTS_POLL_EV_OUT)) {
if (revents & ERTS_POLL_EV_OUT) {
- oready(state->driver.select->outport, state);
+ oready(state->driver.select->outport, state, ix);
}
/* Someone might have deselected input since revents
was read (true also on the non-smp emulator since
@@ -1276,7 +1707,7 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
revents... */
revents &= ~(~state->events & ERTS_POLL_EV_IN);
if (revents & ERTS_POLL_EV_IN) {
- iready(state->driver.select->inport, state);
+ iready(state->driver.select->inport, state, ix);
}
}
else if (revents & ERTS_POLL_EV_NVAL) {
@@ -1297,12 +1728,19 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
event_data = state->driver.event->data;
revents = pollres[i].events;
revents &= ~state->driver.event->removed_events;
+
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ /* the port may have been deselected */
+ if(is_nil(state->driver.event->port)) {
+ goto next_pollres;
+ }
+#endif
if (revents) {
event_data->events = state->events;
event_data->revents = revents;
- eready(state->driver.event->port, state, event_data);
+ eready(state->driver.event->port, state, event_data, ix);
}
break;
}
@@ -1330,8 +1768,8 @@ ERTS_CIO_EXPORT(erts_check_io)(int do_wait)
#endif
}
- erts_smp_atomic_set_nob(&pollset.in_poll_wait, 0);
- forget_removed(&pollset);
+ erts_smp_atomic_set_nob(&psi->in_poll_wait, 0);
+ forget_removed(&deselected);
}
static void
@@ -1445,14 +1883,28 @@ static void drv_ev_state_free(void *des)
void
ERTS_CIO_EXPORT(erts_init_check_io)(void)
{
- erts_smp_atomic_init_nob(&pollset.in_poll_wait, 0);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ int n = erts_no_schedulers;
+#else
+ int n = 1;
+#endif
+ int ix;
+
ERTS_CIO_POLL_INIT();
- pollset.ps = ERTS_CIO_NEW_POLLSET();
+
+ erts_pollsets = erts_alloc(ERTS_ALC_T_UNDEF,
+ n * sizeof(struct pollset_info));
+ erts_no_pollsets = n;
+ for (ix = 0; ix < n; ix ++) {
+ struct pollset_info *pollset = ERTS_POLLSET_IX(ix);
+ erts_smp_atomic_init_nob(&pollset->in_poll_wait, 0);
+ pollset->ps = ERTS_CIO_NEW_POLLSET();
+ }
#ifdef ERTS_SMP
init_removed_fd_alloc();
- pollset.removed_list = NULL;
- erts_smp_spinlock_init(&pollset.removed_list_lock,
+ deselected.removed_list = NULL;
+ erts_smp_spinlock_init(&deselected.removed_list_lock,
"pollset_rm_list");
{
int i;
@@ -1465,6 +1917,7 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void)
}
}
#endif
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
max_fds = ERTS_CIO_POLL_MAX_FDS();
erts_smp_atomic_init_nob(&drv_ev_state_len, 0);
@@ -1485,6 +1938,12 @@ ERTS_CIO_EXPORT(erts_init_check_io)(void)
DRV_EV_STATE_HTAB_SIZE, hf);
}
#endif
+
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ init_selected_fd_alloc();
+ sfl_tab = NULL;
+ erts_smp_mtx_init(&sfl_tab_lock, "sfl_tab_lock");
+#endif
}
int
@@ -1500,10 +1959,14 @@ ERTS_CIO_EXPORT(erts_check_io_max_files)(void)
Uint
ERTS_CIO_EXPORT(erts_check_io_size)(void)
{
- Uint res;
+ Uint res = 0;
ErtsPollInfo pi;
- ERTS_CIO_POLL_INFO(pollset.ps, &pi);
- res = pi.memory_size;
+
+ int ix;
+ for (ix = 0; ix < erts_no_pollsets; ix++ ) {
+ ERTS_CIO_POLL_INFO(ERTS_POLLSET_IX(ix)->ps, &pi);
+ res += pi.memory_size;
+ }
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
res += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len);
#else
@@ -1525,12 +1988,28 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
{
Process *p = (Process *) proc;
Eterm tags[15], values[15], res;
- Uint sz, *szp, *hp, **hpp, memory_size;
+ Uint sz, *szp, *hp, **hpp;
+ Uint memory_size = 0, poll_set_size = 0, fallback_poll_set_size = 0, pending_updates = 0;
+#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+ Uint no_avoided_wakeups = 0, no_avoided_interrupts = 0, no_interrupt_timed = 0;
+#endif
Sint i;
ErtsPollInfo pi;
- ERTS_CIO_POLL_INFO(pollset.ps, &pi);
- memory_size = pi.memory_size;
+ int ix;
+ for (ix = 0 ; ix < erts_no_pollsets; ix++ ) {
+ ERTS_CIO_POLL_INFO(ERTS_POLLSET_IX(ix)->ps, &pi);
+ memory_size += pi.memory_size;
+ poll_set_size += pi.poll_set_size;
+ fallback_poll_set_size += pi.fallback_poll_set_size;
+ pending_updates += pi.pending_updates;
+#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
+ no_avoided_wakeups += pi.no_avoided_wakeups;
+ no_avoided_interrupts += pi.no_avoided_interrupts;
+ no_interrupt_timed += pi.no_interrupt_timed;
+#endif
+ }
+
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
memory_size += sizeof(ErtsDrvEventState) * erts_smp_atomic_read_nob(&drv_ev_state_len);
#else
@@ -1569,11 +2048,11 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
values[i++] = erts_bld_uint(hpp, szp, memory_size);
tags[i] = erts_bld_atom(hpp, szp, "total_poll_set_size");
- values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.poll_set_size);
+ values[i++] = erts_bld_uint(hpp, szp, poll_set_size);
if (pi.fallback) {
tags[i] = erts_bld_atom(hpp, szp, "fallback_poll_set_size");
- values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.fallback_poll_set_size);
+ values[i++] = erts_bld_uint(hpp, szp, fallback_poll_set_size);
}
tags[i] = erts_bld_atom(hpp, szp, "lazy_updates");
@@ -1581,7 +2060,7 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
if (pi.lazy_updates) {
tags[i] = erts_bld_atom(hpp, szp, "pending_updates");
- values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.pending_updates);
+ values[i++] = erts_bld_uint(hpp, szp, pending_updates);
}
tags[i] = erts_bld_atom(hpp, szp, "batch_updates");
@@ -1595,13 +2074,13 @@ ERTS_CIO_EXPORT(erts_check_io_info)(void *proc)
#ifdef ERTS_POLL_COUNT_AVOIDED_WAKEUPS
tags[i] = erts_bld_atom(hpp, szp, "no_avoided_wakeups");
- values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.no_avoided_wakeups);
+ values[i++] = erts_bld_uint(hpp, szp, no_avoided_wakeups);
tags[i] = erts_bld_atom(hpp, szp, "no_avoided_interrupts");
- values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.no_avoided_interrupts);
+ values[i++] = erts_bld_uint(hpp, szp, no_avoided_interrupts);
tags[i] = erts_bld_atom(hpp, szp, "no_interrupt_timed");
- values[i++] = erts_bld_uint(hpp, szp, (Uint) pi.no_interrupt_timed);
+ values[i++] = erts_bld_uint(hpp, szp, no_interrupt_timed);
#endif
res = erts_bld_2tup_list(hpp, szp, i, tags, values);
@@ -1887,6 +2366,8 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
int fd, len;
#endif
IterDebugCounters counters;
+ ErtsPollEvents *mask;
+ int i, ix;
ErtsDrvEventState null_des;
null_des.driver.select = NULL;
@@ -1904,7 +2385,18 @@ ERTS_CIO_EXPORT(erts_check_io_debug)(void)
#ifdef ERTS_SYS_CONTINOUS_FD_NUMBERS
counters.epep = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollEvents)*max_fds);
- ERTS_POLL_EXPORT(erts_poll_get_selected_events)(pollset.ps, counters.epep, max_fds);
+ mask = erts_alloc(ERTS_ALC_T_TMP, sizeof(ErtsPollEvents)*max_fds);
+ for (ix = 0; ix < erts_no_pollsets; ix++ ) {
+ for (i = 0; i < max_fds; i++ ) {
+ mask[i] = 0;
+ }
+ ERTS_POLL_EXPORT(erts_poll_get_selected_events)(
+ ERTS_POLLSET_IX(ix)->ps, mask, max_fds);
+ for (i = 0; i < max_fds; i++ ) {
+ counters.epep[i] |= mask[i];
+ }
+ }
+ erts_free(ERTS_ALC_T_TMP, (void*) mask);
counters.internal_fds = 0;
#endif
counters.used_fds = 0;
23 erts/emulator/sys/common/erl_check_io.h
@@ -34,6 +34,10 @@ int driver_select_kp(ErlDrvPort, ErlDrvEvent, int, int);
int driver_select_nkp(ErlDrvPort, ErlDrvEvent, int, int);
int driver_event_kp(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
int driver_event_nkp(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+int erts_check_io_change_port_pollset_kp(Eterm, int);
+int erts_check_io_change_port_pollset_nkp(Eterm, int);
+#endif
Uint erts_check_io_size_kp(void);
Uint erts_check_io_size_nkp(void);
Eterm erts_check_io_info_kp(void *);
@@ -44,12 +48,14 @@ int erts_check_io_max_files_nkp(void);
void erts_check_io_async_sig_interrupt_kp(void);
void erts_check_io_async_sig_interrupt_nkp(void);
#endif
+void erts_check_io_interrupt_rq_kp(int, int);
+void erts_check_io_interrupt_rq_nkp(int, int);
void erts_check_io_interrupt_kp(int);
void erts_check_io_interrupt_nkp(int);
void erts_check_io_interrupt_timed_kp(int, erts_short_time_t);
void erts_check_io_interrupt_timed_nkp(int, erts_short_time_t);
-void erts_check_io_kp(int);
-void erts_check_io_nkp(int);
+void erts_check_io_kp(int, int);
+void erts_check_io_nkp(int, int);
void erts_init_check_io_kp(void);
void erts_init_check_io_nkp(void);
int erts_check_io_debug_kp(void);
@@ -57,15 +63,19 @@ int erts_check_io_debug_nkp(void);
#else /* !ERTS_ENABLE_KERNEL_POLL */
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+int erts_check_io_change_port_pollset(Eterm, int);
+#endif
Uint erts_check_io_size(void);
Eterm erts_check_io_info(void *);
int erts_check_io_max_files(void);
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
void erts_check_io_async_sig_interrupt(void);
#endif
+void erts_check_io_interrupt_rq(int, int);
void erts_check_io_interrupt(int);
void erts_check_io_interrupt_timed(int, erts_short_time_t);
-void erts_check_io(int);
+void erts_check_io(int, int);
void erts_init_check_io(void);
#endif
@@ -92,6 +102,9 @@ typedef struct {
ErlDrvEventData data;
ErtsPollEvents removed_events;
ErtsPortTaskHandle task;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ ErtsPollSet ps;
+#endif
} ErtsDrvEventDataState;
typedef struct {
@@ -99,5 +112,9 @@ typedef struct {
Eterm outport;
ErtsPortTaskHandle intask;
ErtsPortTaskHandle outtask;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ ErtsPollSet inps;
+ ErtsPollSet outps;
+#endif
} ErtsDrvSelectDataState;
#endif /* #ifndef ERL_CHECK_IO_INTERNAL__ */
44 erts/emulator/sys/unix/sys.c
@@ -263,10 +263,14 @@ int erts_use_kernel_poll = 0;
struct {
int (*select)(ErlDrvPort, ErlDrvEvent, int, int);
int (*event)(ErlDrvPort, ErlDrvEvent, ErlDrvEventData);
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ int (*check_io_change_port_pollset)(Eterm, int);
+#endif
void (*check_io_as_interrupt)(void);
+ void (*check_io_interrupt_rq)(int, int);
void (*check_io_interrupt)(int);
void (*check_io_interrupt_tmd)(int, erts_short_time_t);
- void (*check_io)(int);
+ void (*check_io)(int, int);
Uint (*size)(void);
Eterm (*info)(void *);
int (*check_io_debug)(void);
@@ -303,9 +307,13 @@ init_check_io(void)
if (erts_use_kernel_poll) {
io_func.select = driver_select_kp;
io_func.event = driver_event_kp;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ io_func.check_io_change_port_pollset = erts_check_io_change_port_pollset_kp;
+#endif
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_kp;
#endif
+ io_func.check_io_interrupt_rq = erts_check_io_interrupt_rq_kp;
io_func.check_io_interrupt = erts_check_io_interrupt_kp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_kp;
io_func.check_io = erts_check_io_kp;
@@ -318,9 +326,13 @@ init_check_io(void)
else {
io_func.select = driver_select_nkp;
io_func.event = driver_event_nkp;
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+ io_func.check_io_change_port_pollset = erts_check_io_change_port_pollset_nkp;
+#endif
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
io_func.check_io_as_interrupt = erts_check_io_async_sig_interrupt_nkp;
#endif
+ io_func.check_io_interrupt_rq = erts_check_io_interrupt_rq_nkp;
io_func.check_io_interrupt = erts_check_io_interrupt_nkp;
io_func.check_io_interrupt_tmd = erts_check_io_interrupt_timed_nkp;
io_func.check_io = erts_check_io_nkp;
@@ -332,11 +344,15 @@ init_check_io(void)
}
}
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+#define ERTS_CHK_IO_CHANGE_PORT_POLLSET (*io_func.check_io_change_port_pollset)
+#endif
#ifdef ERTS_POLL_NEED_ASYNC_INTERRUPT_SUPPORT
#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_as_interrupt)()
#else
#define ERTS_CHK_IO_AS_INTR() (*io_func.check_io_interrupt)(1)
#endif
+#define ERTS_CHK_IO_INTR_RQ (*io_func.check_io_interrupt_rq)
#define ERTS_CHK_IO_INTR (*io_func.check_io_interrupt)
#define ERTS_CHK_IO_INTR_TMD (*io_func.check_io_interrupt_tmd)
#define ERTS_CHK_IO (*io_func.check_io)
@@ -351,11 +367,16 @@ init_check_io(void)
max_files = erts_check_io_max_files();
}
+#if defined(ERTS_SMP) && defined(ERTS_POLLSET_PER_SCHEDULER)
+#define ERTS_CHK_IO_CHANGE_PORT_POLLSET erts_check_io_change_port_pollset
+#endif
+