dpif-netdev: Trigger parallel pmd reloads.
pmd reloads are currently serialised: reload_affected_pmds triggers and
waits for one pmd at a time.
Any pmd busy processing packets, waiting on a mutex, etc. makes the
other pmd threads wait, and the delay can become non-deterministic as
syscalls add up.

Switch to a small busy loop on the control thread, using the existing
per-pmd 'reload' boolean.

The memory order on this atomic is release-acquire, so that there is
explicit synchronisation between the pmd threads and the control
thread, as sketched below.
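
As an illustration of the handshake this commit relies on, here is a
minimal, self-contained sketch using C11 atomics and pthreads. The names
(pmd_main, reload) are hypothetical stand-ins, not OVS code: the control
thread publishes the request with a release store, and the pmd thread
acknowledges with a release store of its own.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool reload;

/* pmd side: poll for the request, reconfigure, then ack. */
static void *
pmd_main(void *arg)
{
    (void) arg;
    while (!atomic_load_explicit(&reload, memory_order_acquire)) {
        ;   /* In OVS this would be the packet-processing loop. */
    }
    /* ... apply the new configuration here ... */
    atomic_store_explicit(&reload, false, memory_order_release);
    return NULL;
}

int
main(void)
{
    pthread_t pmd;

    atomic_init(&reload, false);
    pthread_create(&pmd, NULL, pmd_main, NULL);

    /* Control side: the release store pairs with the pmd's acquire
     * load, and the pmd's release store of 'false' pairs with the
     * acquire loads in this busy loop, so everything the pmd wrote
     * during the reload is visible once the loop exits. */
    atomic_store_explicit(&reload, true, memory_order_release);
    while (atomic_load_explicit(&reload, memory_order_acquire)) {
        ;
    }

    pthread_join(pmd, NULL);
    printf("reload done\n");
    return 0;
}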

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
Acked-by: Ilya Maximets <i.maximets@samsung.com>
Signed-off-by: Ian Stokes <ian.stokes@intel.com>
david-marchand authored and istokes committed Jul 10, 2019
1 parent 299c8d6 commit 8f077b3
Showing 1 changed file with 14 additions and 14 deletions.
lib/dpif-netdev.c: 14 additions & 14 deletions
@@ -649,9 +649,6 @@ struct dp_netdev_pmd_thread {
     struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
     struct cmap_node node;          /* In 'dp->poll_threads'. */
 
-    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
-    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */
-
     /* Per thread exact-match cache.  Note, the instance for cpu core
      * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
      * need to be protected by 'non_pmd_mutex'.  Every other instance
@@ -1758,11 +1755,8 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
         return;
     }
 
-    ovs_mutex_lock(&pmd->cond_mutex);
     seq_change(pmd->reload_seq);
     atomic_store_explicit(&pmd->reload, true, memory_order_release);
-    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
-    ovs_mutex_unlock(&pmd->cond_mutex);
 }
 
 static uint32_t
@@ -4655,6 +4649,19 @@ reload_affected_pmds(struct dp_netdev *dp)
         if (pmd->need_reload) {
             flow_mark_flush(pmd);
             dp_netdev_reload_pmd__(pmd);
+        }
+    }
+
+    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+        if (pmd->need_reload) {
+            if (pmd->core_id != NON_PMD_CORE_ID) {
+                bool reload;
+
+                do {
+                    atomic_read_explicit(&pmd->reload, &reload,
+                                         memory_order_acquire);
+                } while (reload);
+            }
             pmd->need_reload = false;
         }
     }
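
The hunk above is the heart of the change: the first loop triggers every
reload without waiting, and a second loop busy-waits on each pmd's
'reload' flag, so the total stall is bounded by the slowest pmd rather
than the sum of all reload times. A runnable sketch of that two-pass
structure, with hypothetical names (fake_pmd, pmd_main, N_PMDS) standing
in for the real cmap of pmd threads:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define N_PMDS 4

struct fake_pmd {
    atomic_bool reload;
    pthread_t thread;
};

static struct fake_pmd pmds[N_PMDS];

static void *
pmd_main(void *arg)
{
    struct fake_pmd *pmd = arg;

    while (!atomic_load_explicit(&pmd->reload, memory_order_acquire)) {
        ;   /* Normally: poll rx queues. */
    }
    /* ... reload work happens here, concurrently on every pmd ... */
    atomic_store_explicit(&pmd->reload, false, memory_order_release);
    return NULL;
}

int
main(void)
{
    int i;

    for (i = 0; i < N_PMDS; i++) {
        atomic_init(&pmds[i].reload, false);
        pthread_create(&pmds[i].thread, NULL, pmd_main, &pmds[i]);
    }

    /* Pass 1: trigger all reloads without waiting. */
    for (i = 0; i < N_PMDS; i++) {
        atomic_store_explicit(&pmds[i].reload, true, memory_order_release);
    }

    /* Pass 2: busy-wait for each ack; the wait overlaps across pmds. */
    for (i = 0; i < N_PMDS; i++) {
        while (atomic_load_explicit(&pmds[i].reload,
                                    memory_order_acquire)) {
            ;
        }
    }

    for (i = 0; i < N_PMDS; i++) {
        pthread_join(pmds[i].thread, NULL);
    }
    printf("all %d pmds reloaded\n", N_PMDS);
    return 0;
}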
@@ -5842,11 +5849,8 @@ dpif_netdev_enable_upcall(struct dpif *dpif)
 static void
 dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
 {
-    ovs_mutex_lock(&pmd->cond_mutex);
-    atomic_store_relaxed(&pmd->reload, false);
     pmd->last_reload_seq = seq_read(pmd->reload_seq);
-    xpthread_cond_signal(&pmd->cond);
-    ovs_mutex_unlock(&pmd->cond_mutex);
+    atomic_store_explicit(&pmd->reload, false, memory_order_release);
 }
 
 /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
@@ -5931,8 +5935,6 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     pmd->reload_seq = seq_create();
     pmd->last_reload_seq = seq_read(pmd->reload_seq);
     atomic_init(&pmd->reload, false);
-    xpthread_cond_init(&pmd->cond, NULL);
-    ovs_mutex_init(&pmd->cond_mutex);
     ovs_mutex_init(&pmd->flow_mutex);
     ovs_mutex_init(&pmd->port_mutex);
     cmap_init(&pmd->flow_table);
@@ -5975,8 +5977,6 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
     cmap_destroy(&pmd->flow_table);
     ovs_mutex_destroy(&pmd->flow_mutex);
     seq_destroy(pmd->reload_seq);
-    xpthread_cond_destroy(&pmd->cond);
-    ovs_mutex_destroy(&pmd->cond_mutex);
     ovs_mutex_destroy(&pmd->port_mutex);
     free(pmd);
 }
