From 8f077b31e910580db86f2a197c618d079b8f869d Mon Sep 17 00:00:00 2001
From: David Marchand
Date: Tue, 9 Jul 2019 18:19:55 +0200
Subject: [PATCH] dpif-netdev: Trigger parallel pmd reloads.

pmd reloads are currently serialised: each step that calls
reload_affected_pmds() waits for every pmd in turn. Any pmd that is
busy processing packets, waiting on a mutex, etc. makes the other pmd
threads wait, for a delay that can be nondeterministic as the syscalls
add up.

Switch to a small busy loop in the control thread that uses the
existing per-pmd 'reload' boolean. The memory ordering on this atomic
is release-acquire, which gives explicit synchronisation between the
pmd threads and the control thread.

Signed-off-by: David Marchand
Acked-by: Eelco Chaudron
Acked-by: Ilya Maximets
Signed-off-by: Ian Stokes
---
 lib/dpif-netdev.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 8eeec631791..e0ffa4a5b33 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -649,9 +649,6 @@ struct dp_netdev_pmd_thread {
     struct ovs_refcount ref_cnt;    /* Every reference must be refcount'ed. */
     struct cmap_node node;          /* In 'dp->poll_threads'. */
 
-    pthread_cond_t cond;            /* For synchronizing pmd thread reload. */
-    struct ovs_mutex cond_mutex;    /* Mutex for condition variable. */
-
     /* Per thread exact-match cache.  Note, the instance for cpu core
      * NON_PMD_CORE_ID can be accessed by multiple threads, and thusly
      * need to be protected by 'non_pmd_mutex'.  Every other instance
@@ -1758,11 +1755,8 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
         return;
     }
 
-    ovs_mutex_lock(&pmd->cond_mutex);
     seq_change(pmd->reload_seq);
     atomic_store_explicit(&pmd->reload, true, memory_order_release);
-    ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
-    ovs_mutex_unlock(&pmd->cond_mutex);
 }
 
 static uint32_t
@@ -4655,6 +4649,19 @@ reload_affected_pmds(struct dp_netdev *dp)
         if (pmd->need_reload) {
             flow_mark_flush(pmd);
             dp_netdev_reload_pmd__(pmd);
+        }
+    }
+
+    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+        if (pmd->need_reload) {
+            if (pmd->core_id != NON_PMD_CORE_ID) {
+                bool reload;
+
+                do {
+                    atomic_read_explicit(&pmd->reload, &reload,
+                                         memory_order_acquire);
+                } while (reload);
+            }
             pmd->need_reload = false;
         }
     }
@@ -5842,11 +5849,8 @@ dpif_netdev_enable_upcall(struct dpif *dpif)
 static void
 dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
 {
-    ovs_mutex_lock(&pmd->cond_mutex);
-    atomic_store_relaxed(&pmd->reload, false);
     pmd->last_reload_seq = seq_read(pmd->reload_seq);
-    xpthread_cond_signal(&pmd->cond);
-    ovs_mutex_unlock(&pmd->cond_mutex);
+    atomic_store_explicit(&pmd->reload, false, memory_order_release);
 }
 
 /* Finds and refs the dp_netdev_pmd_thread on core 'core_id'.  Returns
@@ -5931,8 +5935,6 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     pmd->reload_seq = seq_create();
     pmd->last_reload_seq = seq_read(pmd->reload_seq);
     atomic_init(&pmd->reload, false);
-    xpthread_cond_init(&pmd->cond, NULL);
-    ovs_mutex_init(&pmd->cond_mutex);
     ovs_mutex_init(&pmd->flow_mutex);
     ovs_mutex_init(&pmd->port_mutex);
     cmap_init(&pmd->flow_table);
@@ -5975,8 +5977,6 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
     cmap_destroy(&pmd->flow_table);
    ovs_mutex_destroy(&pmd->flow_mutex);
     seq_destroy(pmd->reload_seq);
-    xpthread_cond_destroy(&pmd->cond);
-    ovs_mutex_destroy(&pmd->cond_mutex);
     ovs_mutex_destroy(&pmd->port_mutex);
     free(pmd);
 }
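
Note on the synchronisation pattern: the loop added to
reload_affected_pmds() is a release/acquire handshake on a single
atomic boolean. Below is a minimal standalone sketch of the same idea,
assuming plain C11 <stdatomic.h> (atomic_load_explicit) in place of
OVS's atomic_read_explicit() wrapper; the names reload_flag and
worker_main are hypothetical, this is not OVS code. The control thread
publishes the request with a release store and spins with acquire
loads until the worker clears the flag.

/* Build with: cc -std=c11 -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool reload_flag = false;

/* Worker (pmd-like) thread: poll for a reload request, perform the
 * (stubbed) reconfiguration, then clear the flag with a release store
 * so that the control thread's acquire load sees all prior writes. */
static void *
worker_main(void *arg)
{
    (void) arg;

    while (!atomic_load_explicit(&reload_flag, memory_order_acquire)) {
        /* Keep polling, as a pmd does between packet bursts. */
    }
    /* ... reload port/queue configuration here ... */
    atomic_store_explicit(&reload_flag, false, memory_order_release);
    return NULL;
}

int
main(void)
{
    pthread_t worker;
    bool reload;

    pthread_create(&worker, NULL, worker_main, NULL);

    /* Control thread: publish the request with a release store, then
     * busy-wait with acquire loads until the worker acknowledges it,
     * mirroring the loop added to reload_affected_pmds(). */
    atomic_store_explicit(&reload_flag, true, memory_order_release);
    do {
        reload = atomic_load_explicit(&reload_flag, memory_order_acquire);
    } while (reload);

    pthread_join(worker, NULL);
    printf("reload acknowledged\n");
    return 0;
}

With several workers, the control thread sets all the flags first and
only then waits on each one in a second pass, which is what lets the
reloads proceed in parallel instead of one pmd at a time.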