Skip to content

Commit

Permalink
dpif-netdev: Convert exit latch to flag.
Browse files Browse the repository at this point in the history
No need for a latch here since we don't have to wait.
A simple boolean flag is enough.

The memory ordering on the reload flag is changed to release-acquire
ordering, so that it serves as a synchronisation point between the pmd
threads and the control thread requesting their termination.

Fixes: e4cfed3 ("dpif-netdev: Add poll-mode-device thread.")
Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
Acked-by: Ian Stokes <ian.stokes@intel.com>
Acked-by: Ilya Maximets <i.maximets@samsung.com>
Signed-off-by: 0-day Robot <robot@bytheb.org>
  • Loading branch information
david-marchand authored and ovsrobot committed Jul 9, 2019
1 parent cfc06fb commit d333b02
Showing 1 changed file with 6 additions and 8 deletions.
14 changes: 6 additions & 8 deletions lib/dpif-netdev.c
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,6 @@
#include "hmapx.h"
#include "id-pool.h"
#include "ipf.h"
#include "latch.h"
#include "netdev.h"
#include "netdev-offload.h"
#include "netdev-provider.h"
Expand Down Expand Up @@ -684,10 +683,10 @@ struct dp_netdev_pmd_thread {
/* Current context of the PMD thread. */
struct dp_netdev_pmd_thread_ctx ctx;

struct latch exit_latch; /* For terminating the pmd thread. */
struct seq *reload_seq;
uint64_t last_reload_seq;
atomic_bool reload; /* Do we need to reload ports? */
atomic_bool exit; /* For terminating the pmd thread. */
pthread_t thread;
unsigned core_id; /* CPU core id of this pmd thread. */
int numa_id; /* numa node id of this pmd thread. */
Expand Down Expand Up @@ -1761,7 +1760,7 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)

ovs_mutex_lock(&pmd->cond_mutex);
seq_change(pmd->reload_seq);
atomic_store_relaxed(&pmd->reload, true);
atomic_store_explicit(&pmd->reload, true, memory_order_release);
ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
ovs_mutex_unlock(&pmd->cond_mutex);
}
Expand Down Expand Up @@ -5488,7 +5487,7 @@ pmd_thread_main(void *f_)
emc_cache_slow_sweep(&((pmd->flow_cache).emc_cache));
}

atomic_read_relaxed(&pmd->reload, &reload);
atomic_read_explicit(&pmd->reload, &reload, memory_order_acquire);
if (reload) {
break;
}
Expand All @@ -5509,7 +5508,7 @@ pmd_thread_main(void *f_)
ovs_mutex_unlock(&pmd->perf_stats.stats_mutex);

poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
exiting = latch_is_set(&pmd->exit_latch);
atomic_read_relaxed(&pmd->exit, &exiting);
/* Signal here to make sure the pmd finishes
* reloading the updated configuration. */
dp_netdev_pmd_reload_done(pmd);
Expand Down Expand Up @@ -5928,7 +5927,7 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
pmd->n_output_batches = 0;

ovs_refcount_init(&pmd->ref_cnt);
latch_init(&pmd->exit_latch);
atomic_init(&pmd->exit, false);
pmd->reload_seq = seq_create();
pmd->last_reload_seq = seq_read(pmd->reload_seq);
atomic_init(&pmd->reload, false);
Expand Down Expand Up @@ -5975,7 +5974,6 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
cmap_destroy(&pmd->classifiers);
cmap_destroy(&pmd->flow_table);
ovs_mutex_destroy(&pmd->flow_mutex);
latch_destroy(&pmd->exit_latch);
seq_destroy(pmd->reload_seq);
xpthread_cond_destroy(&pmd->cond);
ovs_mutex_destroy(&pmd->cond_mutex);
Expand All @@ -5997,7 +5995,7 @@ dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
pmd_free_static_tx_qid(pmd);
ovs_mutex_unlock(&dp->non_pmd_mutex);
} else {
latch_set(&pmd->exit_latch);
atomic_store_relaxed(&pmd->exit, true);
dp_netdev_reload_pmd__(pmd);
xpthread_join(pmd->thread, NULL);
}
Expand Down

0 comments on commit d333b02

Please sign in to comment.