accel/tcg: Extract store_atom_insert_al16 to host header
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
rth7680 committed May 30, 2023
1 parent af844a1 commit b3f4144
Showing 2 changed files with 51 additions and 39 deletions.
40 changes: 1 addition & 39 deletions accel/tcg/ldst_atomicity.c.inc
@@ -10,6 +10,7 @@
*/

#include "host/load-extract-al16-al8.h"
#include "host/store-insert-al16.h"

#ifdef CONFIG_ATOMIC64
# define HAVE_al8 true
@@ -681,45 +682,6 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

/**
* store_atom_insert_al16:
* @p: host address
* @val: shifted value to store
* @msk: mask for value to store
*
* Atomically store @val to @p masked by @msk.
*/
static void ATTRIBUTE_ATOMIC128_OPT
store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
{
#if defined(CONFIG_ATOMIC128)
    __uint128_t *pu, old, new;

    /* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
    pu = __builtin_assume_aligned(ps, 16);
    old = *pu;
    do {
        new = (old & ~msk.u) | val.u;
    } while (!__atomic_compare_exchange_n(pu, &old, new, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
#elif defined(CONFIG_CMPXCHG128)
    __uint128_t *pu, old, new;

    /*
     * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
     * defer to libatomic, so we must use __sync_*_compare_and_swap_16
     * and accept the sequential consistency that comes with it.
     */
    pu = __builtin_assume_aligned(ps, 16);
    do {
        old = *pu;
        new = (old & ~msk.u) | val.u;
    } while (!__sync_bool_compare_and_swap_16(pu, old, new));
#else
    qemu_build_not_reached();
#endif
}

/**
* store_bytes_leN:
* @pv: host address
50 changes: 50 additions & 0 deletions host/include/generic/host/store-insert-al16.h
@@ -0,0 +1,50 @@
/*
* SPDX-License-Identifier: GPL-2.0-or-later
* Atomic store insert into 128-bit, generic version.
*
* Copyright (C) 2023 Linaro, Ltd.
*/

#ifndef HOST_STORE_INSERT_AL16_H
#define HOST_STORE_INSERT_AL16_H

/**
* store_atom_insert_al16:
 * @ps: host address
* @val: shifted value to store
* @msk: mask for value to store
*
* Atomically store @val to @p masked by @msk.
*/
static inline void ATTRIBUTE_ATOMIC128_OPT
store_atom_insert_al16(Int128 *ps, Int128 val, Int128 msk)
{
#if defined(CONFIG_ATOMIC128)
    __uint128_t *pu;
    Int128Alias old, new;

    /* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
    pu = __builtin_assume_aligned(ps, 16);
    old.u = *pu;
    msk = int128_not(msk);
    do {
        new.s = int128_and(old.s, msk);
        new.s = int128_or(new.s, val);
    } while (!__atomic_compare_exchange_n(pu, &old.u, new.u, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
#else
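    /* Fall back to the host-provided atomic16_cmpxchg for the 16-byte CAS. */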
    Int128 old, new, cmp;

    ps = __builtin_assume_aligned(ps, 16);
    old = *ps;
    msk = int128_not(msk);
    do {
        cmp = old;
        new = int128_and(old, msk);
        new = int128_or(new, val);
        old = atomic16_cmpxchg(ps, cmp, new);
    } while (int128_ne(cmp, old));
#endif
}

#endif /* HOST_STORE_INSERT_AL16_H */
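
For readers unfamiliar with the masked-insert pattern used above, the following standalone sketch shows the same compare-and-swap loop using only GCC/Clang builtins. It is not part of this commit: the name insert_masked_u128 is made up for illustration, and building it may require -mcx16 and/or -latomic depending on the toolchain.

/*
 * Illustrative only, not QEMU code.  Atomically replace the bits of *p
 * selected by msk with the corresponding bits of val; val is assumed to
 * be pre-shifted so that it lies entirely within msk, mirroring the
 * "shifted value to store" contract of store_atom_insert_al16.
 */
#include <stdbool.h>
#include <stdio.h>

static void insert_masked_u128(__uint128_t *p, __uint128_t val, __uint128_t msk)
{
    __uint128_t old = __atomic_load_n(p, __ATOMIC_RELAXED);
    __uint128_t new;

    do {
        /* Clear the masked lanes of the old value, then insert val. */
        new = (old & ~msk) | val;
    } while (!__atomic_compare_exchange_n(p, &old, new, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

int main(void)
{
    __uint128_t x = 0;

    /* Insert the byte 0xab into bits [15:8] without touching the rest. */
    insert_masked_u128(&x, (__uint128_t)0xab << 8, (__uint128_t)0xff << 8);
    printf("low 64 bits: 0x%llx\n", (unsigned long long)x);
    return 0;
}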
