MDEV-22456 after-merge fix: introduce Atomic_relaxed
In the merge 9e6e435
we made Atomic_counter a more generic wrapper of std::atomic
so that dict_index_t would support the implicit assignment operator.

It is better to revert the changes to Atomic_counter and
instead introduce Atomic_relaxed as a generic wrapper to std::atomic.

Unlike Atomic_counter, we will not define operator++, operator+=,
or similar, because we want read-modify-write operations to be
explicit in the users of Atomic_relaxed: unlike loads and stores,
atomic read-modify-write operations always incur some overhead.
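
To illustrate the distinction, here is a minimal sketch (not part of this commit; the variable names are invented):

#include "my_counter.h"   /* Atomic_counter */
#include "my_atomic.h"    /* Atomic_relaxed */

static Atomic_counter<unsigned> counter;
static Atomic_relaxed<unsigned> relaxed;

void example()
{
  counter++;             /* atomic RMW hiding behind an innocuous operator */
  relaxed.fetch_add(1);  /* the same RMW; the cost is visible at the call site */
  unsigned v= relaxed;   /* plain relaxed load */
  relaxed= v + 1;        /* plain relaxed store; NOT an atomic increment */
}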
dr-m committed May 18, 2020
1 parent fde94b4 · commit 386f168
Showing 7 changed files with 61 additions and 35 deletions.
45 changes: 45 additions & 0 deletions include/my_atomic.h
@@ -2,6 +2,7 @@
 #define MY_ATOMIC_INCLUDED
 
 /* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2018, 2020, MariaDB
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -169,4 +170,48 @@
 	my_atomic_casptr((P), (E), (D))
 #endif
 
+#ifdef __cplusplus
+#include <atomic>
+/**
+  A wrapper for std::atomic, defaulting to std::memory_order_relaxed.
+
+  When it comes to atomic loads or stores at std::memory_order_relaxed
+  on IA-32 or AMD64, this wrapper is only introducing some constraints
+  to the C++ compiler, to prevent some optimizations of loads or
+  stores.
+
+  On POWER and ARM, atomic loads and stores involve different instructions
+  from normal loads and stores and will thus incur some overhead.
+
+  Because atomic read-modify-write operations will always incur
+  overhead, we intentionally do not define
+  operator++(), operator--(), operator+=(), operator-=(), or similar,
+  to make the overhead stand out in the users of this code.
+*/
+template <typename Type> class Atomic_relaxed
+{
+  std::atomic<Type> m;
+public:
+  Atomic_relaxed(const Atomic_relaxed<Type> &rhs)
+  { m.store(rhs, std::memory_order_relaxed); }
+  Atomic_relaxed(Type val) : m(val) {}
+  Atomic_relaxed() {}
+
+  operator Type() const { return m.load(std::memory_order_relaxed); }
+  Type operator=(const Type val)
+  { m.store(val, std::memory_order_relaxed); return val; }
+  Type operator=(const Atomic_relaxed<Type> &rhs) { return *this= Type{rhs}; }
+  Type fetch_add(const Type i, std::memory_order o= std::memory_order_relaxed)
+  { return m.fetch_add(i, o); }
+  Type fetch_sub(const Type i, std::memory_order o= std::memory_order_relaxed)
+  { return m.fetch_sub(i, o); }
+  bool compare_exchange_strong(Type& i1, const Type i2,
+                               std::memory_order o1= std::memory_order_relaxed,
+                               std::memory_order o2= std::memory_order_relaxed)
+  { return m.compare_exchange_strong(i1, i2, o1, o2); }
+  Type exchange(const Type i, std::memory_order o= std::memory_order_relaxed)
+  { return m.exchange(i, o); }
+};
+#endif /* __cplusplus */
+
 #endif /* MY_ATOMIC_INCLUDED */
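
A minimal usage sketch of the new wrapper, exercising only the interface shown above (names invented for illustration):

#include <stdint.h>
#include "my_atomic.h"

static Atomic_relaxed<int32_t> word(0);

void demo()
{
  int32_t v= word;                 /* relaxed load via the conversion operator */
  word= v + 1;                     /* relaxed store; NOT an atomic increment */
  int32_t old= word.fetch_add(5);  /* atomic RMW; returns the previous value */
  int32_t expected= old + 5;
  /* strong CAS; on failure, expected is overwritten with the current value */
  bool done= word.compare_exchange_strong(expected, 0);
  (void) done;
}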
13 changes: 1 addition & 12 deletions include/my_counter.h
@@ -1,7 +1,7 @@
 #ifndef MY_COUNTER_H_INCLUDED
 #define MY_COUNTER_H_INCLUDED
 /*
-  Copyright (C) 2018 MariaDB Foundation
+  Copyright (C) 2018, 2020, MariaDB
 
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
@@ -45,16 +45,5 @@ template <typename Type> class Atomic_counter
   operator Type() const { return m_counter.load(std::memory_order_relaxed); }
   Type operator=(const Type val)
   { m_counter.store(val, std::memory_order_relaxed); return val; }
-  Type operator=(const Atomic_counter<Type> &rhs) { return *this= Type{rhs}; }
-
-  Type fetch_add(const Type i, std::memory_order m)
-  { return m_counter.fetch_add(i, m); }
-  Type fetch_sub(const Type i, std::memory_order m)
-  { return m_counter.fetch_sub(i, m); }
-  bool compare_exchange_strong(Type& i1, const Type i2,
-                               std::memory_order m1, std::memory_order m2)
-  { return m_counter.compare_exchange_strong(i1, i2, m1, m2); }
-  Type exchange(const Type i, std::memory_order m)
-  { return m_counter.exchange(i, m); }
 };
 #endif /* MY_COUNTER_H_INCLUDED */
11 changes: 2 additions & 9 deletions storage/innobase/dict/dict0dict.cc
@@ -6158,10 +6158,7 @@ dict_index_zip_pad_update(
 	beyond max pad size. */
 	if (info->pad + ZIP_PAD_INCR
 	    < (srv_page_size * zip_pad_max) / 100) {
-		/* Use atomics even though we have the mutex.
-		This is to ensure that we are able to read
-		info->pad atomically. */
-		info->pad += ZIP_PAD_INCR;
+		info->pad.fetch_add(ZIP_PAD_INCR);
 
 		MONITOR_INC(MONITOR_PAD_INCREMENTS);
 	}
@@ -6178,11 +6175,7 @@
 	padding. */
 	if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT
 	    && info->pad > 0) {
-
-		/* Use atomics even though we have the mutex.
-		This is to ensure that we are able to read
-		info->pad atomically. */
-		info->pad -= ZIP_PAD_INCR;
+		info->pad.fetch_sub(ZIP_PAD_INCR);
 
 		info->n_rounds = 0;
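
The deleted comments explained why atomics were kept under a mutex: writers update info->pad while holding the mutex, but readers poll it without the mutex, so the loads and stores must at least be tear-free, which is exactly what Atomic_relaxed provides. A simplified sketch of that pattern (type and function names invented for illustration):

struct pad_info_sketch
{
  SysMutex mutex;             /* serializes the writers */
  Atomic_relaxed<ulint> pad;  /* may be read without holding the mutex */
};

void pad_writer(pad_info_sketch *info)
{
  mutex_enter(&info->mutex);
  info->pad.fetch_add(ZIP_PAD_INCR);  /* explicit, visibly non-free RMW */
  mutex_exit(&info->mutex);
}

ulint pad_reader(const pad_info_sketch *info)
{
  return info->pad;  /* tear-free relaxed load; no mutex needed */
}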
6 changes: 3 additions & 3 deletions storage/innobase/include/dict0mem.h
@@ -931,7 +931,7 @@ an uncompressed page should be left as padding to avoid compression
 failures. This estimate is based on a self-adapting heuristic. */
 struct zip_pad_info_t {
 	SysMutex	mutex;	/*!< mutex protecting the info */
-	Atomic_counter<ulint>
+	Atomic_relaxed<ulint>
 			pad;	/*!< number of bytes used as pad */
 	ulint		success;/*!< successful compression ops during
 				current round */
@@ -1107,10 +1107,10 @@ struct dict_index_t {
 	/* @} */
 private:
 	/** R-tree split sequence number */
-	Atomic_counter<node_seq_t> rtr_ssn;
+	Atomic_relaxed<node_seq_t> rtr_ssn;
 public:
 	void set_ssn(node_seq_t ssn) { rtr_ssn= ssn; }
-	node_seq_t assign_ssn() { return ++rtr_ssn; }
+	node_seq_t assign_ssn() { return rtr_ssn.fetch_add(1) + 1; }
 	node_seq_t ssn() const { return rtr_ssn; }
 
 	rtr_info_track_t*
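
Since std::atomic fetch_add returns the value held before the addition, rtr_ssn.fetch_add(1) + 1 yields exactly what the old ++rtr_ssn returned, while making the read-modify-write explicit. In miniature (illustrative values, assuming node_seq_t is an integer type):

Atomic_relaxed<uint32_t> ssn(41);
uint32_t a= ssn.fetch_add(1) + 1;  /* a == 42, just as ++ssn would return */
                                   /* ssn itself now holds 42 either way */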
6 changes: 3 additions & 3 deletions storage/innobase/include/sync0rw.h
@@ -569,10 +569,10 @@ struct rw_lock_t
 #endif /* UNIV_DEBUG */
 {
 	/** Holds the state of the lock. */
-	Atomic_counter<int32_t> lock_word;
+	Atomic_relaxed<int32_t> lock_word;
 
-	/** 1: there are waiters */
-	Atomic_counter<uint32_t> waiters;
+	/** 0=no waiters, 1=waiters for X or SX lock exist */
+	Atomic_relaxed<uint32_t> waiters;
 
 	/** number of granted SX locks. */
 	volatile ulint sx_recursive;
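
The waiters field is only a 0/1 hint: a contended locker raises it before going to sleep, and the unlock path reads it to decide whether anyone needs to be signalled. A rough sketch of that handshake (hypothetical helpers, not the actual InnoDB code paths):

void lock_contended_path(rw_lock_t *lock)
{
  lock->waiters.exchange(1);  /* explicit RMW: announce that a waiter exists */
  /* ... re-check lock_word, then sleep on the sync array event ... */
}

void unlock_path(rw_lock_t *lock)
{
  if (lock->waiters)   /* relaxed load; merely a hint */
  {
    lock->waiters= 0;  /* relaxed store */
    /* ... os_event_set() to wake the sleepers ... */
  }
}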
11 changes: 5 additions & 6 deletions storage/innobase/include/sync0rw.ic
@@ -355,16 +355,15 @@ rw_lock_s_unlock_func(
 	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));
 
 	/* Increment lock_word to indicate 1 less reader */
-	int32_t lock_word = ++lock->lock_word;
-	if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
+	int32_t lock_word = lock->lock_word.fetch_add(1);
+	if (lock_word == -1 || lock_word == -X_LOCK_HALF_DECR - 1) {
 		/* wait_ex waiter exists. It may not be asleep, but we signal
 		anyway. We do not wake other waiters, because they can't
 		exist without wait_ex waiter and wait_ex waiter goes first.*/
 		os_event_set(lock->wait_ex_event);
 		sync_array_object_signalled();
 	} else {
-		ut_ad(--lock_word);
 		ut_ad(lock_word > -X_LOCK_DECR);
 		ut_ad(lock_word < X_LOCK_DECR);
 	}
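
The pre-increment in the old code returned the new value, so it compared against 0 and -X_LOCK_HALF_DECR; fetch_add(1) returns the previous value, so both comparisons shift down by one, and the ut_ad(--lock_word) that recovered the old value for the assertions becomes unnecessary. In miniature (assumed values):

Atomic_relaxed<int32_t> w(-1);  /* e.g. one reader left, wait_ex writer pending */
int32_t old= w.fetch_add(1);    /* old == -1; w now holds 0 */
bool signal= (old == -1);       /* same condition the old (++w == 0) tested */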
@@ -414,11 +413,11 @@
 	} else if (lock_word == -X_LOCK_DECR
 		   || lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
 		/* There are 2 x-locks */
-		lock->lock_word += X_LOCK_DECR;
+		lock->lock_word.fetch_add(X_LOCK_DECR);
 	} else {
 		/* There are more than 2 x-locks. */
 		ut_ad(lock_word < -X_LOCK_DECR);
-		lock->lock_word++;
+		lock->lock_word.fetch_add(1);
 	}
 
 	ut_ad(rw_lock_validate(lock));
@@ -470,7 +469,7 @@
 		/* still has x-lock */
 		ut_ad(lock_word == -X_LOCK_HALF_DECR ||
 		      lock_word <= -(X_LOCK_DECR + X_LOCK_HALF_DECR));
-		lock->lock_word += X_LOCK_HALF_DECR;
+		lock->lock_word.fetch_add(X_LOCK_HALF_DECR);
 	}
 }
4 changes: 2 additions & 2 deletions storage/innobase/sync/sync0rw.cc
@@ -528,10 +528,10 @@ rw_lock_x_lock_low(
 			exists. Add another. */
 			if (lock_word == 0
 			    || lock_word == -X_LOCK_HALF_DECR) {
-				lock->lock_word -= X_LOCK_DECR;
+				lock->lock_word.fetch_sub(X_LOCK_DECR);
 			} else {
 				ut_ad(lock_word <= -X_LOCK_DECR);
-				lock->lock_word--;
+				lock->lock_word.fetch_sub(1);
 			}
 		}
