MDEV-13728 - Import MySQL 5.7 atomic operations for MSVC and Solaris
gcc_sync.h, solaris.h, generic-msvc.h copied verbatim.
Sergey Vojtovich committed Nov 27, 2017
1 parent 62fb022 commit 1029b22
Showing 6 changed files with 304 additions and 261 deletions.
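All three backends touched by this commit expose the same small my_atomic_* surface, and the CAS calls share one contract: return nonzero on success, and on failure write the value actually observed back into *cmp. A usage sketch of that contract (illustrative only, not part of the commit; assumes one of the headers below is included and MariaDB's usual int32 typedef):

/* Illustrative retry loop built on the shared CAS contract. */
static int32 fetch_and_double(int32 volatile *a)
{
  int32 old= my_atomic_load32(a);           /* initial expectation */
  while (!my_atomic_cas32(a, &old, old * 2))
  {
    /* CAS failed: old was refreshed with the observed value,
       so the loop retries without issuing another load. */
  }
  return old;                               /* value before doubling */
}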
include/atomic/gcc_builtins.h
@@ -1,7 +1,7 @@
#ifndef ATOMIC_GCC_BUILTINS_INCLUDED
#define ATOMIC_GCC_BUILTINS_INCLUDED

-/* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2017 MariaDB Foundation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,8 +16,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

-#if defined(HAVE_GCC_C11_ATOMICS)
-#define MY_ATOMIC_MODE "gcc-atomics-smp"

#define MY_MEMORY_ORDER_RELAXED __ATOMIC_RELAXED
#define MY_MEMORY_ORDER_CONSUME __ATOMIC_CONSUME
@@ -76,21 +74,5 @@
  __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#define my_atomic_casptr(P, E, D) \
  __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-#else
-#define MY_ATOMIC_MODE "gcc-builtins-smp"
-#define make_atomic_load_body(S) \
-  ret= __sync_fetch_and_or(a, 0);
-#define make_atomic_store_body(S) \
-  (void) __sync_lock_test_and_set(a, v);
-#define make_atomic_add_body(S) \
-  v= __sync_fetch_and_add(a, v);
-#define make_atomic_fas_body(S) \
-  v= __sync_lock_test_and_set(a, v);
-#define make_atomic_cas_body(S) \
-  int ## S sav; \
-  int ## S cmp_val= *cmp; \
-  sav= __sync_val_compare_and_swap(a, cmp_val, set);\
-  if (!(ret= (sav == cmp_val))) *cmp= sav
-#endif

#endif /* ATOMIC_GCC_BUILTINS_INCLUDED */
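With the __sync fallback removed, the C11 path above maps my_atomic_cas32 straight onto __atomic_compare_exchange_n, which already implements the refresh-expected-on-failure behaviour. A standalone check of that builtin (GCC/clang; illustrative, not part of the commit):

#include <stdio.h>

int main(void)
{
  int a= 5, expected= 3;
  /* Fails: a != 3; the builtin refreshes expected to 5. */
  int ok= __atomic_compare_exchange_n(&a, &expected, 7, 0,
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  printf("ok=%d expected=%d a=%d\n", ok, expected, a);  /* ok=0 expected=5 a=5 */
  /* Succeeds with the refreshed expectation. */
  ok= __atomic_compare_exchange_n(&a, &expected, 7, 0,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  printf("ok=%d a=%d\n", ok, a);                        /* ok=1 a=7 */
  return 0;
}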
include/atomic/gcc_sync.h (new file)
@@ -0,0 +1,106 @@
#ifndef GCC_SYNC_INCLUDED
#define GCC_SYNC_INCLUDED

/* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

/* Old GCC __sync builtins introduced in GCC 4.1 */

static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
{
  int32 cmp_val= *cmp;
  int32 sav= __sync_val_compare_and_swap(a, cmp_val, set);
  int ret= (sav == cmp_val);
  if (!ret)
    *cmp = sav;
  return ret;
}

static inline int my_atomic_cas64(int64 volatile *a, int64 *cmp, int64 set)
{
  int64 cmp_val= *cmp;
  int64 sav= __sync_val_compare_and_swap(a, cmp_val, set);
  int ret= (sav == cmp_val);
  if (!ret)
    *cmp = sav;
  return ret;
}

static inline int my_atomic_casptr(void * volatile *a, void **cmp, void *set)
{
  void *cmp_val= *cmp;
  void *sav= __sync_val_compare_and_swap(a, cmp_val, set);
  int ret= (sav == cmp_val);
  if (!ret)
    *cmp = sav;
  return ret;
}

static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
{
  return __sync_fetch_and_add(a, v);
}

static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
{
  return __sync_fetch_and_add(a, v);
}

static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
{
  return __sync_lock_test_and_set(a, v);
}

static inline int64 my_atomic_fas64(int64 volatile *a, int64 v)
{
  return __sync_lock_test_and_set(a, v);
}

static inline void * my_atomic_fasptr(void * volatile *a, void * v)
{
  return __sync_lock_test_and_set(a, v);
}

static inline int32 my_atomic_load32(int32 volatile *a)
{
  return __sync_fetch_and_or(a, 0);
}

static inline int64 my_atomic_load64(int64 volatile *a)
{
  return __sync_fetch_and_or(a, 0);
}

static inline void* my_atomic_loadptr(void * volatile *a)
{
  return __sync_fetch_and_or(a, 0);
}

static inline void my_atomic_store32(int32 volatile *a, int32 v)
{
  (void) __sync_lock_test_and_set(a, v);
}

static inline void my_atomic_store64(int64 volatile *a, int64 v)
{
  (void) __sync_lock_test_and_set(a, v);
}

static inline void my_atomic_storeptr(void * volatile *a, void *v)
{
  (void) __sync_lock_test_and_set(a, v);
}

#endif /* GCC_SYNC_INCLUDED */
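The __sync builtins provide no plain atomic load or store, hence the two tricks used above: OR-ing with zero leaves the value unchanged but returns it atomically (a load), and __sync_lock_test_and_set is an atomic exchange whose result can be discarded (a store). A standalone demonstration (GCC/clang; illustrative, not part of the commit):

#include <stdio.h>

int main(void)
{
  int a= 42;
  int seen= __sync_fetch_and_or(&a, 0);      /* atomic read: returns 42, a unchanged */
  int prev= __sync_lock_test_and_set(&a, 7); /* atomic exchange: returns 42, a becomes 7 */
  printf("seen=%d prev=%d a=%d\n", seen, prev, a);  /* seen=42 prev=42 a=7 */
  return 0;
}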
include/atomic/generic-msvc.h
@@ -1,5 +1,7 @@
-/* Copyright (c) 2006-2008 MySQL AB, 2009 Sun Microsystems, Inc.
-   Use is subject to license terms.
+#ifndef ATOMIC_MSC_INCLUDED
+#define ATOMIC_MSC_INCLUDED
+
+/* Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -14,115 +16,120 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */

-#ifndef _atomic_h_cleanup_
-#define _atomic_h_cleanup_ "atomic/generic-msvc.h"

#include <windows.h>
-/*
-  x86 compilers (both VS2003 or VS2005) never use instrinsics, but generate
-  function calls to kernel32 instead, even in the optimized build.
-  We force intrinsics as described in MSDN documentation for
-  _InterlockedCompareExchange.
-*/
-#ifdef _M_IX86
-
-#if (_MSC_VER >= 1500)
-#include <intrin.h>
-#else
-C_MODE_START
-/*Visual Studio 2003 and earlier do not have prototypes for atomic intrinsics*/
-LONG _InterlockedCompareExchange (LONG volatile *Target, LONG Value, LONG Comp);
-LONGLONG _InterlockedCompareExchange64 (LONGLONG volatile *Target,
-                                        LONGLONG Value, LONGLONG Comp);
-C_MODE_END
-
-#pragma intrinsic(_InterlockedCompareExchange)
-#pragma intrinsic(_InterlockedCompareExchange64)
-#endif
-
-#define InterlockedCompareExchange _InterlockedCompareExchange
-#define InterlockedCompareExchange64 _InterlockedCompareExchange64
-/*
-  No need to do something special for InterlockedCompareExchangePointer
-  as it is a #define to InterlockedCompareExchange. The same applies to
-  InterlockedExchangePointer.
-*/
-#endif /*_M_IX86*/
-
-#define MY_ATOMIC_MODE "msvc-intrinsics"
-/* Implement using CAS on WIN32 */
-#define IL_COMP_EXCHG32(X,Y,Z) \
-  InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z))
-#define IL_COMP_EXCHG64(X,Y,Z) \
-  InterlockedCompareExchange64((volatile LONGLONG *)(X), \
-                               (LONGLONG)(Y),(LONGLONG)(Z))
-#define IL_COMP_EXCHGptr InterlockedCompareExchangePointer
-
-#define make_atomic_cas_body(S) \
-  int ## S initial_cmp= *cmp; \
-  int ## S initial_a= IL_COMP_EXCHG ## S (a, set, initial_cmp); \
-  if (!(ret= (initial_a == initial_cmp))) *cmp= initial_a;
-
-#ifndef _M_IX86
-/* Use full set of optimised functions on WIN64 */
-#define IL_EXCHG_ADD32(X,Y) \
-  InterlockedExchangeAdd((volatile LONG *)(X),(Y))
-#define IL_EXCHG_ADD64(X,Y) \
-  InterlockedExchangeAdd64((volatile LONGLONG *)(X),(LONGLONG)(Y))
-#define IL_EXCHG32(X,Y) \
-  InterlockedExchange((volatile LONG *)(X),(Y))
-#define IL_EXCHG64(X,Y) \
-  InterlockedExchange64((volatile LONGLONG *)(X),(LONGLONG)(Y))
-#define IL_EXCHGptr InterlockedExchangePointer
-
-#define make_atomic_add_body(S) \
-  v= IL_EXCHG_ADD ## S (a, v)
-#define make_atomic_swap_body(S) \
-  v= IL_EXCHG ## S (a, v)
-#define make_atomic_load_body(S) \
-  ret= 0; /* avoid compiler warning */ \
-  ret= IL_COMP_EXCHG ## S (a, ret, ret);
-#endif

+static inline int my_atomic_cas32(int32 volatile *a, int32 *cmp, int32 set)
+{
+  int32 initial_cmp= *cmp;
+  int32 initial_a= InterlockedCompareExchange((volatile LONG*)a,
+                                              set, initial_cmp);
+  int ret= (initial_a == initial_cmp);
+  if (!ret)
+    *cmp= initial_a;
+  return ret;
+}
+
+static inline int my_atomic_cas64(int64 volatile *a, int64 *cmp, int64 set)
+{
+  int64 initial_cmp= *cmp;
+  int64 initial_a= InterlockedCompareExchange64((volatile LONGLONG*)a,
+                                                (LONGLONG)set,
+                                                (LONGLONG)initial_cmp);
+  int ret= (initial_a == initial_cmp);
+  if (!ret)
+    *cmp= initial_a;
+  return ret;
+}
+
+static inline int my_atomic_casptr(void * volatile *a, void **cmp, void *set)
+{
+  void *initial_cmp= *cmp;
+  void *initial_a= InterlockedCompareExchangePointer(a, set, initial_cmp);
+  int ret= (initial_a == initial_cmp);
+  if (!ret)
+    *cmp= initial_a;
+  return ret;
+}
+
+static inline int32 my_atomic_add32(int32 volatile *a, int32 v)
+{
+  return (int32)InterlockedExchangeAdd((volatile LONG*)a, v);
+}
+
+static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
+{
+  return (int64)InterlockedExchangeAdd64((volatile LONGLONG*)a, (LONGLONG)v);
+}
+
+static inline int32 my_atomic_load32(int32 volatile *a)
+{
+  return (int32)InterlockedCompareExchange((volatile LONG *)a, 0, 0);
+}
+
+static inline int64 my_atomic_load64(int64 volatile *a)
+{
+  return (int64)InterlockedCompareExchange64((volatile LONGLONG *)a, 0, 0);
+}
+
+static inline void* my_atomic_loadptr(void * volatile *a)
+{
+  return InterlockedCompareExchangePointer(a, 0, 0);
+}
+
+static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
+{
+  return (int32)InterlockedExchange((volatile LONG*)a, v);
+}
+
+static inline int64 my_atomic_fas64(int64 volatile *a, int64 v)
+{
+  return (int64)InterlockedExchange64((volatile LONGLONG*)a, v);
+}
+
+static inline void * my_atomic_fasptr(void * volatile *a, void * v)
+{
+  return InterlockedExchangePointer(a, v);
+}
+
+static inline void my_atomic_store32(int32 volatile *a, int32 v)
+{
+  (void)InterlockedExchange((volatile LONG*)a, v);
+}
+
+static inline void my_atomic_store64(int64 volatile *a, int64 v)
+{
+  (void)InterlockedExchange64((volatile LONGLONG*)a, v);
+}
+
+static inline void my_atomic_storeptr(void * volatile *a, void *v)
+{
+  (void)InterlockedExchangePointer(a, v);
+}


/*
my_yield_processor (equivalent of x86 PAUSE instruction) should be used
to improve performance on hyperthreaded CPUs. Intel recommends to use it in
spin loops also on non-HT machines to reduce power consumption (see e.g
http://softwarecommunity.intel.com/articles/eng/2004.htm)
Running benchmarks for spinlocks implemented with InterlockedCompareExchange
and YieldProcessor shows that much better performance is achieved by calling
YieldProcessor in a loop - that is, yielding longer. On Intel boxes setting
loop count in the range 200-300 brought best results.
*/
#ifndef YIELD_LOOPS
#define YIELD_LOOPS 200
#endif

-static __inline int my_yield_processor()
+static inline int my_yield_processor()
{
  int i;
-  for(i=0; i<YIELD_LOOPS; i++)
+  for (i=0; i<YIELD_LOOPS; i++)
  {
#if (_MSC_VER <= 1310)
    /* On older compilers YieldProcessor is not available, use inline assembly*/
    __asm { rep nop }
#else
    YieldProcessor();
#endif
  }
  return 1;
}

#define LF_BACKOFF my_yield_processor()
-#else /* cleanup */
-
-#undef IL_EXCHG_ADD32
-#undef IL_EXCHG_ADD64
-#undef IL_COMP_EXCHG32
-#undef IL_COMP_EXCHG64
-#undef IL_COMP_EXCHGptr
-#undef IL_EXCHG32
-#undef IL_EXCHG64
-#undef IL_EXCHGptr
-
-#endif

+#endif /* ATOMIC_MSC_INCLUDED */
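The YieldProcessor comment above describes the intended pattern: back off inside a CAS spin loop instead of hammering the contended cache line. A hypothetical sketch of that pattern (spin_acquire/spin_release are illustrations, not part of the commit; assumes this header is included):

static void spin_acquire(int32 volatile *lock)
{
  for (;;)
  {
    int32 expected= 0;
    if (my_atomic_cas32(lock, &expected, 1))   /* 0 -> 1 takes the lock */
      return;
    (void) LF_BACKOFF;    /* spin YIELD_LOOPS PAUSEs before retrying */
  }
}

static void spin_release(int32 volatile *lock)
{
  my_atomic_store32(lock, 0);
}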
