Permalink
Browse files

Get rid of ugly alignment macros and some other cruft; we now have alignas(16) from C++11.
  • Loading branch information...
hrydgard committed Aug 30, 2017
1 parent 0c0da1c commit 22e65ba80dce61f8b32c206634d6c3771d9bb0c1
View
@@ -37,8 +37,7 @@
#define STACKALIGN
// An inheritable class to disallow the copy constructor and operator= functions
class NonCopyable
{
class NonCopyable {
protected:
NonCopyable() {}
private:
@@ -65,15 +64,7 @@ class NonCopyable
// Memory leak checks
#define CHECK_HEAP_INTEGRITY()
// Alignment
#define MEMORY_ALIGNED16(x) __declspec(align(16)) x
#define GC_ALIGNED32(x) __declspec(align(32)) x
#define GC_ALIGNED64(x) __declspec(align(64)) x
#define GC_ALIGNED128(x) __declspec(align(128)) x
#define GC_ALIGNED16_DECL(x) __declspec(align(16)) x
#define GC_ALIGNED64_DECL(x) __declspec(align(64)) x
// Debug definitions
// Debug definitions
#if defined(_DEBUG)
#include <crtdbg.h>
#undef CHECK_HEAP_INTEGRITY
@@ -93,20 +84,6 @@ class NonCopyable
#endif
#define __forceinline inline __attribute__((always_inline))
#define MEMORY_ALIGNED16(x) __attribute__((aligned(16))) x
#define GC_ALIGNED32(x) __attribute__((aligned(32))) x
#define GC_ALIGNED64(x) __attribute__((aligned(64))) x
#define GC_ALIGNED128(x) __attribute__((aligned(128))) x
#define GC_ALIGNED16_DECL(x) __attribute__((aligned(16))) x
#define GC_ALIGNED64_DECL(x) __attribute__((aligned(64))) x
#endif
#ifdef _MSC_VER
#define __getcwd _getcwd
#define __chdir _chdir
#else
#define __getcwd getcwd
#define __chdir chdir
#endif
#if !defined(__GNUC__) && (defined(_M_X64) || defined(_M_IX86))
View
@@ -23,6 +23,10 @@
template <bool> struct CompileTimeAssert;
template<> struct CompileTimeAssert<true> {};
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif
#if !defined(_WIN32)
#include <unistd.h>
@@ -35,8 +39,6 @@ template<> struct CompileTimeAssert<true> {};
#define Crash() {kill(getpid(), SIGINT);}
#endif
#define ARRAYSIZE(A) (sizeof(A)/sizeof((A)[0]))
inline u32 __rotl(u32 x, int shift) {
shift &= 31;
if (!shift) return x;
View
@@ -738,28 +738,6 @@ void openIniFile(const std::string fileName) {
#endif
}
// Returns the current directory
// Returns the current working directory, or an empty string on failure.
// Uses the __getcwd alias (getcwd/_getcwd) with a NULL buffer, which
// allocates with malloc; the result is copied into a std::string and freed.
std::string GetCurrentDir()
{
	char *dir;
	// Get the current working directory (getcwd uses malloc)
	if (!(dir = __getcwd(NULL, 0))) {
		ERROR_LOG(COMMON, "GetCurrentDirectory failed: %s",
			GetLastErrorMsg());
		// BUG FIX: returning NULL here constructed std::string from a
		// null pointer, which is undefined behavior. Return empty instead.
		return "";
	}
	std::string strDir = dir;
	free(dir);
	return strDir;
}
// Sets the current directory to the given directory
// Changes the process's working directory to |directory|.
// Returns true on success, false otherwise (chdir convention: 0 == success).
bool SetCurrentDir(const std::string &directory)
{
	const int result = __chdir(directory.c_str());
	return result == 0;
}
const std::string &GetExeDirectory()
{
static std::string ExePath;
View
@@ -21,12 +21,11 @@
#define THUNK_ARENA_SIZE 1024*1024*1
namespace
{
namespace {
#ifndef _M_X64
static u8 GC_ALIGNED32(saved_fp_state[16 * 4 * 4]);
static u8 GC_ALIGNED32(saved_gpr_state[16 * 8]);
alignas(32) static u8 saved_fp_state[16 * 4 * 4];
alignas(32) static u8 saved_gpr_state[16 * 8];
static u16 saved_mxcsr;
#endif
View
@@ -81,7 +81,7 @@ volatile u32 hasTsEvents = 0;
// as we can already reach that structure through a register.
int slicelength;
MEMORY_ALIGNED16(s64) globalTimer;
alignas(16) s64 globalTimer;
s64 idledCycles;
s64 lastGlobalTimeTicks;
s64 lastGlobalTimeUs;
View
@@ -92,10 +92,6 @@ struct Syscall
#define RETURN64(n) {u64 RETURN64_tmp = n; currentMIPS->r[MIPS_REG_V0] = RETURN64_tmp & 0xFFFFFFFF; currentMIPS->r[MIPS_REG_V1] = RETURN64_tmp >> 32;}
#define RETURNF(fl) currentMIPS->f[0] = fl
#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
#endif
const char *GetFuncName(const char *module, u32 nib);
const char *GetFuncName(int module, int func);
const HLEFunction *GetFunc(const char *module, u32 nib);
View
@@ -50,8 +50,14 @@ class PointerWrap;
#include <fcntl.h>
#include <errno.h>
#endif
#ifdef _MSC_VER
#define PACK // on MSVC we use #pragma pack() instead so let's kill this.
#else
#define PACK __attribute__((packed))
#endif
#ifdef _WIN32
#define PACK
#undef errno
#undef ECONNABORTED
#undef ECONNRESET
@@ -73,7 +79,6 @@ inline bool connectInProgress(int errcode){ return (errcode == WSAEWOULDBLOCK ||
#define INVALID_SOCKET -1
#define SOCKET_ERROR -1
#define closesocket close
#define PACK __attribute__((packed))
inline bool connectInProgress(int errcode){ return (errcode == EINPROGRESS || errcode == EALREADY); }
#endif
View
@@ -23,21 +23,10 @@
#pragma once
#include <stdint.h>
#include <cstdint>
#include <time.h>
#include "proAdhoc.h"
/*#ifdef _MSC_VER
#define PACK
#undef errno
#define errno WSAGetLastError()
#else
#define INVALID_SOCKET -1
#define SOCKET_ERROR -1
#define closesocket close
#define PACK __attribute__((packed))
#endif*/
// Server Listening Port
//#define SERVER_PORT 27312
@@ -638,7 +638,7 @@ namespace MIPSComp
fpr.ReleaseSpillLocksAndDiscardTemps();
}
static const float MEMORY_ALIGNED16(vavg_table[4]) = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };
alignas(16) static const float vavg_table[4] = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };
void ArmJit::Comp_Vhoriz(MIPSOpcode op) {
NEON_IF_AVAILABLE(CompNEON_Vhoriz);
@@ -507,7 +507,7 @@ namespace MIPSComp {
fpr.ReleaseSpillLocksAndDiscardTemps();
}
static const float MEMORY_ALIGNED16(vavg_table[4]) = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };
alignas(16) static const float vavg_table[4] = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };
void Arm64Jit::Comp_Vhoriz(MIPSOpcode op) {
CONDITIONAL_DISABLE;
@@ -485,7 +485,7 @@ namespace MIPSComp {
ApplyPrefixD(dregs, V_Single);
}
static const float MEMORY_ALIGNED16(vavg_table[4]) = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };
alignas(16) static const float vavg_table[4] = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };
void IRFrontend::Comp_Vhoriz(MIPSOpcode op) {
CONDITIONAL_DISABLE;
View
@@ -240,7 +240,7 @@ class MIPSState
u32 savedPC;
u32 MEMORY_ALIGNED16(vcmpResult[4]);
alignas(16) u32 vcmpResult[4];
float sincostemp[2];
@@ -151,8 +151,8 @@ void Jit::Comp_FPULS(MIPSOpcode op) {
}
}
static const u64 MEMORY_ALIGNED16(ssSignBits2[2]) = {0x8000000080000000ULL, 0x8000000080000000ULL};
static const u64 MEMORY_ALIGNED16(ssNoSignMask[2]) = {0x7FFFFFFF7FFFFFFFULL, 0x7FFFFFFF7FFFFFFFULL};
alignas(16) static const u64 ssSignBits2[2] = {0x8000000080000000ULL, 0x8000000080000000ULL};
alignas(16) static const u64 ssNoSignMask[2] = {0x7FFFFFFF7FFFFFFFULL, 0x7FFFFFFF7FFFFFFFULL};
void Jit::CompFPComp(int lhs, int rhs, u8 compare, bool allowNaN) {
gpr.MapReg(MIPS_REG_FPCOND, false, true);
@@ -23,7 +23,7 @@
#include "Core/MIPS/x86/RegCache.h"
#include "Core/MIPS/x86/Jit.h"
static const u64 MEMORY_ALIGNED16(ssNoSignMask[2]) = {0x7FFFFFFF7FFFFFFFULL, 0x7FFFFFFF7FFFFFFFULL};
alignas(16) static const u64 ssNoSignMask[2] = {0x7FFFFFFF7FFFFFFFULL, 0x7FFFFFFF7FFFFFFFULL};
namespace MIPSComp {
using namespace Gen;
View
@@ -65,12 +65,12 @@ using namespace X64JitConstants;
static const float one = 1.0f;
static const float minus_one = -1.0f;
const u32 MEMORY_ALIGNED16( noSignMask[4] ) = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
const u32 MEMORY_ALIGNED16( signBitAll[4] ) = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
const u32 MEMORY_ALIGNED16( signBitLower[4] ) = {0x80000000, 0, 0, 0};
const float MEMORY_ALIGNED16( oneOneOneOne[4] ) = {1.0f, 1.0f, 1.0f, 1.0f};
const u32 MEMORY_ALIGNED16( fourinfnan[4] ) = {0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
const float MEMORY_ALIGNED16( identityMatrix[4][4]) = { { 1.0f, 0, 0, 0 }, { 0, 1.0f, 0, 0 }, { 0, 0, 1.0f, 0 }, { 0, 0, 0, 1.0f} };
alignas(16) const u32 noSignMask[4] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
alignas(16) const u32 signBitAll[4] = {0x80000000, 0x80000000, 0x80000000, 0x80000000};
alignas(16) const u32 signBitLower[4] = {0x80000000, 0, 0, 0};
alignas(16) const float oneOneOneOne[4] = {1.0f, 1.0f, 1.0f, 1.0f};
alignas(16) const u32 fourinfnan[4] = {0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
alignas(16) const float identityMatrix[4][4] = { { 1.0f, 0, 0, 0 }, { 0, 1.0f, 0, 0 }, { 0, 0, 1.0f, 0 }, { 0, 0, 0, 1.0f} };
void Jit::Comp_VPFX(MIPSOpcode op)
{
@@ -236,7 +236,7 @@ bool IsOverlapSafe(int dreg, int di, int sn, u8 sregs[], int tn = 0, u8 tregs[]
return IsOverlapSafeAllowS(dreg, di, sn, sregs, tn, tregs) && sregs[di] != dreg;
}
static u32 MEMORY_ALIGNED16(ssLoadStoreTemp);
alignas(16) static u32 ssLoadStoreTemp;
void Jit::Comp_SV(MIPSOpcode op) {
CONDITIONAL_DISABLE;
@@ -1261,7 +1261,7 @@ void Jit::Comp_VecDo3(MIPSOpcode op) {
fpr.ReleaseSpillLocks();
}
static const u32 MEMORY_ALIGNED16( vcmpMask[4][4] ) = {
alignas(16) static const u32 vcmpMask[4][4] = {
{0x00000031, 0x00000000, 0x00000000, 0x00000000},
{0x00000011, 0x00000012, 0x00000000, 0x00000000},
{0x00000011, 0x00000012, 0x00000014, 0x00000000},
@@ -1603,7 +1603,7 @@ void Jit::Comp_Vh2f(MIPSOpcode op) {
if (js.HasUnknownPrefix())
DISABLE;
#define SSE_CONST4(name, val) static const u32 MEMORY_ALIGNED16(name[4]) = { (val), (val), (val), (val) }
#define SSE_CONST4(name, val) alignas(16) static const u32 name[4] = { (val), (val), (val), (val) }
SSE_CONST4(mask_nosign, 0x7fff);
SSE_CONST4(magic, (254 - 15) << 23);
@@ -1683,9 +1683,9 @@ void Jit::Comp_Vh2f(MIPSOpcode op) {
// The goal is to map (reversed byte order for clarity):
// AABBCCDD -> 000000AA 000000BB 000000CC 000000DD
static s8 MEMORY_ALIGNED16( vc2i_shuffle[16] ) = { -1, -1, -1, 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3 };
alignas(16) static s8 vc2i_shuffle[16] = { -1, -1, -1, 0, -1, -1, -1, 1, -1, -1, -1, 2, -1, -1, -1, 3 };
// AABBCCDD -> AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD
static s8 MEMORY_ALIGNED16( vuc2i_shuffle[16] ) = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 };
alignas(16) static s8 vuc2i_shuffle[16] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 };
void Jit::Comp_Vx2i(MIPSOpcode op) {
CONDITIONAL_DISABLE;
@@ -3158,9 +3158,9 @@ void Jit::Comp_VDet(MIPSOpcode op) {
// The goal is to map (reversed byte order for clarity):
// 000000AA 000000BB 000000CC 000000DD -> AABBCCDD
static s8 MEMORY_ALIGNED16( vi2xc_shuffle[16] ) = { 3, 7, 11, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 };
alignas(16) static const s8 vi2xc_shuffle[16] = { 3, 7, 11, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 };
// 0000AAAA 0000BBBB 0000CCCC 0000DDDD -> AAAABBBB CCCCDDDD
static s8 MEMORY_ALIGNED16( vi2xs_shuffle[16] ) = { 2, 3, 6, 7, 10, 11, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1 };
alignas(16) static const s8 vi2xs_shuffle[16] = { 2, 3, 6, 7, 10, 11, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1 };
void Jit::Comp_Vi2x(MIPSOpcode op) {
CONDITIONAL_DISABLE;
@@ -3292,7 +3292,7 @@ void Jit::Comp_Vi2x(MIPSOpcode op) {
fpr.ReleaseSpillLocks();
}
static const float MEMORY_ALIGNED16(vavg_table[4]) = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };
alignas(16) static const float vavg_table[4] = { 1.0f, 1.0f / 2.0f, 1.0f / 3.0f, 1.0f / 4.0f };
void Jit::Comp_Vhoriz(MIPSOpcode op) {
CONDITIONAL_DISABLE;
@@ -27,7 +27,7 @@
#error Should not be compiled on non-ARM.
#endif
static s16 MEMORY_ALIGNED16(volumeValues[4]) = {};
alignas(16) static s16 volumeValues[4] = {};
void AdjustVolumeBlockNEON(s16 *out, s16 *in, size_t size, int leftVol, int rightVol) {
if (leftVol <= 0xFFFF && -leftVol <= 0x10000 && rightVol <= 0xFFFF && -rightVol <= 0x10000) {
@@ -165,8 +165,8 @@ static void spline_n_4(int i, float t, float *knot, float *splineVal) {
const __m128 f31_42_32 = _mm_div_ps(t122, _mm_sub_ps(knot343, knot122));
// It's still faster to use SSE, even with this.
float MEMORY_ALIGNED16(ff30_41_52[4]);
float MEMORY_ALIGNED16(ff31_42_32[4]);
alignas(16) float ff30_41_52[4];
alignas(16) float ff31_42_32[4];
_mm_store_ps(ff30_41_52, f30_41_52);
_mm_store_ps(ff31_42_32, f31_42_32);
@@ -26,7 +26,7 @@
#include "GPU/GPUState.h"
#include "GPU/Common/TextureDecoder.h"
static const u16 MEMORY_ALIGNED16(QuickTexHashInitial[8]) = {0xc00bU, 0x9bd9U, 0x4b73U, 0xb651U, 0x4d9bU, 0x4309U, 0x0083U, 0x0001U};
alignas(16) static const u16 QuickTexHashInitial[8] = {0xc00bU, 0x9bd9U, 0x4b73U, 0xb651U, 0x4d9bU, 0x4309U, 0x0083U, 0x0001U};
#ifdef _MSC_VER
#define __builtin_prefetch(a,b,c)
@@ -38,11 +38,11 @@ bool NEONSkinning = false;
bool NEONMorphing = false;
// Used only in non-NEON mode.
static float MEMORY_ALIGNED16(skinMatrix[12]);
alignas(16) static float skinMatrix[12];
// Will be used only in NEON mode.
static float MEMORY_ALIGNED16(bones[16 * 8]); // First two are kept in registers
static float MEMORY_ALIGNED16(boneMask[4]) = {1.0f, 1.0f, 1.0f, 0.0f};
alignas(16) static float bones[16 * 8]; // First two are kept in registers
alignas(16) static float boneMask[4] = {1.0f, 1.0f, 1.0f, 0.0f};
// NEON register allocation:
// Q0: Texture scaling parameters
@@ -890,7 +890,7 @@ void VertexDecoderJitCache::Jit_Color8888Morph() {
}
// First is the left shift, second is the right shift (against walls, to get the RGBA values.)
static const s16 MEMORY_ALIGNED16(color4444Shift[2][4]) = {{12, 8, 4, 0}, {-12, -12, -12, -12}};
alignas(16) static const s16 color4444Shift[2][4] = {{12, 8, 4, 0}, {-12, -12, -12, -12}};
void VertexDecoderJitCache::Jit_Color4444Morph() {
const bool useNEON = NEONMorphing;
@@ -972,8 +972,8 @@ void VertexDecoderJitCache::Jit_Color4444Morph() {
}
// First is the left shift, second is the right shift (against walls, to get the RGBA values.)
static const s16 MEMORY_ALIGNED16(color565Shift[2][4]) = {{11, 5, 0, 0}, {-11, -10, -11, 0}};
static const float MEMORY_ALIGNED16(byColor565[4]) = {255.0f / 31.0f, 255.0f / 63.0f, 255.0f / 31.0f, 0.0f};
alignas(16) static const s16 color565Shift[2][4] = {{11, 5, 0, 0}, {-11, -10, -11, 0}};
alignas(16) static const float byColor565[4] = {255.0f / 31.0f, 255.0f / 63.0f, 255.0f / 31.0f, 0.0f};
void VertexDecoderJitCache::Jit_Color565Morph() {
const bool useNEON = NEONMorphing;
@@ -1057,8 +1057,8 @@ void VertexDecoderJitCache::Jit_Color565Morph() {
}
// First is the left shift, second is the right shift (against walls, to get the RGBA values.)
static const s16 MEMORY_ALIGNED16(color5551Shift[2][4]) = {{11, 6, 1, 0}, {-11, -11, -11, -15}};
static const float MEMORY_ALIGNED16(byColor5551[4]) = {255.0f / 31.0f, 255.0f / 31.0f, 255.0f / 31.0f, 255.0f / 1.0f};
alignas(16) static const s16 color5551Shift[2][4] = {{11, 6, 1, 0}, {-11, -11, -11, -15}};
alignas(16) static const float byColor5551[4] = {255.0f / 31.0f, 255.0f / 31.0f, 255.0f / 31.0f, 255.0f / 1.0f};
void VertexDecoderJitCache::Jit_Color5551Morph() {
const bool useNEON = NEONMorphing;
Oops, something went wrong.

0 comments on commit 22e65ba

Please sign in to comment.