/*
Copyright (C) 2005 Guillaume Duhamel
Copyright (C) 2008-2019 DeSmuME team
This file is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This file is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this software. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TYPES_HPP
#define TYPES_HPP
#include <retro_miscellaneous.h>
#include <retro_inline.h>
#include <math/fxp.h>
#include <new> //for the placement new used by reconstruct() below
//detect Microsoft compilers
#ifdef _MSC_VER
#define HOST_WINDOWS
#endif //_MSC_VER
// Determine CPU architecture for platforms that don't use the autoconf script
#if defined(HOST_WINDOWS) || defined(DESMUME_COCOA)
#if defined(__x86_64__) || defined(__LP64__) || defined(__IA64__) || defined(_M_X64) || defined(_WIN64)
#define HOST_64
#else
#define HOST_32
#endif
#endif
//enforce a constraint: the GDB stub requires DEVELOPER
#if defined(GDB_STUB) && !defined(DEVELOPER)
#define DEVELOPER
#endif
#ifdef DEVELOPER
#define IF_DEVELOPER(X) X
#else
#define IF_DEVELOPER(X)
#endif
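//sample usage (illustrative; the logging statement is hypothetical):
// IF_DEVELOPER(printf("arm9 pc=%08X\n", pc);)
//expands to the statement in DEVELOPER builds and to nothing otherwise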
#ifdef __GNUC__
#ifdef __ALTIVEC__
#define ENABLE_ALTIVEC
#endif
#ifdef __SSE__
#define ENABLE_SSE
#endif
#ifdef __SSE2__
#define ENABLE_SSE2
#endif
#ifdef __SSE3__
#define ENABLE_SSE3
#endif
#ifdef __SSSE3__
#define ENABLE_SSSE3
#endif
#ifdef __SSE4_1__
#define ENABLE_SSE4_1
#endif
#ifdef __SSE4_2__
#define ENABLE_SSE4_2
#endif
#ifdef __AVX__
#define ENABLE_AVX
#endif
#ifdef __AVX2__
#define ENABLE_AVX2
#endif
// AVX-512 is special because it has multiple tiers of support.
//
// For our case, Tier-0 will be the baseline AVX-512 tier that includes the basic Foundation and
// Conflict Detection extensions, which should be supported on all AVX-512 CPUs. Higher tiers
// include more extensions, where each higher tier also assumes support for all lower tiers.
//
// For typical use cases in DeSmuME, the most practical AVX-512 tier will be Tier-1.
#if defined(__AVX512F__) && defined(__AVX512CD__)
#define ENABLE_AVX512_0
#endif
#if defined(ENABLE_AVX512_0) && defined(__AVX512BW__) && defined(__AVX512DQ__)
#define ENABLE_AVX512_1
#endif
#if defined(ENABLE_AVX512_1) && defined(__AVX512IFMA__) && defined(__AVX512VBMI__)
#define ENABLE_AVX512_2
#endif
#if defined(ENABLE_AVX512_2) && defined(__AVX512VNNI__) && defined(__AVX512VBMI2__) && defined(__AVX512BITALG__)
#define ENABLE_AVX512_3
#endif
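//illustrative tier check (a sketch only; RenderLine512/RenderLine256 are hypothetical functions, not part of this header):
// #if defined(ENABLE_AVX512_1)
// RenderLine512(dst, src); //may assume Foundation, CD, BW and DQ extensions
// #elif defined(ENABLE_AVX2)
// RenderLine256(dst, src);
// #endif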
#endif
#ifdef _MSC_VER
#include <compat/msvc.h>
#else
#define WINAPI
#endif
#if !defined(MAX_PATH)
#if defined(HOST_WINDOWS)
#define MAX_PATH 260
#elif defined(__GNUC__)
#include <limits.h>
#if !defined(PATH_MAX)
#define MAX_PATH 1024
#else
#define MAX_PATH PATH_MAX
#endif
#else
#define MAX_PATH 1024
#endif
#endif
//------------alignment macros-------------
//don't apply these to types without further testing; they only work portably here on declarations of variables
//can't we find a pattern other people use more successfully?
#if _MSC_VER >= 9999 // Was 1900. The way we use DS_ALIGN doesn't jibe with how alignas() is meant to be used, so just use __declspec(align(X)) for now to avoid problems.
#define DS_ALIGN(X) alignas(X)
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define DS_ALIGN(X) __declspec(align(X))
#elif defined(__GNUC__)
#define DS_ALIGN(X) __attribute__ ((aligned (X)))
#else
#define DS_ALIGN(X)
#endif
#ifdef HOST_64
#define CACHE_ALIGN_SIZE 64
#else
#define CACHE_ALIGN_SIZE 32
#endif
//use this for example when you want a byte value to be better-aligned
#define CACHE_ALIGN DS_ALIGN(CACHE_ALIGN_SIZE)
#define FAST_ALIGN DS_ALIGN(4)
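//sample usage (illustrative variable names):
// CACHE_ALIGN u8 lineBuffer[256]; //aligned to 64 bytes on HOST_64, 32 bytes otherwise
// FAST_ALIGN u8 smallTable[4]; //aligned to 4 bytes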
//---------------------------------------------
#ifdef __MINGW32__
#define FASTCALL __attribute__((fastcall))
#define ASMJIT_CALL_CONV kX86FuncConvGccFastCall
#elif defined (__i386__) && !defined(__clang__)
#define FASTCALL __attribute__((regparm(3)))
#define ASMJIT_CALL_CONV kX86FuncConvGccRegParm3
#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define FASTCALL
#define ASMJIT_CALL_CONV kX86FuncConvDefault
#else
#define FASTCALL
#define ASMJIT_CALL_CONV kX86FuncConvDefault
#endif
#ifdef _MSC_VER
#define _CDECL_ __cdecl
#else
#define _CDECL_
#endif
#ifndef FORCEINLINE
#if defined(_MSC_VER) || defined(__INTEL_COMPILER)
#define FORCEINLINE __forceinline
#define MSC_FORCEINLINE __forceinline
#else
#define FORCEINLINE inline __attribute__((always_inline))
#define MSC_FORCEINLINE
#endif
#endif
#ifndef NOINLINE
#ifdef __GNUC__
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif
#endif
#ifndef LOOPVECTORIZE_DISABLE
#if defined(_MSC_VER)
#if _MSC_VER >= 1700
#define LOOPVECTORIZE_DISABLE loop(no_vector)
#else
#define LOOPVECTORIZE_DISABLE
#endif
#elif defined(__clang__)
#define LOOPVECTORIZE_DISABLE clang loop vectorize(disable)
#else
#define LOOPVECTORIZE_DISABLE
#endif
#endif
#if defined(__LP64__)
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef signed char s8;
typedef signed short s16;
typedef signed int s32;
typedef signed long long s64;
#else
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
#if defined(_MSC_VER) || defined(__INTEL_COMPILER)
typedef unsigned __int64 u64;
#else
typedef unsigned long long u64;
#endif
typedef signed char s8;
typedef signed short s16;
typedef signed int s32;
#if defined(_MSC_VER) || defined(__INTEL_COMPILER)
typedef __int64 s64;
#else
typedef signed long long s64;
#endif
#endif
typedef u8 uint8;
typedef u16 uint16;
#ifndef OBJ_C
typedef u32 uint32;
#else
#define uint32 u32 //uint32 is defined in Leopard somewhere, avoid conflicts
#endif
#ifdef ENABLE_ALTIVEC
#ifndef __APPLE_ALTIVEC__
#include <altivec.h>
#endif
typedef vector unsigned char v128u8;
typedef vector signed char v128s8;
typedef vector unsigned short v128u16;
typedef vector signed short v128s16;
typedef vector unsigned int v128u32;
typedef vector signed int v128s32;
#endif
#ifdef ENABLE_SSE2
#include <emmintrin.h>
typedef __m128i v128u8;
typedef __m128i v128s8;
typedef __m128i v128u16;
typedef __m128i v128s16;
typedef __m128i v128u32;
typedef __m128i v128s32;
#endif
#if defined(ENABLE_AVX) || defined(ENABLE_AVX512_0)
#include <immintrin.h>
typedef __m256i v256u8;
typedef __m256i v256s8;
typedef __m256i v256u16;
typedef __m256i v256s16;
typedef __m256i v256u32;
typedef __m256i v256s32;
#if defined(ENABLE_AVX512_0)
typedef __m512i v512u8;
typedef __m512i v512s8;
typedef __m512i v512u16;
typedef __m512i v512s16;
typedef __m512i v512u32;
typedef __m512i v512s32;
#endif
#endif // defined(ENABLE_AVX) || defined(ENABLE_AVX512_0)
/*---------- GPU3D fixed-point types -----------*/
typedef s32 f32;
#define inttof32(n) ((n) << 12)
#define f32toint(n) ((n) >> 12)
#define floattof32(n) ((s32)((n) * (1 << 12)))
#define f32tofloat(n) (((float)(n)) / (float)(1<<12))
typedef s16 t16;
#define f32tot16(n) ((t16)((n) >> 8))
#define inttot16(n) ((n) << 4)
#define t16toint(n) ((n) >> 4)
#define floattot16(n) ((t16)((n) * (1 << 4)))
#define t16ofloat(n) (((float)(n)) / (float)(1<<4))
typedef s16 v16;
#define inttov16(n) ((n) << 12)
#define f32tov16(n) (n)
#define floattov16(n) ((v16)((n) * (1 << 12)))
#define v16toint(n) ((n) >> 12)
#define v16tofloat(n) (((float)(n)) / (float)(1<<12))
typedef s16 v10;
#define inttov10(n) ((n) << 9)
#define f32tov10(n) ((v10)((n) >> 3))
#define v10toint(n) ((n) >> 9)
#define floattov10(n) ((v10)((n) * (1 << 9)))
#define v10tofloat(n) (((float)(n)) / (float)(1<<9))
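//worked examples for the 20.12 f32 format (illustrative):
// inttof32(2) == 0x2000 (8192)
// floattof32(1.5f) == 0x1800 (6144)
// f32toint(0x1800) == 1 (the arithmetic shift rounds toward negative infinity)
// f32tot16(0x1800) == 0x18 (t16 keeps 4 of the 12 fractional bits)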
/*----------------------*/
#ifndef OBJ_C
typedef int BOOL;
#else
//apple also defines BOOL
typedef int desmume_BOOL;
#define BOOL desmume_BOOL
#endif
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
// Atomic functions
#if defined(HOST_WINDOWS)
#include <winnt.h>
//#define atomic_add_32(V,M) InterlockedAddNoFence((volatile LONG *)(V),(LONG)(M)) // Requires Windows 8
inline s32 atomic_add_32(volatile s32 *V, s32 M) { return (s32)(InterlockedExchangeAdd((volatile LONG *)V, (LONG)M) + M); }
inline s32 atomic_add_barrier32(volatile s32 *V, s32 M) { return (s32)(InterlockedExchangeAdd((volatile LONG *)V, (LONG)M) + M); }
//#define atomic_inc_32(V) InterlockedIncrementNoFence((volatile LONG *)(V)) // Requires Windows 8
#define atomic_inc_32(V) _InterlockedIncrement((volatile LONG *)(V))
#define atomic_inc_barrier32(V) _InterlockedIncrement((volatile LONG *)(V))
//#define atomic_dec_32(V) InterlockedDecrementNoFence((volatile LONG *)(V)) // Requires Windows 8
#define atomic_dec_32(V) _InterlockedDecrement((volatile LONG *)(V))
#define atomic_dec_barrier32(V) _InterlockedDecrement((volatile LONG *)(V))
//#define atomic_or_32(V,M) InterlockedOrNoFence((volatile LONG *)(V),(LONG)(M)) // Requires Windows 8
#define atomic_or_32(V,M) _InterlockedOr((volatile LONG *)(V),(LONG)(M))
#define atomic_or_barrier32(V,M) _InterlockedOr((volatile LONG *)(V),(LONG)(M))
//#define atomic_and_32(V,M) InterlockedAndNoFence((volatile LONG *)(V),(LONG)(M)) // Requires Windows 8
#define atomic_and_32(V,M) _InterlockedAnd((volatile LONG *)(V),(LONG)(M))
#define atomic_and_barrier32(V,M) _InterlockedAnd((volatile LONG *)(V),(LONG)(M))
//#define atomic_xor_32(V,M) InterlockedXorNoFence((volatile LONG *)(V),(LONG)(M)) // Requires Windows 8
#define atomic_xor_32(V,M) _InterlockedXor((volatile LONG *)(V),(LONG)(M))
#define atomic_xor_barrier32(V,M) _InterlockedXor((volatile LONG *)(V),(LONG)(M))
inline bool atomic_test_and_set_32(volatile s32 *V, s32 M) { return (_interlockedbittestandset((volatile LONG *)V, (LONG)M)) ? true : false; }
inline bool atomic_test_and_set_barrier32(volatile s32 *V, s32 M) { return (_interlockedbittestandset((volatile LONG *)V, (LONG)M)) ? true : false; }
inline bool atomic_test_and_clear_32(volatile s32 *V, s32 M) { return (_interlockedbittestandreset((volatile LONG *)V, (LONG)M)) ? true : false; }
inline bool atomic_test_and_clear_barrier32(volatile s32 *V, s32 M) { return (_interlockedbittestandreset((volatile LONG *)V, (LONG)M)) ? true : false; }
#elif defined(DESMUME_COCOA)
#include <libkern/OSAtomic.h>
#define atomic_add_32(V,M) OSAtomicAdd32((M),(V))
#define atomic_add_barrier32(V,M) OSAtomicAdd32Barrier((M),(V))
#define atomic_inc_32(V) OSAtomicIncrement32((V))
#define atomic_inc_barrier32(V) OSAtomicIncrement32Barrier((V))
#define atomic_dec_32(V) OSAtomicDecrement32((V))
#define atomic_dec_barrier32(V) OSAtomicDecrement32Barrier((V))
#define atomic_or_32(V,M) OSAtomicOr32((M),(volatile uint32_t *)(V))
#define atomic_or_barrier32(V,M) OSAtomicOr32Barrier((M),(volatile uint32_t *)(V))
#define atomic_and_32(V,M) OSAtomicAnd32((M),(volatile uint32_t *)(V))
#define atomic_and_barrier32(V,M) OSAtomicAnd32Barrier((M),(volatile uint32_t *)(V))
#define atomic_xor_32(V,M) OSAtomicXor32((M),(volatile uint32_t *)(V))
#define atomic_xor_barrier32(V,M) OSAtomicXor32Barrier((M),(volatile uint32_t *)(V))
#define atomic_test_and_set_32(V,M) OSAtomicTestAndSet((M),(V))
#define atomic_test_and_set_barrier32(V,M) OSAtomicTestAndSetBarrier((M),(V))
#define atomic_test_and_clear_32(V,M) OSAtomicTestAndClear((M),(V))
#define atomic_test_and_clear_barrier32(V,M) OSAtomicTestAndClearBarrier((M),(V))
#else // Just use C++11 std::atomic
#include <atomic>
inline s32 atomic_add_32(volatile s32 *V, s32 M) { return std::atomic_fetch_add_explicit<s32>((volatile std::atomic<s32> *)V, M, std::memory_order::memory_order_relaxed) + M; }
inline s32 atomic_add_barrier32(volatile s32 *V, s32 M) { return std::atomic_fetch_add_explicit<s32>((volatile std::atomic<s32> *)V, M, std::memory_order::memory_order_seq_cst) + M; }
inline s32 atomic_inc_32(volatile s32 *V) { return atomic_add_32(V, 1); }
inline s32 atomic_inc_barrier32(volatile s32 *V) { return atomic_add_barrier32(V, 1); }
inline s32 atomic_dec_32(volatile s32 *V) { return atomic_add_32(V, -1); }
inline s32 atomic_dec_barrier32(volatile s32 *V) { return atomic_add_barrier32(V, -1); }
inline s32 atomic_or_32(volatile s32 *V, s32 M) { return std::atomic_fetch_or_explicit<s32>((volatile std::atomic<s32> *)V, M, std::memory_order::memory_order_relaxed) | M; }
inline s32 atomic_or_barrier32(volatile s32 *V, s32 M) { return std::atomic_fetch_or_explicit<s32>((volatile std::atomic<s32> *)V, M, std::memory_order::memory_order_seq_cst) | M; }
inline s32 atomic_and_32(volatile s32 *V, s32 M) { return std::atomic_fetch_and_explicit<s32>((volatile std::atomic<s32> *)V, M, std::memory_order::memory_order_relaxed) & M; }
inline s32 atomic_and_barrier32(volatile s32 *V, s32 M) { return std::atomic_fetch_and_explicit<s32>((volatile std::atomic<s32> *)V, M, std::memory_order::memory_order_seq_cst) & M; }
inline s32 atomic_xor_32(volatile s32 *V, s32 M) { return std::atomic_fetch_xor_explicit<s32>((volatile std::atomic<s32> *)V, M, std::memory_order::memory_order_relaxed) ^ M; }
inline s32 atomic_xor_barrier32(volatile s32 *V, s32 M) { return std::atomic_fetch_xor_explicit<s32>((volatile std::atomic<s32> *)V, M, std::memory_order::memory_order_seq_cst) ^ M; }
inline bool atomic_test_and_set_32(volatile s32 *V, s32 M) { return (std::atomic_fetch_or_explicit<s32>((volatile std::atomic<s32> *)V,(0x80>>((M)&0x07)), std::memory_order::memory_order_relaxed) & (0x80>>((M)&0x07))) ? true : false; }
inline bool atomic_test_and_set_barrier32(volatile s32 *V, s32 M) { return (std::atomic_fetch_or_explicit<s32>((volatile std::atomic<s32> *)V,(0x80>>((M)&0x07)), std::memory_order::memory_order_seq_cst) & (0x80>>((M)&0x07))) ? true : false; }
inline bool atomic_test_and_clear_32(volatile s32 *V, s32 M) { return (std::atomic_fetch_and_explicit<s32>((volatile std::atomic<s32> *)V,~(s32)(0x80>>((M)&0x07)), std::memory_order::memory_order_relaxed) & (0x80>>((M)&0x07))) ? true : false; }
inline bool atomic_test_and_clear_barrier32(volatile s32 *V, s32 M) { return (std::atomic_fetch_and_explicit<s32>((volatile std::atomic<s32> *)V,~(s32)(0x80>>((M)&0x07)), std::memory_order::memory_order_seq_cst) & (0x80>>((M)&0x07))) ? true : false; }
#endif
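//sample usage (illustrative; 'pendingFrames' is a hypothetical counter shared between threads):
// volatile s32 pendingFrames = 0;
// atomic_inc_barrier32(&pendingFrames); //producer publishes one frame
// if (atomic_dec_barrier32(&pendingFrames) == 0) { /* consumer drained the queue */ }
//atomic_add/inc/dec return the post-operation value on every backend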
/* little endian (the DS's endianness) to local endianness conversion macros */
#ifdef MSB_FIRST /* local arch is big endian */
# define LE_TO_LOCAL_16(x) ((((x)&0xff)<<8)|(((x)>>8)&0xff))
# define LE_TO_LOCAL_32(x) ((((x)&0xff)<<24)|(((x)&0xff00)<<8)|(((x)>>8)&0xff00)|(((x)>>24)&0xff))
# define LE_TO_LOCAL_64(x) ((((x)&0xff)<<56)|(((x)&0xff00)<<40)|(((x)&0xff0000)<<24)|(((x)&0xff000000)<<8)|(((x)>>8)&0xff000000)|(((x)>>24)&0xff0000)|(((x)>>40)&0xff00)|(((x)>>56)&0xff))
# define LOCAL_TO_LE_16(x) ((((x)&0xff)<<8)|(((x)>>8)&0xff))
# define LOCAL_TO_LE_32(x) ((((x)&0xff)<<24)|(((x)&0xff00)<<8)|(((x)>>8)&0xff00)|(((x)>>24)&0xff))
# define LOCAL_TO_LE_64(x) ((((x)&0xff)<<56)|(((x)&0xff00)<<40)|(((x)&0xff0000)<<24)|(((x)&0xff000000)<<8)|(((x)>>8)&0xff000000)|(((x)>>24)&0xff0000)|(((x)>>40)&0xff00)|(((x)>>56)&0xff))
#else /* local arch is little endian */
# define LE_TO_LOCAL_16(x) (x)
# define LE_TO_LOCAL_32(x) (x)
# define LE_TO_LOCAL_64(x) (x)
# define LOCAL_TO_LE_16(x) (x)
# define LOCAL_TO_LE_32(x) (x)
# define LOCAL_TO_LE_64(x) (x)
#endif
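//sample usage (illustrative; 'raw16' is a hypothetical halfword read from DS-format, i.e. little endian, data):
// u16 value = LE_TO_LOCAL_16(raw16); //no-op on little endian hosts, byteswap when MSB_FIRST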
// kilobyte and megabyte macros
#define MB(x) ((x)*1024*1024)
#define KB(x) ((x)*1024)
//fairly standard loop-unrolling macros: each MACRODOn expands TODO n times, binding the index X to successive values from the given offset
#define MACRODO1(TRICK,TODO) { const size_t X = TRICK; TODO; }
#define MACRODO2(X,TODO) { MACRODO1((X),TODO) MACRODO1(((X)+1),TODO) }
#define MACRODO4(X,TODO) { MACRODO2((X),TODO) MACRODO2(((X)+2),TODO) }
#define MACRODO8(X,TODO) { MACRODO4((X),TODO) MACRODO4(((X)+4),TODO) }
#define MACRODO16(X,TODO) { MACRODO8((X),TODO) MACRODO8(((X)+8),TODO) }
#define MACRODO32(X,TODO) { MACRODO16((X),TODO) MACRODO16(((X)+16),TODO) }
#define MACRODO64(X,TODO) { MACRODO32((X),TODO) MACRODO32(((X)+32),TODO) }
#define MACRODO128(X,TODO) { MACRODO64((X),TODO) MACRODO64(((X)+64),TODO) }
#define MACRODO256(X,TODO) { MACRODO128((X),TODO) MACRODO128(((X)+128),TODO) }
//this one lets you loop any number of times (as long as N<512)
#define MACRODO_N(N,TODO) {\
if((N)&0x100) MACRODO256(0,TODO); \
if((N)&0x080) MACRODO128((N)&(0x100),TODO); \
if((N)&0x040) MACRODO64((N)&(0x100|0x080),TODO); \
if((N)&0x020) MACRODO32((N)&(0x100|0x080|0x040),TODO); \
if((N)&0x010) MACRODO16((N)&(0x100|0x080|0x040|0x020),TODO); \
if((N)&0x008) MACRODO8((N)&(0x100|0x080|0x040|0x020|0x010),TODO); \
if((N)&0x004) MACRODO4((N)&(0x100|0x080|0x040|0x020|0x010|0x008),TODO); \
if((N)&0x002) MACRODO2((N)&(0x100|0x080|0x040|0x020|0x010|0x008|0x004),TODO); \
if((N)&0x001) MACRODO1((N)&(0x100|0x080|0x040|0x020|0x010|0x008|0x004|0x002),TODO); \
}
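//sample usage (illustrative; dst/src are hypothetical arrays): an unrolled copy of count<512 elements,
//where X is the index name bound by MACRODO1:
// MACRODO_N(count, dst[X] = src[X])
//at run time, count==5 executes MACRODO4 starting at index 0 and MACRODO1 at index 4, five copies of the body in all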
//---------------------------
//Binary constant generator macro By Tom Torfs - donated to the public domain
//turn a numeric literal into a hex constant
//(avoids problems with leading zeroes)
//an 8-digit binary constant becomes at most 0x11111111, which always fits in an unsigned long
#define HEX__(n) 0x##n##LU
//8-bit conversion function
#define B8__(x) ((x&0x0000000FLU)?1:0) \
+((x&0x000000F0LU)?2:0) \
+((x&0x00000F00LU)?4:0) \
+((x&0x0000F000LU)?8:0) \
+((x&0x000F0000LU)?16:0) \
+((x&0x00F00000LU)?32:0) \
+((x&0x0F000000LU)?64:0) \
+((x&0xF0000000LU)?128:0)
//for up to 8-bit binary constants
#define B8(d) ((unsigned char)B8__(HEX__(d)))
// for up to 16-bit binary constants, MSB first
#define B16(dmsb,dlsb) (((unsigned short)B8(dmsb)<<8) \
+ B8(dlsb))
// for up to 32-bit binary constants, MSB first
#define B32(dmsb,db2,db3,dlsb) (((unsigned long)B8(dmsb)<<24) \
+ ((unsigned long)B8(db2)<<16) \
+ ((unsigned long)B8(db3)<<8) \
+ B8(dlsb))
//Sample usage:
//B8(01010101) = 85
//B16(10101010,01010101) = 43605
//B32(10000000,11111111,10101010,01010101) = 2164238933
//---------------------------
#ifndef CTASSERT
//compile-time assert: when x is false this declares an array of negative size, forcing a compile error
#define CTASSERT(x) typedef char __assert ## y[(x) ? 1 : -1]
#endif
//destroy an object in place and rebuild it with its default constructor
template<typename T> inline void reconstruct(T* t) {
	t->~T();
	new(t) T();
}
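//sample usage (illustrative): reset an object to its freshly-constructed state in place
// struct Config { int bpp; Config() : bpp(24) {} };
// Config cfg; cfg.bpp = 16;
// reconstruct(&cfg); //cfg.bpp is 24 again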
#endif