
added SFMT lib to 3rdparty

1 parent 0811bdd commit 84b34839191986145d3060204e45fa0d49622459 @liuliu committed Oct 22, 2012
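For context: the library being vendored here is Saito and Matsumoto's SIMD-oriented Fast Mersenne Twister. Its public C API (declared in SFMT.h) keeps all generator state in a caller-owned sfmt_t, which is what the sfmt_gen_rand_all(sfmt_t *) and gen_rand_array(sfmt_t *, ...) signatures in the hunks below reflect. A minimal sketch of how the added code would be driven, assuming the stock SFMT 1.4 entry points sfmt_init_gen_rand and sfmt_genrand_uint32 (illustrative only, not taken from ccv's own call sites):

    #include <stdio.h>
    #include "SFMT.h"

    int main(void) {
        sfmt_t sfmt;                        /* caller-owned generator state */
        sfmt_init_gen_rand(&sfmt, 1234);    /* seed with a 32-bit value */
        /* Each call returns one word from an internal buffer that
         * sfmt_gen_rand_all() refills when it is exhausted. */
        for (int i = 0; i < 4; i++)
            printf("%u\n", sfmt_genrand_uint32(&sfmt));
        return 0;
    }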
@@ -39,4 +39,18 @@ Redistribution and use in source and binary forms, with or without modification,
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+SFMT:
+
+Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima University.
+Copyright (c) 2012 Mutsuo Saito, Makoto Matsumoto, Hiroshima University and The University of Tokyo.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+* Neither the names of Hiroshima University, The University of Tokyo nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
sha1: TODO
@@ -0,0 +1,156 @@
+#pragma once
+/**
+ * @file SFMT-alti.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister (SFMT)
+ * pseudorandom number generator
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (Hiroshima University)
+ *
+ * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University. All rights reserved.
+ *
+ * The new BSD License is applied to this software.
+ * see LICENSE.txt
+ */
+
+#ifndef SFMT_ALTI_H
+#define SFMT_ALTI_H
+
+inline static vector unsigned int vec_recursion(vector unsigned int a,
+ vector unsigned int b,
+ vector unsigned int c,
+ vector unsigned int d);
+
+/**
+ * This function represents the recursion formula in AltiVec and BIG ENDIAN.
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ * @return output
+ */
+inline static vector unsigned int vec_recursion(vector unsigned int a,
+ vector unsigned int b,
+ vector unsigned int c,
+ vector unsigned int d) {
+
+ const vector unsigned int sl1 = SFMT_ALTI_SL1;
+ const vector unsigned int sr1 = SFMT_ALTI_SR1;
+#ifdef ONLY64
+ const vector unsigned int mask = SFMT_ALTI_MSK64;
+ const vector unsigned char perm_sl = SFMT_ALTI_SL2_PERM64;
+ const vector unsigned char perm_sr = SFMT_ALTI_SR2_PERM64;
+#else
+ const vector unsigned int mask = SFMT_ALTI_MSK;
+ const vector unsigned char perm_sl = SFMT_ALTI_SL2_PERM;
+ const vector unsigned char perm_sr = SFMT_ALTI_SR2_PERM;
+#endif
+ vector unsigned int v, w, x, y, z;
+ x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl);
+ v = a;
+ y = vec_sr(b, sr1);
+ z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr);
+ w = vec_sl(d, sl1);
+ z = vec_xor(z, w);
+ y = vec_and(y, mask);
+ v = vec_xor(v, x);
+ z = vec_xor(z, y);
+ z = vec_xor(z, v);
+ return z;
+}
+
+/**
+ * This function fills the internal state array with pseudorandom
+ * integers.
+ */
+void sfmt_gen_rand_all(sfmt_t * sfmt) {
+ int i;
+ vector unsigned int r, r1, r2;
+
+ r1 = sfmt->state[N - 2].s;
+ r2 = sfmt->state[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(sfmt->state[i].s, sfmt->state[i + POS1].s, r1, r2);
+ sfmt->state[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(sfmt->state[i].s, sfmt->state[i + POS1 - N].s, r1, r2);
+ sfmt->state[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+/**
+ * This function fills the user-specified array with pseudorandom
+ * integers.
+ *
+ * @param array a 128-bit array to be filled with pseudorandom numbers.
+ * @param size number of 128-bit pseudorandom numbers to be generated.
+ */
+inline static void gen_rand_array(sfmt_t * sfmt, w128_t *array, int size) {
+ int i, j;
+ vector unsigned int r, r1, r2;
+
+ r1 = sfmt->state[N - 2].s;
+ r2 = sfmt->state[N - 1].s;
+ for (i = 0; i < N - POS1; i++) {
+ r = vec_recursion(sfmt->state[i].s, sfmt->state[i + POS1].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (; i < N; i++) {
+ r = vec_recursion(sfmt->state[i].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ /* main loop */
+ for (; i < size - N; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+ for (j = 0; j < 2 * N - size; j++) {
+ sfmt->state[j].s = array[j + size - N].s;
+ }
+ for (; i < size; i++) {
+ r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2);
+ array[i].s = r;
+ sfmt->state[j++].s = r;
+ r1 = r2;
+ r2 = r;
+ }
+}
+
+#ifndef ONLY64
+#if defined(__APPLE__)
+#define SFMT_ALTI_SWAP (vector unsigned char) \
+ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11)
+#else
+#define SFMT_ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
+#endif
+/**
+ * This function swaps the high and low 32 bits of the 64-bit integers in a
+ * user-specified array.
+ *
+ * @param array a 128-bit array to be swapped.
+ * @param size size of the 128-bit array.
+ */
+inline static void swap(w128_t *array, int size) {
+ int i;
+ const vector unsigned char perm = SFMT_ALTI_SWAP;
+
+ for (i = 0; i < size; i++) {
+ array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm);
+ }
+}
+#endif
+
+#endif
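gen_rand_array above writes pseudorandom 128-bit words straight into a caller-supplied buffer and then copies the tail back into the internal state. In the stock SFMT API it is reached through the sfmt_fill_array32 / sfmt_fill_array64 wrappers rather than called directly; a hedged sketch of that bulk path follows (buffer size and seed are arbitrary, and the alignment and size constraints are the ones upstream SFMT documents, stated here from memory):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include "SFMT.h"

    int main(void) {
        sfmt_t sfmt;
        uint32_t *buf;
        int count = 1024;  /* must be a multiple of 4 and >= SFMT_N32 (624 for SFMT19937) */

        /* The SIMD paths treat the buffer as 128-bit words, so keep it 16-byte aligned. */
        if (posix_memalign((void **)&buf, 16, count * sizeof(uint32_t)) != 0)
            return 1;
        sfmt_init_gen_rand(&sfmt, 4321);
        sfmt_fill_array32(&sfmt, buf, count);   /* bulk generation via gen_rand_array() */
        printf("%u ... %u\n", buf[0], buf[count - 1]);
        free(buf);
        return 0;
    }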
@@ -0,0 +1,164 @@
+#pragma once
+/**
+ * @file SFMT-common.h
+ *
+ * @brief SIMD oriented Fast Mersenne Twister (SFMT) pseudorandom
+ * number generator with jump function. This file includes common functions
+ * used in random number generation and jump.
+ *
+ * @author Mutsuo Saito (Hiroshima University)
+ * @author Makoto Matsumoto (The University of Tokyo)
+ *
+ * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ * University.
+ * Copyright (C) 2012 Mutsuo Saito, Makoto Matsumoto, Hiroshima
+ * University and The University of Tokyo.
+ * All rights reserved.
+ *
+ * The 3-clause BSD License is applied to this software, see
+ * LICENSE.txt
+ */
+#ifndef SFMT_COMMON_H
+#define SFMT_COMMON_H
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "SFMT.h"
+
+inline static void do_recursion(w128_t * r, w128_t * a, w128_t * b,
+ w128_t * c, w128_t * d);
+
+inline static void rshift128(w128_t *out, w128_t const *in, int shift);
+inline static void lshift128(w128_t *out, w128_t const *in, int shift);
+
+/**
+ * This function simulates a SIMD 128-bit right shift in standard C.
+ * The 128-bit integer given in 'in' is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#ifdef ONLY64
+inline static void rshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+inline static void rshift128(w128_t *out, w128_t const *in, int shift)
+{
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th >> (shift * 8);
+ ol = tl >> (shift * 8);
+ ol |= th << (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+/**
+ * This function simulates a SIMD 128-bit left shift in standard C.
+ * The 128-bit integer given in 'in' is shifted by (shift * 8) bits.
+ * This function simulates the LITTLE ENDIAN SIMD.
+ * @param out the output of this function
+ * @param in the 128-bit data to be shifted
+ * @param shift the shift value
+ */
+#ifdef ONLY64
+inline static void lshift128(w128_t *out, w128_t const *in, int shift) {
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]);
+ tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[0] = (uint32_t)(ol >> 32);
+ out->u[1] = (uint32_t)ol;
+ out->u[2] = (uint32_t)(oh >> 32);
+ out->u[3] = (uint32_t)oh;
+}
+#else
+inline static void lshift128(w128_t *out, w128_t const *in, int shift)
+{
+ uint64_t th, tl, oh, ol;
+
+ th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]);
+ tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]);
+
+ oh = th << (shift * 8);
+ ol = tl << (shift * 8);
+ oh |= tl >> (64 - shift * 8);
+ out->u[1] = (uint32_t)(ol >> 32);
+ out->u[0] = (uint32_t)ol;
+ out->u[3] = (uint32_t)(oh >> 32);
+ out->u[2] = (uint32_t)oh;
+}
+#endif
+/**
+ * This function represents the recursion formula.
+ * @param r output
+ * @param a a 128-bit part of the internal state array
+ * @param b a 128-bit part of the internal state array
+ * @param c a 128-bit part of the internal state array
+ * @param d a 128-bit part of the internal state array
+ */
+#ifdef ONLY64
+inline static void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c,
+ w128_t *d) {
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SFMT_SL2);
+ rshift128(&y, c, SFMT_SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SFMT_SR1) & SFMT_MSK2) ^ y.u[0]
+ ^ (d->u[0] << SFMT_SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SFMT_SR1) & SFMT_MSK1) ^ y.u[1]
+ ^ (d->u[1] << SFMT_SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SFMT_SR1) & SFMT_MSK4) ^ y.u[2]
+ ^ (d->u[2] << SFMT_SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SFMT_SR1) & SFMT_MSK3) ^ y.u[3]
+ ^ (d->u[3] << SFMT_SL1);
+}
+#else
+inline static void do_recursion(w128_t *r, w128_t *a, w128_t *b,
+ w128_t *c, w128_t *d)
+{
+ w128_t x;
+ w128_t y;
+
+ lshift128(&x, a, SFMT_SL2);
+ rshift128(&y, c, SFMT_SR2);
+ r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SFMT_SR1) & SFMT_MSK1)
+ ^ y.u[0] ^ (d->u[0] << SFMT_SL1);
+ r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SFMT_SR1) & SFMT_MSK2)
+ ^ y.u[1] ^ (d->u[1] << SFMT_SL1);
+ r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SFMT_SR1) & SFMT_MSK3)
+ ^ y.u[2] ^ (d->u[2] << SFMT_SL1);
+ r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SFMT_SR1) & SFMT_MSK4)
+ ^ y.u[3] ^ (d->u[3] << SFMT_SL1);
+}
+#endif
+#endif
+
+#if defined(__cplusplus)
+}
+#endif