
/*
A C program for MT19937-64
Coded by Takuji Nishimura and Makoto Matsumoto. (With a lot of
changes by Bill J. Gray; see below.)
This is a 64-bit version of Mersenne Twister pseudorandom number
generator.
Before using, initialize the state by using init_mt64(seed, state_array)
or init_mt64_by_array( init_key, key_length, state_array).
Copyright (C) 2004, Makoto Matsumoto and Takuji Nishimura,
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
References:
T. Nishimura, ``Tables of 64-bit Mersenne Twisters''
ACM Transactions on Modeling and
Computer Simulation 10. (2000) 348-357.
M. Matsumoto and T. Nishimura,
``Mersenne Twister: a 623-dimensionally equidistributed
uniform pseudorandom number generator''
ACM Transactions on Modeling and
Computer Simulation 8. (Jan. 1998) 3-30.
Any feedback is very welcome.
http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html
email: m-mat @ math.sci.hiroshima-u.ac.jp (remove spaces)
(BEFORE PROVIDING FEEDBACK, BE AWARE THAT THE FOLLOWING IS NOT THE
ORIGINAL CODE! It's been very heavily revised, to the point where
it may not be particularly familiar to the original authors. You may
want to send feedback to me, Bill Gray, pluto(at)projectpluto(dot)com.)
2011 Apr 2: BJG: removed static variables to make code
re-entrant (and therefore thread-safe, I hope). Changed
'unsigned long long's to 'uint64_t'. Used a UCONST( ) macro
to allow the code to compile in Microsoft VC, which has a somewhat
different way (of course) for defining 64-bit constants.
There is now a function to get N bits at a time, where N <= 64.
Certain ways of getting double-precision random numbers are given.
I moved the 'test' routines into a separate mt64test.c module,
added a timing test, and created a header file.
*/
#include <stdint.h>
#include "mt64.h"
#ifndef UINT64_C
#ifdef _MSC_VER
#define UINT64_C( a) (a##ui64)
#else
#ifdef _WIN32
#define UINT64_C( a) (a##ULL)
#else
#define UINT64_C( a) ((uint64_t)(a##UL))
#endif
#endif
#endif
#define MM 156
#define MATRIX_A UINT64_C( 0xB5026F5AA96619E9)
#define UM UINT64_C( 0xFFFFFFFF80000000) /* Most significant 33 bits */
#define LM UINT64_C( 0x7FFFFFFF) /* Least significant 31 bits */
#define MT_SIZE 312
#define LOC mt[MT_SIZE]
#define N_BITS_LEFT mt[MT_SIZE + 1]
#define LEFTOVER_BITS mt[MT_SIZE + 2]
/* Initializes the state vector mt[0..MT_SIZE-1] from a single seed,
using the modified linear congruential recurrence from the reference
MT19937-64 code.  The three bookkeeping slots LOC, N_BITS_LEFT and
LEFTOVER_BITS (stored at the end of the array) are reset too,  so
'mt' must have room for MT_SIZE + 3 uint64_t values. */
void init_mt64( const uint64_t seed, uint64_t *mt)
{
   int i;

   mt[0] = seed;
   for( i = 1; i < MT_SIZE; i++)
      mt[i] = (UINT64_C( 6364136223846793005) * (mt[i - 1] ^ (mt[i - 1] >> 62)) + i);
   LOC = MT_SIZE;         /* forces a refill on the first mt64( ) call */
   N_BITS_LEFT = (uint32_t)0;
   LEFTOVER_BITS = (uint64_t)0;
}
/* Initializes the state by an array with array length.
'init_key' is the array of initializing keys;  'key_length' is
its length.  This is the standard MT19937-64 array-seeding
scheme:  seed with a fixed value,  then mix the key material
into the state with two non-linear passes. */
void init_mt64_by_array( const uint64_t init_key[],
                  const uint64_t key_length, uint64_t *mt)
{
   uint64_t i, j, k;

   init_mt64( UINT64_C( 19650218), mt);
   i = 1;  j = 0;
   k = (MT_SIZE > key_length ? MT_SIZE : key_length);
   for( ; k; k--)
      {
      mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 62)) * UINT64_C( 3935559000370003845)))
                  + init_key[j] + j;       /* non linear */
      i++;  j++;
      if( i >= MT_SIZE)
         {
         mt[0] = mt[MT_SIZE - 1];
         i = 1;
         }
      if( j >= key_length)
         j = 0;
      }
   for( k = MT_SIZE - 1; k; k--)
      {
      mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 62)) * UINT64_C( 2862933555777941757)))
                  - i;                     /* non linear */
      i++;
      if( i >= MT_SIZE)
         {
         mt[0] = mt[MT_SIZE - 1];
         i = 1;
         }
      }

   mt[0] = UINT64_C( 1) << 63;   /* MSB is 1,  assuring non-zero initial array */
}
/* Generates a random number on the [0, 2^64-1] interval.  When the
buffered words are exhausted (LOC >= MT_SIZE),  the whole state is
"twisted" in place,  in three stages so that no index arithmetic has
to be done modulo MT_SIZE inside the loops. */
uint64_t mt64( uint64_t * RESTRICT mt)
{
   uint64_t x;

   if( LOC >= MT_SIZE)     /* generate MT_SIZE words at one time */
      {
      static const uint64_t mag01[2] = { UINT64_C( 0), MATRIX_A};
      uint64_t * RESTRICT endptr = mt + MT_SIZE - MM;

            /* first MT_SIZE - MM words:  partner word mt[MM] lies */
            /* ahead,  still within the state array */
      while( mt < endptr)
         {
         x = (mt[0] & UM) | (mt[1] & LM);
         *mt = mt[MM] ^ (x >> 1) ^ mag01[(unsigned)x & 1u];
         mt++;
         }
      endptr += MM - 1;
            /* next MM - 1 words:  the partner index wraps around */
            /* to the (already updated) start of the array */
      while( mt < endptr)
         {
         x = (mt[0] & UM) | (mt[1] & LM);
         *mt = mt[MM - MT_SIZE] ^ (x >> 1) ^ mag01[(unsigned)x & 1u];
         mt++;
         }
      mt -= MT_SIZE - 1;      /* back to the start of the state array */
            /* last word pairs the top of the array with mt[0] */
      x = (mt[MT_SIZE - 1] & UM) | (mt[0] & LM);
      mt[MT_SIZE - 1] = mt[MM - 1] ^ (x >> 1) ^ mag01[(unsigned)x & 1u];

      LOC = 0;
      }

   x = mt[LOC++];

         /* tempering,  exactly as in the reference MT19937-64 code: */
   x ^= (x >> 29) & UINT64_C( 0x5555555555555555);
   x ^= (x << 17) & UINT64_C( 0x71D67FFFEDA60000);
   x ^= (x << 37) & UINT64_C( 0xFFF7EEE000000000);
   x ^= (x >> 43);

   return x;
}
/* The following (currently unused) function allows one to
get n_bits of random bits at a time.  Thus, for example,
mt64_get_bits( mt, 2) will return a random value from 0 to
3.  The benefit of this is that the full mt64() function
would then be called only once every 32 times, and one
has somewhat faster code.
Another example: if you called mt64_get_bits( mt, 25)
three times in a row, mt64( ) would be called the first time,
generating 64 new random bits.  25 would be returned to the
user, with 39 saved in LEFTOVER_BITS (and therefore,
N_BITS_LEFT == 39).  On the second call, 25 more bits would
be extracted from LEFTOVER_BITS, with N_BITS_LEFT reset
to 39-25=14.  On the third call, we would see that we had
only fourteen random bits; another call to mt64() would be
made, with 25 of the resulting bits returned to the user
and the remaining 39 put back into LEFTOVER_BITS.  That
would reset N_BITS_LEFT to 14+39 = 53 random bits.
The potential pitfall is that it assumes that mt64()
really returns random bits, and that no unexpected
correlations will appear when examining them in, say,
seven-bit increments.  With a truly "random" source, the
order in which bits are sent is obviously unimportant.  But I
don't _know_ for a certainty that MT64 is sufficiently
"random", and most tests run on it have presumably looked
for correlations over 64-bit intervals rather than, say,
seven-bit ones.  As was said of the infamous RANDU generator
(in _Numerical Recipes in C_): "We guarantee that each number
is random individually, but we don't guarantee that more
than one of them is random."  MT64 is immensely more "random"
than that generator, and I believe it's safe to get bits in
this manner (and if it isn't, the solution should be to use
a still better PRNG, rather than ignoring the problem; the
"randomness" of a PRNG should not depend on the order in
which bits are examined.)  But, "caveat user". */
#ifdef CURRENTLY_UNUSED
/* Returns 'n_bits' random bits,  drawing them from bits left over
from previous calls when possible,  and calling mt64( ) for a fresh
64-bit chunk only when the reserve runs dry.  See the long comment
above for a worked example and for caveats.
NOTE(review): n_bits == 64 with an empty reserve would produce two
shifts by 64 (undefined behavior in C);  callers wanting full words
should use mt64( ) directly -- confirm intended limit is n_bits < 64. */
uint64_t mt64_get_bits( uint64_t *mt, const int n_bits)
{
   uint64_t rval;

   if( (unsigned)n_bits <= N_BITS_LEFT)
      {                          /* yes,  can just use leftover bits */
      rval = LEFTOVER_BITS;      /* computed on previous calls */
      LEFTOVER_BITS >>= n_bits;
      N_BITS_LEFT -= n_bits;
      }
   else                          /* no,  gotta generate new bits */
      {
      rval = mt64( mt);
            /* Fold the unused high bits back into the reserve;  the */
            /* shift keeps exactly N_BITS_LEFT + 64 - n_bits of them, */
            /* matching the updated N_BITS_LEFT count below.          */
      LEFTOVER_BITS ^= rval >> (n_bits - N_BITS_LEFT);
      N_BITS_LEFT += 64 - n_bits;
      }
            /* Mask off and return only the lowest 'n_bits' bits: */
   return( rval & (((uint64_t)1 << n_bits) - (uint64_t)1));
}
#endif
/* The following generates a double on the [0,1) interval.  I
tried three methods: (1) the original one in the MT64 code,
which takes a pseudorandom number, shifts it down, casts it
to double, and multiplies it by 2^-53; (2) a modification of a
method described in _Numerical Recipes_ that assumes 64-bit
doubles in IEEE format, but which gets a type-punning warning
in gcc; and (3) almost the same method, except by doing a
memcpy(), the type-punning warning is avoided.  (Any of the
three methods can be selected with suitable #defines.)
All three methods work Just Fine.  The last two could have
trouble if doubles aren't 64 bits or if they aren't in IEEE
format.  And it turns out (after running timing tests) that
the performance of all three is essentially identical.  About
80% of the time is spent in mt64() anyway; the efforts to
convert the result to a double take up the remaining 20%,
and that did not vary much between the three methods.
Incidentally, I also tried the commented-out version:
// const uint64_t rval = jflone | mt64_get_bits( mt, 52);
My thought was that this would cut down on calls to mt64()
by a factor of 52/64 = .8125, since we'd be only using 52
random bits at a time.  However, mt64_get_bits() has some
overhead; it actually was a little slower this way.  It
_could_ be a useful trick if one's PRNG was slower, though.
Another completely commented-out version attempts to be
faster and provide somewhat better results.  It's actually
a little slower.  The "better results" refers to the fact that
the "usual" mt64_double has a granularity of 2^-53, even
though doubles can represent numbers from .25 to .5 down to
2^-54, those from .125 to .25 down to 2^-55, and so on.
It's hard to persuade me that this is really a problem.
But if it were, something resembling the commented-out
version would address it. */
#ifdef AVOID_TYPE_PUNNING
#include <string.h>        /* for memcpy( ) prototype */
#endif

#ifdef __GNUC__
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif

/* Generates a double on the [0, 1) interval,  using one of the
three methods described in the long comment above (selected via
NON_IEEE_WAY / AVOID_TYPE_PUNNING #defines). */
double mt64_double( uint64_t * RESTRICT mt)
{
#ifdef NON_IEEE_WAY
   const double two_to_the_53rd_power = 9007199254740992.0;

            /* keep the top 53 bits,  then scale into [0, 1) */
   return (int64_t)(mt64( mt) >> 11) * (1.0 / two_to_the_53rd_power);
#else
            /* Build an IEEE double in [1, 2):  the exponent bits of */
            /* 1.0 ORed with 52 random mantissa bits;  then subtract */
            /* 1 to land on [0, 1).  Assumes 64-bit IEEE doubles.    */
   const uint64_t jflone = UINT64_C( 0x3ff0000000000000);
   const uint64_t jflmsk = UINT64_C( 0x000fffffffffffff);
   const uint64_t rval = jflone | (jflmsk & mt64( mt));
// const uint64_t rval = jflone | mt64_get_bits( mt, 52);

#ifndef AVOID_TYPE_PUNNING
   return( *((const double * RESTRICT)&rval) - 1.);
#else
   double d_rval;

            /* memcpy( ) avoids the strict-aliasing violation of the */
            /* pointer cast above */
   memcpy( &d_rval, &rval, sizeof( double));
   d_rval -= 1.;
   return( d_rval);
#endif
#endif
}

#ifdef __GNUC__
#pragma GCC diagnostic warning "-Wstrict-aliasing"
#endif
// double mt64_double( uint64_t * RESTRICT mt)
// {
// const uint64_t jflmsk = UINT64_C( 0x000fffffffffffff);
// uint64_t rval = mt64( mt);
//
// if( rval & (UINT64_C( 1) << 63)) /* .5 <= returned value < 1 */
// rval = UINT64_C( 0x3fe0000000000000) | (jflmsk & rval);
// else if( rval & (UINT64_C( 1) << 62)) /* .25 <= returned value < .5 */
// rval = UINT64_C( 0x3fd0000000000000) | (jflmsk & rval);
// else if( rval & (UINT64_C( 1) << 61)) /* .125 <= returned value < .25 */
// rval = UINT64_C( 0x3fc0000000000000) | (jflmsk & rval);
// else
// {
// const double two_to_the_64th_power = 65536. * 65536. * 65536. * 65536.;
// return( (double)( (int64_t)rval) * (1. / two_to_the_64th_power));
// }
// #ifndef AVOID_TYPE_PUNNING
// return( *((const double * RESTRICT)&rval));
// #else
// double d_rval;
//
// memcpy( &d_rval, &rval, sizeof( double));
// return( d_rval);
// #endif
// }
