path: root/numpy/random/src
author     Kevin Sheppard <kevin.k.sheppard@gmail.com>   2019-05-24 10:41:58 +0100
committer  mattip <matti.picus@gmail.com>                2019-05-27 22:58:35 +0300
commit     58c0e72854c3f79d3d165d74f2dc721815a38b57 (patch)
tree       3a8fff1f2c1a073248c81e53ba8d3a0367c00907 /numpy/random/src
parent     3db5a7736cf26db59817eb8939b042ae18c482fa (diff)
download   numpy-58c0e72854c3f79d3d165d74f2dc721815a38b57.tar.gz
Revert "MAINT: Implement API changes for randomgen-derived code"
This reverts commit 17e0070df93f4262908f884dca4b08cb7d0bba7f.
Diffstat (limited to 'numpy/random/src')
-rw-r--r--  numpy/random/src/pcg32/LICENSE.md              22
-rw-r--r--  numpy/random/src/pcg32/pcg-advance-64.c        62
-rw-r--r--  numpy/random/src/pcg32/pcg32-test-data-gen.c   59
-rw-r--r--  numpy/random/src/pcg32/pcg32.c                 30
-rw-r--r--  numpy/random/src/pcg32/pcg32.h                 86
-rw-r--r--  numpy/random/src/pcg32/pcg_variants.h        2210
-rw-r--r--  numpy/random/src/pcg64/LICENSE.md              22
-rw-r--r--  numpy/random/src/pcg64/pcg64-benchmark.c       42
-rw-r--r--  numpy/random/src/pcg64/pcg64-test-data-gen.c   73
-rw-r--r--  numpy/random/src/pcg64/pcg64.c                185
-rw-r--r--  numpy/random/src/pcg64/pcg64.h                262
-rw-r--r--  numpy/random/src/pcg64/pcg64.orig.c            11
-rw-r--r--  numpy/random/src/pcg64/pcg64.orig.h          2025
13 files changed, 5089 insertions, 0 deletions
diff --git a/numpy/random/src/pcg32/LICENSE.md b/numpy/random/src/pcg32/LICENSE.md
new file mode 100644
index 000000000..e28ef1a8b
--- /dev/null
+++ b/numpy/random/src/pcg32/LICENSE.md
@@ -0,0 +1,22 @@
+# PCG32
+
+## The MIT License
+
+PCG Random Number Generation for C.
+
+Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/numpy/random/src/pcg32/pcg-advance-64.c b/numpy/random/src/pcg32/pcg-advance-64.c
new file mode 100644
index 000000000..8210e7565
--- /dev/null
+++ b/numpy/random/src/pcg32/pcg-advance-64.c
@@ -0,0 +1,62 @@
+/*
+ * PCG Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * including its license and other licensing options, visit
+ *
+ * http://www.pcg-random.org
+ */
+
+/*
+ * This code is derived from the canonical C++ PCG implementation, which
+ * has many additional features and is preferable if you can use C++ in
+ * your project.
+ *
+ * Repetitive C code is derived using C preprocessor metaprogramming
+ * techniques.
+ */
+
+#include "pcg_variants.h"
+
+/* Multi-step advance functions (jump-ahead, jump-back)
+ *
+ * The method used here is based on Brown, "Random Number Generation
+ * with Arbitrary Stride,", Transactions of the American Nuclear
+ * Society (Nov. 1994). The algorithm is very similar to fast
+ * exponentiation.
+ *
+ * Even though delta is an unsigned integer, we can pass a
+ * signed integer to go backwards, it just goes "the long way round".
+ */
+
+uint64_t pcg_advance_lcg_64(uint64_t state, uint64_t delta, uint64_t cur_mult,
+ uint64_t cur_plus)
+{
+ uint64_t acc_mult = 1u;
+ uint64_t acc_plus = 0u;
+ while (delta > 0) {
+ if (delta & 1) {
+ acc_mult *= cur_mult;
+ acc_plus = acc_plus * cur_mult + cur_plus;
+ }
+ cur_plus = (cur_mult + 1) * cur_plus;
+ cur_mult *= cur_mult;
+ delta /= 2;
+ }
+ return acc_mult * state + acc_plus;
+}
+
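[Editor's note, not part of the diff] A minimal, hypothetical standalone check of the jump-ahead property described in the comment above: advancing the LCG by a delta with pcg_advance_lcg_64 matches taking that many single steps, and a "negative" delta goes the long way around the full 2^64 period. It assumes the program is compiled and linked together with pcg-advance-64.c; the MULT/INC constants mirror PCG_DEFAULT_MULTIPLIER_64 and PCG_DEFAULT_INCREMENT_64 from pcg_variants.h.

#include <assert.h>
#include <stdint.h>

#define MULT 6364136223846793005ULL /* PCG_DEFAULT_MULTIPLIER_64 */
#define INC  1442695040888963407ULL /* PCG_DEFAULT_INCREMENT_64 (odd => full period) */

extern uint64_t pcg_advance_lcg_64(uint64_t state, uint64_t delta,
                                   uint64_t cur_mult, uint64_t cur_plus);

int main(void) {
  uint64_t s = 0x4d595df4d0f33173ULL, stepped = s;
  int i;
  for (i = 0; i < 1000; i++)
    stepped = stepped * MULT + INC;              /* 1000 single LCG steps */
  assert(pcg_advance_lcg_64(s, 1000, MULT, INC) == stepped);
  /* A two's-complement delta steps backwards, "the long way round". */
  assert(pcg_advance_lcg_64(stepped, (uint64_t)-1000, MULT, INC) == s);
  return 0;
}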
diff --git a/numpy/random/src/pcg32/pcg32-test-data-gen.c b/numpy/random/src/pcg32/pcg32-test-data-gen.c
new file mode 100644
index 000000000..cccaf84b9
--- /dev/null
+++ b/numpy/random/src/pcg32/pcg32-test-data-gen.c
@@ -0,0 +1,59 @@
+/*
+ * Generate testing csv files
+ *
+ *
+ * gcc pcg32-test-data-gen.c pcg32.orig.c ../splitmix64/splitmix64.c -o
+ * pcg32-test-data-gen
+ */
+
+#include "pcg_variants.h"
+#include <inttypes.h>
+#include <stdio.h>
+
+#define N 1000
+
+int main() {
+ pcg32_random_t rng;
+ uint64_t inc, seed = 0xDEADBEAF;
+ inc = 0;
+ int i;
+ uint64_t store[N];
+ pcg32_srandom_r(&rng, seed, inc);
+ for (i = 0; i < N; i++) {
+ store[i] = pcg32_random_r(&rng);
+ }
+
+ FILE *fp;
+ fp = fopen("pcg32-testset-1.csv", "w");
+ if (fp == NULL) {
+ printf("Couldn't open file\n");
+ return -1;
+ }
+ fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
+ for (i = 0; i < N; i++) {
+ fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+ if (i == 999) {
+ printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+ }
+ }
+ fclose(fp);
+
+ seed = 0;
+ pcg32_srandom_r(&rng, seed, inc);
+ for (i = 0; i < N; i++) {
+ store[i] = pcg32_random_r(&rng);
+ }
+ fp = fopen("pcg32-testset-2.csv", "w");
+ if (fp == NULL) {
+ printf("Couldn't open file\n");
+ return -1;
+ }
+ fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
+ for (i = 0; i < N; i++) {
+ fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+ if (i == 999) {
+ printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+ }
+ }
+ fclose(fp);
+}
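[Editor's note, not part of the diff] Building this generator as the comment at the top of the file suggests (e.g. gcc pcg32-test-data-gen.c pcg32.orig.c ../splitmix64/splitmix64.c -o pcg32-test-data-gen) and running it writes pcg32-testset-1.csv (seed 0xdeadbeaf) and pcg32-testset-2.csv (seed 0). Each file starts with a "seed, 0x..." line followed by 1000 rows of "index, 0x<32-bit draw in hex>", and the final row of each set is echoed to stdout as a quick spot check.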
diff --git a/numpy/random/src/pcg32/pcg32.c b/numpy/random/src/pcg32/pcg32.c
new file mode 100644
index 000000000..5fbf6759f
--- /dev/null
+++ b/numpy/random/src/pcg32/pcg32.c
@@ -0,0 +1,30 @@
+#include "pcg32.h"
+
+extern inline uint64_t pcg32_next64(pcg32_state *state);
+extern inline uint32_t pcg32_next32(pcg32_state *state);
+extern inline double pcg32_next_double(pcg32_state *state);
+
+uint64_t pcg_advance_lcg_64(uint64_t state, uint64_t delta, uint64_t cur_mult,
+ uint64_t cur_plus) {
+ uint64_t acc_mult, acc_plus;
+ acc_mult = 1u;
+ acc_plus = 0u;
+ while (delta > 0) {
+ if (delta & 1) {
+ acc_mult *= cur_mult;
+ acc_plus = acc_plus * cur_mult + cur_plus;
+ }
+ cur_plus = (cur_mult + 1) * cur_plus;
+ cur_mult *= cur_mult;
+ delta /= 2;
+ }
+ return acc_mult * state + acc_plus;
+}
+
+extern void pcg32_advance_state(pcg32_state *state, uint64_t step) {
+ pcg32_advance_r(state->pcg_state, step);
+}
+
+extern void pcg32_set_seed(pcg32_state *state, uint64_t seed, uint64_t inc) {
+ pcg32_srandom_r(state->pcg_state, seed, inc);
+}
diff --git a/numpy/random/src/pcg32/pcg32.h b/numpy/random/src/pcg32/pcg32.h
new file mode 100644
index 000000000..32c6b693d
--- /dev/null
+++ b/numpy/random/src/pcg32/pcg32.h
@@ -0,0 +1,86 @@
+#ifndef _RANDOMDGEN__PCG32_H_
+#define _RANDOMDGEN__PCG32_H_
+
+#include <inttypes.h>
+
+#ifdef _WIN32
+#define inline __forceinline
+#endif
+
+#define PCG_DEFAULT_MULTIPLIER_64 6364136223846793005ULL
+
+struct pcg_state_setseq_64 {
+ uint64_t state;
+ uint64_t inc;
+};
+
+static inline uint32_t pcg_rotr_32(uint32_t value, unsigned int rot) {
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm("rorl %%cl, %0" : "=r"(value) : "0"(value), "c"(rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 31));
+#endif
+}
+
+static inline void pcg_setseq_64_step_r(struct pcg_state_setseq_64 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64 + rng->inc;
+}
+
+static inline uint32_t pcg_output_xsh_rr_64_32(uint64_t state) {
+ return pcg_rotr_32(((state >> 18u) ^ state) >> 27u, state >> 59u);
+}
+
+static inline uint32_t
+pcg_setseq_64_xsh_rr_32_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate;
+ oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+static inline void pcg_setseq_64_srandom_r(struct pcg_state_setseq_64 *rng,
+ uint64_t initstate,
+ uint64_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_64_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_64_step_r(rng);
+}
+
+extern uint64_t pcg_advance_lcg_64(uint64_t state, uint64_t delta,
+ uint64_t cur_mult, uint64_t cur_plus);
+
+static inline void pcg_setseq_64_advance_r(struct pcg_state_setseq_64 *rng,
+ uint64_t delta) {
+ rng->state = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64,
+ rng->inc);
+}
+
+typedef struct pcg_state_setseq_64 pcg32_random_t;
+#define pcg32_random_r pcg_setseq_64_xsh_rr_32_random_r
+#define pcg32_srandom_r pcg_setseq_64_srandom_r
+#define pcg32_advance_r pcg_setseq_64_advance_r
+
+typedef struct s_pcg32_state { pcg32_random_t *pcg_state; } pcg32_state;
+
+static inline uint64_t pcg32_next64(pcg32_state *state) {
+ return (uint64_t)(pcg32_random_r(state->pcg_state)) << 32 |
+ pcg32_random_r(state->pcg_state);
+}
+
+static inline uint32_t pcg32_next32(pcg32_state *state) {
+ return pcg32_random_r(state->pcg_state);
+}
+
+static inline double pcg32_next_double(pcg32_state *state) {
+ int32_t a = pcg32_random_r(state->pcg_state) >> 5,
+ b = pcg32_random_r(state->pcg_state) >> 6;
+ return (a * 67108864.0 + b) / 9007199254740992.0;
+}
+
+void pcg32_advance_state(pcg32_state *state, uint64_t step);
+void pcg32_set_seed(pcg32_state *state, uint64_t seed, uint64_t inc);
+
+#endif
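[Editor's note, not part of the diff] On the helpers above: pcg32_next64 concatenates two 32-bit draws into one 64-bit value, and pcg32_next_double builds a uniform double in [0, 1) with full 53-bit precision. With a the top 27 bits of one draw (>> 5) and b the top 26 bits of another (>> 6), the result is (a * 2^26 + b) / 2^53; the constants 67108864.0 and 9007199254740992.0 are 2^26 and 2^53, and the largest possible value is (2^53 - 1) / 2^53 < 1.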
diff --git a/numpy/random/src/pcg32/pcg_variants.h b/numpy/random/src/pcg32/pcg_variants.h
new file mode 100644
index 000000000..32daac1ce
--- /dev/null
+++ b/numpy/random/src/pcg32/pcg_variants.h
@@ -0,0 +1,2210 @@
+/*
+ * PCG Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * including its license and other licensing options, visit
+ *
+ * http://www.pcg-random.org
+ */
+
+/*
+ * This code is derived from the canonical C++ PCG implementation, which
+ * has many additional features and is preferable if you can use C++ in
+ * your project.
+ *
+ * Much of the derivation was performed mechanically. In particular, the
+ * output functions were generated by compiling the C++ output functions
+ * into LLVM bitcode and then transforming that using the LLVM C backend
+ * (from https://github.com/draperlaboratory/llvm-cbe), and then
+ * postprocessing and hand editing the output.
+ *
+ * Much of the remaining code was generated by C-preprocessor metaprogramming.
+ */
+
+#ifndef PCG_VARIANTS_H_INCLUDED
+#define PCG_VARIANTS_H_INCLUDED 1
+
+#include <inttypes.h>
+
+#if __SIZEOF_INT128__
+ typedef __uint128_t pcg128_t;
+ #define PCG_128BIT_CONSTANT(high,low) \
+ ((((pcg128_t)high) << 64) + low)
+ #define PCG_HAS_128BIT_OPS 1
+#endif
+
+#if __GNUC_GNU_INLINE__ && !defined(__cplusplus)
+ #error Nonstandard GNU inlining semantics. Compile with -std=c99 or better.
+ // We could instead use macros PCG_INLINE and PCG_EXTERN_INLINE
+ // but better to just reject ancient C code.
+#endif
+
+#if __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Rotate helper functions.
+ */
+
+inline uint8_t pcg_rotr_8(uint8_t value, unsigned int rot)
+{
+/* Unfortunately, clang is kinda pathetic when it comes to properly
+ * recognizing idiomatic rotate code, so for clang we actually provide
+ * assembler directives (enabled with PCG_USE_INLINE_ASM). Boo, hiss.
+ */
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm ("rorb %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((- rot) & 7));
+#endif
+}
+
+inline uint16_t pcg_rotr_16(uint16_t value, unsigned int rot)
+{
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm ("rorw %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((- rot) & 15));
+#endif
+}
+
+inline uint32_t pcg_rotr_32(uint32_t value, unsigned int rot)
+{
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm ("rorl %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((- rot) & 31));
+#endif
+}
+
+inline uint64_t pcg_rotr_64(uint64_t value, unsigned int rot)
+{
+#if 0 && PCG_USE_INLINE_ASM && __clang__ && __x86_64__
+ // For whatever reason, clang actually *does* generate rotq by
+ // itself, so we don't need this code.
+ asm ("rorq %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((- rot) & 63));
+#endif
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_rotr_128(pcg128_t value, unsigned int rot)
+{
+ return (value >> rot) | (value << ((- rot) & 127));
+}
+#endif
+
+/*
+ * Output functions. These are the core of the PCG generation scheme.
+ */
+
+// XSH RS
+
+inline uint8_t pcg_output_xsh_rs_16_8(uint16_t state)
+{
+ return (uint8_t)(((state >> 7u) ^ state) >> ((state >> 14u) + 3u));
+}
+
+inline uint16_t pcg_output_xsh_rs_32_16(uint32_t state)
+{
+ return (uint16_t)(((state >> 11u) ^ state) >> ((state >> 30u) + 11u));
+}
+
+inline uint32_t pcg_output_xsh_rs_64_32(uint64_t state)
+{
+
+ return (uint32_t)(((state >> 22u) ^ state) >> ((state >> 61u) + 22u));
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsh_rs_128_64(pcg128_t state)
+{
+ return (uint64_t)(((state >> 43u) ^ state) >> ((state >> 124u) + 45u));
+}
+#endif
+
+// XSH RR
+
+inline uint8_t pcg_output_xsh_rr_16_8(uint16_t state)
+{
+ return pcg_rotr_8(((state >> 5u) ^ state) >> 5u, state >> 13u);
+}
+
+inline uint16_t pcg_output_xsh_rr_32_16(uint32_t state)
+{
+ return pcg_rotr_16(((state >> 10u) ^ state) >> 12u, state >> 28u);
+}
+
+inline uint32_t pcg_output_xsh_rr_64_32(uint64_t state)
+{
+ return pcg_rotr_32(((state >> 18u) ^ state) >> 27u, state >> 59u);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsh_rr_128_64(pcg128_t state)
+{
+ return pcg_rotr_64(((state >> 29u) ^ state) >> 58u, state >> 122u);
+}
+#endif
+
+// RXS M XS
+
+inline uint8_t pcg_output_rxs_m_xs_8_8(uint8_t state)
+{
+ uint8_t word = ((state >> ((state >> 6u) + 2u)) ^ state) * 217u;
+ return (word >> 6u) ^ word;
+}
+
+inline uint16_t pcg_output_rxs_m_xs_16_16(uint16_t state)
+{
+ uint16_t word = ((state >> ((state >> 13u) + 3u)) ^ state) * 62169u;
+ return (word >> 11u) ^ word;
+}
+
+inline uint32_t pcg_output_rxs_m_xs_32_32(uint32_t state)
+{
+ uint32_t word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;
+ return (word >> 22u) ^ word;
+}
+
+inline uint64_t pcg_output_rxs_m_xs_64_64(uint64_t state)
+{
+ uint64_t word = ((state >> ((state >> 59u) + 5u)) ^ state)
+ * 12605985483714917081ull;
+ return (word >> 43u) ^ word;
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_output_rxs_m_xs_128_128(pcg128_t state)
+{
+ pcg128_t word = ((state >> ((state >> 122u) + 6u)) ^ state)
+ * (PCG_128BIT_CONSTANT(17766728186571221404ULL,
+ 12605985483714917081ULL));
+ // 327738287884841127335028083622016905945
+ return (word >> 86u) ^ word;
+}
+#endif
+
+// XSL RR (only defined for >= 64 bits)
+
+inline uint32_t pcg_output_xsl_rr_64_32(uint64_t state)
+{
+ return pcg_rotr_32(((uint32_t)(state >> 32u)) ^ (uint32_t)state,
+ state >> 59u);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state)
+{
+ return pcg_rotr_64(((uint64_t)(state >> 64u)) ^ (uint64_t)state,
+ state >> 122u);
+}
+#endif
+
+// XSL RR RR (only defined for >= 64 bits)
+
+inline uint64_t pcg_output_xsl_rr_rr_64_64(uint64_t state)
+{
+ uint32_t rot1 = (uint32_t)(state >> 59u);
+ uint32_t high = (uint32_t)(state >> 32u);
+ uint32_t low = (uint32_t)state;
+ uint32_t xored = high ^ low;
+ uint32_t newlow = pcg_rotr_32(xored, rot1);
+ uint32_t newhigh = pcg_rotr_32(high, newlow & 31u);
+ return (((uint64_t)newhigh) << 32u) | newlow;
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_output_xsl_rr_rr_128_128(pcg128_t state)
+{
+ uint32_t rot1 = (uint32_t)(state >> 122u);
+ uint64_t high = (uint64_t)(state >> 64u);
+ uint64_t low = (uint64_t)state;
+ uint64_t xored = high ^ low;
+ uint64_t newlow = pcg_rotr_64(xored, rot1);
+ uint64_t newhigh = pcg_rotr_64(high, newlow & 63u);
+ return (((pcg128_t)newhigh) << 64u) | newlow;
+}
+#endif
+
+#define PCG_DEFAULT_MULTIPLIER_8 141U
+#define PCG_DEFAULT_MULTIPLIER_16 12829U
+#define PCG_DEFAULT_MULTIPLIER_32 747796405U
+#define PCG_DEFAULT_MULTIPLIER_64 6364136223846793005ULL
+
+#define PCG_DEFAULT_INCREMENT_8 77U
+#define PCG_DEFAULT_INCREMENT_16 47989U
+#define PCG_DEFAULT_INCREMENT_32 2891336453U
+#define PCG_DEFAULT_INCREMENT_64 1442695040888963407ULL
+
+#if PCG_HAS_128BIT_OPS
+#define PCG_DEFAULT_MULTIPLIER_128 \
+ PCG_128BIT_CONSTANT(2549297995355413924ULL,4865540595714422341ULL)
+#define PCG_DEFAULT_INCREMENT_128 \
+ PCG_128BIT_CONSTANT(6364136223846793005ULL,1442695040888963407ULL)
+#endif
+
+/*
+ * Static initialization constants (if you can't call srandom for some
+ * bizarre reason).
+ */
+
+#define PCG_STATE_ONESEQ_8_INITIALIZER { 0xd7U }
+#define PCG_STATE_ONESEQ_16_INITIALIZER { 0x20dfU }
+#define PCG_STATE_ONESEQ_32_INITIALIZER { 0x46b56677U }
+#define PCG_STATE_ONESEQ_64_INITIALIZER { 0x4d595df4d0f33173ULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_ONESEQ_128_INITIALIZER \
+ { PCG_128BIT_CONSTANT(0xb8dc10e158a92392ULL, 0x98046df007ec0a53ULL) }
+#endif
+
+#define PCG_STATE_UNIQUE_8_INITIALIZER PCG_STATE_ONESEQ_8_INITIALIZER
+#define PCG_STATE_UNIQUE_16_INITIALIZER PCG_STATE_ONESEQ_16_INITIALIZER
+#define PCG_STATE_UNIQUE_32_INITIALIZER PCG_STATE_ONESEQ_32_INITIALIZER
+#define PCG_STATE_UNIQUE_64_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_UNIQUE_128_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#endif
+
+#define PCG_STATE_MCG_8_INITIALIZER { 0xe5U }
+#define PCG_STATE_MCG_16_INITIALIZER { 0xa5e5U }
+#define PCG_STATE_MCG_32_INITIALIZER { 0xd15ea5e5U }
+#define PCG_STATE_MCG_64_INITIALIZER { 0xcafef00dd15ea5e5ULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_MCG_128_INITIALIZER \
+ { PCG_128BIT_CONSTANT(0x0000000000000000ULL, 0xcafef00dd15ea5e5ULL) }
+#endif
+
+#define PCG_STATE_SETSEQ_8_INITIALIZER { 0x9bU, 0xdbU }
+#define PCG_STATE_SETSEQ_16_INITIALIZER { 0xe39bU, 0x5bdbU }
+#define PCG_STATE_SETSEQ_32_INITIALIZER { 0xec02d89bU, 0x94b95bdbU }
+#define PCG_STATE_SETSEQ_64_INITIALIZER \
+ { 0x853c49e6748fea9bULL, 0xda3e39cb94b95bdbULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_SETSEQ_128_INITIALIZER \
+ { PCG_128BIT_CONSTANT(0x979c9a98d8462005ULL, 0x7d3e9cb6cfe0549bULL), \
+ PCG_128BIT_CONSTANT(0x0000000000000001ULL, 0xda3e39cb94b95bdbULL) }
+#endif
+
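/*
 * Editorial sketch, not part of the diff: the initializer constants above
 * let a generator be set up at compile time instead of calling a srandom
 * function, e.g. in a translation unit that includes this header:
 */
static struct pcg_state_setseq_64 my_rng = PCG_STATE_SETSEQ_64_INITIALIZER;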
+/* Representations for the oneseq, mcg, and unique variants */
+
+struct pcg_state_8 {
+ uint8_t state;
+};
+
+struct pcg_state_16 {
+ uint16_t state;
+};
+
+struct pcg_state_32 {
+ uint32_t state;
+};
+
+struct pcg_state_64 {
+ uint64_t state;
+};
+
+#if PCG_HAS_128BIT_OPS
+struct pcg_state_128 {
+ pcg128_t state;
+};
+#endif
+
+/* Representations for the setseq variants */
+
+struct pcg_state_setseq_8 {
+ uint8_t state;
+ uint8_t inc;
+};
+
+struct pcg_state_setseq_16 {
+ uint16_t state;
+ uint16_t inc;
+};
+
+struct pcg_state_setseq_32 {
+ uint32_t state;
+ uint32_t inc;
+};
+
+struct pcg_state_setseq_64 {
+ uint64_t state;
+ uint64_t inc;
+};
+
+#if PCG_HAS_128BIT_OPS
+struct pcg_state_setseq_128 {
+ pcg128_t state;
+ pcg128_t inc;
+};
+#endif
+
+/* Multi-step advance functions (jump-ahead, jump-back) */
+
+extern uint8_t pcg_advance_lcg_8(uint8_t state, uint8_t delta, uint8_t cur_mult,
+ uint8_t cur_plus);
+extern uint16_t pcg_advance_lcg_16(uint16_t state, uint16_t delta,
+ uint16_t cur_mult, uint16_t cur_plus);
+extern uint32_t pcg_advance_lcg_32(uint32_t state, uint32_t delta,
+ uint32_t cur_mult, uint32_t cur_plus);
+extern uint64_t pcg_advance_lcg_64(uint64_t state, uint64_t delta,
+ uint64_t cur_mult, uint64_t cur_plus);
+
+#if PCG_HAS_128BIT_OPS
+extern pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta,
+ pcg128_t cur_mult, pcg128_t cur_plus);
+#endif
+
+/* Functions to advance the underlying LCG, one version for each size and
+ * each style. These functions are considered semi-private. There is rarely
+ * a good reason to call them directly.
+ */
+
+inline void pcg_oneseq_8_step_r(struct pcg_state_8* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8
+ + PCG_DEFAULT_INCREMENT_8;
+}
+
+inline void pcg_oneseq_8_advance_r(struct pcg_state_8* rng, uint8_t delta)
+{
+ rng->state = pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8,
+ PCG_DEFAULT_INCREMENT_8);
+}
+
+inline void pcg_mcg_8_step_r(struct pcg_state_8* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8;
+}
+
+inline void pcg_mcg_8_advance_r(struct pcg_state_8* rng, uint8_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8, 0u);
+}
+
+inline void pcg_unique_8_step_r(struct pcg_state_8* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8
+ + (uint8_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_8_advance_r(struct pcg_state_8* rng, uint8_t delta)
+{
+ rng->state = pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8,
+ (uint8_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_8_step_r(struct pcg_state_setseq_8* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8 + rng->inc;
+}
+
+inline void pcg_setseq_8_advance_r(struct pcg_state_setseq_8* rng,
+ uint8_t delta)
+{
+ rng->state = pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8,
+ rng->inc);
+}
+
+inline void pcg_oneseq_16_step_r(struct pcg_state_16* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_16
+ + PCG_DEFAULT_INCREMENT_16;
+}
+
+inline void pcg_oneseq_16_advance_r(struct pcg_state_16* rng, uint16_t delta)
+{
+ rng->state = pcg_advance_lcg_16(
+ rng->state, delta, PCG_DEFAULT_MULTIPLIER_16, PCG_DEFAULT_INCREMENT_16);
+}
+
+inline void pcg_mcg_16_step_r(struct pcg_state_16* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_16;
+}
+
+inline void pcg_mcg_16_advance_r(struct pcg_state_16* rng, uint16_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16, 0u);
+}
+
+inline void pcg_unique_16_step_r(struct pcg_state_16* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_16
+ + (uint16_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_16_advance_r(struct pcg_state_16* rng, uint16_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16,
+ (uint16_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_16_step_r(struct pcg_state_setseq_16* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_16 + rng->inc;
+}
+
+inline void pcg_setseq_16_advance_r(struct pcg_state_setseq_16* rng,
+ uint16_t delta)
+{
+ rng->state = pcg_advance_lcg_16(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_16, rng->inc);
+}
+
+inline void pcg_oneseq_32_step_r(struct pcg_state_32* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_32
+ + PCG_DEFAULT_INCREMENT_32;
+}
+
+inline void pcg_oneseq_32_advance_r(struct pcg_state_32* rng, uint32_t delta)
+{
+ rng->state = pcg_advance_lcg_32(
+ rng->state, delta, PCG_DEFAULT_MULTIPLIER_32, PCG_DEFAULT_INCREMENT_32);
+}
+
+inline void pcg_mcg_32_step_r(struct pcg_state_32* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_32;
+}
+
+inline void pcg_mcg_32_advance_r(struct pcg_state_32* rng, uint32_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32, 0u);
+}
+
+inline void pcg_unique_32_step_r(struct pcg_state_32* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_32
+ + (uint32_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_32_advance_r(struct pcg_state_32* rng, uint32_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32,
+ (uint32_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_32_step_r(struct pcg_state_setseq_32* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_32 + rng->inc;
+}
+
+inline void pcg_setseq_32_advance_r(struct pcg_state_setseq_32* rng,
+ uint32_t delta)
+{
+ rng->state = pcg_advance_lcg_32(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_32, rng->inc);
+}
+
+inline void pcg_oneseq_64_step_r(struct pcg_state_64* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64
+ + PCG_DEFAULT_INCREMENT_64;
+}
+
+inline void pcg_oneseq_64_advance_r(struct pcg_state_64* rng, uint64_t delta)
+{
+ rng->state = pcg_advance_lcg_64(
+ rng->state, delta, PCG_DEFAULT_MULTIPLIER_64, PCG_DEFAULT_INCREMENT_64);
+}
+
+inline void pcg_mcg_64_step_r(struct pcg_state_64* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64;
+}
+
+inline void pcg_mcg_64_advance_r(struct pcg_state_64* rng, uint64_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64, 0u);
+}
+
+inline void pcg_unique_64_step_r(struct pcg_state_64* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64
+ + (uint64_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_64_advance_r(struct pcg_state_64* rng, uint64_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64,
+ (uint64_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_64_step_r(struct pcg_state_setseq_64* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64 + rng->inc;
+}
+
+inline void pcg_setseq_64_advance_r(struct pcg_state_setseq_64* rng,
+ uint64_t delta)
+{
+ rng->state = pcg_advance_lcg_64(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_64, rng->inc);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_step_r(struct pcg_state_128* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128
+ + PCG_DEFAULT_INCREMENT_128;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_advance_r(struct pcg_state_128* rng, pcg128_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_128(rng->state, delta, PCG_DEFAULT_MULTIPLIER_128,
+ PCG_DEFAULT_INCREMENT_128);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_step_r(struct pcg_state_128* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_advance_r(struct pcg_state_128* rng, pcg128_t delta)
+{
+ rng->state = pcg_advance_lcg_128(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_128, 0u);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_step_r(struct pcg_state_128* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128
+ + (pcg128_t)(((intptr_t)rng) | 1u);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_advance_r(struct pcg_state_128* rng, pcg128_t delta)
+{
+ rng->state
+ = pcg_advance_lcg_128(rng->state, delta, PCG_DEFAULT_MULTIPLIER_128,
+ (pcg128_t)(((intptr_t)rng) | 1u));
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_step_r(struct pcg_state_setseq_128* rng)
+{
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128 + rng->inc;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_advance_r(struct pcg_state_setseq_128* rng,
+ pcg128_t delta)
+{
+ rng->state = pcg_advance_lcg_128(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_128, rng->inc);
+}
+#endif
+
+/* Functions to seed the RNG state, one version for each size and each
+ * style. Unlike the step functions, regular users can and should call
+ * these functions.
+ */
+
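/*
 * Editorial sketch, not part of the diff: minimal seeding and use of the
 * setseq 64/32 ("pcg32") variant declared in this header. It assumes the
 * program is linked against the PCG C library sources that provide the
 * extern definitions backing these C99 inline functions.
 */
#include <stdio.h>
#include "pcg_variants.h"

int main(void) {
  struct pcg_state_setseq_64 rng;
  /* initstate picks the starting point, initseq picks the stream. */
  pcg_setseq_64_srandom_r(&rng, 42u, 54u);
  for (int i = 0; i < 3; i++)
    printf("0x%08x\n", pcg_setseq_64_xsh_rr_32_random_r(&rng));
  /* Bounded draw in [0, 6) without modulo bias (see the notes further down). */
  printf("%u\n", pcg_setseq_64_xsh_rr_32_boundedrand_r(&rng, 6u));
  return 0;
}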
+inline void pcg_oneseq_8_srandom_r(struct pcg_state_8* rng, uint8_t initstate)
+{
+ rng->state = 0U;
+ pcg_oneseq_8_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_8_step_r(rng);
+}
+
+inline void pcg_mcg_8_srandom_r(struct pcg_state_8* rng, uint8_t initstate)
+{
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_8_srandom_r(struct pcg_state_8* rng, uint8_t initstate)
+{
+ rng->state = 0U;
+ pcg_unique_8_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_8_step_r(rng);
+}
+
+inline void pcg_setseq_8_srandom_r(struct pcg_state_setseq_8* rng,
+ uint8_t initstate, uint8_t initseq)
+{
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_8_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_8_step_r(rng);
+}
+
+inline void pcg_oneseq_16_srandom_r(struct pcg_state_16* rng,
+ uint16_t initstate)
+{
+ rng->state = 0U;
+ pcg_oneseq_16_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_16_step_r(rng);
+}
+
+inline void pcg_mcg_16_srandom_r(struct pcg_state_16* rng, uint16_t initstate)
+{
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_16_srandom_r(struct pcg_state_16* rng,
+ uint16_t initstate)
+{
+ rng->state = 0U;
+ pcg_unique_16_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_16_step_r(rng);
+}
+
+inline void pcg_setseq_16_srandom_r(struct pcg_state_setseq_16* rng,
+ uint16_t initstate, uint16_t initseq)
+{
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_16_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_16_step_r(rng);
+}
+
+inline void pcg_oneseq_32_srandom_r(struct pcg_state_32* rng,
+ uint32_t initstate)
+{
+ rng->state = 0U;
+ pcg_oneseq_32_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_32_step_r(rng);
+}
+
+inline void pcg_mcg_32_srandom_r(struct pcg_state_32* rng, uint32_t initstate)
+{
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_32_srandom_r(struct pcg_state_32* rng,
+ uint32_t initstate)
+{
+ rng->state = 0U;
+ pcg_unique_32_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_32_step_r(rng);
+}
+
+inline void pcg_setseq_32_srandom_r(struct pcg_state_setseq_32* rng,
+ uint32_t initstate, uint32_t initseq)
+{
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_32_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_32_step_r(rng);
+}
+
+inline void pcg_oneseq_64_srandom_r(struct pcg_state_64* rng,
+ uint64_t initstate)
+{
+ rng->state = 0U;
+ pcg_oneseq_64_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_64_step_r(rng);
+}
+
+inline void pcg_mcg_64_srandom_r(struct pcg_state_64* rng, uint64_t initstate)
+{
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_64_srandom_r(struct pcg_state_64* rng,
+ uint64_t initstate)
+{
+ rng->state = 0U;
+ pcg_unique_64_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_64_step_r(rng);
+}
+
+inline void pcg_setseq_64_srandom_r(struct pcg_state_setseq_64* rng,
+ uint64_t initstate, uint64_t initseq)
+{
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_64_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_64_step_r(rng);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_srandom_r(struct pcg_state_128* rng,
+ pcg128_t initstate)
+{
+ rng->state = 0U;
+ pcg_oneseq_128_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_128_step_r(rng);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_srandom_r(struct pcg_state_128* rng, pcg128_t initstate)
+{
+ rng->state = initstate | 1u;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_srandom_r(struct pcg_state_128* rng,
+ pcg128_t initstate)
+{
+ rng->state = 0U;
+ pcg_unique_128_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_128_step_r(rng);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_srandom_r(struct pcg_state_setseq_128* rng,
+ pcg128_t initstate, pcg128_t initseq)
+{
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_128_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_128_step_r(rng);
+}
+#endif
+
+/* Now, finally we create each of the individual generators. We provide
+ * a random_r function that provides a random number of the appropriate
+ * type (using the full range of the type) and a boundedrand_r version
+ * that provides a random number strictly less than a given bound.
+ *
+ * Implementation notes for boundedrand_r:
+ *
+ * To avoid bias, we need to make the range of the RNG a multiple of
+ * bound, which we do by dropping output less than a threshold.
+ * Let's consider a 32-bit case... A naive scheme to calculate the
+ * threshold would be to do
+ *
+ * uint32_t threshold = 0x100000000ull % bound;
+ *
+ * but 64-bit div/mod is slower than 32-bit div/mod (especially on
+ * 32-bit platforms). In essence, we do
+ *
+ * uint32_t threshold = (0x100000000ull-bound) % bound;
+ *
+ * because this version will calculate the same modulus, but the LHS
+ * value is less than 2^32.
+ *
+ * (Note that using modulo is only wise for good RNGs; poorer RNGs
+ * such as raw LCGs do better using a technique based on division.)
+ * Empirical tests show that division is preferable to modulus for
+ * reducing the range of an RNG. It's faster, and sometimes it can
+ * even be statistically preferable.
+ */
+
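/*
 * Editorial worked example, not part of the diff, of the threshold rule
 * described above. For the 32-bit case with bound = 6:
 *
 *     threshold = (uint32_t)(-6) % 6 = (2^32 - 6) % 6 = 4
 *
 * so draws 0..3 are rejected. That leaves 2^32 - 4 = 4294967292 accepted
 * values, which is exactly 6 * 715827882, so every result of r % bound
 * occurs equally often and the modulo bias is gone. The rejection
 * probability is threshold / 2^32, which is always below bound / 2^32.
 */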
+/* Generation functions for XSH RS */
+
+inline uint8_t pcg_oneseq_16_xsh_rs_8_random_r(struct pcg_state_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t pcg_oneseq_16_xsh_rs_8_boundedrand_r(struct pcg_state_16* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rs_16_random_r(struct pcg_state_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rs_16_boundedrand_r(struct pcg_state_32* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rs_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rs_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsh_rs_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsh_rs_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_unique_16_xsh_rs_8_random_r(struct pcg_state_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t pcg_unique_16_xsh_rs_8_boundedrand_r(struct pcg_state_16* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_unique_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_unique_32_xsh_rs_16_random_r(struct pcg_state_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_unique_32_xsh_rs_16_boundedrand_r(struct pcg_state_32* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_64_xsh_rs_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsh_rs_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsh_rs_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsh_rs_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_setseq_16_xsh_rs_8_random_r(struct pcg_state_setseq_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_16_xsh_rs_8_boundedrand_r(struct pcg_state_setseq_16* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rs_16_random_r(struct pcg_state_setseq_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rs_16_boundedrand_r(struct pcg_state_setseq_32* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rs_32_random_r(struct pcg_state_setseq_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rs_32_boundedrand_r(struct pcg_state_setseq_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rs_64_random_r(struct pcg_state_setseq_128* rng)
+{
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rs_64_boundedrand_r(struct pcg_state_setseq_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_mcg_16_xsh_rs_8_random_r(struct pcg_state_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_mcg_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t pcg_mcg_16_xsh_rs_8_boundedrand_r(struct pcg_state_16* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_mcg_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_mcg_32_xsh_rs_16_random_r(struct pcg_state_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_mcg_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_mcg_32_xsh_rs_16_boundedrand_r(struct pcg_state_32* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_mcg_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_mcg_64_xsh_rs_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsh_rs_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rs_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rs_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSH RR */
+
+inline uint8_t pcg_oneseq_16_xsh_rr_8_random_r(struct pcg_state_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_oneseq_16_xsh_rr_8_boundedrand_r(struct pcg_state_16* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rr_16_random_r(struct pcg_state_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rr_16_boundedrand_r(struct pcg_state_32* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rr_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rr_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsh_rr_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsh_rr_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_unique_16_xsh_rr_8_random_r(struct pcg_state_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_unique_16_xsh_rr_8_boundedrand_r(struct pcg_state_16* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_unique_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_unique_32_xsh_rr_16_random_r(struct pcg_state_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_unique_32_xsh_rr_16_boundedrand_r(struct pcg_state_32* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_64_xsh_rr_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsh_rr_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsh_rr_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsh_rr_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_setseq_16_xsh_rr_8_random_r(struct pcg_state_setseq_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_16_xsh_rr_8_boundedrand_r(struct pcg_state_setseq_16* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rr_16_random_r(struct pcg_state_setseq_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rr_16_boundedrand_r(struct pcg_state_setseq_32* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rr_32_random_r(struct pcg_state_setseq_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rr_32_boundedrand_r(struct pcg_state_setseq_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rr_64_random_r(struct pcg_state_setseq_128* rng)
+{
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rr_64_boundedrand_r(struct pcg_state_setseq_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_mcg_16_xsh_rr_8_random_r(struct pcg_state_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_mcg_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_mcg_16_xsh_rr_8_boundedrand_r(struct pcg_state_16* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_mcg_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_mcg_32_xsh_rr_16_random_r(struct pcg_state_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_mcg_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_mcg_32_xsh_rr_16_boundedrand_r(struct pcg_state_32* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_mcg_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_mcg_64_xsh_rr_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsh_rr_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rr_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rr_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for RXS M XS (no MCG versions because they
+ * don't make sense when you want to use the entire state)
+ */
+
+inline uint8_t pcg_oneseq_8_rxs_m_xs_8_random_r(struct pcg_state_8* rng)
+{
+ uint8_t oldstate = rng->state;
+ pcg_oneseq_8_step_r(rng);
+ return pcg_output_rxs_m_xs_8_8(oldstate);
+}
+
+inline uint8_t pcg_oneseq_8_rxs_m_xs_8_boundedrand_r(struct pcg_state_8* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_8_rxs_m_xs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_16_rxs_m_xs_16_random_r(struct pcg_state_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_oneseq_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_16* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_32_rxs_m_xs_32_random_r(struct pcg_state_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_oneseq_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_32* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t pcg_oneseq_64_rxs_m_xs_64_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_oneseq_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_64* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_oneseq_128_rxs_m_xs_128_random_r(struct pcg_state_128* rng)
+{
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_128* rng,
+ pcg128_t bound)
+{
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_oneseq_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint16_t pcg_unique_16_rxs_m_xs_16_random_r(struct pcg_state_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_unique_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_16* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_32_rxs_m_xs_32_random_r(struct pcg_state_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_unique_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_32* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t pcg_unique_64_rxs_m_xs_64_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_unique_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_64* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_unique_128_rxs_m_xs_128_random_r(struct pcg_state_128* rng)
+{
+ pcg_unique_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_128* rng,
+ pcg128_t bound)
+{
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_unique_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_setseq_8_rxs_m_xs_8_random_r(struct pcg_state_setseq_8* rng)
+{
+ uint8_t oldstate = rng->state;
+ pcg_setseq_8_step_r(rng);
+ return pcg_output_rxs_m_xs_8_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_8_rxs_m_xs_8_boundedrand_r(struct pcg_state_setseq_8* rng,
+ uint8_t bound)
+{
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_8_rxs_m_xs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_16_rxs_m_xs_16_random_r(struct pcg_state_setseq_16* rng)
+{
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_setseq_16* rng,
+ uint16_t bound)
+{
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_32_rxs_m_xs_32_random_r(struct pcg_state_setseq_32* rng)
+{
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_setseq_32* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t
+pcg_setseq_64_rxs_m_xs_64_random_r(struct pcg_state_setseq_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_setseq_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_setseq_64* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_rxs_m_xs_128_random_r(struct pcg_state_setseq_128* rng)
+{
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_setseq_128* rng,
+ pcg128_t bound)
+{
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_setseq_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSL RR (only defined for "large" types) */
+
+inline uint32_t pcg_oneseq_64_xsl_rr_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsl_rr_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsl_rr_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsl_rr_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t pcg_unique_64_xsl_rr_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsl_rr_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsl_rr_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsl_rr_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t
+pcg_setseq_64_xsl_rr_32_random_r(struct pcg_state_setseq_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsl_rr_32_boundedrand_r(struct pcg_state_setseq_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(struct pcg_state_setseq_128* rng)
+{
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsl_rr_64_boundedrand_r(struct pcg_state_setseq_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t pcg_mcg_64_xsl_rr_32_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsl_rr_32_boundedrand_r(struct pcg_state_64* rng,
+ uint32_t bound)
+{
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsl_rr_64_random_r(struct pcg_state_128* rng)
+{
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsl_rr_64_boundedrand_r(struct pcg_state_128* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSL RR RR (only defined for "large" types) */
+
+inline uint64_t pcg_oneseq_64_xsl_rr_rr_64_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_oneseq_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_64* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_oneseq_128_xsl_rr_rr_128_random_r(struct pcg_state_128* rng)
+{
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_128* rng,
+ pcg128_t bound)
+{
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_oneseq_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint64_t pcg_unique_64_xsl_rr_rr_64_random_r(struct pcg_state_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_unique_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_64* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_unique_128_xsl_rr_rr_128_random_r(struct pcg_state_128* rng)
+{
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_128* rng,
+ pcg128_t bound)
+{
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_unique_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint64_t
+pcg_setseq_64_xsl_rr_rr_64_random_r(struct pcg_state_setseq_64* rng)
+{
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_setseq_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_setseq_64* rng,
+ uint64_t bound)
+{
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_xsl_rr_rr_128_random_r(struct pcg_state_setseq_128* rng)
+{
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_setseq_128* rng,
+ pcg128_t bound)
+{
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_setseq_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+//// Typedefs
+typedef struct pcg_state_setseq_64 pcg32_random_t;
+typedef struct pcg_state_64 pcg32s_random_t;
+typedef struct pcg_state_64 pcg32u_random_t;
+typedef struct pcg_state_64 pcg32f_random_t;
+//// random_r
+#define pcg32_random_r pcg_setseq_64_xsh_rr_32_random_r
+#define pcg32s_random_r pcg_oneseq_64_xsh_rr_32_random_r
+#define pcg32u_random_r pcg_unique_64_xsh_rr_32_random_r
+#define pcg32f_random_r pcg_mcg_64_xsh_rs_32_random_r
+//// boundedrand_r
+#define pcg32_boundedrand_r pcg_setseq_64_xsh_rr_32_boundedrand_r
+#define pcg32s_boundedrand_r pcg_oneseq_64_xsh_rr_32_boundedrand_r
+#define pcg32u_boundedrand_r pcg_unique_64_xsh_rr_32_boundedrand_r
+#define pcg32f_boundedrand_r pcg_mcg_64_xsh_rs_32_boundedrand_r
+//// srandom_r
+#define pcg32_srandom_r pcg_setseq_64_srandom_r
+#define pcg32s_srandom_r pcg_oneseq_64_srandom_r
+#define pcg32u_srandom_r pcg_unique_64_srandom_r
+#define pcg32f_srandom_r pcg_mcg_64_srandom_r
+//// advance_r
+#define pcg32_advance_r pcg_setseq_64_advance_r
+#define pcg32s_advance_r pcg_oneseq_64_advance_r
+#define pcg32u_advance_r pcg_unique_64_advance_r
+#define pcg32f_advance_r pcg_mcg_64_advance_r
+
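+/* Usage sketch (illustrative only, not part of the upstream PCG sources):
+ * the pcg32 convenience names above wrap the setseq_64 XSH RR 32 generator,
+ * so a typical caller needs only the four macros:
+ *
+ *     pcg32_random_t rng;
+ *     pcg32_srandom_r(&rng, 42u, 54u);               // initstate, initseq
+ *     uint32_t word = pcg32_random_r(&rng);          // full-range 32-bit draw
+ *     uint32_t die = pcg32_boundedrand_r(&rng, 6u);  // unbiased value in [0, 6)
+ *     pcg32_advance_r(&rng, 1000u);                  // skip ahead 1000 draws
+ */
+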
+#if PCG_HAS_128BIT_OPS
+//// Typedefs
+typedef struct pcg_state_setseq_128 pcg64_random_t;
+typedef struct pcg_state_128 pcg64s_random_t;
+typedef struct pcg_state_128 pcg64u_random_t;
+typedef struct pcg_state_128 pcg64f_random_t;
+//// random_r
+#define pcg64_random_r pcg_setseq_128_xsl_rr_64_random_r
+#define pcg64s_random_r pcg_oneseq_128_xsl_rr_64_random_r
+#define pcg64u_random_r pcg_unique_128_xsl_rr_64_random_r
+#define pcg64f_random_r pcg_mcg_128_xsl_rr_64_random_r
+//// boundedrand_r
+#define pcg64_boundedrand_r pcg_setseq_128_xsl_rr_64_boundedrand_r
+#define pcg64s_boundedrand_r pcg_oneseq_128_xsl_rr_64_boundedrand_r
+#define pcg64u_boundedrand_r pcg_unique_128_xsl_rr_64_boundedrand_r
+#define pcg64f_boundedrand_r pcg_mcg_128_xsl_rr_64_boundedrand_r
+//// srandom_r
+#define pcg64_srandom_r pcg_setseq_128_srandom_r
+#define pcg64s_srandom_r pcg_oneseq_128_srandom_r
+#define pcg64u_srandom_r pcg_unique_128_srandom_r
+#define pcg64f_srandom_r pcg_mcg_128_srandom_r
+//// advance_r
+#define pcg64_advance_r pcg_setseq_128_advance_r
+#define pcg64s_advance_r pcg_oneseq_128_advance_r
+#define pcg64u_advance_r pcg_unique_128_advance_r
+#define pcg64f_advance_r pcg_mcg_128_advance_r
+#endif
+
+//// Typedefs
+typedef struct pcg_state_8 pcg8si_random_t;
+typedef struct pcg_state_16 pcg16si_random_t;
+typedef struct pcg_state_32 pcg32si_random_t;
+typedef struct pcg_state_64 pcg64si_random_t;
+//// random_r
+#define pcg8si_random_r pcg_oneseq_8_rxs_m_xs_8_random_r
+#define pcg16si_random_r pcg_oneseq_16_rxs_m_xs_16_random_r
+#define pcg32si_random_r pcg_oneseq_32_rxs_m_xs_32_random_r
+#define pcg64si_random_r pcg_oneseq_64_rxs_m_xs_64_random_r
+//// boundedrand_r
+#define pcg8si_boundedrand_r pcg_oneseq_8_rxs_m_xs_8_boundedrand_r
+#define pcg16si_boundedrand_r pcg_oneseq_16_rxs_m_xs_16_boundedrand_r
+#define pcg32si_boundedrand_r pcg_oneseq_32_rxs_m_xs_32_boundedrand_r
+#define pcg64si_boundedrand_r pcg_oneseq_64_rxs_m_xs_64_boundedrand_r
+//// srandom_r
+#define pcg8si_srandom_r pcg_oneseq_8_srandom_r
+#define pcg16si_srandom_r pcg_oneseq_16_srandom_r
+#define pcg32si_srandom_r pcg_oneseq_32_srandom_r
+#define pcg64si_srandom_r pcg_oneseq_64_srandom_r
+//// advance_r
+#define pcg8si_advance_r pcg_oneseq_8_advance_r
+#define pcg16si_advance_r pcg_oneseq_16_advance_r
+#define pcg32si_advance_r pcg_oneseq_32_advance_r
+#define pcg64si_advance_r pcg_oneseq_64_advance_r
+
+#if PCG_HAS_128BIT_OPS
+typedef struct pcg_state_128 pcg128si_random_t;
+#define pcg128si_random_r pcg_oneseq_128_rxs_m_xs_128_random_r
+#define pcg128si_boundedrand_r pcg_oneseq_128_rxs_m_xs_128_boundedrand_r
+#define pcg128si_srandom_r pcg_oneseq_128_srandom_r
+#define pcg128si_advance_r pcg_oneseq_128_advance_r
+#endif
+
+//// Typedefs
+typedef struct pcg_state_setseq_8 pcg8i_random_t;
+typedef struct pcg_state_setseq_16 pcg16i_random_t;
+typedef struct pcg_state_setseq_32 pcg32i_random_t;
+typedef struct pcg_state_setseq_64 pcg64i_random_t;
+//// random_r
+#define pcg8i_random_r pcg_setseq_8_rxs_m_xs_8_random_r
+#define pcg16i_random_r pcg_setseq_16_rxs_m_xs_16_random_r
+#define pcg32i_random_r pcg_setseq_32_rxs_m_xs_32_random_r
+#define pcg64i_random_r pcg_setseq_64_rxs_m_xs_64_random_r
+//// boundedrand_r
+#define pcg8i_boundedrand_r pcg_setseq_8_rxs_m_xs_8_boundedrand_r
+#define pcg16i_boundedrand_r pcg_setseq_16_rxs_m_xs_16_boundedrand_r
+#define pcg32i_boundedrand_r pcg_setseq_32_rxs_m_xs_32_boundedrand_r
+#define pcg64i_boundedrand_r pcg_setseq_64_rxs_m_xs_64_boundedrand_r
+//// srandom_r
+#define pcg8i_srandom_r pcg_setseq_8_srandom_r
+#define pcg16i_srandom_r pcg_setseq_16_srandom_r
+#define pcg32i_srandom_r pcg_setseq_32_srandom_r
+#define pcg64i_srandom_r pcg_setseq_64_srandom_r
+//// advance_r
+#define pcg8i_advance_r pcg_setseq_8_advance_r
+#define pcg16i_advance_r pcg_setseq_16_advance_r
+#define pcg32i_advance_r pcg_setseq_32_advance_r
+#define pcg64i_advance_r pcg_setseq_64_advance_r
+
+#if PCG_HAS_128BIT_OPS
+typedef struct pcg_state_setseq_128 pcg128i_random_t;
+#define pcg128i_random_r pcg_setseq_128_rxs_m_xs_128_random_r
+#define pcg128i_boundedrand_r pcg_setseq_128_rxs_m_xs_128_boundedrand_r
+#define pcg128i_srandom_r pcg_setseq_128_srandom_r
+#define pcg128i_advance_r pcg_setseq_128_advance_r
+#endif
+
+extern uint32_t pcg32_random();
+extern uint32_t pcg32_boundedrand(uint32_t bound);
+extern void pcg32_srandom(uint64_t seed, uint64_t seq);
+extern void pcg32_advance(uint64_t delta);
+
+#if PCG_HAS_128BIT_OPS
+extern uint64_t pcg64_random();
+extern uint64_t pcg64_boundedrand(uint64_t bound);
+extern void pcg64_srandom(pcg128_t seed, pcg128_t seq);
+extern void pcg64_advance(pcg128_t delta);
+#endif
+
+/*
+ * Static initialization constants (if you can't call srandom for some
+ * bizarre reason).
+ */
+
+#define PCG32_INITIALIZER PCG_STATE_SETSEQ_64_INITIALIZER
+#define PCG32U_INITIALIZER PCG_STATE_UNIQUE_64_INITIALIZER
+#define PCG32S_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#define PCG32F_INITIALIZER PCG_STATE_MCG_64_INITIALIZER
+
+#if PCG_HAS_128BIT_OPS
+#define PCG64_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER
+#define PCG64U_INITIALIZER PCG_STATE_UNIQUE_128_INITIALIZER
+#define PCG64S_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#define PCG64F_INITIALIZER PCG_STATE_MCG_128_INITIALIZER
+#endif
+
+#define PCG8SI_INITIALIZER PCG_STATE_ONESEQ_8_INITIALIZER
+#define PCG16SI_INITIALIZER PCG_STATE_ONESEQ_16_INITIALIZER
+#define PCG32SI_INITIALIZER PCG_STATE_ONESEQ_32_INITIALIZER
+#define PCG64SI_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG128SI_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#endif
+
+#define PCG8I_INITIALIZER PCG_STATE_SETSEQ_8_INITIALIZER
+#define PCG16I_INITIALIZER PCG_STATE_SETSEQ_16_INITIALIZER
+#define PCG32I_INITIALIZER PCG_STATE_SETSEQ_32_INITIALIZER
+#define PCG64I_INITIALIZER PCG_STATE_SETSEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG128I_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER
+#endif
+
+#if __cplusplus
+}
+#endif
+
+#endif // PCG_VARIANTS_H_INCLUDED
diff --git a/numpy/random/src/pcg64/LICENSE.md b/numpy/random/src/pcg64/LICENSE.md
new file mode 100644
index 000000000..7aac7a51c
--- /dev/null
+++ b/numpy/random/src/pcg64/LICENSE.md
@@ -0,0 +1,22 @@
+# PCG64
+
+## The MIT License
+
+PCG Random Number Generation for C.
+
+Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/numpy/random/src/pcg64/pcg64-benchmark.c b/numpy/random/src/pcg64/pcg64-benchmark.c
new file mode 100644
index 000000000..76f3ec78c
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64-benchmark.c
@@ -0,0 +1,42 @@
+/*
+ * cl pcg64-benchmark.c pcg64.c ../splitmix64/splitmix64.c /Ox
+ * Measure-Command { .\pcg64-benchmark.exe }
+ *
+ * gcc pcg64-benchmark.c pcg64.c ../splitmix64/splitmix64.c -O3 -o
+ * pcg64-benchmark
+ * time ./pcg64-benchmark
+ */
+#include "../splitmix64/splitmix64.h"
+#include "pcg64.h"
+#include <inttypes.h>
+#include <stdio.h>
+#include <time.h>
+
+#define N 1000000000
+
+int main() {
+ pcg64_random_t rng;
+ uint64_t sum = 0, count = 0;
+ uint64_t seed = 0xDEADBEAF;
+ int i;
+#if __SIZEOF_INT128__ && !defined(PCG_FORCE_EMULATED_128BIT_MATH)
+ rng.state = (__uint128_t)splitmix64_next(&seed) << 64;
+ rng.state |= splitmix64_next(&seed);
+ rng.inc = (__uint128_t)1;
+#else
+ rng.state.high = splitmix64_next(&seed);
+ rng.state.low = splitmix64_next(&seed);
+ rng.inc.high = 0;
+ rng.inc.low = 1;
+#endif
+ clock_t begin = clock();
+ for (i = 0; i < N; i++) {
+ sum += pcg64_random_r(&rng);
+ count++;
+ }
+ clock_t end = clock();
+ double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
+ printf("0x%" PRIx64 "\ncount: %" PRIu64 "\n", sum, count);
+ printf("%" PRIu64 " randoms per second\n",
+ (uint64_t)(N / time_spent) / 1000000 * 1000000);
+}
diff --git a/numpy/random/src/pcg64/pcg64-test-data-gen.c b/numpy/random/src/pcg64/pcg64-test-data-gen.c
new file mode 100644
index 000000000..0c2b079a3
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64-test-data-gen.c
@@ -0,0 +1,73 @@
+/*
+ * Generate testing csv files
+ *
+ * GCC only
+ *
+ * gcc pcg64-test-data-gen.c pcg64.orig.c ../splitmix64/splitmix64.c -o
+ * pcg64-test-data-gen
+ */
+
+#include "pcg64.orig.h"
+#include <inttypes.h>
+#include <stdio.h>
+
+#define N 1000
+
+int main() {
+ pcg64_random_t rng;
+ uint64_t state, seed = 0xDEADBEAF;
+ state = seed;
+ __uint128_t temp, s, inc;
+ int i;
+ uint64_t store[N];
+ s = (__uint128_t)seed;
+ inc = (__uint128_t)0;
+ pcg64_srandom_r(&rng, s, inc);
+ printf("0x%" PRIx64, (uint64_t)(rng.state >> 64));
+ printf("%" PRIx64 "\n", (uint64_t)rng.state);
+ printf("0x%" PRIx64, (uint64_t)(rng.inc >> 64));
+ printf("%" PRIx64 "\n", (uint64_t)rng.inc);
+ for (i = 0; i < N; i++) {
+ store[i] = pcg64_random_r(&rng);
+ }
+
+ FILE *fp;
+ fp = fopen("pcg64-testset-1.csv", "w");
+ if (fp == NULL) {
+ printf("Couldn't open file\n");
+ return -1;
+ }
+ fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
+ for (i = 0; i < N; i++) {
+ fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+ if (i == 999) {
+ printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+ }
+ }
+ fclose(fp);
+
+ state = seed = 0;
+ s = (__uint128_t)seed;
+  /* use the 128-bit increment variable, not the int loop counter i */
+  inc = (__uint128_t)0;
+  pcg64_srandom_r(&rng, s, inc);
+ printf("0x%" PRIx64, (uint64_t)(rng.state >> 64));
+ printf("%" PRIx64 "\n", (uint64_t)rng.state);
+ printf("0x%" PRIx64, (uint64_t)(rng.inc >> 64));
+ printf("%" PRIx64 "\n", (uint64_t)rng.inc);
+ for (i = 0; i < N; i++) {
+ store[i] = pcg64_random_r(&rng);
+ }
+ fp = fopen("pcg64-testset-2.csv", "w");
+ if (fp == NULL) {
+ printf("Couldn't open file\n");
+ return -1;
+ }
+ fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
+ for (i = 0; i < N; i++) {
+ fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+ if (i == 999) {
+ printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+ }
+ }
+ fclose(fp);
+}
diff --git a/numpy/random/src/pcg64/pcg64.c b/numpy/random/src/pcg64/pcg64.c
new file mode 100644
index 000000000..c89454029
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64.c
@@ -0,0 +1,185 @@
+/*
+ * PCG64 Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ * Copyright 2015 Robert Kern <robert.kern@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * including its license and other licensing options, visit
+ *
+ * http://www.pcg-random.org
+ *
+ * Relicensed MIT in May 2019
+ *
+ * The MIT License
+ *
+ * PCG Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pcg64.h"
+
+extern inline void pcg_setseq_128_step_r(pcg_state_setseq_128 *rng);
+extern inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state);
+extern inline void pcg_setseq_128_srandom_r(pcg_state_setseq_128 *rng,
+ pcg128_t initstate,
+ pcg128_t initseq);
+extern inline uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(pcg_state_setseq_128 *rng);
+extern inline uint64_t
+pcg_setseq_128_xsl_rr_64_boundedrand_r(pcg_state_setseq_128 *rng,
+ uint64_t bound);
+extern inline void pcg_setseq_128_advance_r(pcg_state_setseq_128 *rng,
+ pcg128_t delta);
+
+/* Multi-step advance functions (jump-ahead, jump-back)
+ *
+ * The method used here is based on Brown, "Random Number Generation
+ * with Arbitrary Stride,", Transactions of the American Nuclear
+ * Society (Nov. 1994). The algorithm is very similar to fast
+ * exponentiation.
+ *
+ * Even though delta is an unsigned integer, we can pass a
+ * signed integer to go backwards, it just goes "the long way round".
+ */
+
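+/* Sketch of the invariant the loops below maintain (an illustration, not part
+ * of the upstream sources). One generator step is the affine map
+ *
+ *     state' = cur_mult * state + cur_plus          (mod 2^128)
+ *
+ * Composing the map with itself squares cur_mult and turns cur_plus into
+ * (cur_mult + 1) * cur_plus, so scanning delta from its least-significant
+ * bit and folding the current (cur_mult, cur_plus) pair into
+ * (acc_mult, acc_plus) whenever a bit is set leaves
+ *
+ *     advanced_state = acc_mult * state + acc_plus
+ *
+ * equal to the state delta steps ahead, at a cost of O(log2(delta))
+ * multiply/adds instead of delta generator steps.
+ */
+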
+#ifndef PCG_EMULATED_128BIT_MATH
+
+pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta, pcg128_t cur_mult,
+ pcg128_t cur_plus) {
+ pcg128_t acc_mult = 1u;
+ pcg128_t acc_plus = 0u;
+ while (delta > 0) {
+ if (delta & 1) {
+ acc_mult *= cur_mult;
+ acc_plus = acc_plus * cur_mult + cur_plus;
+ }
+ cur_plus = (cur_mult + 1) * cur_plus;
+ cur_mult *= cur_mult;
+ delta /= 2;
+ }
+ return acc_mult * state + acc_plus;
+}
+
+#else
+
+pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta, pcg128_t cur_mult,
+ pcg128_t cur_plus) {
+ pcg128_t acc_mult = PCG_128BIT_CONSTANT(0u, 1u);
+ pcg128_t acc_plus = PCG_128BIT_CONSTANT(0u, 0u);
+ while ((delta.high > 0) || (delta.low > 0)) {
+ if (delta.low & 1) {
+ acc_mult = _pcg128_mult(acc_mult, cur_mult);
+ acc_plus = _pcg128_add(_pcg128_mult(acc_plus, cur_mult), cur_plus);
+ }
+ cur_plus = _pcg128_mult(_pcg128_add(cur_mult, PCG_128BIT_CONSTANT(0u, 1u)),
+ cur_plus);
+ cur_mult = _pcg128_mult(cur_mult, cur_mult);
+ delta.low >>= 1;
+ delta.low += delta.high & 1;
+ delta.high >>= 1;
+ }
+ return _pcg128_add(_pcg128_mult(acc_mult, state), acc_plus);
+}
+
+#endif
+
+extern inline uint64_t pcg64_next64(pcg64_state *state);
+extern inline uint32_t pcg64_next32(pcg64_state *state);
+
+extern void pcg64_advance(pcg64_state *state, uint64_t *step) {
+ pcg128_t delta;
+#if __SIZEOF_INT128__ && !defined(PCG_FORCE_EMULATED_128BIT_MATH)
+ delta = (((pcg128_t)step[0]) << 64) | step[1];
+#else
+ delta.high = step[0];
+ delta.low = step[1];
+#endif
+ pcg64_advance_r(state->pcg_state, delta);
+}
+
+extern void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc) {
+ pcg128_t s, i;
+#if __SIZEOF_INT128__ && !defined(PCG_FORCE_EMULATED_128BIT_MATH)
+ s = (((pcg128_t)seed[0]) << 64) | seed[1];
+ i = (((pcg128_t)inc[0]) << 64) | inc[1];
+#else
+ s.high = seed[0];
+ s.low = seed[1];
+ i.high = inc[0];
+ i.low = inc[1];
+#endif
+ pcg64_srandom_r(state->pcg_state, s, i);
+}
+
+extern void pcg64_get_state(pcg64_state *state, uint64_t *state_arr,
+ int *has_uint32, uint32_t *uinteger) {
+ /*
+ * state_arr contains state.high, state.low, inc.high, inc.low
+ * which are interpreted as the upper 64 bits (high) or lower
+ * 64 bits of a uint128_t variable
+ *
+ */
+#if __SIZEOF_INT128__ && !defined(PCG_FORCE_EMULATED_128BIT_MATH)
+ state_arr[0] = (uint64_t)(state->pcg_state->state >> 64);
+ state_arr[1] = (uint64_t)(state->pcg_state->state & 0xFFFFFFFFFFFFFFFFULL);
+ state_arr[2] = (uint64_t)(state->pcg_state->inc >> 64);
+ state_arr[3] = (uint64_t)(state->pcg_state->inc & 0xFFFFFFFFFFFFFFFFULL);
+#else
+ state_arr[0] = (uint64_t)state->pcg_state->state.high;
+ state_arr[1] = (uint64_t)state->pcg_state->state.low;
+ state_arr[2] = (uint64_t)state->pcg_state->inc.high;
+ state_arr[3] = (uint64_t)state->pcg_state->inc.low;
+#endif
+ has_uint32[0] = state->has_uint32;
+ uinteger[0] = state->uinteger;
+}
+
+extern void pcg64_set_state(pcg64_state *state, uint64_t *state_arr,
+ int has_uint32, uint32_t uinteger) {
+ /*
+ * state_arr contains state.high, state.low, inc.high, inc.low
+ * which are interpreted as the upper 64 bits (high) or lower
+ * 64 bits of a uint128_t variable
+ *
+ */
+#if __SIZEOF_INT128__ && !defined(PCG_FORCE_EMULATED_128BIT_MATH)
+ state->pcg_state->state = (((pcg128_t)state_arr[0]) << 64) | state_arr[1];
+ state->pcg_state->inc = (((pcg128_t)state_arr[2]) << 64) | state_arr[3];
+#else
+ state->pcg_state->state.high = state_arr[0];
+ state->pcg_state->state.low = state_arr[1];
+ state->pcg_state->inc.high = state_arr[2];
+ state->pcg_state->inc.low = state_arr[3];
+#endif
+ state->has_uint32 = has_uint32;
+ state->uinteger = uinteger;
+}
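+
+/* Round-trip sketch for the two functions above (illustrative only, assuming
+ * a fully initialized pcg64_state st whose pcg_state member points at valid
+ * storage):
+ *
+ *     uint64_t arr[4];  // state.high, state.low, inc.high, inc.low
+ *     int has_uint32;
+ *     uint32_t buffered;
+ *     pcg64_get_state(&st, arr, &has_uint32, &buffered);
+ *     // ... serialize arr, has_uint32 and buffered ...
+ *     pcg64_set_state(&st, arr, has_uint32, buffered);
+ *
+ * This restores the exact stream position, including any buffered half of a
+ * partially consumed 64-bit draw used by pcg64_next32.
+ */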
diff --git a/numpy/random/src/pcg64/pcg64.h b/numpy/random/src/pcg64/pcg64.h
new file mode 100644
index 000000000..d4c96ff5f
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64.h
@@ -0,0 +1,262 @@
+/*
+ * PCG64 Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ * Copyright 2015 Robert Kern <robert.kern@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * including its license and other licensing options, visit
+ *
+ * http://www.pcg-random.org
+ *
+ * Relicensed MIT in May 2019
+ *
+ * The MIT License
+ *
+ * PCG Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PCG64_H_INCLUDED
+#define PCG64_H_INCLUDED 1
+
+#include <inttypes.h>
+
+#ifdef _WIN32
+#define inline __forceinline
+#endif
+
+#if __GNUC_GNU_INLINE__ && !defined(__cplusplus)
+#error Nonstandard GNU inlining semantics. Compile with -std=c99 or better.
+#endif
+
+#if __cplusplus
+extern "C" {
+#endif
+
+#if __SIZEOF_INT128__ && !defined(PCG_FORCE_EMULATED_128BIT_MATH)
+typedef __uint128_t pcg128_t;
+#define PCG_128BIT_CONSTANT(high, low) (((pcg128_t)(high) << 64) + low)
+#else
+typedef struct {
+ uint64_t high;
+ uint64_t low;
+} pcg128_t;
+
+static inline pcg128_t PCG_128BIT_CONSTANT(uint64_t high, uint64_t low) {
+ pcg128_t result;
+ result.high = high;
+ result.low = low;
+ return result;
+}
+
+#define PCG_EMULATED_128BIT_MATH 1
+#endif
+
+typedef struct { pcg128_t state; } pcg_state_128;
+
+typedef struct {
+ pcg128_t state;
+ pcg128_t inc;
+} pcg_state_setseq_128;
+
+#define PCG_DEFAULT_MULTIPLIER_128 \
+ PCG_128BIT_CONSTANT(2549297995355413924ULL, 4865540595714422341ULL)
+#define PCG_DEFAULT_INCREMENT_128 \
+ PCG_128BIT_CONSTANT(6364136223846793005ULL, 1442695040888963407ULL)
+#define PCG_STATE_SETSEQ_128_INITIALIZER \
+ { \
+ PCG_128BIT_CONSTANT(0x979c9a98d8462005ULL, 0x7d3e9cb6cfe0549bULL) \
+ , PCG_128BIT_CONSTANT(0x0000000000000001ULL, 0xda3e39cb94b95bdbULL) \
+ }
+
+static inline uint64_t pcg_rotr_64(uint64_t value, unsigned int rot) {
+ return (value >> rot) | (value << ((-rot) & 63));
+}
+
+#ifdef PCG_EMULATED_128BIT_MATH
+
+static inline pcg128_t _pcg128_add(pcg128_t a, pcg128_t b) {
+ pcg128_t result;
+
+ result.low = a.low + b.low;
+ result.high = a.high + b.high + (result.low < b.low);
+ return result;
+}
+
+static inline void _pcg_mult64(uint64_t x, uint64_t y, uint64_t *z1,
+ uint64_t *z0) {
+
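+  /* The portable branch below is schoolbook multiplication on 32-bit limbs:
+   * writing x = x1*2^32 + x0 and y = y1*2^32 + y0, the full product is
+   *     x*y = x1*y1*2^64 + (x1*y0 + x0*y1)*2^32 + x0*y0,
+   * so *z0 is x*y modulo 2^64 while *z1 collects x1*y1 plus the carries out
+   * of the two middle terms (tracked in t, w1 and w2). */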
+#if defined _WIN32 && _MSC_VER >= 1900 && _M_AMD64
+ z0[0] = _umul128(x, y, z1);
+#else
+ uint64_t x0, x1, y0, y1;
+ uint64_t w0, w1, w2, t;
+ /* Lower 64 bits are straightforward clock-arithmetic. */
+ *z0 = x * y;
+
+ x0 = x & 0xFFFFFFFFULL;
+ x1 = x >> 32;
+ y0 = y & 0xFFFFFFFFULL;
+ y1 = y >> 32;
+ w0 = x0 * y0;
+ t = x1 * y0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFULL;
+ w2 = t >> 32;
+ w1 += x0 * y1;
+ *z1 = x1 * y1 + w2 + (w1 >> 32);
+#endif
+}
+
+static inline pcg128_t _pcg128_mult(pcg128_t a, pcg128_t b) {
+ uint64_t h1;
+ pcg128_t result;
+
+ h1 = a.high * b.low + a.low * b.high;
+ _pcg_mult64(a.low, b.low, &(result.high), &(result.low));
+ result.high += h1;
+ return result;
+}
+
+static inline void pcg_setseq_128_step_r(pcg_state_setseq_128 *rng) {
+ rng->state = _pcg128_add(_pcg128_mult(rng->state, PCG_DEFAULT_MULTIPLIER_128),
+ rng->inc);
+}
+
+static inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state) {
+ return pcg_rotr_64(state.high ^ state.low, state.high >> 58u);
+}
+
+static inline void pcg_setseq_128_srandom_r(pcg_state_setseq_128 *rng,
+ pcg128_t initstate,
+ pcg128_t initseq) {
+ rng->state = PCG_128BIT_CONSTANT(0ULL, 0ULL);
+ rng->inc.high = initseq.high << 1u;
+  /* carry the top bit of the low word into the low bit of inc.high,
+     mirroring the native (initseq << 1u) | 1u path */
+  rng->inc.high |= initseq.low >> 63u;
+ rng->inc.low = (initseq.low << 1u) | 1u;
+ pcg_setseq_128_step_r(rng);
+ rng->state = _pcg128_add(rng->state, initstate);
+ pcg_setseq_128_step_r(rng);
+}
+
+#else /* PCG_EMULATED_128BIT_MATH */
+
+static inline void pcg_setseq_128_step_r(pcg_state_setseq_128 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128 + rng->inc;
+}
+
+static inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state) {
+ return pcg_rotr_64(((uint64_t)(state >> 64u)) ^ (uint64_t)state,
+ state >> 122u);
+}
+
+static inline void pcg_setseq_128_srandom_r(pcg_state_setseq_128 *rng,
+ pcg128_t initstate,
+ pcg128_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_128_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_128_step_r(rng);
+}
+
+#endif /* PCG_EMULATED_128BIT_MATH */
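+
+/* Both srandom variants force the increment to be odd via (initseq << 1u) | 1u,
+ * since an LCG modulo a power of two reaches its full period only with an odd
+ * additive constant; the state is stepped once before and once after initstate
+ * is added, so the first output already depends on both initstate and initseq.
+ */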
+
+static inline uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+
+static inline uint64_t
+pcg_setseq_128_xsl_rr_64_boundedrand_r(pcg_state_setseq_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+extern pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta,
+ pcg128_t cur_mult, pcg128_t cur_plus);
+
+static inline void pcg_setseq_128_advance_r(pcg_state_setseq_128 *rng,
+ pcg128_t delta) {
+ rng->state = pcg_advance_lcg_128(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_128, rng->inc);
+}
+
+typedef pcg_state_setseq_128 pcg64_random_t;
+#define pcg64_random_r pcg_setseq_128_xsl_rr_64_random_r
+#define pcg64_boundedrand_r pcg_setseq_128_xsl_rr_64_boundedrand_r
+#define pcg64_srandom_r pcg_setseq_128_srandom_r
+#define pcg64_advance_r pcg_setseq_128_advance_r
+#define PCG64_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER
+
+#if __cplusplus
+}
+#endif
+
+typedef struct s_pcg64_state {
+ pcg64_random_t *pcg_state;
+ int has_uint32;
+ uint32_t uinteger;
+} pcg64_state;
+
+static inline uint64_t pcg64_next64(pcg64_state *state) {
+ return pcg64_random_r(state->pcg_state);
+}
+
+static inline uint32_t pcg64_next32(pcg64_state *state) {
+ uint64_t next;
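+  /* Each 64-bit draw is split into two 32-bit outputs: the low half is
+   * returned immediately and the high half is buffered in state->uinteger,
+   * with has_uint32 flagging a pending buffered half, so two 32-bit values
+   * are produced per underlying pcg64 step. */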
+ if (state->has_uint32) {
+ state->has_uint32 = 0;
+ return state->uinteger;
+ }
+ next = pcg64_random_r(state->pcg_state);
+ state->has_uint32 = 1;
+ state->uinteger = (uint32_t)(next >> 32);
+ return (uint32_t)(next & 0xffffffff);
+}
+
+void pcg64_advance(pcg64_state *state, uint64_t *step);
+
+void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc);
+
+void pcg64_get_state(pcg64_state *state, uint64_t *state_arr, int *has_uint32,
+ uint32_t *uinteger);
+
+void pcg64_set_state(pcg64_state *state, uint64_t *state_arr, int has_uint32,
+ uint32_t uinteger);
+
+#endif /* PCG64_H_INCLUDED */
diff --git a/numpy/random/src/pcg64/pcg64.orig.c b/numpy/random/src/pcg64/pcg64.orig.c
new file mode 100644
index 000000000..07e97e4b6
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64.orig.c
@@ -0,0 +1,11 @@
+#include "pcg64.orig.h"
+
+extern inline void pcg_setseq_128_srandom_r(pcg64_random_t *rng,
+ pcg128_t initstate,
+ pcg128_t initseq);
+
+extern uint64_t pcg_rotr_64(uint64_t value, unsigned int rot);
+extern inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state);
+extern void pcg_setseq_128_step_r(struct pcg_state_setseq_128 *rng);
+extern uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(struct pcg_state_setseq_128 *rng);
diff --git a/numpy/random/src/pcg64/pcg64.orig.h b/numpy/random/src/pcg64/pcg64.orig.h
new file mode 100644
index 000000000..74be91f31
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64.orig.h
@@ -0,0 +1,2025 @@
+/*
+ * PCG Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * including its license and other licensing options, visit
+ *
+ * http://www.pcg-random.org
+ */
+
+/*
+ * This code is derived from the canonical C++ PCG implementation, which
+ * has many additional features and is preferable if you can use C++ in
+ * your project.
+ *
+ * Much of the derivation was performed mechanically. In particular, the
+ * output functions were generated by compiling the C++ output functions
+ * into LLVM bitcode and then transforming that using the LLVM C backend
+ * (from https://github.com/draperlaboratory/llvm-cbe), and then
+ * postprocessing and hand editing the output.
+ *
+ * Much of the remaining code was generated by C-preprocessor metaprogramming.
+ */
+
+#ifndef PCG_VARIANTS_H_INCLUDED
+#define PCG_VARIANTS_H_INCLUDED 1
+
+#include <inttypes.h>
+
+#if __SIZEOF_INT128__
+typedef __uint128_t pcg128_t;
+#define PCG_128BIT_CONSTANT(high, low) ((((pcg128_t)high) << 64) + low)
+#define PCG_HAS_128BIT_OPS 1
+#endif
+
+#if __GNUC_GNU_INLINE__ && !defined(__cplusplus)
+#error Nonstandard GNU inlining semantics. Compile with -std=c99 or better.
+// We could instead use macros PCG_INLINE and PCG_EXTERN_INLINE
+// but better to just reject ancient C code.
+#endif
+
+#if __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Rotate helper functions.
+ */
+
+inline uint8_t pcg_rotr_8(uint8_t value, unsigned int rot) {
+/* Unfortunately, clang is kinda pathetic when it comes to properly
+ * recognizing idiomatic rotate code, so for clang we actually provide
+ * assembler directives (enabled with PCG_USE_INLINE_ASM). Boo, hiss.
+ */
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm("rorb %%cl, %0" : "=r"(value) : "0"(value), "c"(rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 7));
+#endif
+}
+
+inline uint16_t pcg_rotr_16(uint16_t value, unsigned int rot) {
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm("rorw %%cl, %0" : "=r"(value) : "0"(value), "c"(rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 15));
+#endif
+}
+
+inline uint32_t pcg_rotr_32(uint32_t value, unsigned int rot) {
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm("rorl %%cl, %0" : "=r"(value) : "0"(value), "c"(rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 31));
+#endif
+}
+
+inline uint64_t pcg_rotr_64(uint64_t value, unsigned int rot) {
+#if 0 && PCG_USE_INLINE_ASM && __clang__ && __x86_64__
+ // For whatever reason, clang actually *does* generate rotq by
+ // itself, so we don't need this code.
+ asm ("rorq %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 63));
+#endif
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_rotr_128(pcg128_t value, unsigned int rot) {
+ return (value >> rot) | (value << ((-rot) & 127));
+}
+#endif
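+
+/* The portable rotate expression (value >> rot) | (value << ((-rot) & mask))
+ * masks the left-shift count with (width - 1) so that rot == 0 becomes a
+ * shift by 0 rather than by the full word width (which would be undefined
+ * behaviour in C). For example, pcg_rotr_64(v, 0) == v, while
+ * pcg_rotr_64(v, 8) combines (v >> 8) with (v << 56). */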
+
+/*
+ * Output functions. These are the core of the PCG generation scheme.
+ */
+
+// XSH RS
+
+inline uint8_t pcg_output_xsh_rs_16_8(uint16_t state) {
+ return (uint8_t)(((state >> 7u) ^ state) >> ((state >> 14u) + 3u));
+}
+
+inline uint16_t pcg_output_xsh_rs_32_16(uint32_t state) {
+ return (uint16_t)(((state >> 11u) ^ state) >> ((state >> 30u) + 11u));
+}
+
+inline uint32_t pcg_output_xsh_rs_64_32(uint64_t state) {
+
+ return (uint32_t)(((state >> 22u) ^ state) >> ((state >> 61u) + 22u));
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsh_rs_128_64(pcg128_t state) {
+ return (uint64_t)(((state >> 43u) ^ state) >> ((state >> 124u) + 45u));
+}
+#endif
+
+// XSH RR
+
+inline uint8_t pcg_output_xsh_rr_16_8(uint16_t state) {
+ return pcg_rotr_8(((state >> 5u) ^ state) >> 5u, state >> 13u);
+}
+
+inline uint16_t pcg_output_xsh_rr_32_16(uint32_t state) {
+ return pcg_rotr_16(((state >> 10u) ^ state) >> 12u, state >> 28u);
+}
+
+inline uint32_t pcg_output_xsh_rr_64_32(uint64_t state) {
+ return pcg_rotr_32(((state >> 18u) ^ state) >> 27u, state >> 59u);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsh_rr_128_64(pcg128_t state) {
+ return pcg_rotr_64(((state >> 29u) ^ state) >> 58u, state >> 122u);
+}
+#endif
+
+// RXS M XS
+
+inline uint8_t pcg_output_rxs_m_xs_8_8(uint8_t state) {
+ uint8_t word = ((state >> ((state >> 6u) + 2u)) ^ state) * 217u;
+ return (word >> 6u) ^ word;
+}
+
+inline uint16_t pcg_output_rxs_m_xs_16_16(uint16_t state) {
+ uint16_t word = ((state >> ((state >> 13u) + 3u)) ^ state) * 62169u;
+ return (word >> 11u) ^ word;
+}
+
+inline uint32_t pcg_output_rxs_m_xs_32_32(uint32_t state) {
+ uint32_t word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;
+ return (word >> 22u) ^ word;
+}
+
+inline uint64_t pcg_output_rxs_m_xs_64_64(uint64_t state) {
+ uint64_t word =
+ ((state >> ((state >> 59u) + 5u)) ^ state) * 12605985483714917081ull;
+ return (word >> 43u) ^ word;
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_output_rxs_m_xs_128_128(pcg128_t state) {
+ pcg128_t word =
+ ((state >> ((state >> 122u) + 6u)) ^ state) *
+ (PCG_128BIT_CONSTANT(17766728186571221404ULL, 12605985483714917081ULL));
+ // 327738287884841127335028083622016905945
+ return (word >> 86u) ^ word;
+}
+#endif
+
+// XSL RR (only defined for >= 64 bits)
+
+inline uint32_t pcg_output_xsl_rr_64_32(uint64_t state) {
+ return pcg_rotr_32(((uint32_t)(state >> 32u)) ^ (uint32_t)state,
+ state >> 59u);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state) {
+ return pcg_rotr_64(((uint64_t)(state >> 64u)) ^ (uint64_t)state,
+ state >> 122u);
+}
+#endif
+
+// XSL RR RR (only defined for >= 64 bits)
+
+inline uint64_t pcg_output_xsl_rr_rr_64_64(uint64_t state) {
+ uint32_t rot1 = (uint32_t)(state >> 59u);
+ uint32_t high = (uint32_t)(state >> 32u);
+ uint32_t low = (uint32_t)state;
+ uint32_t xored = high ^ low;
+ uint32_t newlow = pcg_rotr_32(xored, rot1);
+ uint32_t newhigh = pcg_rotr_32(high, newlow & 31u);
+ return (((uint64_t)newhigh) << 32u) | newlow;
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_output_xsl_rr_rr_128_128(pcg128_t state) {
+ uint32_t rot1 = (uint32_t)(state >> 122u);
+ uint64_t high = (uint64_t)(state >> 64u);
+ uint64_t low = (uint64_t)state;
+ uint64_t xored = high ^ low;
+ uint64_t newlow = pcg_rotr_64(xored, rot1);
+ uint64_t newhigh = pcg_rotr_64(high, newlow & 63u);
+ return (((pcg128_t)newhigh) << 64u) | newlow;
+}
+#endif
+
+#define PCG_DEFAULT_MULTIPLIER_8 141U
+#define PCG_DEFAULT_MULTIPLIER_16 12829U
+#define PCG_DEFAULT_MULTIPLIER_32 747796405U
+#define PCG_DEFAULT_MULTIPLIER_64 6364136223846793005ULL
+
+#define PCG_DEFAULT_INCREMENT_8 77U
+#define PCG_DEFAULT_INCREMENT_16 47989U
+#define PCG_DEFAULT_INCREMENT_32 2891336453U
+#define PCG_DEFAULT_INCREMENT_64 1442695040888963407ULL
+
+#if PCG_HAS_128BIT_OPS
+#define PCG_DEFAULT_MULTIPLIER_128 \
+ PCG_128BIT_CONSTANT(2549297995355413924ULL, 4865540595714422341ULL)
+#define PCG_DEFAULT_INCREMENT_128 \
+ PCG_128BIT_CONSTANT(6364136223846793005ULL, 1442695040888963407ULL)
+#endif
+
+ /*
+ * Static initialization constants (if you can't call srandom for some
+ * bizarre reason).
+ */
+
+#define PCG_STATE_ONESEQ_8_INITIALIZER \
+ { 0xd7U }
+#define PCG_STATE_ONESEQ_16_INITIALIZER \
+ { 0x20dfU }
+#define PCG_STATE_ONESEQ_32_INITIALIZER \
+ { 0x46b56677U }
+#define PCG_STATE_ONESEQ_64_INITIALIZER \
+ { 0x4d595df4d0f33173ULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_ONESEQ_128_INITIALIZER \
+ { PCG_128BIT_CONSTANT(0xb8dc10e158a92392ULL, 0x98046df007ec0a53ULL) }
+#endif
+
+#define PCG_STATE_UNIQUE_8_INITIALIZER PCG_STATE_ONESEQ_8_INITIALIZER
+#define PCG_STATE_UNIQUE_16_INITIALIZER PCG_STATE_ONESEQ_16_INITIALIZER
+#define PCG_STATE_UNIQUE_32_INITIALIZER PCG_STATE_ONESEQ_32_INITIALIZER
+#define PCG_STATE_UNIQUE_64_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_UNIQUE_128_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#endif
+
+#define PCG_STATE_MCG_8_INITIALIZER \
+ { 0xe5U }
+#define PCG_STATE_MCG_16_INITIALIZER \
+ { 0xa5e5U }
+#define PCG_STATE_MCG_32_INITIALIZER \
+ { 0xd15ea5e5U }
+#define PCG_STATE_MCG_64_INITIALIZER \
+ { 0xcafef00dd15ea5e5ULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_MCG_128_INITIALIZER \
+ { PCG_128BIT_CONSTANT(0x0000000000000000ULL, 0xcafef00dd15ea5e5ULL) }
+#endif
+
+#define PCG_STATE_SETSEQ_8_INITIALIZER \
+ { 0x9bU, 0xdbU }
+#define PCG_STATE_SETSEQ_16_INITIALIZER \
+ { 0xe39bU, 0x5bdbU }
+#define PCG_STATE_SETSEQ_32_INITIALIZER \
+ { 0xec02d89bU, 0x94b95bdbU }
+#define PCG_STATE_SETSEQ_64_INITIALIZER \
+ { 0x853c49e6748fea9bULL, 0xda3e39cb94b95bdbULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_SETSEQ_128_INITIALIZER \
+ { \
+ PCG_128BIT_CONSTANT(0x979c9a98d8462005ULL, 0x7d3e9cb6cfe0549bULL) \
+ , PCG_128BIT_CONSTANT(0x0000000000000001ULL, 0xda3e39cb94b95bdbULL) \
+ }
+#endif
+
+/* Representations for the oneseq, mcg, and unique variants */
+
+struct pcg_state_8 {
+ uint8_t state;
+};
+
+struct pcg_state_16 {
+ uint16_t state;
+};
+
+struct pcg_state_32 {
+ uint32_t state;
+};
+
+struct pcg_state_64 {
+ uint64_t state;
+};
+
+#if PCG_HAS_128BIT_OPS
+struct pcg_state_128 {
+ pcg128_t state;
+};
+#endif
+
+/* Representations setseq variants */
+
+struct pcg_state_setseq_8 {
+ uint8_t state;
+ uint8_t inc;
+};
+
+struct pcg_state_setseq_16 {
+ uint16_t state;
+ uint16_t inc;
+};
+
+struct pcg_state_setseq_32 {
+ uint32_t state;
+ uint32_t inc;
+};
+
+struct pcg_state_setseq_64 {
+ uint64_t state;
+ uint64_t inc;
+};
+
+#if PCG_HAS_128BIT_OPS
+struct pcg_state_setseq_128 {
+ pcg128_t state;
+ pcg128_t inc;
+};
+#endif
+
+/* Multi-step advance functions (jump-ahead, jump-back) */
+
+extern uint8_t pcg_advance_lcg_8(uint8_t state, uint8_t delta, uint8_t cur_mult,
+ uint8_t cur_plus);
+extern uint16_t pcg_advance_lcg_16(uint16_t state, uint16_t delta,
+ uint16_t cur_mult, uint16_t cur_plus);
+extern uint32_t pcg_advance_lcg_32(uint32_t state, uint32_t delta,
+ uint32_t cur_mult, uint32_t cur_plus);
+extern uint64_t pcg_advance_lcg_64(uint64_t state, uint64_t delta,
+ uint64_t cur_mult, uint64_t cur_plus);
+
+#if PCG_HAS_128BIT_OPS
+extern pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta,
+ pcg128_t cur_mult, pcg128_t cur_plus);
+#endif
+
+/* Functions to advance the underlying LCG, one version for each size and
+ * each style. These functions are considered semi-private. There is rarely
+ * a good reason to call them directly.
+ */
+
+inline void pcg_oneseq_8_step_r(struct pcg_state_8 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8 + PCG_DEFAULT_INCREMENT_8;
+}
+
+inline void pcg_oneseq_8_advance_r(struct pcg_state_8 *rng, uint8_t delta) {
+ rng->state = pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8,
+ PCG_DEFAULT_INCREMENT_8);
+}
+
+inline void pcg_mcg_8_step_r(struct pcg_state_8 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8;
+}
+
+inline void pcg_mcg_8_advance_r(struct pcg_state_8 *rng, uint8_t delta) {
+ rng->state =
+ pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8, 0u);
+}
+
+inline void pcg_unique_8_step_r(struct pcg_state_8 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_8 + (uint8_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_8_advance_r(struct pcg_state_8 *rng, uint8_t delta) {
+ rng->state = pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8,
+ (uint8_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_8_step_r(struct pcg_state_setseq_8 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8 + rng->inc;
+}
+
+inline void pcg_setseq_8_advance_r(struct pcg_state_setseq_8 *rng,
+ uint8_t delta) {
+ rng->state =
+ pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8, rng->inc);
+}
+
+inline void pcg_oneseq_16_step_r(struct pcg_state_16 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_16 + PCG_DEFAULT_INCREMENT_16;
+}
+
+inline void pcg_oneseq_16_advance_r(struct pcg_state_16 *rng, uint16_t delta) {
+ rng->state = pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16,
+ PCG_DEFAULT_INCREMENT_16);
+}
+
+inline void pcg_mcg_16_step_r(struct pcg_state_16 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_16;
+}
+
+inline void pcg_mcg_16_advance_r(struct pcg_state_16 *rng, uint16_t delta) {
+ rng->state =
+ pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16, 0u);
+}
+
+inline void pcg_unique_16_step_r(struct pcg_state_16 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_16 + (uint16_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_16_advance_r(struct pcg_state_16 *rng, uint16_t delta) {
+ rng->state = pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16,
+ (uint16_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_16_step_r(struct pcg_state_setseq_16 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_16 + rng->inc;
+}
+
+inline void pcg_setseq_16_advance_r(struct pcg_state_setseq_16 *rng,
+ uint16_t delta) {
+ rng->state = pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16,
+ rng->inc);
+}
+
+inline void pcg_oneseq_32_step_r(struct pcg_state_32 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_32 + PCG_DEFAULT_INCREMENT_32;
+}
+
+inline void pcg_oneseq_32_advance_r(struct pcg_state_32 *rng, uint32_t delta) {
+ rng->state = pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32,
+ PCG_DEFAULT_INCREMENT_32);
+}
+
+inline void pcg_mcg_32_step_r(struct pcg_state_32 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_32;
+}
+
+inline void pcg_mcg_32_advance_r(struct pcg_state_32 *rng, uint32_t delta) {
+ rng->state =
+ pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32, 0u);
+}
+
+inline void pcg_unique_32_step_r(struct pcg_state_32 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_32 + (uint32_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_32_advance_r(struct pcg_state_32 *rng, uint32_t delta) {
+ rng->state = pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32,
+ (uint32_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_32_step_r(struct pcg_state_setseq_32 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_32 + rng->inc;
+}
+
+inline void pcg_setseq_32_advance_r(struct pcg_state_setseq_32 *rng,
+ uint32_t delta) {
+ rng->state = pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32,
+ rng->inc);
+}
+
+inline void pcg_oneseq_64_step_r(struct pcg_state_64 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_64 + PCG_DEFAULT_INCREMENT_64;
+}
+
+inline void pcg_oneseq_64_advance_r(struct pcg_state_64 *rng, uint64_t delta) {
+ rng->state = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64,
+ PCG_DEFAULT_INCREMENT_64);
+}
+
+inline void pcg_mcg_64_step_r(struct pcg_state_64 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64;
+}
+
+inline void pcg_mcg_64_advance_r(struct pcg_state_64 *rng, uint64_t delta) {
+ rng->state =
+ pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64, 0u);
+}
+
+inline void pcg_unique_64_step_r(struct pcg_state_64 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_64 + (uint64_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_64_advance_r(struct pcg_state_64 *rng, uint64_t delta) {
+ rng->state = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64,
+ (uint64_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_64_step_r(struct pcg_state_setseq_64 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64 + rng->inc;
+}
+
+inline void pcg_setseq_64_advance_r(struct pcg_state_setseq_64 *rng,
+ uint64_t delta) {
+ rng->state = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64,
+ rng->inc);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_step_r(struct pcg_state_128 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_128 + PCG_DEFAULT_INCREMENT_128;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_advance_r(struct pcg_state_128 *rng,
+ pcg128_t delta) {
+ rng->state = pcg_advance_lcg_128(
+ rng->state, delta, PCG_DEFAULT_MULTIPLIER_128, PCG_DEFAULT_INCREMENT_128);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_step_r(struct pcg_state_128 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_advance_r(struct pcg_state_128 *rng, pcg128_t delta) {
+ rng->state =
+ pcg_advance_lcg_128(rng->state, delta, PCG_DEFAULT_MULTIPLIER_128, 0u);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_step_r(struct pcg_state_128 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128 +
+ (pcg128_t)(((intptr_t)rng) | 1u);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_advance_r(struct pcg_state_128 *rng,
+ pcg128_t delta) {
+ rng->state =
+ pcg_advance_lcg_128(rng->state, delta, PCG_DEFAULT_MULTIPLIER_128,
+ (pcg128_t)(((intptr_t)rng) | 1u));
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_step_r(struct pcg_state_setseq_128 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128 + rng->inc;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_advance_r(struct pcg_state_setseq_128 *rng,
+ pcg128_t delta) {
+ rng->state = pcg_advance_lcg_128(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_128, rng->inc);
+}
+#endif
+
+/* Functions to seed the RNG state, one version for each size and each
+ * style. Unlike the step functions, regular users can and should call
+ * these functions.
+ */
+
+inline void pcg_oneseq_8_srandom_r(struct pcg_state_8 *rng, uint8_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_8_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_8_step_r(rng);
+}
+
+inline void pcg_mcg_8_srandom_r(struct pcg_state_8 *rng, uint8_t initstate) {
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_8_srandom_r(struct pcg_state_8 *rng, uint8_t initstate) {
+ rng->state = 0U;
+ pcg_unique_8_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_8_step_r(rng);
+}
+
+inline void pcg_setseq_8_srandom_r(struct pcg_state_setseq_8 *rng,
+ uint8_t initstate, uint8_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_8_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_8_step_r(rng);
+}
+
+inline void pcg_oneseq_16_srandom_r(struct pcg_state_16 *rng,
+ uint16_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_16_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_16_step_r(rng);
+}
+
+inline void pcg_mcg_16_srandom_r(struct pcg_state_16 *rng, uint16_t initstate) {
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_16_srandom_r(struct pcg_state_16 *rng,
+ uint16_t initstate) {
+ rng->state = 0U;
+ pcg_unique_16_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_16_step_r(rng);
+}
+
+inline void pcg_setseq_16_srandom_r(struct pcg_state_setseq_16 *rng,
+ uint16_t initstate, uint16_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_16_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_16_step_r(rng);
+}
+
+inline void pcg_oneseq_32_srandom_r(struct pcg_state_32 *rng,
+ uint32_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_32_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_32_step_r(rng);
+}
+
+inline void pcg_mcg_32_srandom_r(struct pcg_state_32 *rng, uint32_t initstate) {
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_32_srandom_r(struct pcg_state_32 *rng,
+ uint32_t initstate) {
+ rng->state = 0U;
+ pcg_unique_32_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_32_step_r(rng);
+}
+
+inline void pcg_setseq_32_srandom_r(struct pcg_state_setseq_32 *rng,
+ uint32_t initstate, uint32_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_32_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_32_step_r(rng);
+}
+
+inline void pcg_oneseq_64_srandom_r(struct pcg_state_64 *rng,
+ uint64_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_64_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_64_step_r(rng);
+}
+
+inline void pcg_mcg_64_srandom_r(struct pcg_state_64 *rng, uint64_t initstate) {
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_64_srandom_r(struct pcg_state_64 *rng,
+ uint64_t initstate) {
+ rng->state = 0U;
+ pcg_unique_64_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_64_step_r(rng);
+}
+
+inline void pcg_setseq_64_srandom_r(struct pcg_state_setseq_64 *rng,
+ uint64_t initstate, uint64_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_64_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_64_step_r(rng);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_srandom_r(struct pcg_state_128 *rng,
+ pcg128_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_128_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_128_step_r(rng);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_srandom_r(struct pcg_state_128 *rng,
+ pcg128_t initstate) {
+ rng->state = initstate | 1u;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_srandom_r(struct pcg_state_128 *rng,
+ pcg128_t initstate) {
+ rng->state = 0U;
+ pcg_unique_128_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_128_step_r(rng);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_srandom_r(struct pcg_state_setseq_128 *rng,
+ pcg128_t initstate, pcg128_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_128_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_128_step_r(rng);
+}
+#endif
+
+/* Now, finally we create each of the individual generators. We provide
+ * a random_r function that provides a random number of the appropriate
+ * type (using the full range of the type) and a boundedrand_r version
+ * that provides a number in the range [0, bound).
+ *
+ * Implementation notes for boundedrand_r:
+ *
+ * To avoid bias, we need to make the range of the RNG a multiple of
+ * bound, which we do by dropping output less than a threshold.
+ * Let's consider a 32-bit case... A naive scheme to calculate the
+ * threshold would be to do
+ *
+ * uint32_t threshold = 0x100000000ull % bound;
+ *
+ * but 64-bit div/mod is slower than 32-bit div/mod (especially on
+ * 32-bit platforms). In essence, we do
+ *
+ * uint32_t threshold = (0x100000000ull-bound) % bound;
+ *
+ * because this version will calculate the same modulus, but the LHS
+ * value is less than 2^32.
+ *
+ * (Note that using modulo is only wise for good RNGs; poorer RNGs
+ * such as raw LCGs do better using a technique based on division.)
+ * Empirical tests show that division is preferable to modulus for
+ * reducing the range of an RNG. It's faster, and sometimes it can
+ * even be statistically preferable.
+ */
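+
+/* Worked instance of the threshold formula above for bound = 6 with a 32-bit
+ * generator (illustrative): in unsigned arithmetic -bound % bound equals
+ * (2^32 - bound) % bound, so
+ *
+ *     threshold = (4294967296 - 6) % 6 = 4
+ *
+ * and outputs below 4 are rejected. The accepted range [4, 2^32) holds
+ * exactly 4294967292 = 6 * 715827882 values, a multiple of 6, so r % 6 is
+ * uniform over {0, ..., 5}. */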
+
+/* Generation functions for XSH RS */
+
+inline uint8_t pcg_oneseq_16_xsh_rs_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t pcg_oneseq_16_xsh_rs_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rs_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rs_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rs_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rs_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsh_rs_64_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsh_rs_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_unique_16_xsh_rs_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t pcg_unique_16_xsh_rs_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_unique_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_unique_32_xsh_rs_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_unique_32_xsh_rs_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_64_xsh_rs_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsh_rs_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsh_rs_64_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsh_rs_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t
+pcg_setseq_16_xsh_rs_8_random_r(struct pcg_state_setseq_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_16_xsh_rs_8_boundedrand_r(struct pcg_state_setseq_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rs_16_random_r(struct pcg_state_setseq_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rs_16_boundedrand_r(struct pcg_state_setseq_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rs_32_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rs_32_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rs_64_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rs_64_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_mcg_16_xsh_rs_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_mcg_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t pcg_mcg_16_xsh_rs_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_mcg_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_mcg_32_xsh_rs_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_mcg_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_mcg_32_xsh_rs_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_mcg_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_mcg_64_xsh_rs_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsh_rs_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rs_64_random_r(struct pcg_state_128 *rng) {
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rs_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSH RR */
+
+inline uint8_t pcg_oneseq_16_xsh_rr_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_oneseq_16_xsh_rr_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rr_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rr_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsh_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsh_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_unique_16_xsh_rr_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_unique_16_xsh_rr_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_unique_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_unique_32_xsh_rr_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_unique_32_xsh_rr_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_64_xsh_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsh_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsh_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsh_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t
+pcg_setseq_16_xsh_rr_8_random_r(struct pcg_state_setseq_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_16_xsh_rr_8_boundedrand_r(struct pcg_state_setseq_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rr_16_random_r(struct pcg_state_setseq_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rr_16_boundedrand_r(struct pcg_state_setseq_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rr_32_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rr_32_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rr_64_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rr_64_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_mcg_16_xsh_rr_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_mcg_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_mcg_16_xsh_rr_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_mcg_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_mcg_32_xsh_rr_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_mcg_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_mcg_32_xsh_rr_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_mcg_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_mcg_64_xsh_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsh_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for RXS M XS (no MCG versions because they
+ * don't make sense when you want to use the entire state)
+ */
+
+inline uint8_t pcg_oneseq_8_rxs_m_xs_8_random_r(struct pcg_state_8 *rng) {
+ uint8_t oldstate = rng->state;
+ pcg_oneseq_8_step_r(rng);
+ return pcg_output_rxs_m_xs_8_8(oldstate);
+}
+
+inline uint8_t pcg_oneseq_8_rxs_m_xs_8_boundedrand_r(struct pcg_state_8 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_8_rxs_m_xs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_16_rxs_m_xs_16_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_oneseq_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_16 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_32_rxs_m_xs_32_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_oneseq_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_32 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t pcg_oneseq_64_rxs_m_xs_64_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_oneseq_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_rxs_m_xs_128_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_oneseq_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint16_t pcg_unique_16_rxs_m_xs_16_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_unique_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_16 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_32_rxs_m_xs_32_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_unique_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_32 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t pcg_unique_64_rxs_m_xs_64_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_unique_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_rxs_m_xs_128_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_unique_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t
+pcg_setseq_8_rxs_m_xs_8_random_r(struct pcg_state_setseq_8 *rng) {
+ uint8_t oldstate = rng->state;
+ pcg_setseq_8_step_r(rng);
+ return pcg_output_rxs_m_xs_8_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_8_rxs_m_xs_8_boundedrand_r(struct pcg_state_setseq_8 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_8_rxs_m_xs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_16_rxs_m_xs_16_random_r(struct pcg_state_setseq_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_setseq_16 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_32_rxs_m_xs_32_random_r(struct pcg_state_setseq_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_setseq_32 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t
+pcg_setseq_64_rxs_m_xs_64_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_setseq_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_rxs_m_xs_128_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_setseq_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSL RR (only defined for "large" types) */
+
+inline uint32_t pcg_oneseq_64_xsl_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsl_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsl_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsl_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t pcg_unique_64_xsl_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsl_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsl_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsl_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t
+pcg_setseq_64_xsl_rr_32_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsl_rr_32_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsl_rr_64_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t pcg_mcg_64_xsl_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsl_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsl_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsl_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSL RR RR (only defined for "large" types) */
+
+inline uint64_t pcg_oneseq_64_xsl_rr_rr_64_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_oneseq_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_xsl_rr_rr_128_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_oneseq_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint64_t pcg_unique_64_xsl_rr_rr_64_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_unique_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_xsl_rr_rr_128_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_unique_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint64_t
+pcg_setseq_64_xsl_rr_rr_64_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_setseq_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_xsl_rr_rr_128_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_setseq_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+//// Typedefs
+typedef struct pcg_state_setseq_64 pcg32_random_t;
+typedef struct pcg_state_64 pcg32s_random_t;
+typedef struct pcg_state_64 pcg32u_random_t;
+typedef struct pcg_state_64 pcg32f_random_t;
+//// random_r
+#define pcg32_random_r pcg_setseq_64_xsh_rr_32_random_r
+#define pcg32s_random_r pcg_oneseq_64_xsh_rr_32_random_r
+#define pcg32u_random_r pcg_unique_64_xsh_rr_32_random_r
+#define pcg32f_random_r pcg_mcg_64_xsh_rs_32_random_r
+//// boundedrand_r
+#define pcg32_boundedrand_r pcg_setseq_64_xsh_rr_32_boundedrand_r
+#define pcg32s_boundedrand_r pcg_oneseq_64_xsh_rr_32_boundedrand_r
+#define pcg32u_boundedrand_r pcg_unique_64_xsh_rr_32_boundedrand_r
+#define pcg32f_boundedrand_r pcg_mcg_64_xsh_rs_32_boundedrand_r
+//// srandom_r
+#define pcg32_srandom_r pcg_setseq_64_srandom_r
+#define pcg32s_srandom_r pcg_oneseq_64_srandom_r
+#define pcg32u_srandom_r pcg_unique_64_srandom_r
+#define pcg32f_srandom_r pcg_mcg_64_srandom_r
+//// advance_r
+#define pcg32_advance_r pcg_setseq_64_advance_r
+#define pcg32s_advance_r pcg_oneseq_64_advance_r
+#define pcg32u_advance_r pcg_unique_64_advance_r
+#define pcg32f_advance_r pcg_mcg_64_advance_r
+
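+/* A minimal usage sketch for the pcg32 aliases defined above (assuming a
+ * single seeded generator is all that is needed):
+ *
+ *     pcg32_random_t rng;
+ *     pcg32_srandom_r(&rng, 42u, 54u);               // seed state and stream
+ *     uint32_t word = pcg32_random_r(&rng);          // full 32-bit output
+ *     uint32_t roll = pcg32_boundedrand_r(&rng, 6u); // unbiased value in [0, 6)
+ */
+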
+#if PCG_HAS_128BIT_OPS
+//// Typedefs
+typedef struct pcg_state_setseq_128 pcg64_random_t;
+typedef struct pcg_state_128 pcg64s_random_t;
+typedef struct pcg_state_128 pcg64u_random_t;
+typedef struct pcg_state_128 pcg64f_random_t;
+//// random_r
+#define pcg64_random_r pcg_setseq_128_xsl_rr_64_random_r
+#define pcg64s_random_r pcg_oneseq_128_xsl_rr_64_random_r
+#define pcg64u_random_r pcg_unique_128_xsl_rr_64_random_r
+#define pcg64f_random_r pcg_mcg_128_xsl_rr_64_random_r
+//// boundedrand_r
+#define pcg64_boundedrand_r pcg_setseq_128_xsl_rr_64_boundedrand_r
+#define pcg64s_boundedrand_r pcg_oneseq_128_xsl_rr_64_boundedrand_r
+#define pcg64u_boundedrand_r pcg_unique_128_xsl_rr_64_boundedrand_r
+#define pcg64f_boundedrand_r pcg_mcg_128_xsl_rr_64_boundedrand_r
+//// srandom_r
+#define pcg64_srandom_r pcg_setseq_128_srandom_r
+#define pcg64s_srandom_r pcg_oneseq_128_srandom_r
+#define pcg64u_srandom_r pcg_unique_128_srandom_r
+#define pcg64f_srandom_r pcg_mcg_128_srandom_r
+//// advance_r
+#define pcg64_advance_r pcg_setseq_128_advance_r
+#define pcg64s_advance_r pcg_oneseq_128_advance_r
+#define pcg64u_advance_r pcg_unique_128_advance_r
+#define pcg64f_advance_r pcg_mcg_128_advance_r
+#endif
+
+//// Typedefs
+typedef struct pcg_state_8 pcg8si_random_t;
+typedef struct pcg_state_16 pcg16si_random_t;
+typedef struct pcg_state_32 pcg32si_random_t;
+typedef struct pcg_state_64 pcg64si_random_t;
+//// random_r
+#define pcg8si_random_r pcg_oneseq_8_rxs_m_xs_8_random_r
+#define pcg16si_random_r pcg_oneseq_16_rxs_m_xs_16_random_r
+#define pcg32si_random_r pcg_oneseq_32_rxs_m_xs_32_random_r
+#define pcg64si_random_r pcg_oneseq_64_rxs_m_xs_64_random_r
+//// boundedrand_r
+#define pcg8si_boundedrand_r pcg_oneseq_8_rxs_m_xs_8_boundedrand_r
+#define pcg16si_boundedrand_r pcg_oneseq_16_rxs_m_xs_16_boundedrand_r
+#define pcg32si_boundedrand_r pcg_oneseq_32_rxs_m_xs_32_boundedrand_r
+#define pcg64si_boundedrand_r pcg_oneseq_64_rxs_m_xs_64_boundedrand_r
+//// srandom_r
+#define pcg8si_srandom_r pcg_oneseq_8_srandom_r
+#define pcg16si_srandom_r pcg_oneseq_16_srandom_r
+#define pcg32si_srandom_r pcg_oneseq_32_srandom_r
+#define pcg64si_srandom_r pcg_oneseq_64_srandom_r
+//// advance_r
+#define pcg8si_advance_r pcg_oneseq_8_advance_r
+#define pcg16si_advance_r pcg_oneseq_16_advance_r
+#define pcg32si_advance_r pcg_oneseq_32_advance_r
+#define pcg64si_advance_r pcg_oneseq_64_advance_r
+
+#if PCG_HAS_128BIT_OPS
+typedef struct pcg_state_128 pcg128si_random_t;
+#define pcg128si_random_r pcg_oneseq_128_rxs_m_xs_128_random_r
+#define pcg128si_boundedrand_r pcg_oneseq_128_rxs_m_xs_128_boundedrand_r
+#define pcg128si_srandom_r pcg_oneseq_128_srandom_r
+#define pcg128si_advance_r pcg_oneseq_128_advance_r
+#endif
+
+//// Typedefs
+typedef struct pcg_state_setseq_8 pcg8i_random_t;
+typedef struct pcg_state_setseq_16 pcg16i_random_t;
+typedef struct pcg_state_setseq_32 pcg32i_random_t;
+typedef struct pcg_state_setseq_64 pcg64i_random_t;
+//// random_r
+#define pcg8i_random_r pcg_setseq_8_rxs_m_xs_8_random_r
+#define pcg16i_random_r pcg_setseq_16_rxs_m_xs_16_random_r
+#define pcg32i_random_r pcg_setseq_32_rxs_m_xs_32_random_r
+#define pcg64i_random_r pcg_setseq_64_rxs_m_xs_64_random_r
+//// boundedrand_r
+#define pcg8i_boundedrand_r pcg_setseq_8_rxs_m_xs_8_boundedrand_r
+#define pcg16i_boundedrand_r pcg_setseq_16_rxs_m_xs_16_boundedrand_r
+#define pcg32i_boundedrand_r pcg_setseq_32_rxs_m_xs_32_boundedrand_r
+#define pcg64i_boundedrand_r pcg_setseq_64_rxs_m_xs_64_boundedrand_r
+//// srandom_r
+#define pcg8i_srandom_r pcg_setseq_8_srandom_r
+#define pcg16i_srandom_r pcg_setseq_16_srandom_r
+#define pcg32i_srandom_r pcg_setseq_32_srandom_r
+#define pcg64i_srandom_r pcg_setseq_64_srandom_r
+//// advance_r
+#define pcg8i_advance_r pcg_setseq_8_advance_r
+#define pcg16i_advance_r pcg_setseq_16_advance_r
+#define pcg32i_advance_r pcg_setseq_32_advance_r
+#define pcg64i_advance_r pcg_setseq_64_advance_r
+
+#if PCG_HAS_128BIT_OPS
+typedef struct pcg_state_setseq_128 pcg128i_random_t;
+#define pcg128i_random_r pcg_setseq_128_rxs_m_xs_128_random_r
+#define pcg128i_boundedrand_r pcg_setseq_128_rxs_m_xs_128_boundedrand_r
+#define pcg128i_srandom_r pcg_setseq_128_srandom_r
+#define pcg128i_advance_r pcg_setseq_128_advance_r
+#endif
+
+extern uint32_t pcg32_random();
+extern uint32_t pcg32_boundedrand(uint32_t bound);
+extern void pcg32_srandom(uint64_t seed, uint64_t seq);
+extern void pcg32_advance(uint64_t delta);
+
+#if PCG_HAS_128BIT_OPS
+extern uint64_t pcg64_random();
+extern uint64_t pcg64_boundedrand(uint64_t bound);
+extern void pcg64_srandom(pcg128_t seed, pcg128_t seq);
+extern void pcg64_advance(pcg128_t delta);
+#endif
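+
+/* The extern declarations above expose a convenience API built on one
+ * global generator; a minimal sketch, assuming the corresponding
+ * definitions are compiled and linked in:
+ *
+ *     pcg32_srandom(42u, 54u);                // seed the global stream
+ *     uint32_t word = pcg32_random();         // next 32-bit output
+ *     uint32_t roll = pcg32_boundedrand(6u);  // unbiased value in [0, 6)
+ */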
+
+/*
+ * Static initialization constants (if you can't call srandom for some
+ * bizarre reason).
+ */
+
+#define PCG32_INITIALIZER PCG_STATE_SETSEQ_64_INITIALIZER
+#define PCG32U_INITIALIZER PCG_STATE_UNIQUE_64_INITIALIZER
+#define PCG32S_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#define PCG32F_INITIALIZER PCG_STATE_MCG_64_INITIALIZER
+
+#if PCG_HAS_128BIT_OPS
+#define PCG64_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER
+#define PCG64U_INITIALIZER PCG_STATE_UNIQUE_128_INITIALIZER
+#define PCG64S_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#define PCG64F_INITIALIZER PCG_STATE_MCG_128_INITIALIZER
+#endif
+
+#define PCG8SI_INITIALIZER PCG_STATE_ONESEQ_8_INITIALIZER
+#define PCG16SI_INITIALIZER PCG_STATE_ONESEQ_16_INITIALIZER
+#define PCG32SI_INITIALIZER PCG_STATE_ONESEQ_32_INITIALIZER
+#define PCG64SI_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG128SI_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#endif
+
+#define PCG8I_INITIALIZER PCG_STATE_SETSEQ_8_INITIALIZER
+#define PCG16I_INITIALIZER PCG_STATE_SETSEQ_16_INITIALIZER
+#define PCG32I_INITIALIZER PCG_STATE_SETSEQ_32_INITIALIZER
+#define PCG64I_INITIALIZER PCG_STATE_SETSEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG128I_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER
+#endif
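+
+/* With these constants, a generator can start from a fixed, statically
+ * known state when seeding at run time is not an option, e.g. (a minimal
+ * sketch):
+ *
+ *     static pcg32_random_t rng = PCG32_INITIALIZER;
+ */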
+
+#if __cplusplus
+}
+#endif
+
+#endif // PCG_VARIANTS_H_INCLUDED