#pragma GCC target("avx2") #pragma GCC optimize("O3") #pragma GCC optimize("unroll-loops") #pragma GCC optimize ("fast-math") #include #include #include #include #include #include #include #include /* signed integer */ typedef int8_t i8; typedef int16_t i16; typedef int32_t i32; typedef int64_t i64; typedef __int128_t i128; /* unsigned integer */ typedef uint8_t u8; typedef uint16_t u16; typedef uint32_t u32; typedef uint64_t u64; typedef __uint128_t u128; /* floating point number */ typedef float f32; typedef double f64; typedef long double f80; typedef int FastInt; /* io */ static inline FastInt read_int(void) { FastInt c, x = 0, f = 1; while (c = getchar_unlocked(), c < 48 || c > 57) if (c == 45) f = -f; while (47 < c && c < 58) { x = x * 10 + c - 48; c = getchar_unlocked(); } return f * x; } static inline i64 in(void) { i64 c, x = 0, f = 1; while (c = getchar_unlocked(), c < 48 || c > 57) if (c == 45) f = -f; while (47 < c && c < 58) { x = x * 10 + c - 48; c = getchar_unlocked(); } return f * x; } static inline u64 inu(void) { u64 c, x = 0; while (c = getchar_unlocked(), c < 48 || c > 57); while (47 < c && c < 58) { x = x * 10 + c - 48; c = getchar_unlocked(); } return x; } static inline void write_int(FastInt x) { if (x < 0) { putchar_unlocked('-'); x = -x; } if (x >= 10) write_int(x / 10); putchar_unlocked(x - x / 10 * 10 + 48); } static inline void out(i64 x) { if (x < 0) { putchar_unlocked('-'); x = -x; } if (x >= 10) out(x / 10); putchar_unlocked(x - x / 10 * 10 + 48); } static inline void outu(u64 x) { if (x >= 10) outu(x / 10); putchar_unlocked(x - x / 10 * 10 + 48); } static inline void NL(void) { putchar_unlocked('\n'); } static inline void SP(void) { putchar_unlocked(' '); } /* MACROS */ #define POPCNT(a) __builtin_popcountll((a)) #define CTZ(a) __builtin_ctzll((a)) #define CLZ(a) __builtin_clzll((a)) #define LSBit(a) ((a)&(-(a))) #define CLSBit(a) ((a)&((a)-(1))) #define HAS_SINGLE_BIT(a) (POPCNT((a))==1) #define BIT_CEIL(a) ((!(a))?(1):((POPCNT(a))==(1)?((1ull)<<((63)-CLZ((a)))):((1ull)<<((64)-CLZ(a))))) #define BIT_FLOOR(a) ((!(a))?(0):((1ull)<<((63)-CLZ((a))))) #define BIT_WIDTH(a) ((a)?((64)-CLZ((a))):(0)) #define _ROTL(x, s) (((x)<<((s)%(64)))|(((x)>>((64)-((s)%(64)))))) #define _ROTR(x, s) (((x)>>((s)%(64)))|(((x)<<((64)-((s)%(64)))))) #define ROTL(x, s) (((s)==(0))?(0):(((s)<(0))?(_ROTR((x),-(s))):(_ROTL((x),(s))))) #define ROTR(x, s) (((s)==(0))?(0):(((s)<(0))?(_ROTL((x),-(s))):(_ROTR((x),(s))))) #define SWAP(a, b) (((a)^=(b)),((b)^=(a)),((a)^=(b))) #define MAX(a, b) ((a)>(b)?(a):(b)) #define MIN(a, b) ((a)<(b)?(a):(b)) /* montgomery modular multiplication 32-bit */ typedef u32 Montgomery; Montgomery _one(u32 mod) { return -1u % mod + 1; } Montgomery _r2(u32 mod) { return (u64)(i64)-1 % mod + 1; } Montgomery _inv(u32 mod) { u32 u = 1, v = 0, x = 1ULL << 31; for (FastInt i = 0; i < 32; i++) { if (u & 1) u = (u + mod) >> 1, v = (v >> 1) + x; else u >>= 1, v >>= 1; } return -v; } Montgomery _MR(u64 a, Montgomery inv, u32 mod) { i32 z = (a >> 32) - ((((u32)a * inv) * (u64)mod) >> 32); return z < 0 ? z + mod : (u64)z; } Montgomery _to_montgomery(u32 a, Montgomery r2, Montgomery inv, u32 mod) { return _MR((u64)a * r2, inv, mod); } u32 _from_montgomery(Montgomery A, Montgomery inv, u32 mod) { u32 temp = _MR((u64)A, inv, mod) - mod; return temp + (mod & -(temp >> 31u)); } Montgomery add_MR(Montgomery A, Montgomery B, u32 mod) { A += B - (mod << 1u); A += (mod << 1u) & -(A >> 31u); return A; } Montgomery sub_MR(Montgomery A, Montgomery B, u32 mod) { A -= B; A += (mod << 1u) & -(A >> 31u); return A; } Montgomery min_MR(Montgomery A, u32 mod) { return sub_MR(0, A, mod); } Montgomery mul_MR(Montgomery A, Montgomery B, Montgomery inv, u32 mod) { return _MR((u64)A * B, inv, mod); } Montgomery pow_MR(Montgomery A, FastInt n, Montgomery inv, u32 mod) { Montgomery ret = _one(mod); while (n > 0) { if (n & 1) ret = mul_MR(ret, A, inv, mod); A = mul_MR(A, A, inv, mod); n >>= 1; } return ret; } Montgomery inv_MR(Montgomery A, Montgomery inv, u32 mod) { return pow_MR(A, (i64)mod - 2, inv, mod); } Montgomery div_MR(Montgomery A, Montgomery B, Montgomery inv, u32 mod) { return mul_MR(A, inv_MR(B, inv, mod), inv, mod); } bool eq_MR(Montgomery A, Montgomery B, Montgomery inv, u32 mod) { return _from_montgomery(A, inv, mod) == _from_montgomery(B, inv, mod); } bool neq_MR(Montgomery A, Montgomery B, Montgomery inv, u32 mod) { return _from_montgomery(A, inv, mod) != _from_montgomery(B, inv, mod); } static inline Montgomery in_mint(Montgomery r2, Montgomery inv, u32 mod) { u32 c, x = 0; while (c = getchar_unlocked(), c < 48 || c > 57); while (47 < c && c < 58) { x = x * 10 + c - 48; c = getchar_unlocked(); } return _to_montgomery(x, r2, inv, mod); } static inline void out_mint(Montgomery A, Montgomery inv, u32 mod) { u32 a = _from_montgomery(A, inv, mod); if (a >= 10) outu(a / 10); putchar_unlocked(a - a / 10 * 10 + 48); } /* montgomery modular multiplication 64-bit */ typedef u64 Montgomery64; Montgomery64 _one_64(u64 mod) { return -1ull % mod + 1; } Montgomery64 _r2_64(u64 mod) { return (u128)(i128)-1 % mod + 1; } Montgomery64 _inv_64(u64 mod) { u64 u = 1, v = 0, x = 1ULL << 63; for (FastInt i = 0; i < 64; i++) { if (u & 1) u = (u + mod) >> 1, v = (v >> 1) + x; else u >>= 1, v >>= 1; } return -v; } Montgomery64 _MR_64(u128 a, Montgomery64 inv, u64 mod) { i64 A = (a >> 64) - ((((u64)a * inv) * (u128)mod) >> 64); return A < 0 ? A + mod : (u64)A; } Montgomery64 _to_montgomery_64(u64 a, Montgomery64 r2, Montgomery64 inv, u64 mod) { return _MR_64((u128)a * r2, inv, mod); } u64 _from_montgomery_64(Montgomery64 A, Montgomery64 inv, u64 mod) { u64 temp = _MR_64((u128)A, inv, mod) - mod; return temp + (mod & -(temp >> 63u)); } Montgomery64 add_MR_64(Montgomery64 A, Montgomery64 B, u64 mod) { A += B - (mod << 1u); A += (mod << 1u) & -(A >> 63u); return A; } Montgomery64 sub_MR_64(Montgomery64 A, Montgomery64 B, u64 mod) { A -= B; A += (mod << 1u) & -(A >> 63u); return A; } Montgomery64 min_MR_64(Montgomery64 A, u64 mod) { return sub_MR_64(0, A, mod); } Montgomery64 mul_MR_64(Montgomery64 A, Montgomery64 B, Montgomery64 inv, u64 mod) { return _MR_64((u128)A * B, inv, mod); } Montgomery64 pow_MR_64(Montgomery64 A, i64 n, Montgomery64 inv, u64 mod) { Montgomery64 ret = _one_64(mod), mul = A; while (n > 0) { if (n & 1) ret = mul_MR_64(ret, mul, inv, mod); mul = mul_MR_64(mul, mul, inv, mod); n >>= 1; } return ret; } Montgomery64 inv_MR_64(Montgomery64 A, Montgomery64 inv, u64 mod) { return pow_MR_64(A, (i64)mod - 2, inv, mod); } Montgomery64 div_MR_64(Montgomery64 A, Montgomery64 B, Montgomery64 inv, u64 mod) { return mul_MR_64(A, inv_MR_64(B, inv, mod), inv, mod); } bool eq_MR_64(Montgomery64 A, Montgomery64 B, Montgomery64 inv, u64 mod) { return _from_montgomery_64(A, inv, mod) == _from_montgomery_64(B, inv, mod); } bool neq_MR_64(Montgomery64 A, Montgomery64 B, Montgomery64 inv, u64 mod) { return _from_montgomery_64(A, inv, mod) != _from_montgomery_64(B, inv, mod); } static inline Montgomery64 in_mint_64(Montgomery64 r2, Montgomery64 inv, u64 mod) { u64 c, x = 0; while (c = getchar_unlocked(), c < 48 || c > 57); while (47 < c && c < 58) { x = x * 10 + c - 48; c = getchar_unlocked(); } return _to_montgomery_64(x, r2, inv, mod); } static inline void out_mint_64(Montgomery64 A, Montgomery64 inv, u64 mod) { u64 a = _from_montgomery_64(A, inv, mod); if (a >= 10) outu(a / 10); putchar_unlocked(a - a / 10 * 10 + 48); } bool miller_rabin32(u32 n, u32 d, const u32 *bases, u32 bases_len, Montgomery r2, Montgomery inv, Montgomery one, Montgomery rev) { for (u32 i = 0; i < bases_len; i++) { if (n <= bases[i]) break; Montgomery a = _to_montgomery(bases[i], r2, inv, n); u32 t = d; Montgomery y = pow_MR(a, t, inv, n); while (t != n - 1 && y != one && y != rev) { y = mul_MR(y, y, inv, n); t <<= 1; } if (y != rev && (!(t & 1))) return false; } return true; } bool miller_rabin64(u64 n, u64 d, const u64 *bases, u64 bases_len, Montgomery64 r2, Montgomery64 inv, Montgomery64 one, Montgomery64 rev) { for (u64 i = 0; i < bases_len; i++) { if (n <= bases[i]) break; Montgomery64 a = _to_montgomery_64(bases[i], r2, inv, n); u64 t = d; Montgomery64 y = pow_MR_64(a, t, inv, n); while (t != n - 1 && y != one && y != rev) { y = mul_MR_64(y, y, inv, n); t <<= 1; } if (y != rev && (!(t & 1))) return false; } return true; } bool is_prime32(u32 n) { u32 m = n - 1; Montgomery r2 = _r2(n); Montgomery inv = _inv(n); Montgomery one = _one(n); Montgomery rev = _to_montgomery(m, r2, inv, n); u32 d = m >> CTZ(m); const u32 bases1[] = { 2u }; const u32 bases2[] = { 2u, 3u }; const u32 bases3[] = { 2u, 7u, 61u }; if (n < 2047u) { return miller_rabin32(n, d, bases1, 1u, r2, inv, one, rev); } if (n < 1373653u) { return miller_rabin32(n, d, bases2, 2u, r2, inv, one, rev); } return miller_rabin32(n, d, bases3, 3u, r2, inv, one, rev); } bool is_prime64(u64 n) { u64 m = n - 1; Montgomery64 r2 = _r2_64(n); Montgomery64 inv = _inv_64(n); Montgomery64 one = _one_64(n); Montgomery64 rev = _to_montgomery_64(m, r2, inv, n); u64 d = m >> CTZ(m); const u64 bases4[] = { 2ull, 13ull, 23ull, 1662803ull }; const u64 bases5[] = { 2ull, 3ull, 5ull, 7ull, 11ull }; const u64 bases6[] = { 2ull, 3ull, 5ull, 7ull, 11ull, 13ull }; const u64 bases7[] = { 2ull, 3ull, 5ull, 7ull, 11ull, 13ull, 17ull }; const u64 bases8[] = { 2ull, 325ull, 9375ull, 28178ull, 450775ull, 9780504ull, 1795265022ull }; if (n < 1122004669633ull) { return miller_rabin64(n, d, bases4, 4u, r2, inv, one, rev); } if (n < 2152302898747ull) { return miller_rabin64(n, d, bases5, 5u, r2, inv, one, rev); } if (n < 3474749660383ull) { return miller_rabin64(n, d, bases6, 6u, r2, inv, one, rev); } if (n < 341550071728321ull) { return miller_rabin64(n, d, bases7, 7u, r2, inv, one, rev); } return miller_rabin64(n, d, bases8, 7u, r2, inv, one, rev); } bool is_prime(u64 n) { if (n <= 3u) return n == 2u || n == 3u; if (!(n & 1)) return false; if (n < ((u32)1u << 31)) { return is_prime32((u32)n); } return is_prime64(n); } void Main(void) { FastInt Q = read_int(); while (Q--) { u64 x = inu(); outu(x); SP(); outu(is_prime(x)); NL(); } } int main(void) { Main(); return 0; }