Result
| Problem | No.1241 Eternal Tours |
| Contest | |
| User | |
| Submission time | 2020-09-25 23:02:22 |
| Language | C++17 (gcc 13.3.0 + boost 1.87.0) |
| Result | AC |
| Execution time | 399 ms / 6,000 ms |
| Code length | 37,388 bytes |
| Compile time | 3,161 ms |
| Compile memory | 320,620 KB |
| Last judged | 2025-01-14 21:26:58 |
| Judge server ID (reference) | judge3 / judge4 |
| File pattern | Result |
|---|---|
| sample | AC * 4 |
| other | AC * 40 |
Compile messages
main.cpp:468:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
468 | montgomery_sub_256(const __m256i &a, const __m256i &b, const __m256i &m2,
| ^~~~~~~~~~~~~~~~~~
main.cpp:460:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
460 | montgomery_add_256(const __m256i &a, const __m256i &b, const __m256i &m2,
| ^~~~~~~~~~~~~~~~~~
main.cpp:452:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
452 | montgomery_mul_256(const __m256i &a, const __m256i &b, const __m256i &r,
| ^~~~~~~~~~~~~~~~~~
main.cpp:441:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
441 | my256_mulhi_epu32(const __m256i &a, const __m256i &b) {
| ^~~~~~~~~~~~~~~~~
main.cpp:436:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
436 | my256_mullo_epu32(const __m256i &a, const __m256i &b) {
| ^~~~~~~~~~~~~~~~~
main.cpp:429:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
429 | montgomery_sub_128(const __m128i &a, const __m128i &b, const __m128i &m2,
| ^~~~~~~~~~~~~~~~~~
main.cpp:422:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
422 | montgomery_add_128(const __m128i &a, const __m128i &b, const __m128i &m2,
| ^~~~~~~~~~~~~~~~~~
main.cpp:414:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
414 | montgomery_mul_128(const __m128i &a, const __m128i &b, const __m128i &r,
| ^~~~~~~~~~~~~~~~~~
main.cpp:403:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
403 | my128_mulhi_epu32(const __m128i &a, const __m128i &b) {
| ^~~~~~~~~~~~~~~~~
main.cpp:398:1: warning: ‘always_inline’ function might not be inlinable [-Wattributes]
398 | my128_mullo_epu32(const __m128i &a, const __m128i &b) {
| ^~~~~~~~~~~~~~~~~
Source code
#pragma region kyopro_template
#define Nyaan_template
#include <immintrin.h>
#include <bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define fi first
#define se second
#define each(x, v) for (auto &x : v)
#define all(v) (v).begin(), (v).end()
#define sz(v) ((int)(v).size())
#define mem(a, val) memset(a, val, sizeof(a))
#define ini(...) \
int __VA_ARGS__; \
in(__VA_ARGS__)
#define inl(...) \
long long __VA_ARGS__; \
in(__VA_ARGS__)
#define ins(...) \
string __VA_ARGS__; \
in(__VA_ARGS__)
#define inc(...) \
char __VA_ARGS__; \
in(__VA_ARGS__)
#define in2(s, t) \
for (int i = 0; i < (int)s.size(); i++) { \
in(s[i], t[i]); \
}
#define in3(s, t, u) \
for (int i = 0; i < (int)s.size(); i++) { \
in(s[i], t[i], u[i]); \
}
#define in4(s, t, u, v) \
for (int i = 0; i < (int)s.size(); i++) { \
in(s[i], t[i], u[i], v[i]); \
}
#define rep(i, N) for (long long i = 0; i < (long long)(N); i++)
#define repr(i, N) for (long long i = (long long)(N)-1; i >= 0; i--)
#define rep1(i, N) for (long long i = 1; i <= (long long)(N); i++)
#define repr1(i, N) for (long long i = (N); (long long)(i) > 0; i--)
#define reg(i, a, b) for (long long i = (a); i < (b); i++)
#define die(...) \
do { \
out(__VA_ARGS__); \
return; \
} while (0)
using namespace std;
using ll = long long;
template <class T>
using V = vector<T>;
using vi = vector<int>;
using vl = vector<long long>;
using vvi = vector<vector<int>>;
using vd = V<double>;
using vs = V<string>;
using vvl = vector<vector<long long>>;
using P = pair<long long, long long>;
using vp = vector<P>;
using pii = pair<int, int>;
using vpi = vector<pair<int, int>>;
constexpr int inf = 1001001001;
constexpr long long infLL = (1LL << 61) - 1;
template <typename T, typename U>
inline bool amin(T &x, U y) {
return (y < x) ? (x = y, true) : false;
}
template <typename T, typename U>
inline bool amax(T &x, U y) {
return (x < y) ? (x = y, true) : false;
}
template <typename T, typename U>
ostream &operator<<(ostream &os, const pair<T, U> &p) {
os << p.first << " " << p.second;
return os;
}
template <typename T, typename U>
istream &operator>>(istream &is, pair<T, U> &p) {
is >> p.first >> p.second;
return is;
}
template <typename T>
ostream &operator<<(ostream &os, const vector<T> &v) {
int s = (int)v.size();
for (int i = 0; i < s; i++) os << (i ? " " : "") << v[i];
return os;
}
template <typename T>
istream &operator>>(istream &is, vector<T> &v) {
for (auto &x : v) is >> x;
return is;
}
void in() {}
template <typename T, class... U>
void in(T &t, U &... u) {
cin >> t;
in(u...);
}
void out() { cout << "\n"; }
template <typename T, class... U>
void out(const T &t, const U &... u) {
cout << t;
if (sizeof...(u)) cout << " ";
out(u...);
}
#ifdef NyaanDebug
#define trc(...) \
do { \
cerr << #__VA_ARGS__ << " = "; \
dbg_out(__VA_ARGS__); \
} while (0)
#define trca(v, N) \
do { \
cerr << #v << " = "; \
array_out(v, N); \
} while (0)
#define trcc(v) \
do { \
cerr << #v << " = {"; \
each(x, v) { cerr << " " << x << ","; } \
cerr << "}" << endl; \
} while (0)
template <typename T>
void _cout(const T &c) {
cerr << c;
}
void _cout(const int &c) {
if (c == 1001001001)
cerr << "inf";
else if (c == -1001001001)
cerr << "-inf";
else
cerr << c;
}
void _cout(const unsigned int &c) {
if (c == 1001001001)
cerr << "inf";
else
cerr << c;
}
void _cout(const long long &c) {
if (c == 1001001001 || c == (1LL << 61) - 1)
cerr << "inf";
else if (c == -1001001001 || c == -((1LL << 61) - 1))
cerr << "-inf";
else
cerr << c;
}
void _cout(const unsigned long long &c) {
if (c == 1001001001 || c == (1LL << 61) - 1)
cerr << "inf";
else
cerr << c;
}
template <typename T, typename U>
void _cout(const pair<T, U> &p) {
cerr << "{ ";
_cout(p.fi);
cerr << ", ";
_cout(p.se);
cerr << " } ";
}
template <typename T>
void _cout(const vector<T> &v) {
int s = v.size();
cerr << "{ ";
for (int i = 0; i < s; i++) {
cerr << (i ? ", " : "");
_cout(v[i]);
}
cerr << " } ";
}
template <typename T>
void _cout(const vector<vector<T>> &v) {
cerr << "[ ";
for (const auto &x : v) {
cerr << endl;
_cout(x);
cerr << ", ";
}
cerr << endl << " ] ";
}
void dbg_out() { cerr << endl; }
template <typename T, class... U>
void dbg_out(const T &t, const U &... u) {
_cout(t);
if (sizeof...(u)) cerr << ", ";
dbg_out(u...);
}
template <typename T>
void array_out(const T &v, int s) {
cerr << "{ ";
for (int i = 0; i < s; i++) {
cerr << (i ? ", " : "");
_cout(v[i]);
}
cerr << " } " << endl;
}
template <typename T>
void array_out(const T &v, int H, int W) {
cerr << "[ ";
for (int i = 0; i < H; i++) {
cerr << (i ? ", " : "");
array_out(v[i], W);
}
cerr << " ] " << endl;
}
#else
#define trc(...)
#define trca(...)
#define trcc(...)
#endif
inline int popcnt(unsigned long long a) { return __builtin_popcountll(a); }
inline int lsb(unsigned long long a) { return __builtin_ctzll(a); }
inline int msb(unsigned long long a) { return 63 - __builtin_clzll(a); }
template <typename T>
inline int getbit(T a, int i) {
return (a >> i) & 1;
}
template <typename T>
inline void setbit(T &a, int i) {
a |= (1LL << i);
}
template <typename T>
inline void delbit(T &a, int i) {
a &= ~(1LL << i);
}
template <typename T>
int lb(const vector<T> &v, const T &a) {
return lower_bound(begin(v), end(v), a) - begin(v);
}
template <typename T>
int ub(const vector<T> &v, const T &a) {
return upper_bound(begin(v), end(v), a) - begin(v);
}
template <typename T>
int btw(T a, T x, T b) {
return a <= x && x < b;
}
template <typename T, typename U>
T ceil(T a, U b) {
return (a + b - 1) / b;
}
constexpr long long TEN(int n) {
long long ret = 1, x = 10;
while (n) {
if (n & 1) ret *= x;
x *= x;
n >>= 1;
}
return ret;
}
template <typename T>
vector<T> mkrui(const vector<T> &v) {
vector<T> ret(v.size() + 1);
for (int i = 0; i < int(v.size()); i++) ret[i + 1] = ret[i] + v[i];
return ret;
};
template <typename T>
vector<T> mkuni(const vector<T> &v) {
vector<T> ret(v);
sort(ret.begin(), ret.end());
ret.erase(unique(ret.begin(), ret.end()), ret.end());
return ret;
}
template <typename F>
vector<int> mkord(int N, F f) {
vector<int> ord(N);
iota(begin(ord), end(ord), 0);
sort(begin(ord), end(ord), f);
return ord;
}
template <typename T = int>
vector<T> mkiota(int N) {
vector<T> ret(N);
iota(begin(ret), end(ret), 0);
return ret;
}
template <typename T>
vector<int> mkinv(vector<T> &v) {
vector<int> inv(v.size());
for (int i = 0; i < (int)v.size(); i++) inv[v[i]] = i;
return inv;
}
struct IoSetupNya {
IoSetupNya() {
cin.tie(nullptr);
ios::sync_with_stdio(false);
cout << fixed << setprecision(15);
cerr << fixed << setprecision(7);
}
} iosetupnya;
void solve();
int main() { solve(); }
#pragma endregion
using namespace std;
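// Modular integer kept in Montgomery form: the stored value `a` lives in
// [0, 2*mod), and reduce() folds a 64-bit product back into that range.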
template <uint32_t mod>
struct LazyMontgomeryModInt {
using mint = LazyMontgomeryModInt;
using i32 = int32_t;
using u32 = uint32_t;
using u64 = uint64_t;
static constexpr u32 get_r() {
u32 ret = mod;
for (i32 i = 0; i < 4; ++i) ret *= 2 - mod * ret;
return ret;
}
static constexpr u32 r = get_r();
static constexpr u32 n2 = -u64(mod) % mod;
static_assert(r * mod == 1, "invalid, r * mod != 1");
static_assert(mod < (1 << 30), "invalid, mod >= 2 ^ 30");
static_assert((mod & 1) == 1, "invalid, mod % 2 == 0");
u32 a;
constexpr LazyMontgomeryModInt() : a(0) {}
constexpr LazyMontgomeryModInt(const int64_t &b)
: a(reduce(u64(b % mod + mod) * n2)){};
static constexpr u32 reduce(const u64 &b) {
return (b + u64(u32(b) * u32(-r)) * mod) >> 32;
}
constexpr mint &operator+=(const mint &b) {
if (i32(a += b.a - 2 * mod) < 0) a += 2 * mod;
return *this;
}
constexpr mint &operator-=(const mint &b) {
if (i32(a -= b.a) < 0) a += 2 * mod;
return *this;
}
constexpr mint &operator*=(const mint &b) {
a = reduce(u64(a) * b.a);
return *this;
}
constexpr mint &operator/=(const mint &b) {
*this *= b.inverse();
return *this;
}
constexpr mint operator+(const mint &b) const { return mint(*this) += b; }
constexpr mint operator-(const mint &b) const { return mint(*this) -= b; }
constexpr mint operator*(const mint &b) const { return mint(*this) *= b; }
constexpr mint operator/(const mint &b) const { return mint(*this) /= b; }
constexpr bool operator==(const mint &b) const {
return (a >= mod ? a - mod : a) == (b.a >= mod ? b.a - mod : b.a);
}
constexpr bool operator!=(const mint &b) const {
return (a >= mod ? a - mod : a) != (b.a >= mod ? b.a - mod : b.a);
}
constexpr mint operator-() const { return mint() - mint(*this); }
constexpr mint pow(u64 n) const {
mint ret(1), mul(*this);
while (n > 0) {
if (n & 1) ret *= mul;
mul *= mul;
n >>= 1;
}
return ret;
}
constexpr mint inverse() const { return pow(mod - 2); }
friend ostream &operator<<(ostream &os, const mint &b) {
return os << b.get();
}
friend istream &operator>>(istream &is, mint &b) {
int64_t t;
is >> t;
b = LazyMontgomeryModInt<mod>(t);
return (is);
}
constexpr u32 get() const {
u32 ret = reduce(a);
return ret >= mod ? ret - mod : ret;
}
static constexpr u32 get_mod() { return mod; }
};
using namespace std;
using namespace std;
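// SSE4.2 / AVX2 helpers: Montgomery multiplication, addition and subtraction on
// 4 (resp. 8) packed 32-bit residues at a time, used by the vectorized NTT below.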
__attribute__((target("sse4.2"))) __attribute__((always_inline)) __m128i
my128_mullo_epu32(const __m128i &a, const __m128i &b) {
return _mm_mullo_epi32(a, b);
}
__attribute__((target("sse4.2"))) __attribute__((always_inline)) __m128i
my128_mulhi_epu32(const __m128i &a, const __m128i &b) {
__m128i a13 = _mm_shuffle_epi32(a, 0xF5);
__m128i b13 = _mm_shuffle_epi32(b, 0xF5);
__m128i prod02 = _mm_mul_epu32(a, b);
__m128i prod13 = _mm_mul_epu32(a13, b13);
__m128i prod = _mm_unpackhi_epi64(_mm_unpacklo_epi32(prod02, prod13),
_mm_unpackhi_epi32(prod02, prod13));
return prod;
}
__attribute__((target("sse4.2"))) __attribute__((always_inline)) __m128i
montgomery_mul_128(const __m128i &a, const __m128i &b, const __m128i &r,
const __m128i &m1) {
return _mm_sub_epi32(
_mm_add_epi32(my128_mulhi_epu32(a, b), m1),
my128_mulhi_epu32(my128_mullo_epu32(my128_mullo_epu32(a, b), r), m1));
}
__attribute__((target("sse4.2"))) __attribute__((always_inline)) __m128i
montgomery_add_128(const __m128i &a, const __m128i &b, const __m128i &m2,
const __m128i &m0) {
__m128i ret = _mm_sub_epi32(_mm_add_epi32(a, b), m2);
return _mm_add_epi32(_mm_and_si128(_mm_cmpgt_epi32(m0, ret), m2), ret);
}
__attribute__((target("sse4.2"))) __attribute__((always_inline)) __m128i
montgomery_sub_128(const __m128i &a, const __m128i &b, const __m128i &m2,
const __m128i &m0) {
__m128i ret = _mm_sub_epi32(a, b);
return _mm_add_epi32(_mm_and_si128(_mm_cmpgt_epi32(m0, ret), m2), ret);
}
__attribute__((target("avx2"))) __attribute__((always_inline)) __m256i
my256_mullo_epu32(const __m256i &a, const __m256i &b) {
return _mm256_mullo_epi32(a, b);
}
__attribute__((target("avx2"))) __attribute__((always_inline)) __m256i
my256_mulhi_epu32(const __m256i &a, const __m256i &b) {
__m256i a13 = _mm256_shuffle_epi32(a, 0xF5);
__m256i b13 = _mm256_shuffle_epi32(b, 0xF5);
__m256i prod02 = _mm256_mul_epu32(a, b);
__m256i prod13 = _mm256_mul_epu32(a13, b13);
__m256i prod = _mm256_unpackhi_epi64(_mm256_unpacklo_epi32(prod02, prod13),
_mm256_unpackhi_epi32(prod02, prod13));
return prod;
}
__attribute__((target("avx2"))) __attribute__((always_inline)) __m256i
montgomery_mul_256(const __m256i &a, const __m256i &b, const __m256i &r,
const __m256i &m1) {
return _mm256_sub_epi32(
_mm256_add_epi32(my256_mulhi_epu32(a, b), m1),
my256_mulhi_epu32(my256_mullo_epu32(my256_mullo_epu32(a, b), r), m1));
}
__attribute__((target("avx2"))) __attribute__((always_inline)) __m256i
montgomery_add_256(const __m256i &a, const __m256i &b, const __m256i &m2,
const __m256i &m0) {
__m256i ret = _mm256_sub_epi32(_mm256_add_epi32(a, b), m2);
return _mm256_add_epi32(_mm256_and_si256(_mm256_cmpgt_epi32(m0, ret), m2),
ret);
}
__attribute__((target("avx2"))) __attribute__((always_inline)) __m256i
montgomery_sub_256(const __m256i &a, const __m256i &b, const __m256i &m2,
const __m256i &m0) {
__m256i ret = _mm256_sub_epi32(a, b);
return _mm256_add_epi32(_mm256_and_si256(_mm256_cmpgt_epi32(m0, ret), m2),
ret);
}
constexpr int SZ = 1 << 19;
uint32_t buf1_[SZ * 2] __attribute__((aligned(64)));
uint32_t buf2_[SZ * 2] __attribute__((aligned(64)));
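// NTT for a Montgomery modint, working over the two static aligned buffers above,
// with scalar, SSE and AVX2 butterfly paths.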
template <typename mint>
struct NTT {
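// Smallest primitive root of mod, found by testing candidates against every
// prime factor of mod - 1.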
static constexpr uint32_t get_pr() {
uint32_t mod = mint::get_mod();
using u64 = uint64_t;
u64 ds[32] = {};
int idx = 0;
u64 m = mod - 1;
for (u64 i = 2; i * i <= m; ++i) {
if (m % i == 0) {
ds[idx++] = i;
while (m % i == 0) m /= i;
}
}
if (m != 1) ds[idx++] = m;
uint32_t pr = 2;
while (1) {
int flg = 1;
for (int i = 0; i < idx; ++i) {
u64 a = pr, b = (mod - 1) / ds[i], r = 1;
while (b) {
if (b & 1) r = r * a % mod;
a = a * a % mod;
b >>= 1;
}
if (r == 1) {
flg = 0;
break;
}
}
if (flg == 1) break;
++pr;
}
return pr;
};
static constexpr uint32_t mod = mint::get_mod();
static constexpr uint32_t pr = get_pr();
static constexpr int level = __builtin_ctzll(mod - 1);
mint dw[level], dy[level];
mint *buf1, *buf2;
constexpr NTT() {
setwy(level);
buf1 = reinterpret_cast<mint *>(::buf1_);
buf2 = reinterpret_cast<mint *>(::buf2_);
}
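// Precompute the incremental twiddle tables dw (forward) and dy (inverse) from a
// primitive 2^k-th root of unity.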
constexpr void setwy(int k) {
mint w[level], y[level];
w[k - 1] = mint(pr).pow((mod - 1) / (1 << k));
y[k - 1] = w[k - 1].inverse();
for (int i = k - 2; i > 0; --i)
w[i] = w[i + 1] * w[i + 1], y[i] = y[i + 1] * y[i + 1];
dw[0] = dy[0] = w[1] * w[1];
dw[1] = w[1], dy[1] = y[1], dw[2] = w[2], dy[2] = y[2];
for (int i = 3; i < k; ++i) {
dw[i] = dw[i - 1] * y[i - 2] * w[i];
dy[i] = dy[i - 1] * w[i - 2] * y[i];
}
}
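// In-place forward transform of length n = 2^k: one radix-2 layer when k is odd,
// then radix-4 butterflies; the code path (scalar / SSE / AVX2) is chosen by the
// butterfly width v.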
__attribute__((target("avx2"))) void ntt(mint *a, int n) {
int k = n ? __builtin_ctz(n) : 0;
if (k == 0) return;
if (k == 1) {
mint a1 = a[1];
a[1] = a[0] - a[1];
a[0] = a[0] + a1;
return;
}
if (k & 1) {
int v = 1 << (k - 1);
if (v < 8) {
for (int j = 0; j < v; ++j) {
mint ajv = a[j + v];
a[j + v] = a[j] - ajv;
a[j] += ajv;
}
} else {
const __m256i m0 = _mm256_set1_epi32(0);
const __m256i m2 = _mm256_set1_epi32(mod + mod);
int j0 = 0;
int j1 = v;
for (; j0 < v; j0 += 8, j1 += 8) {
__m256i T0 = _mm256_loadu_si256((__m256i *)(a + j0));
__m256i T1 = _mm256_loadu_si256((__m256i *)(a + j1));
__m256i naj = montgomery_add_256(T0, T1, m2, m0);
__m256i najv = montgomery_sub_256(T0, T1, m2, m0);
_mm256_storeu_si256((__m256i *)(a + j0), naj);
_mm256_storeu_si256((__m256i *)(a + j1), najv);
}
}
}
int u = 1 << (2 + (k & 1));
int v = 1 << (k - 2 - (k & 1));
mint one = mint(1);
mint imag = dw[1];
while (v) {
if (v == 1) {
mint ww = one, xx = one, wx = one;
for (int jh = 0; jh < u;) {
ww = xx * xx, wx = ww * xx;
mint t0 = a[jh + 0], t1 = a[jh + 1] * xx;
mint t2 = a[jh + 2] * ww, t3 = a[jh + 3] * wx;
mint t0p2 = t0 + t2, t1p3 = t1 + t3;
mint t0m2 = t0 - t2, t1m3 = (t1 - t3) * imag;
a[jh + 0] = t0p2 + t1p3, a[jh + 1] = t0p2 - t1p3;
a[jh + 2] = t0m2 + t1m3, a[jh + 3] = t0m2 - t1m3;
xx *= dw[__builtin_ctz((jh += 4))];
}
} else if (v == 4) {
const __m128i m0 = _mm_set1_epi32(0);
const __m128i m1 = _mm_set1_epi32(mod);
const __m128i m2 = _mm_set1_epi32(mod + mod);
const __m128i r = _mm_set1_epi32(mint::r);
const __m128i Imag = _mm_set1_epi32(imag.a);
mint ww = one, xx = one, wx = one;
for (int jh = 0; jh < u;) {
if (jh == 0) {
int j0 = 0;
int j1 = v;
int j2 = j1 + v;
int j3 = j2 + v;
int je = v;
for (; j0 < je; j0 += 4, j1 += 4, j2 += 4, j3 += 4) {
const __m128i T0 = _mm_loadu_si128((__m128i *)(a + j0));
const __m128i T1 = _mm_loadu_si128((__m128i *)(a + j1));
const __m128i T2 = _mm_loadu_si128((__m128i *)(a + j2));
const __m128i T3 = _mm_loadu_si128((__m128i *)(a + j3));
const __m128i T0P2 = montgomery_add_128(T0, T2, m2, m0);
const __m128i T1P3 = montgomery_add_128(T1, T3, m2, m0);
const __m128i T0M2 = montgomery_sub_128(T0, T2, m2, m0);
const __m128i T1M3 = montgomery_mul_128(
montgomery_sub_128(T1, T3, m2, m0), Imag, r, m1);
_mm_storeu_si128((__m128i *)(a + j0),
montgomery_add_128(T0P2, T1P3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j1),
montgomery_sub_128(T0P2, T1P3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j2),
montgomery_add_128(T0M2, T1M3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j3),
montgomery_sub_128(T0M2, T1M3, m2, m0));
}
} else {
ww = xx * xx, wx = ww * xx;
const __m128i WW = _mm_set1_epi32(ww.a);
const __m128i WX = _mm_set1_epi32(wx.a);
const __m128i XX = _mm_set1_epi32(xx.a);
int j0 = jh * v;
int j1 = j0 + v;
int j2 = j1 + v;
int j3 = j2 + v;
int je = j1;
for (; j0 < je; j0 += 4, j1 += 4, j2 += 4, j3 += 4) {
const __m128i T0 = _mm_loadu_si128((__m128i *)(a + j0));
const __m128i T1 = _mm_loadu_si128((__m128i *)(a + j1));
const __m128i T2 = _mm_loadu_si128((__m128i *)(a + j2));
const __m128i T3 = _mm_loadu_si128((__m128i *)(a + j3));
const __m128i MT1 = montgomery_mul_128(T1, XX, r, m1);
const __m128i MT2 = montgomery_mul_128(T2, WW, r, m1);
const __m128i MT3 = montgomery_mul_128(T3, WX, r, m1);
const __m128i T0P2 = montgomery_add_128(T0, MT2, m2, m0);
const __m128i T1P3 = montgomery_add_128(MT1, MT3, m2, m0);
const __m128i T0M2 = montgomery_sub_128(T0, MT2, m2, m0);
const __m128i T1M3 = montgomery_mul_128(
montgomery_sub_128(MT1, MT3, m2, m0), Imag, r, m1);
_mm_storeu_si128((__m128i *)(a + j0),
montgomery_add_128(T0P2, T1P3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j1),
montgomery_sub_128(T0P2, T1P3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j2),
montgomery_add_128(T0M2, T1M3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j3),
montgomery_sub_128(T0M2, T1M3, m2, m0));
}
}
xx *= dw[__builtin_ctz((jh += 4))];
}
} else {
const __m256i m0 = _mm256_set1_epi32(0);
const __m256i m1 = _mm256_set1_epi32(mod);
const __m256i m2 = _mm256_set1_epi32(mod + mod);
const __m256i r = _mm256_set1_epi32(mint::r);
const __m256i Imag = _mm256_set1_epi32(imag.a);
mint ww = one, xx = one, wx = one;
for (int jh = 0; jh < u;) {
if (jh == 0) {
int j0 = 0;
int j1 = v;
int j2 = j1 + v;
int j3 = j2 + v;
int je = v;
for (; j0 < je; j0 += 8, j1 += 8, j2 += 8, j3 += 8) {
const __m256i T0 = _mm256_loadu_si256((__m256i *)(a + j0));
const __m256i T1 = _mm256_loadu_si256((__m256i *)(a + j1));
const __m256i T2 = _mm256_loadu_si256((__m256i *)(a + j2));
const __m256i T3 = _mm256_loadu_si256((__m256i *)(a + j3));
const __m256i T0P2 = montgomery_add_256(T0, T2, m2, m0);
const __m256i T1P3 = montgomery_add_256(T1, T3, m2, m0);
const __m256i T0M2 = montgomery_sub_256(T0, T2, m2, m0);
const __m256i T1M3 = montgomery_mul_256(
montgomery_sub_256(T1, T3, m2, m0), Imag, r, m1);
_mm256_storeu_si256((__m256i *)(a + j0),
montgomery_add_256(T0P2, T1P3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j1),
montgomery_sub_256(T0P2, T1P3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j2),
montgomery_add_256(T0M2, T1M3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j3),
montgomery_sub_256(T0M2, T1M3, m2, m0));
}
} else {
ww = xx * xx, wx = ww * xx;
const __m256i WW = _mm256_set1_epi32(ww.a);
const __m256i WX = _mm256_set1_epi32(wx.a);
const __m256i XX = _mm256_set1_epi32(xx.a);
int j0 = jh * v;
int j1 = j0 + v;
int j2 = j1 + v;
int j3 = j2 + v;
int je = j1;
for (; j0 < je; j0 += 8, j1 += 8, j2 += 8, j3 += 8) {
const __m256i T0 = _mm256_loadu_si256((__m256i *)(a + j0));
const __m256i T1 = _mm256_loadu_si256((__m256i *)(a + j1));
const __m256i T2 = _mm256_loadu_si256((__m256i *)(a + j2));
const __m256i T3 = _mm256_loadu_si256((__m256i *)(a + j3));
const __m256i MT1 = montgomery_mul_256(T1, XX, r, m1);
const __m256i MT2 = montgomery_mul_256(T2, WW, r, m1);
const __m256i MT3 = montgomery_mul_256(T3, WX, r, m1);
const __m256i T0P2 = montgomery_add_256(T0, MT2, m2, m0);
const __m256i T1P3 = montgomery_add_256(MT1, MT3, m2, m0);
const __m256i T0M2 = montgomery_sub_256(T0, MT2, m2, m0);
const __m256i T1M3 = montgomery_mul_256(
montgomery_sub_256(MT1, MT3, m2, m0), Imag, r, m1);
_mm256_storeu_si256((__m256i *)(a + j0),
montgomery_add_256(T0P2, T1P3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j1),
montgomery_sub_256(T0P2, T1P3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j2),
montgomery_add_256(T0M2, T1M3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j3),
montgomery_sub_256(T0M2, T1M3, m2, m0));
}
}
xx *= dw[__builtin_ctz((jh += 4))];
}
}
u <<= 2;
v >>= 2;
}
}
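// Inverse transform: radix-4 butterflies with the inverse twiddles dy, a final
// radix-2 layer when k is odd, and optional division by n when `normalize` is set.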
__attribute__((target("avx2"))) void intt(mint *a, int n,
int normalize = true) {
int k = n ? __builtin_ctz(n) : 0;
if (k == 0) return;
if (k == 1) {
mint a1 = a[1];
a[1] = a[0] - a[1];
a[0] = a[0] + a1;
if (normalize) {
a[0] *= mint(2).inverse();
a[1] *= mint(2).inverse();
}
return;
}
int u = 1 << (k - 2);
int v = 1;
mint one = mint(1);
mint imag = dy[1];
while (u) {
if (v == 1) {
mint ww = one, xx = one, yy = one;
u <<= 2;
for (int jh = 0; jh < u;) {
ww = xx * xx, yy = xx * imag;
mint t0 = a[jh + 0], t1 = a[jh + 1];
mint t2 = a[jh + 2], t3 = a[jh + 3];
mint t0p1 = t0 + t1, t2p3 = t2 + t3;
mint t0m1 = (t0 - t1) * xx, t2m3 = (t2 - t3) * yy;
a[jh + 0] = t0p1 + t2p3, a[jh + 2] = (t0p1 - t2p3) * ww;
a[jh + 1] = t0m1 + t2m3, a[jh + 3] = (t0m1 - t2m3) * ww;
xx *= dy[__builtin_ctz(jh += 4)];
}
} else if (v == 4) {
const __m128i m0 = _mm_set1_epi32(0);
const __m128i m1 = _mm_set1_epi32(mod);
const __m128i m2 = _mm_set1_epi32(mod + mod);
const __m128i r = _mm_set1_epi32(mint::r);
const __m128i Imag = _mm_set1_epi32(imag.a);
mint ww = one, xx = one, yy = one;
u <<= 2;
for (int jh = 0; jh < u;) {
if (jh == 0) {
int j0 = 0;
int j1 = v;
int j2 = v + v;
int j3 = j2 + v;
for (; j0 < v; j0 += 4, j1 += 4, j2 += 4, j3 += 4) {
const __m128i T0 = _mm_loadu_si128((__m128i *)(a + j0));
const __m128i T1 = _mm_loadu_si128((__m128i *)(a + j1));
const __m128i T2 = _mm_loadu_si128((__m128i *)(a + j2));
const __m128i T3 = _mm_loadu_si128((__m128i *)(a + j3));
const __m128i T0P1 = montgomery_add_128(T0, T1, m2, m0);
const __m128i T2P3 = montgomery_add_128(T2, T3, m2, m0);
const __m128i T0M1 = montgomery_sub_128(T0, T1, m2, m0);
const __m128i T2M3 = montgomery_mul_128(
montgomery_sub_128(T2, T3, m2, m0), Imag, r, m1);
_mm_storeu_si128((__m128i *)(a + j0),
montgomery_add_128(T0P1, T2P3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j2),
montgomery_sub_128(T0P1, T2P3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j1),
montgomery_add_128(T0M1, T2M3, m2, m0));
_mm_storeu_si128((__m128i *)(a + j3),
montgomery_sub_128(T0M1, T2M3, m2, m0));
}
} else {
ww = xx * xx, yy = xx * imag;
const __m128i WW = _mm_set1_epi32(ww.a);
const __m128i XX = _mm_set1_epi32(xx.a);
const __m128i YY = _mm_set1_epi32(yy.a);
int j0 = jh * v;
int j1 = j0 + v;
int j2 = j1 + v;
int j3 = j2 + v;
int je = j1;
for (; j0 < je; j0 += 4, j1 += 4, j2 += 4, j3 += 4) {
const __m128i T0 = _mm_loadu_si128((__m128i *)(a + j0));
const __m128i T1 = _mm_loadu_si128((__m128i *)(a + j1));
const __m128i T2 = _mm_loadu_si128((__m128i *)(a + j2));
const __m128i T3 = _mm_loadu_si128((__m128i *)(a + j3));
const __m128i T0P1 = montgomery_add_128(T0, T1, m2, m0);
const __m128i T2P3 = montgomery_add_128(T2, T3, m2, m0);
const __m128i T0M1 = montgomery_mul_128(
montgomery_sub_128(T0, T1, m2, m0), XX, r, m1);
__m128i T2M3 = montgomery_mul_128(
montgomery_sub_128(T2, T3, m2, m0), YY, r, m1);
_mm_storeu_si128((__m128i *)(a + j0),
montgomery_add_128(T0P1, T2P3, m2, m0));
_mm_storeu_si128(
(__m128i *)(a + j2),
montgomery_mul_128(montgomery_sub_128(T0P1, T2P3, m2, m0), WW,
r, m1));
_mm_storeu_si128((__m128i *)(a + j1),
montgomery_add_128(T0M1, T2M3, m2, m0));
_mm_storeu_si128(
(__m128i *)(a + j3),
montgomery_mul_128(montgomery_sub_128(T0M1, T2M3, m2, m0), WW,
r, m1));
}
}
xx *= dy[__builtin_ctz(jh += 4)];
}
} else {
const __m256i m0 = _mm256_set1_epi32(0);
const __m256i m1 = _mm256_set1_epi32(mod);
const __m256i m2 = _mm256_set1_epi32(mod + mod);
const __m256i r = _mm256_set1_epi32(mint::r);
const __m256i Imag = _mm256_set1_epi32(imag.a);
mint ww = one, xx = one, yy = one;
u <<= 2;
for (int jh = 0; jh < u;) {
if (jh == 0) {
int j0 = 0;
int j1 = v;
int j2 = v + v;
int j3 = j2 + v;
for (; j0 < v; j0 += 8, j1 += 8, j2 += 8, j3 += 8) {
const __m256i T0 = _mm256_loadu_si256((__m256i *)(a + j0));
const __m256i T1 = _mm256_loadu_si256((__m256i *)(a + j1));
const __m256i T2 = _mm256_loadu_si256((__m256i *)(a + j2));
const __m256i T3 = _mm256_loadu_si256((__m256i *)(a + j3));
const __m256i T0P1 = montgomery_add_256(T0, T1, m2, m0);
const __m256i T2P3 = montgomery_add_256(T2, T3, m2, m0);
const __m256i T0M1 = montgomery_sub_256(T0, T1, m2, m0);
const __m256i T2M3 = montgomery_mul_256(
montgomery_sub_256(T2, T3, m2, m0), Imag, r, m1);
_mm256_storeu_si256((__m256i *)(a + j0),
montgomery_add_256(T0P1, T2P3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j2),
montgomery_sub_256(T0P1, T2P3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j1),
montgomery_add_256(T0M1, T2M3, m2, m0));
_mm256_storeu_si256((__m256i *)(a + j3),
montgomery_sub_256(T0M1, T2M3, m2, m0));
}
} else {
ww = xx * xx, yy = xx * imag;
const __m256i WW = _mm256_set1_epi32(ww.a);
const __m256i XX = _mm256_set1_epi32(xx.a);
const __m256i YY = _mm256_set1_epi32(yy.a);
int j0 = jh * v;
int j1 = j0 + v;
int j2 = j1 + v;
int j3 = j2 + v;
int je = j1;
for (; j0 < je; j0 += 8, j1 += 8, j2 += 8, j3 += 8) {
const __m256i T0 = _mm256_loadu_si256((__m256i *)(a + j0));
const __m256i T1 = _mm256_loadu_si256((__m256i *)(a + j1));
const __m256i T2 = _mm256_loadu_si256((__m256i *)(a + j2));
const __m256i T3 = _mm256_loadu_si256((__m256i *)(a + j3));
const __m256i T0P1 = montgomery_add_256(T0, T1, m2, m0);
const __m256i T2P3 = montgomery_add_256(T2, T3, m2, m0);
const __m256i T0M1 = montgomery_mul_256(
montgomery_sub_256(T0, T1, m2, m0), XX, r, m1);
const __m256i T2M3 = montgomery_mul_256(
montgomery_sub_256(T2, T3, m2, m0), YY, r, m1);
_mm256_storeu_si256((__m256i *)(a + j0),
montgomery_add_256(T0P1, T2P3, m2, m0));
_mm256_storeu_si256(
(__m256i *)(a + j2),
montgomery_mul_256(montgomery_sub_256(T0P1, T2P3, m2, m0), WW,
r, m1));
_mm256_storeu_si256((__m256i *)(a + j1),
montgomery_add_256(T0M1, T2M3, m2, m0));
_mm256_storeu_si256(
(__m256i *)(a + j3),
montgomery_mul_256(montgomery_sub_256(T0M1, T2M3, m2, m0), WW,
r, m1));
}
}
xx *= dy[__builtin_ctz(jh += 4)];
}
}
u >>= 4;
v <<= 2;
}
if (k & 1) {
v = 1 << (k - 1);
if (v < 8) {
for (int j = 0; j < v; ++j) {
mint ajv = a[j] - a[j + v];
a[j] += a[j + v];
a[j + v] = ajv;
}
} else {
const __m256i m0 = _mm256_set1_epi32(0);
const __m256i m2 = _mm256_set1_epi32(mod + mod);
int j0 = 0;
int j1 = v;
for (; j0 < v; j0 += 8, j1 += 8) {
const __m256i T0 = _mm256_loadu_si256((__m256i *)(a + j0));
const __m256i T1 = _mm256_loadu_si256((__m256i *)(a + j1));
__m256i naj = montgomery_add_256(T0, T1, m2, m0);
__m256i najv = montgomery_sub_256(T0, T1, m2, m0);
_mm256_storeu_si256((__m256i *)(a + j0), naj);
_mm256_storeu_si256((__m256i *)(a + j1), najv);
}
}
}
if (normalize) {
mint invn = mint(n).inverse();
for (int i = 0; i < n; i++) a[i] *= invn;
}
}
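// Convolve the sequences already staged in buf1_ / buf2_ in place (transform both,
// multiply pointwise, inverse-transform); the product ends up in buf1_.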
__attribute__((target("avx2"))) void inplace_multiply(
int l1, int l2, int zero_padding = true) {
int l = l1 + l2 - 1;
int M = 4;
while (M < l) M <<= 1;
if (zero_padding) {
for (int i = l1; i < M; i++) buf1_[i] = 0;
for (int i = l2; i < M; i++) buf2_[i] = 0;
}
const __m256i m0 = _mm256_set1_epi32(0);
const __m256i m1 = _mm256_set1_epi32(mod);
const __m256i r = _mm256_set1_epi32(mint::r);
const __m256i N2 = _mm256_set1_epi32(mint::n2);
for (int i = 0; i < l1; i += 8) {
__m256i a = _mm256_loadu_si256((__m256i *)(buf1_ + i));
__m256i b = montgomery_mul_256(a, N2, r, m1);
_mm256_storeu_si256((__m256i *)(buf1_ + i), b);
}
for (int i = 0; i < l2; i += 8) {
__m256i a = _mm256_loadu_si256((__m256i *)(buf2_ + i));
__m256i b = montgomery_mul_256(a, N2, r, m1);
_mm256_storeu_si256((__m256i *)(buf2_ + i), b);
}
ntt(buf1, M);
ntt(buf2, M);
for (int i = 0; i < M; i += 8) {
__m256i a = _mm256_loadu_si256((__m256i *)(buf1_ + i));
__m256i b = _mm256_loadu_si256((__m256i *)(buf2_ + i));
__m256i c = montgomery_mul_256(a, b, r, m1);
_mm256_storeu_si256((__m256i *)(buf1_ + i), c);
}
intt(buf1, M, false);
const __m256i INVM = _mm256_set1_epi32((mint(M).inverse()).a);
for (int i = 0; i < l; i += 8) {
__m256i a = _mm256_loadu_si256((__m256i *)(buf1_ + i));
__m256i b = montgomery_mul_256(a, INVM, r, m1);
__m256i c = my256_mulhi_epu32(my256_mullo_epu32(b, r), m1);
__m256i d = _mm256_and_si256(_mm256_cmpgt_epi32(c, m0), m1);
__m256i e = _mm256_sub_epi32(d, c);
_mm256_storeu_si256((__m256i *)(buf1_ + i), e);
}
}
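// vector<mint> front ends: stage the data in buf1, transform, and copy back.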
void ntt(vector<mint> &a) {
int M = (int)a.size();
for (int i = 0; i < M; i++) buf1[i].a = a[i].a;
ntt(buf1, M);
for (int i = 0; i < M; i++) a[i].a = buf1[i].a;
}
void intt(vector<mint> &a) {
int M = (int)a.size();
for (int i = 0; i < M; i++) buf1[i].a = a[i].a;
intt(buf1, M, true);
for (int i = 0; i < M; i++) a[i].a = buf1[i].a;
}
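// Convolution of a and b: naive O(|a|*|b|) product when either input has at most
// 40 terms, otherwise NTT-based multiplication through the static buffers.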
vector<mint> multiply(const vector<mint> &a, const vector<mint> &b) {
if (a.size() == 0 && b.size() == 0) return vector<mint>{};
int l = a.size() + b.size() - 1;
if (min<int>(a.size(), b.size()) <= 40) {
vector<mint> s(l);
for (int i = 0; i < (int)a.size(); ++i)
for (int j = 0; j < (int)b.size(); ++j) s[i + j] += a[i] * b[j];
return s;
}
int M = 4;
while (M < l) M <<= 1;
for (int i = 0; i < (int)a.size(); ++i) buf1[i].a = a[i].a;
for (int i = (int)a.size(); i < M; ++i) buf1[i].a = 0;
for (int i = 0; i < (int)b.size(); ++i) buf2[i].a = b[i].a;
for (int i = (int)b.size(); i < M; ++i) buf2[i].a = 0;
ntt(buf1, M);
ntt(buf2, M);
for (int i = 0; i < M; ++i)
buf1[i].a = mint::reduce(uint64_t(buf1[i].a) * buf2[i].a);
intt(buf1, M, false);
vector<mint> s(l);
mint invm = mint(M).inverse();
for (int i = 0; i < l; ++i) s[i] = buf1[i] * invm;
return s;
}
void ntt_doubling(vector<mint> &a) {
int M = (int)a.size();
for (int i = 0; i < M; i++) buf1[i].a = a[i].a;
intt(buf1, M);
mint r = 1, zeta = mint(pr).pow((mint::get_mod() - 1) / (M << 1));
for (int i = 0; i < M; i++) buf1[i] *= r, r *= zeta;
ntt(buf1, M);
a.resize(2 * M);
for (int i = 0; i < M; i++) a[M + i].a = buf1[i].a;
}
};
using mint = LazyMontgomeryModInt<998244353>;
NTT<mint> ntt;
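// 2-D transforms: fft() applies the NTT to every row and then to every column of a;
// ifft() undoes both, so fft followed by ifft is the identity.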
void fft(V<V<mint>> &a) {
int N = sz(a);
int M = sz(a[0]);
rep(i, N) ntt.ntt(a[i]);
V<mint> b(N);
rep(j, M) {
rep(i, N) b[i] = a[i][j];
ntt.ntt(b);
rep(i, N) a[i][j] = b[i];
}
}
void ifft(V<V<mint>> &a) {
int N = sz(a);
int M = sz(a[0]);
V<mint> b(N);
rep(j, M) {
rep(i, N) b[i] = a[i][j];
ntt.intt(b);
rep(i, N) a[i][j] = b[i];
}
rep(i, N) ntt.intt(a[i]);
}
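// solve(): on a doubled (2N x 2M) torus, A is the one-step kernel (stay in place or
// move one cell in one of the four directions) and B marks the start cell (a, b).
// Both are transformed, A's spectrum is raised to the t-th power, and the pointwise
// product is inverted, so A[i][j] becomes the number of t-step walks from (a, b) to
// (i, j) on the torus. The last line combines the four mirror images of (c, d) with
// signs + - - +, which appears to be the reflection (method-of-images) trick for the
// original 2^x by 2^y grid.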
void solve() {
inl(x, y, t, a, b, c, d);
int N = 1 << x;
int M = 1 << y;
V<V<mint>> A(N * 2, V<mint>(M * 2, 0));
A[0][0] = A[1][0] = A[0][1] = A[2 * N - 1][0] = A[0][2 * M - 1] = 1;
V<V<mint>> B(N * 2, V<mint>(M * 2, 0));
B[a][b] = 1;
trc(A);
fft(A);
trc(A);
trc(B);
fft(B);
trc(B);
trc(t);
each(v, A) each(w, v) w = w.pow(t);
rep(i, 2 * N) rep(j, 2 * M) A[i][j] *= B[i][j];
trc(A);
ifft(A);
trc(A);
trc(A[c][d]);
trc(A[2 * N - c][d]);
trc(A[c][2 * M - d]);
trc(A[2 * N - c][2 * M - d]);
out(A[c][d] - A[2 * N - c][d] - A[c][2 * M - d] + A[2 * N - c][2 * M - d]);
}