Result

Problem: No.1754 T-block Tiling
User: moritaoy
Submitted at: 2021-11-20 13:32:41
Language: C++17 (gcc 12.3.0 + boost 1.83.0)
Result: AC
Execution time: 11 ms / 2,000 ms (limit)
Code length: 7,973 bytes
Compile time: 2,048 ms
Compile memory: 205,060 KB
Runtime memory: 15,124 KB
Last judged at: 2023-09-02 00:55:32
Total judge time: 2,500 ms
Judge server ID (reference): judge15 / judge12

Test cases

Input          Result   Time     Memory
testcase_00    AC       11 ms    15,088 KB
testcase_01    AC       11 ms    15,124 KB

Source code


#include<bits/stdc++.h>
#pragma GCC target("avx2")

#pragma GCC optimize("Ofast")

#pragma GCC optimize("O3")
#pragma GCC optimize("unroll-loops")
using namespace std;
#define lp(i,n) for(int i=0;i<(int)(n);i++)
#define lps(i,j,n) for(int i=j;i<n;i++)

#define fordebug int hoge;cin>>hoge;

#define lambda(RES_TYPE, ...) (function<RES_TYPE(__VA_ARGS__)>)[&](__VA_ARGS__) -> RES_TYPE
#define method(FUNC_NAME, RES_TYPE, ...) function<RES_TYPE(__VA_ARGS__)> FUNC_NAME = lambda(RES_TYPE, __VA_ARGS__)

#define DEKAI 1000000007
#define floot10 cout<<fixed<<setprecision(15);
#define all(v) v.begin(),v.end()
double PI = acos(-1);

namespace {
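  // Debug-output helpers: operator<< overloads for the common STL containers
  // (vector, deque, set, stack, queue, priority_queue, unordered_set, map,
  // unordered_map), plus pair, tuple, and array.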
#define __DECLARE__(C)						\
  template <typename T>						\
  std::ostream &operator<<(std::ostream &, const C<T> &);

#define __DECLAREM__(C)						\
  template <typename T, typename U>				\
  std::ostream &operator<<(std::ostream &, const C<T, U> &);

  __DECLARE__(std::vector)
  __DECLARE__(std::deque)
  __DECLARE__(std::set)
  __DECLARE__(std::stack)
  __DECLARE__(std::queue)
  __DECLARE__(std::priority_queue)
  __DECLARE__(std::unordered_set)
  __DECLAREM__(std::map)
  __DECLAREM__(std::unordered_map)

  template <typename T, typename U>
  std::ostream &operator<<(std::ostream &, const std::pair<T, U> &);
  template <typename... T>
  std::ostream &operator<<(std::ostream &, const std::tuple<T...> &);
  template <typename T, std::size_t N>
  std::ostream &operator<<(std::ostream &, const std::array<T, N> &);

  template <typename Tuple, std::size_t N>
  struct __TuplePrinter__ {
    static void print(std::ostream &os, const Tuple &t) {
      __TuplePrinter__<Tuple, N - 1>::print(os, t);
      os << ", " << std::get<N - 1>(t);
    }
  };

  template <typename Tuple>
  struct __TuplePrinter__<Tuple, 1> {
    static void print(std::ostream &os, const Tuple &t) { os << std::get<0>(t); }
  };

  template <typename... T>
  std::ostream &operator<<(std::ostream &os, const std::tuple<T...> &t) {
    os << '(';
    __TuplePrinter__<decltype(t), sizeof...(T)>::print(os, t);
    os << ')';
    return os;
  }

  template <typename T, typename U>
  std::ostream &operator<<(std::ostream &os, const std::pair<T, U> &v) {
    return os << '(' << v.first << ", " << v.second << ')';
  }

#define __INNER__                               \
  os << '[';                                    \
  for (auto it = begin(c); it != end(c);) {     \
    os << *it;                                  \
    os << (++it != end(c) ? ", " : "");         \
  }                                             \
  return os << ']';

  template <typename T, std::size_t N>
  std::ostream &operator<<(std::ostream &os, const std::array<T, N> &c) {
    __INNER__
  }

#define __DEFINE__(C)                                                  \
  template <typename T>                                                \
  std::ostream &operator<<(std::ostream &os, const C<T> &c) {          \
    __INNER__                                                          \
  }

#define __DEFINEM__(C)                                                 \
  template <typename T, typename U>                                    \
  std::ostream &operator<<(std::ostream &os, const C<T, U> &c) {       \
    __INNER__                                                          \
  }

#define __DEFINEW__(C, M1, M2)					\
  template <typename T>						\
  std::ostream &operator<<(std::ostream &os, const C<T> &c) {	\
    std::deque<T> v;						\
    for (auto d = c; !d.empty(); d.pop()) v.M1(d.M2());		\
    return os << v;						\
  }

  __DEFINE__(std::vector)
  __DEFINE__(std::deque)
  __DEFINE__(std::set)
  __DEFINEW__(std::stack, push_front, top)
  __DEFINEW__(std::queue, push_back, front)
  __DEFINEW__(std::priority_queue, push_front, top)
  __DEFINE__(std::unordered_set)
  __DEFINEM__(std::map)
  __DEFINEM__(std::unordered_map)
}

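// mod_int<M, T>: arithmetic modulo M with lazily prepared tables of size T
// for modular inverses, factorials, and inverse factorials.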
template <signed M, unsigned T>
struct mod_int {
    constexpr static signed MODULO = M;
    constexpr static unsigned TABLE_SIZE = T;

    signed x;

    mod_int() : x(0) {}

    // Normalize the value into [0, MODULO), including negative inputs that
    // are exact multiples of MODULO.
    mod_int(long long y) : x(static_cast<signed>((y % MODULO + MODULO) % MODULO)) {}

    mod_int(int y) : x(static_cast<signed>((1LL * y % MODULO + MODULO) % MODULO)) {}

    mod_int &operator+=(const mod_int &rhs) {
        if ((x += rhs.x) >= MODULO) x -= MODULO;
        return *this;
    }

    mod_int &operator-=(const mod_int &rhs) {
        if ((x += MODULO - rhs.x) >= MODULO) x -= MODULO;
        return *this;
    }

    mod_int &operator*=(const mod_int &rhs) {
        x = static_cast<signed>(1LL * x * rhs.x % MODULO);
        return *this;
    }

    mod_int &operator/=(const mod_int &rhs) {
        x = static_cast<signed>((1LL * x * rhs.inv().x) % MODULO);
        return *this;
    }

    mod_int operator-() const { return mod_int(-x); }

    mod_int operator+(const mod_int &rhs) const { return mod_int(*this) += rhs; }

    mod_int operator-(const mod_int &rhs) const { return mod_int(*this) -= rhs; }

    mod_int operator*(const mod_int &rhs) const { return mod_int(*this) *= rhs; }

    mod_int operator/(const mod_int &rhs) const { return mod_int(*this) /= rhs; }

    bool operator<(const mod_int &rhs) const { return x < rhs.x; }

    mod_int inv() const {
        assert(x != 0);
        if (x <= static_cast<signed>(TABLE_SIZE)) {
            if (_inv[1].x == 0) prepare();
            return _inv[x];
        } else {
            signed a = x, b = MODULO, u = 1, v = 0, t;
            while (b) {
                t = a / b;
                a -= t * b;
                std::swap(a, b);
                u -= t * v;
                std::swap(u, v);
            }
            return mod_int(u);
        }
    }

    mod_int pow(long long t) const {
        assert(!(x == 0 && t == 0));
        mod_int e = *this, res = mod_int(1);
        for (; t; e *= e, t >>= 1)
            if (t & 1) res *= e;
        return res;
    }

    mod_int fact() {
        if (_fact[0].x == 0) prepare();
        return _fact[x];
    }

    mod_int inv_fact() {
        if (_fact[0].x == 0) prepare();
        return _inv_fact[x];
    }

    mod_int choose(mod_int y) {
        assert(y.x <= x);
        return this->fact() * y.inv_fact() * mod_int(x - y.x).inv_fact();
    }

    static mod_int _inv[TABLE_SIZE + 1];

    static mod_int _fact[TABLE_SIZE + 1];

    static mod_int _inv_fact[TABLE_SIZE + 1];

    static void prepare() {
        _inv[1] = 1;
        for (int i = 2; i <= (int)TABLE_SIZE; ++i) {
            _inv[i] = 1LL * _inv[MODULO % i].x * (MODULO - MODULO / i) % MODULO;
        }
        _fact[0] = 1;
        for (unsigned i = 1; i <= TABLE_SIZE; ++i) {
            _fact[i] = _fact[i - 1] * int(i);
        }
        _inv_fact[TABLE_SIZE] = _fact[TABLE_SIZE].inv();
        for (int i = (int)TABLE_SIZE - 1; i >= 0; --i) {
            _inv_fact[i] = _inv_fact[i + 1] * (i + 1);
        }
    }
};

template <int M, unsigned F>
std::ostream &operator<<(std::ostream &os, const mod_int<M, F> &rhs) {
    return os << rhs.x;
}

template <int M, unsigned F>
std::istream &operator>>(std::istream &is, mod_int<M, F> &rhs) {
    long long s;
    is >> s;
    rhs = mod_int<M, F>(s);
    return is;
}

template <int M, unsigned F>
mod_int<M, F> mod_int<M, F>::_inv[TABLE_SIZE + 1];

template <int M, unsigned F>
mod_int<M, F> mod_int<M, F>::_fact[TABLE_SIZE + 1];

template <int M, unsigned F>
mod_int<M, F> mod_int<M, F>::_inv_fact[TABLE_SIZE + 1];

template <int M, unsigned F>
bool operator==(const mod_int<M, F> &lhs, const mod_int<M, F> &rhs) {
    return lhs.x == rhs.x;
}

template <int M, unsigned F>
bool operator!=(const mod_int<M, F> &lhs, const mod_int<M, F> &rhs) {
    return !(lhs == rhs);
}

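// Instantiation used below: modulus 998244353 with tables up to 10^6, plus
// thin wrappers for binomial coefficients, factorials, and inverse factorials.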
const int MF = 1000010;
const int MOD = 998244353;

using mint = mod_int<MOD, MF>;

mint binom(int n, int r) { return (r < 0 || r > n || n < 0) ? 0 : mint(n).choose(r); }

mint fact(int n) { return mint(n).fact(); }

mint inv_fact(int n) { return mint(n).inv_fact(); }


#define PI acos(-1)

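// Solution: for each query, run an O(n^2) DP in which an auxiliary state either
// advances by one or resets to zero with multiplicity two; the printed answer
// is dp[n][0] modulo 998244353.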
signed main(){
  int t;
  cin>>t;
  lp(z,t){
    int n;
    cin>>n;
    // dp[i][j]: number of ways after i steps with auxiliary state j.
    // From dp[i-1][j] there is one transition to state j+1 and a transition
    // back to state 0 counted twice. The second dimension is sized n+1 so
    // that dp[i][j+1] stays in bounds when j = n-1.
    vector<vector<mint>> dp(n+1, vector<mint>(n+1, mint(0)));
    dp[0][0]=1;
    for(int i=1;i<=n;i++){
      lp(j,n){
        dp[i][j+1]+=dp[i-1][j];
        dp[i][0]+=dp[i-1][j]*2;
      }
    }
    cout<<dp[n][0]<<endl;
  }
  return 0;
}
