Result
Problem | No.1790 Subtree Deletion |
User | hitonanode |
Submitted at | 2021-12-19 02:35:25 |
Language | C++23 (gcc 12.3.0 + boost 1.83.0) |
Result | WA |
Execution time | - |
Code length | 12,959 bytes |
Compile time | 2,763 ms |
Compile memory | 206,904 KB |
Execution memory | 26,156 KB |
Last judged at | 2024-09-15 14:23:21 |
Total judge time | 5,958 ms |
Judge server ID (for reference) | judge5 / judge1 |
Test Cases
Input | Result | Time | Memory |
---|---|---|---|
testcase_00 | AC | 2 ms | 5,248 KB |
testcase_01 | AC | 2 ms | 5,376 KB |
testcase_02 | AC | 2 ms | 5,376 KB |
testcase_03 | WA | - | - |
testcase_04 | WA | - | - |
testcase_05 | WA | - | - |
testcase_06 | WA | - | - |
testcase_07 | WA | - | - |
testcase_08 | AC | 17 ms | 5,376 KB |
testcase_09 | AC | 186 ms | 26,016 KB |
testcase_10 | WA | - | - |
testcase_11 | WA | - | - |
testcase_12 | WA | - | - |
testcase_13 | WA | - | - |
testcase_14 | WA | - | - |
Source Code
#include <algorithm>
#include <array>
#include <bitset>
#include <cassert>
#include <chrono>
#include <cmath>
#include <complex>
#include <deque>
#include <forward_list>
#include <fstream>
#include <functional>
#include <iomanip>
#include <ios>
#include <iostream>
#include <limits>
#include <list>
#include <map>
#include <numeric>
#include <queue>
#include <random>
#include <set>
#include <sstream>
#include <stack>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
using namespace std;
using lint = long long;
using pint = pair<int, int>;
using plint = pair<lint, lint>;
struct fast_ios { fast_ios(){ cin.tie(nullptr), ios::sync_with_stdio(false), cout << fixed << setprecision(20); }; } fast_ios_;
#define ALL(x) (x).begin(), (x).end()
#define FOR(i, begin, end) for(int i=(begin),i##_end_=(end);i<i##_end_;i++)
#define IFOR(i, begin, end) for(int i=(end)-1,i##_begin_=(begin);i>=i##_begin_;i--)
#define REP(i, n) FOR(i,0,n)
#define IREP(i, n) IFOR(i,0,n)
template <typename T, typename V> void ndarray(vector<T>& vec, const V& val, int len) { vec.assign(len, val); }
template <typename T, typename V, typename... Args> void ndarray(vector<T>& vec, const V& val, int len, Args... args) { vec.resize(len), for_each(begin(vec), end(vec), [&](T& v) { ndarray(v, val, args...); }); }
template <typename T> bool chmax(T &m, const T q) { return m < q ? (m = q, true) : false; }
template <typename T> bool chmin(T &m, const T q) { return m > q ? (m = q, true) : false; }
int floor_lg(long long x) { return x <= 0 ? -1 : 63 - __builtin_clzll(x); }
template <typename T1, typename T2> pair<T1, T2> operator+(const pair<T1, T2> &l, const pair<T1, T2> &r) { return make_pair(l.first + r.first, l.second + r.second); }
template <typename T1, typename T2> pair<T1, T2> operator-(const pair<T1, T2> &l, const pair<T1, T2> &r) { return make_pair(l.first - r.first, l.second - r.second); }
template <typename T> vector<T> sort_unique(vector<T> vec) { sort(vec.begin(), vec.end()), vec.erase(unique(vec.begin(), vec.end()), vec.end()); return vec; }
template <typename T> int arglb(const std::vector<T> &v, const T &x) { return std::distance(v.begin(), std::lower_bound(v.begin(), v.end(), x)); }
template <typename T> int argub(const std::vector<T> &v, const T &x) { return std::distance(v.begin(), std::upper_bound(v.begin(), v.end(), x)); }
template <typename T> istream &operator>>(istream &is, vector<T> &vec) { for (auto &v : vec) is >> v; return is; }
template <typename T> ostream &operator<<(ostream &os, const vector<T> &vec) { os << '['; for (auto v : vec) os << v << ','; os << ']'; return os; }
template <typename T, size_t sz> ostream &operator<<(ostream &os, const array<T, sz> &arr) { os << '['; for (auto v : arr) os << v << ','; os << ']'; return os; }
#if __cplusplus >= 201703L
template <typename... T> istream &operator>>(istream &is, tuple<T...> &tpl) { std::apply([&is](auto &&... args) { ((is >> args), ...);}, tpl); return is; }
template <typename... T> ostream &operator<<(ostream &os, const tuple<T...> &tpl) { os << '('; std::apply([&os](auto &&... args) { ((os << args << ','), ...);}, tpl); return os << ')'; }
#endif
template <typename T> ostream &operator<<(ostream &os, const deque<T> &vec) { os << "deq["; for (auto v : vec) os << v << ','; os << ']'; return os; }
template <typename T> ostream &operator<<(ostream &os, const set<T> &vec) { os << '{'; for (auto v : vec) os << v << ','; os << '}'; return os; }
template <typename T, typename TH> ostream &operator<<(ostream &os, const unordered_set<T, TH> &vec) { os << '{'; for (auto v : vec) os << v << ','; os << '}'; return os; }
template <typename T> ostream &operator<<(ostream &os, const multiset<T> &vec) { os << '{'; for (auto v : vec) os << v << ','; os << '}'; return os; }
template <typename T> ostream &operator<<(ostream &os, const unordered_multiset<T> &vec) { os << '{'; for (auto v : vec) os << v << ','; os << '}'; return os; }
template <typename T1, typename T2> ostream &operator<<(ostream &os, const pair<T1, T2> &pa) { os << '(' << pa.first << ',' << pa.second << ')'; return os; }
template <typename TK, typename TV> ostream &operator<<(ostream &os, const map<TK, TV> &mp) { os << '{'; for (auto v : mp) os << v.first << "=>" << v.second << ','; os << '}'; return os; }
template <typename TK, typename TV, typename TH> ostream &operator<<(ostream &os, const unordered_map<TK, TV, TH> &mp) { os << '{'; for (auto v : mp) os << v.first << "=>" << v.second << ','; os << '}'; return os; }
#ifdef HITONANODE_LOCAL
const string COLOR_RESET = "\033[0m", BRIGHT_GREEN = "\033[1;32m", BRIGHT_RED = "\033[1;31m", BRIGHT_CYAN = "\033[1;36m", NORMAL_CROSSED = "\033[0;9;37m", RED_BACKGROUND = "\033[1;41m", NORMAL_FAINT = "\033[0;2m";
#define dbg(x) cerr << BRIGHT_CYAN << #x << COLOR_RESET << " = " << (x) << NORMAL_FAINT << " (L" << __LINE__ << ") " << __FILE__ << COLOR_RESET << endl
#define dbgif(cond, x) ((cond) ? cerr << BRIGHT_CYAN << #x << COLOR_RESET << " = " << (x) << NORMAL_FAINT << " (L" << __LINE__ << ") " << __FILE__ << COLOR_RESET << endl : cerr)
#else
#define dbg(x) (x)
#define dbgif(cond, x) 0
#endif

#include <atcoder/segtree>

// Heavy-Light Decomposition of trees
// Based on http://beet-aizu.hatenablog.com/entry/2017/12/12/235950
struct HeavyLightDecomposition {
    int V;
    int k;
    int nb_heavy_path;
    std::vector<std::vector<int>> e;
    std::vector<int> par;         // par[i] = parent of vertex i (Default: -1)
    std::vector<int> depth;       // depth[i] = distance between root and vertex i
    std::vector<int> subtree_sz;  // subtree_sz[i] = size of subtree whose root is i
    std::vector<int> heavy_child; // heavy_child[i] = child of vertex i on heavy path (Default: -1)
    std::vector<int> tree_id;     // tree_id[i] = id of tree vertex i belongs to
    std::vector<int> aligned_id, aligned_id_inv; // aligned_id[i] = aligned id for vertex i (consecutive on heavy edges)
    std::vector<int> head;        // head[i] = id of vertex on heavy path of vertex i, nearest to root
    std::vector<int> head_ids;    // consist of head vertex id's
    std::vector<int> heavy_path_id; // heavy_path_id[i] = heavy_path_id for vertex [i]

    HeavyLightDecomposition(int sz = 0)
        : V(sz), k(0), nb_heavy_path(0), e(sz), par(sz), depth(sz), subtree_sz(sz), heavy_child(sz),
          tree_id(sz, -1), aligned_id(sz), aligned_id_inv(sz), head(sz), heavy_path_id(sz, -1) {}

    void add_edge(int u, int v) {
        e[u].emplace_back(v);
        e[v].emplace_back(u);
    }

    void _build_dfs(int root) {
        std::stack<std::pair<int, int>> st;
        par[root] = -1;
        depth[root] = 0;
        st.emplace(root, 0);
        while (!st.empty()) {
            int now = st.top().first;
            int& i = st.top().second;
            if (i < (int)e[now].size()) {
                int nxt = e[now][i++];
                if (nxt == par[now]) continue;
                par[nxt] = now;
                depth[nxt] = depth[now] + 1;
                st.emplace(nxt, 0);
            } else {
                st.pop();
                int max_sub_sz = 0;
                subtree_sz[now] = 1;
                heavy_child[now] = -1;
                for (auto nxt : e[now]) {
                    if (nxt == par[now]) continue;
                    subtree_sz[now] += subtree_sz[nxt];
                    if (max_sub_sz < subtree_sz[nxt]) max_sub_sz = subtree_sz[nxt], heavy_child[now] = nxt;
                }
            }
        }
    }

    void _build_bfs(int root, int tree_id_now) {
        std::queue<int> q({root});
        while (!q.empty()) {
            int h = q.front();
            q.pop();
            head_ids.emplace_back(h);
            for (int now = h; now != -1; now = heavy_child[now]) {
                tree_id[now] = tree_id_now;
                aligned_id[now] = k++;
                aligned_id_inv[aligned_id[now]] = now;
                heavy_path_id[now] = nb_heavy_path;
                head[now] = h;
                for (int nxt : e[now])
                    if (nxt != par[now] and nxt != heavy_child[now]) q.push(nxt);
            }
            nb_heavy_path++;
        }
    }

    void build(std::vector<int> roots = {0}) {
        int tree_id_now = 0;
        for (auto r : roots) {
            _build_dfs(r);
            _build_bfs(r, tree_id_now++);
        }
    }

    template <typename Monoid>
    std::vector<Monoid> segtree_rearrange(const std::vector<Monoid>& data) const {
        assert(int(data.size()) == V);
        std::vector<Monoid> ret;
        ret.reserve(V);
        for (int i = 0; i < V; i++) ret.emplace_back(data[aligned_id_inv[i]]);
        return ret;
    }

    // query for vertices on path [u, v] (INCLUSIVE)
    void for_each_vertex(int u, int v, const std::function<void(int ancestor, int descendant)>& f) const {
        while (true) {
            if (aligned_id[u] > aligned_id[v]) std::swap(u, v);
            f(std::max(aligned_id[head[v]], aligned_id[u]), aligned_id[v]);
            if (head[u] == head[v]) break;
            v = par[head[v]];
        }
    }

    void for_each_vertex_noncommutative(int from, int to, const std::function<void(int ancestor, int descendant)>& fup, const std::function<void(int ancestor, int descendant)>& fdown) const {
        int u = from, v = to;
        const int lca = lowest_common_ancestor(u, v), dlca = depth[lca];
        while (u >= 0 and depth[u] > dlca) {
            const int p = (depth[head[u]] > dlca ? head[u] : lca);
            fup(aligned_id[p] + (p == lca), aligned_id[u]), u = par[p];
        }
        std::vector<std::pair<int, int>> lrs;
        while (v >= 0 and depth[v] >= dlca) {
            const int p = (depth[head[v]] >= dlca ? head[v] : lca);
            lrs.emplace_back(p, v), v = par[p];
        }
        std::reverse(lrs.begin(), lrs.end());
        for (const auto& lr : lrs) fdown(aligned_id[lr.first], aligned_id[lr.second]);
    }

    // query for edges on path [u, v]
    void for_each_edge(int u, int v, const std::function<void(int, int)>& f) const {
        while (true) {
            if (aligned_id[u] > aligned_id[v]) std::swap(u, v);
            if (head[u] != head[v]) {
                f(aligned_id[head[v]], aligned_id[v]);
                v = par[head[v]];
            } else {
                if (u != v) f(aligned_id[u] + 1, aligned_id[v]);
                break;
            }
        }
    }

    // lowest_common_ancestor: O(logV)
    int lowest_common_ancestor(int u, int v) const {
        assert(tree_id[u] == tree_id[v] and tree_id[u] >= 0);
        while (true) {
            if (aligned_id[u] > aligned_id[v]) std::swap(u, v);
            if (head[u] == head[v]) return u;
            v = par[head[v]];
        }
    }

    int distance(int u, int v) const {
        assert(tree_id[u] == tree_id[v] and tree_id[u] >= 0);
        return depth[u] + depth[v] - 2 * depth[lowest_common_ancestor(u, v)];
    }
};

#include <atcoder/lazysegtree>

lint op(lint l, lint r) { return l ^ r; }
lint e() { return 0; }
using F = lint;
lint mapping(F f, lint x) { return f ^ x; }
F composition(F f, F g) { return f ^ g; }
F id() { return 0; }

int opsum(int l, int r) { return l + r; }
int esum() { return 0; }

int main() {
    int N;
    cin >> N;
    vector<vector<pair<int, lint>>> to(N);
    HeavyLightDecomposition hld(N);
    REP(i, N - 1) {
        int l, r;
        lint a;
        cin >> l >> r >> a;
        l--, r--;
        to[l].emplace_back(r, a);
        to[r].emplace_back(l, a);
        hld.add_edge(l, r);
    }
    vector<lint> weights(N);
    vector<lint> parw(N);
    auto rec = [&](auto &&self, int now, int prv) -> void {
        for (auto [nxt, w] : to[now]) {
            if (nxt == prv) continue;
            self(self, nxt, now);
            parw[nxt] = w;
            weights[nxt] ^= w;
            weights[now] ^= weights[nxt];
        }
    };
    rec(rec, 0, -1);
    hld.build({0});
    atcoder::lazy_segtree<lint, op, e, F, mapping, composition, id> tree1(hld.segtree_rearrange(weights));
    vector<int> init2(N);
    atcoder::segtree<int, opsum, esum> tree2(init2);
    int Q;
    cin >> Q;
    while (Q--) {
        int tp, x;
        cin >> tp >> x;
        --x;
        if (tp == 1) {
            int ne = 0;
            hld.for_each_vertex(0, x, [&](int a, int d) { ne += tree2.prod(a, d + 1); });
            if (ne) continue;
            tree2.set(hld.aligned_id[x], 1);
            auto v = tree1.get(hld.aligned_id[x]);
            hld.for_each_vertex(0, x, [&](int a, int d) { tree1.apply(0, x, v); });
        } else {
            int ne = 0;
            hld.for_each_vertex(0, x, [&](int a, int d) { ne += tree2.prod(a, d + 1); });
            if (ne) {
                cout << "0\n";
            } else {
                auto v = tree1.get(hld.aligned_id[x]);
                cout << (v ^ parw[x]) << '\n';
            }
        }
    }
}
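
For context, the query pattern used in main() above — rearranging vertex values into aligned_id order with segtree_rearrange and folding the O(log N) contiguous ranges that for_each_vertex visits on a root-to-vertex path — can be isolated as below. This is a minimal illustrative sketch, not part of the submission: it assumes the HeavyLightDecomposition struct defined above and AC Library's atcoder::segtree, and the names xor_op, xor_e, and path_xor_from_root are hypothetical helpers introduced only for this example.

#include <atcoder/segtree>
using lint = long long;

// XOR monoid, same algebra as op()/e() in the submission.
lint xor_op(lint l, lint r) { return l ^ r; }
lint xor_e() { return 0; }

// XOR of vertex values on the path from the root (vertex 0) to v.
// `seg` must hold the values in aligned_id order, e.g. initialized from
// hld.segtree_rearrange(values) after hld.build().
lint path_xor_from_root(const HeavyLightDecomposition& hld,
                        atcoder::segtree<lint, xor_op, xor_e>& seg, int v) {
    lint ret = 0;
    // for_each_vertex reports inclusive aligned_id pairs [a, d] covering the path,
    // so the half-open segment-tree range is [a, d + 1).
    hld.for_each_vertex(0, v, [&](int a, int d) { ret ^= seg.prod(a, d + 1); });
    return ret;
}

The ranges reported by for_each_vertex for a single path are disjoint, which is what makes this fold correct; the submission uses the same shape with tree2.prod to count already-deleted vertices on the root-to-x path.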