Result
| Item | Value |
|---|---|
| Problem | No.1524 Upward Mobility |
| Contest | |
| User | hitonanode |
| Submitted at | 2021-05-28 22:20:53 |
| Language | C++17 (gcc 13.3.0 + boost 1.87.0) |
| Result | WA |
| Run time | - |
| Code length | 15,640 bytes |
| Compile time | 2,198 ms |
| Compile memory | 163,992 KB |
| Last judged at | 2025-01-21 20:09:01 |
| Judge server ID (reference) | judge3 / judge4 |
| File pattern | Result |
|---|---|
| sample | AC * 1 WA * 1 |
| other | AC * 6 WA * 25 |
Source code

```cpp
#include <algorithm>
#include <array>
#include <bitset>
#include <cassert>
#include <chrono>
#include <cmath>
#include <complex>
#include <deque>
#include <forward_list>
#include <fstream>
#include <functional>
#include <iomanip>
#include <ios>
#include <iostream>
#include <limits>
#include <list>
#include <map>
#include <numeric>
#include <queue>
#include <random>
#include <set>
#include <sstream>
#include <stack>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
using namespace std;
using lint = long long;
using pint = pair<int, int>;
using plint = pair<lint, lint>;
struct fast_ios { fast_ios(){ cin.tie(nullptr), ios::sync_with_stdio(false), cout << fixed << setprecision(20); }; } fast_ios_;
#define ALL(x) (x).begin(), (x).end()
#define FOR(i, begin, end) for(int i=(begin),i##_end_=(end);i<i##_end_;i++)
#define IFOR(i, begin, end) for(int i=(end)-1,i##_begin_=(begin);i>=i##_begin_;i--)
#define REP(i, n) FOR(i,0,n)
#define IREP(i, n) IFOR(i,0,n)
template <typename T, typename V>
void ndarray(vector<T>& vec, const V& val, int len) { vec.assign(len, val); }
template <typename T, typename V, typename... Args> void ndarray(vector<T>& vec, const V& val, int len, Args... args) { vec.resize(len), for_each(begin(vec), end(vec), [&](T& v) { ndarray(v, val, args...); }); }
template <typename T> bool chmax(T &m, const T q) { return m < q ? (m = q, true) : false; }
template <typename T> bool chmin(T &m, const T q) { return m > q ? (m = q, true) : false; }
int floor_lg(long long x) { return x <= 0 ? -1 : 63 - __builtin_clzll(x); }
template <typename T1, typename T2> pair<T1, T2> operator+(const pair<T1, T2> &l, const pair<T1, T2> &r) { return make_pair(l.first + r.first, l.second + r.second); }
template <typename T1, typename T2> pair<T1, T2> operator-(const pair<T1, T2> &l, const pair<T1, T2> &r) { return make_pair(l.first - r.first, l.second - r.second); }
template <typename T> vector<T> sort_unique(vector<T> vec) { sort(vec.begin(), vec.end()), vec.erase(unique(vec.begin(), vec.end()), vec.end()); return vec; }
template <typename T> int arglb(const std::vector<T> &v, const T &x) { return std::distance(v.begin(), std::lower_bound(v.begin(), v.end(), x)); }
template <typename T> int argub(const std::vector<T> &v, const T &x) { return std::distance(v.begin(), std::upper_bound(v.begin(), v.end(), x)); }
template <typename T> istream &operator>>(istream &is, vector<T> &vec) { for (auto &v : vec) is >> v; return is; }
template <typename T> ostream &operator<<(ostream &os, const vector<T> &vec) { os << '['; for (auto v : vec) os << v << ','; os << ']'; return os; }
template <typename T, size_t sz> ostream &operator<<(ostream &os, const array<T, sz> &arr) { os << '['; for (auto v : arr) os << v << ','; os << ']'; return os; }
#if __cplusplus >= 201703L
template <typename... T> istream &operator>>(istream &is, tuple<T...> &tpl) { std::apply([&is](auto &&... args) { ((is >> args), ...);}, tpl); return is; }
template <typename... T> ostream &operator<<(ostream &os, const tuple<T...> &tpl) { os << '('; std::apply([&os](auto &&... args) { ((os << args << ','), ...);}, tpl); return os << ')'; }
#endif
template <typename T> ostream &operator<<(ostream &os, const deque<T> &vec) { os << "deq["; for (auto v : vec) os << v << ','; os << ']'; return os; }
template <typename T> ostream &operator<<(ostream &os, const set<T> &vec) { os << '{'; for (auto v : vec) os << v << ','; os << '}'; return os; }
template <typename T, typename TH> ostream &operator<<(ostream &os, const unordered_set<T, TH> &vec) { os << '{'; for (auto v : vec) os << v << ','; os << '}'; return os; }
template <typename T> ostream &operator<<(ostream &os, const multiset<T> &vec) { os << '{'; for (auto v : vec) os << v << ','; os << '}'; return os; }
template <typename T> ostream &operator<<(ostream &os, const unordered_multiset<T> &vec) { os << '{'; for (auto v : vec) os << v << ','; os << '}'; return os; }
template <typename T1, typename T2> ostream &operator<<(ostream &os, const pair<T1, T2> &pa) { os << '(' << pa.first << ',' << pa.second << ')'; return os; }
template <typename TK, typename TV> ostream &operator<<(ostream &os, const map<TK, TV> &mp) { os << '{'; for (auto v : mp) os << v.first << "=>" << v.second << ','; os << '}'; return os; }
template <typename TK, typename TV, typename TH> ostream &operator<<(ostream &os, const unordered_map<TK, TV, TH> &mp) { os << '{'; for (auto v : mp) os << v.first << "=>" << v.second << ','; os << '}'; return os; }
#ifdef HITONANODE_LOCAL
const string COLOR_RESET = "\033[0m", BRIGHT_GREEN = "\033[1;32m", BRIGHT_RED = "\033[1;31m", BRIGHT_CYAN = "\033[1;36m", NORMAL_CROSSED = "\033[0;9;37m", RED_BACKGROUND = "\033[1;41m", NORMAL_FAINT = "\033[0;2m";
#define dbg(x) cerr << BRIGHT_CYAN << #x << COLOR_RESET << " = " << (x) << NORMAL_FAINT << " (L" << __LINE__ << ") " << __FILE__ << COLOR_RESET << endl
#define dbgif(cond, x) ((cond) ? cerr << BRIGHT_CYAN << #x << COLOR_RESET << " = " << (x) << NORMAL_FAINT << " (L" << __LINE__ << ") " << __FILE__ << COLOR_RESET << endl : cerr)
#else
#define dbg(x) (x)
#define dbgif(cond, x) 0
#endif
// Preorder Euler Tour
// (preorder traversal; useful for queries over the vertices of a subtree, etc.)
struct PreorderEulerTour {
int V; // # of vertices of tree
int root;
std::vector<std::vector<int>> edges;
std::vector<int> subtree_begin, subtree_end;
std::vector<int> vis_order;
void _build_dfs(int now, int prv) {
subtree_begin[now] = vis_order.size();
vis_order.push_back(now);
for (auto nxt : edges[now])
if (nxt != prv) _build_dfs(nxt, now);
subtree_end[now] = vis_order.size();
}
PreorderEulerTour() = default;
PreorderEulerTour(const std::vector<std::vector<int>> &to, int root) : V(to.size()), root(root), edges(to) {
assert(root >= 0 and root < V);
subtree_begin.resize(V);
subtree_end.resize(V);
_build_dfs(root, -1);
}
};
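// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original submission): minimal usage of
// PreorderEulerTour. Because vertices are appended to vis_order in preorder,
// the subtree rooted at v occupies the contiguous half-open interval
// [subtree_begin[v], subtree_end[v]) of vis_order, so subtree queries reduce
// to range queries. The function name and the tiny tree below are hypothetical.
void example_preorder_euler_tour() {
    std::vector<std::vector<int>> adj{{1, 2}, {0, 3}, {0}, {1}}; // tree edges: 0-1, 0-2, 1-3
    PreorderEulerTour tour(adj, 0);
    // Subtree of vertex 1 is {vis_order[i] : subtree_begin[1] <= i < subtree_end[1]} = {1, 3}
    assert(tour.subtree_end[1] - tour.subtree_begin[1] == 2);
}
// ---------------------------------------------------------------------------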
struct HeavyLightDecomposition {
int V;
int k;
int nb_heavy_path;
std::vector<std::vector<int>> e;
std::vector<int> par; // par[i] = parent of vertex i (Default: -1)
std::vector<int> depth; // depth[i] = distance between root and vertex i
std::vector<int> subtree_sz; // subtree_sz[i] = size of subtree whose root is i
std::vector<int> heavy_child; // heavy_child[i] = child of vertex i on heavy path (Default: -1)
std::vector<int> tree_id; // tree_id[i] = id of tree vertex i belongs to
std::vector<int> aligned_id, aligned_id_inv; // aligned_id[i] = aligned id for vertex i (consecutive on heavy edges)
std::vector<int> head; // head[i] = id of vertex on heavy path of vertex i, nearest to root
std::vector<int> head_ids; // consist of head vertex id's
std::vector<int> heavy_path_id; // heavy_path_id[i] = heavy_path_id for vertex [i]
HeavyLightDecomposition(int sz = 0) : V(sz), k(0), nb_heavy_path(0), e(sz), par(sz), depth(sz), subtree_sz(sz), heavy_child(sz), tree_id(sz, -1), aligned_id(sz), aligned_id_inv(sz), head(sz), heavy_path_id(sz, -1) {}
void add_edge(int u, int v) {
e[u].emplace_back(v);
e[v].emplace_back(u);
}
void _build_dfs(int root) {
std::stack<std::pair<int, int>> st;
par[root] = -1;
depth[root] = 0;
st.emplace(root, 0);
while (!st.empty()) {
int now = st.top().first;
int& i = st.top().second;
if (i < (int)e[now].size()) {
int nxt = e[now][i++];
if (nxt == par[now]) continue;
par[nxt] = now;
depth[nxt] = depth[now] + 1;
st.emplace(nxt, 0);
} else {
st.pop();
int max_sub_sz = 0;
subtree_sz[now] = 1;
heavy_child[now] = -1;
for (auto nxt : e[now]) {
if (nxt == par[now]) continue;
subtree_sz[now] += subtree_sz[nxt];
if (max_sub_sz < subtree_sz[nxt]) max_sub_sz = subtree_sz[nxt], heavy_child[now] = nxt;
}
}
}
}
void _build_bfs(int root, int tree_id_now) {
std::queue<int> q({root});
while (!q.empty()) {
int h = q.front();
q.pop();
head_ids.emplace_back(h);
for (int now = h; now != -1; now = heavy_child[now]) {
tree_id[now] = tree_id_now;
aligned_id[now] = k++;
aligned_id_inv[aligned_id[now]] = now;
heavy_path_id[now] = nb_heavy_path;
head[now] = h;
for (int nxt : e[now])
if (nxt != par[now] and nxt != heavy_child[now]) q.push(nxt);
}
nb_heavy_path++;
}
}
void build(std::vector<int> roots = {0}) {
int tree_id_now = 0;
for (auto r : roots) {
_build_dfs(r);
_build_bfs(r, tree_id_now++);
}
}
template <typename Monoid> std::vector<Monoid> segtree_rearrange(const std::vector<Monoid>& data) const {
assert(int(data.size()) == V);
std::vector<Monoid> ret;
ret.reserve(V);
for (int i = 0; i < V; i++) ret.emplace_back(data[aligned_id_inv[i]]);
return ret;
}
// query for vertices on path [u, v] (INCLUSIVE)
void for_each_vertex(int u, int v, const std::function<void(int ancestor, int descendant)>& f) const {
while (true) {
if (aligned_id[u] > aligned_id[v]) std::swap(u, v);
f(std::max(aligned_id[head[v]], aligned_id[u]), aligned_id[v]);
if (head[u] == head[v]) break;
v = par[head[v]];
}
}
void for_each_vertex_noncommutative(int from, int to, const std::function<void(int ancestor, int descendant)>& fup, const std::function<void(int ancestor, int descendant)>& fdown) const {
int u = from, v = to;
const int lca = lowest_common_ancestor(u, v), dlca = depth[lca];
while (u >= 0 and depth[u] > dlca) {
const int p = (depth[head[u]] > dlca ? head[u] : lca);
fup(aligned_id[p] + (p == lca), aligned_id[u]), u = par[p];
}
std::vector<std::pair<int, int>> lrs;
while (v >= 0 and depth[v] >= dlca) {
const int p = (depth[head[v]] >= dlca ? head[v] : lca);
lrs.emplace_back(p, v), v = par[p];
}
std::reverse(lrs.begin(), lrs.end());
for (const auto& lr : lrs) fdown(aligned_id[lr.first], aligned_id[lr.second]);
}
// query for edges on path [u, v]
void for_each_edge(int u, int v, const std::function<void(int, int)>& f) const {
while (true) {
if (aligned_id[u] > aligned_id[v]) std::swap(u, v);
if (head[u] != head[v]) {
f(aligned_id[head[v]], aligned_id[v]);
v = par[head[v]];
} else {
if (u != v) f(aligned_id[u] + 1, aligned_id[v]);
break;
}
}
}
// lowest_common_ancestor: O(logV)
int lowest_common_ancestor(int u, int v) const {
assert(tree_id[u] == tree_id[v] and tree_id[u] >= 0);
while (true) {
if (aligned_id[u] > aligned_id[v]) std::swap(u, v);
if (head[u] == head[v]) return u;
v = par[head[v]];
}
}
int distance(int u, int v) const {
assert(tree_id[u] == tree_id[v] and tree_id[u] >= 0);
return depth[u] + depth[v] - 2 * depth[lowest_common_ancestor(u, v)];
}
};
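// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original submission): minimal usage of
// HeavyLightDecomposition. for_each_vertex / for_each_edge decompose the u-v
// path into O(log V) contiguous inclusive ranges [l, r] in aligned_id
// coordinates, so a path query is answered by folding a range structure
// (built over segtree_rearrange'd data) on each reported range. The function
// name and the tiny tree below are hypothetical.
void example_heavy_light_decomposition() {
    HeavyLightDecomposition hld(5);
    hld.add_edge(0, 1), hld.add_edge(1, 2), hld.add_edge(1, 3), hld.add_edge(0, 4);
    hld.build({0});
    int edge_count = 0;
    // In this implementation, each range [l, r] reported by for_each_edge covers r - l + 1 path edges.
    hld.for_each_edge(2, 4, [&](int l, int r) { edge_count += r - l + 1; });
    assert(hld.lowest_common_ancestor(2, 4) == 0 and edge_count == hld.distance(2, 4));
}
// ---------------------------------------------------------------------------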
// StarrySkyTree: segment tree for Range Minimum Query & Range Add Query
// Complexity: $O(N)$ (construction), $O(\log N)$ (add / get / prod)
// - RangeAddRangeMin(std::vector<Tp> data_init) : Initialize array x by data_init.
// - add(int begin, int end, Tp vadd) : Update x[i] <- x[i] + vadd for all begin <= i < end.
// - get(int pos) : Get x[pos].
// - prod(int begin, int end) : Get min(x[begin], ..., x[end - 1]).
template <typename Tp, Tp defaultT = std::numeric_limits<Tp>::max() / 2> struct RangeAddRangeMin {
int N, head;
std::vector<Tp> range_min, range_add;
static inline Tp f(Tp x, Tp y) noexcept { return std::min(x, y); }
inline void _merge(int pos) {
range_min[pos] = f(range_min[pos * 2] + range_add[pos * 2], range_min[pos * 2 + 1] + range_add[pos * 2 + 1]);
}
void initialize(const std::vector<Tp> &data_init) {
N = data_init.size(), head = 1;
while (head < N) head <<= 1;
range_min.assign(head * 2, defaultT);
range_add.assign(head * 2, 0);
std::copy(data_init.begin(), data_init.end(), range_min.begin() + head);
for (int pos = head; --pos;) _merge(pos);
}
RangeAddRangeMin() = default;
RangeAddRangeMin(const std::vector<Tp> &data_init) { initialize(data_init); }
void _add(int begin, int end, int pos, int l, int r, Tp vadd) noexcept {
if (r <= begin or end <= l) return;
if (begin <= l and r <= end) {
range_add[pos] += vadd;
return;
}
_add(begin, end, pos * 2, l, (l + r) / 2, vadd);
_add(begin, end, pos * 2 + 1, (l + r) / 2, r, vadd);
_merge(pos);
}
// Add `vadd` to (x_begin, ..., x_{end - 1})
void add(int begin, int end, Tp vadd) noexcept { return _add(begin, end, 1, 0, head, vadd); }
Tp _get(int begin, int end, int pos, int l, int r) const noexcept {
if (r <= begin or end <= l) return defaultT;
if (begin <= l and r <= end) return range_min[pos] + range_add[pos];
return f(_get(begin, end, pos * 2, l, (l + r) / 2), _get(begin, end, pos * 2 + 1, (l + r) / 2, r)) + range_add[pos];
}
// Return x_pos
Tp get(int pos) const noexcept { return prod(pos, pos + 1); }
// Return f(x_begin, ..., x_{end - 1}) = min(x_begin, ..., x_{end - 1})
Tp prod(int begin, int end) const noexcept { return _get(begin, end, 1, 0, head); }
};
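// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original submission): minimal usage of
// RangeAddRangeMin ("starry sky tree"). All indices are half-open [begin, end).
// The function name and the sample values are hypothetical.
void example_range_add_range_min() {
    RangeAddRangeMin<long long> st(std::vector<long long>{3, 1, 4, 1, 5});
    st.add(1, 4, 10);           // x becomes {3, 11, 14, 11, 5}
    assert(st.prod(0, 5) == 3); // minimum over the whole array
    assert(st.get(3) == 11);    // single-point read: x[3]
}
// ---------------------------------------------------------------------------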
int main() {
int N;
cin >> N;
vector<vector<int>> to(N);
HeavyLightDecomposition hld(N);
FOR(i, 1, N) {
int p;
cin >> p;
p--;
to[p].push_back(i);
hld.add_edge(p, i);
}
dbg(to);
const int root = 0;
hld.build({root});
vector<int> A(N);
vector<lint> B(N);
cin >> A >> B;
PreorderEulerTour tour(to, root);
vector<int> ids(N);
iota(ALL(ids), 0);
sort(ALL(ids), [&](int i, int j) { return A[i] < A[j]; });
vector<lint> init(N);
RangeAddRangeMin<lint, 0> rmq(init);
vector<lint> dp0(N);
for (const auto i : ids) {
lint su = 0;
hld.for_each_vertex(i, i, [&](int l, int r) { su -= rmq.prod(l, r + 1); });
dbg(i);
dbg(su);
hld.for_each_vertex(i, i, [&](int l, int r) { dp0[i] = -rmq.prod(l, r + 1); });
chmax(dp0[i], B[i]);
if (su < B[i]) {
hld.for_each_vertex(i, root, [&](int l, int r) { rmq.add(l, r + 1, -B[i] ); });
}
}
// vector<lint> tmp(N);
// REP(i, N) tmp[i] = rmq.get(i, i + 1);
REP(i, N) {
// hld.for_each_vertex(i, i, [&](int l, int r) { dp0[i] = -rmq.prod(l, r + 1); });
}
dbg(dp0);
auto ord = tour.vis_order;
std::reverse(ALL(ord));
vector<lint> dp_rootfree(N);
for (auto i : ord) {
for (auto j : to[i]) {
dp_rootfree[i] += dp_rootfree[j];
}
chmax(dp_rootfree[i], dp0[i]);
}
dbg(dp_rootfree);
cout << dp_rootfree[root] << '\n';
}
```