結果
問題 | No.235 めぐるはめぐる (5) |
ユーザー | anta |
提出日時 | 2015-08-25 23:24:43 |
言語 | C++11 (gcc 11.4.0) |
結果 | TLE |
実行時間 | - |
コード長 | 18,753 bytes |
コンパイル時間 | 1,451 ms |
コンパイル使用メモリ | 113,768 KB |
実行使用メモリ | 45,920 KB |
最終ジャッジ日時 | 2024-07-18 14:34:30 |
合計ジャッジ時間 | 25,034 ms |
ジャッジサーバーID (参考情報) |
judge1 / judge3 |
(要ログイン)
テストケース
テストケース表示入力 | 結果 | 実行時間 | 実行使用メモリ |
---|---|---|---|
testcase_00 | TLE | - |
testcase_01 | -- | - |
testcase_02 | -- | - |
コンパイルメッセージ
main.cpp: In function ‘int main()’: main.cpp:639:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result] 639 | scanf("%d", &S[i]); | ~~~~~^~~~~~~~~~~~~ main.cpp:643:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result] 643 | scanf("%d", &C[i]); | ~~~~~^~~~~~~~~~~~~ main.cpp:652:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result] 652 | scanf("%d%d", &A, &B), -- A, -- B; | ~~~~~^~~~~~~~~~~~~~~~ main.cpp:662:22: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result] 662 | scanf("%d", &Q); | ~~~~~^~~~~~~~~~ main.cpp:666:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result] 666 | scanf("%d", &ty); | ~~~~~^~~~~~~~~~~ main.cpp:670:38: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result] 670 | scanf("%d%d%d", &X, &Y, &Z), -- X, -- Y; | ~~~~~^~~~~~~~~~~~~~~~~~~~~~ main.cpp:679:38: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result] 679 | scanf("%d%d", &X, &Y), -- X, -- Y; | ~~~~~^~~~~~~~~~~~~~~~
ソースコード
// Solution for yukicoder No.235 (path add / path sum with per-vertex coefficients,
// values taken mod 1e9+7). Heavy-light decomposition where each heavy path is
// organized as a weight-biased binary search tree with lazy "add" propagation.
#include <string>
#include <vector>
#include <algorithm>
#include <numeric>
#include <set>
#include <map>
#include <queue>
#include <iostream>
#include <sstream>
#include <cstdio>
#include <cmath>
#include <ctime>
#include <cstring>
#include <cctype>
#include <cassert>
#include <limits>
#include <functional>
#define rep(i,n) for(int (i)=0;(i)<(int)(n);++(i))
#define rer(i,l,u) for(int (i)=(int)(l);(i)<=(int)(u);++(i))
#define reu(i,l,u) for(int (i)=(int)(l);(i)<(int)(u);++(i))
#if defined(_MSC_VER) || __cplusplus > 199711L
#define aut(r,v) auto r = (v)
#else
#define aut(r,v) __typeof(v) r = (v)
#endif
#define each(it,o) for(aut(it, (o).begin()); it != (o).end(); ++ it)
#define all(o) (o).begin(), (o).end()
#define pb(x) push_back(x)
#define mp(x,y) make_pair((x),(y))
#define mset(m,v) memset(m,v,sizeof(m))
#define INF 0x3f3f3f3f
#define INFL 0x3f3f3f3f3f3f3f3fLL
using namespace std;
typedef vector<int> vi;
typedef pair<int,int> pii;
typedef vector<pair<int,int> > vpii;
typedef long long ll;
template<typename T, typename U> inline void amin(T &x, U y) { if(y < x) x = y; }
template<typename T, typename U> inline void amax(T &x, U y) { if(x < y) x = y; }

// Modular integer: value kept in [0, MOD) in an unsigned field.
// Both signed and signed-long-long constructors normalize negative inputs.
template<int MOD>
struct ModInt {
	static const int Mod = MOD;
	unsigned x;
	ModInt(): x(0) { }
	ModInt(signed sig) { int sigt = sig % MOD; if(sigt < 0) sigt += MOD; x = sigt; }
	ModInt(signed long long sig) { int sigt = sig % MOD; if(sigt < 0) sigt += MOD; x = sigt; }
	int get() const { return (int)x; }
	ModInt &operator+=(ModInt that) { if((x += that.x) >= MOD) x -= MOD; return *this; }
	ModInt &operator-=(ModInt that) { if((x += MOD - that.x) >= MOD) x -= MOD; return *this; }
	ModInt &operator*=(ModInt that) { x = (unsigned long long)x * that.x % MOD; return *this; }
	ModInt operator+(ModInt that) const { return ModInt(*this) += that; }
	ModInt operator-(ModInt that) const { return ModInt(*this) -= that; }
	ModInt operator*(ModInt that) const { return ModInt(*this) *= that; }
};
typedef ModInt<1000000007> mint;

// Heavy-light decomposition of a rooted tree. A "color" identifies one heavy
// path; a vertex's "position" is its offset along its heavy path.
struct HeavyLightDecomposition {
	vector<int> colors, positions;	//Vertex -> Color, Vertex -> Offset
	vector<int> lengths, parents, branches;	//Color -> Int, Color -> Color, Color -> Offset
	vector<int> parentnodes, depths;	//Vertex -> Vertex, Vertex -> Int
	//Used when you want a single flat array instead of e.g. vector<FenwickTree>.
	//[lefts[v], rights[v]) of sortednodes is exactly the subtree of v.
	vector<int> sortednodes, offsets;	//Index -> Vertex, Color -> Index
	vector<int> lefts, rights;	//Vertex -> Index

	// Explicit DFS stack entry; `parent` doubles as a tag:
	// -2 = continue current heavy path, -3 = post-visit (close subtree range).
	struct BuildDFSState {
		int i, len, parent;
		BuildDFSState() { }
		BuildDFSState(int i_, int l, int p): i(i_), len(l), parent(p) { }
	};

	//The adjacency lists may contain both directions of each edge, or only
	//parent-to-child edges.
	void build(const vector<vi> &g, int root) {
		int n = g.size();
		colors.assign(n, -1); positions.assign(n, -1);
		lengths.clear(); parents.clear(); branches.clear();
		parentnodes.assign(n, -1); depths.assign(n, -1);
		sortednodes.clear(); offsets.clear();
		lefts.assign(n, -1); rights.assign(n, -1);
		vector<int> subtreesizes;
		measure(g, root, subtreesizes);
		typedef BuildDFSState State;
		depths[root] = 0;
		vector<State> s;
		s.push_back(State(root, 0, -1));
		while(!s.empty()) {
			State t = s.back(); s.pop_back();
			int i = t.i, len = t.len;
			int index = sortednodes.size();
			int color = lengths.size();
			// Post-visit marker: record the end of i's subtree interval.
			if(t.parent == -3) { rights[i] = index; continue; }
			// Not a heavy-path continuation: start a new path (new color).
			if(t.parent != -2) {
				assert(parents.size() == color);
				parents.push_back(t.parent);
				branches.push_back(len);
				offsets.push_back(index);
				len = 0;
			}
			colors[i] = color;
			positions[i] = len;
			lefts[i] = index;
			sortednodes.push_back(i);
			// Pick the child with the largest subtree as the heavy child.
			int maxsize = -1, maxj = -1;
			each(j, g[i]) if(colors[*j] == -1) {
				if(maxsize < subtreesizes[*j]) { maxsize = subtreesizes[*j]; maxj = *j; }
				parentnodes[*j] = i;
				depths[*j] = depths[i] + 1;
			}
			s.push_back(State(i, -1, -3));
			if(maxj == -1) {
				lengths.push_back(len + 1);
			}else {
				// Light children start new paths; the heavy child (pushed last,
				// so processed first) continues this one.
				each(j, g[i]) if(colors[*j] == -1 && *j != maxj) s.push_back(State(*j, len, color));
				s.push_back(State(maxj, len + 1, -2));
			}
		}
	}

	// (color, position) of vertex v.
	void get(int v, int &c, int &p) const { c = colors[v]; p = positions[v]; }
	// Step from a path to its parent path; returns false at the root path.
	bool go_up(int &c, int &p) const { p = branches[c]; c = parents[c]; return c != -1; }

	// Contiguous vertex range [nodesBegin(c), nodesEnd(c)) of heavy path c.
	inline const int *nodesBegin(int c) const { return &sortednodes[0] + offsets[c]; }
	inline const int *nodesEnd(int c) const { return &sortednodes[0] + (c+1 == offsets.size() ? sortednodes.size() : offsets[c+1]); }
private:
	// Iterative subtree-size computation (-1 = unvisited, -2 = on stack).
	void measure(const vector<vi> &g, int root, vector<int> &out_subtreesizes) const {
		out_subtreesizes.assign(g.size(), -1);
		vector<int> s;
		s.push_back(root);
		while(!s.empty()) {
			int i = s.back(); s.pop_back();
			if(out_subtreesizes[i] == -2) {
				// Second visit: children are done, sum them up.
				int s = 1;
				each(j, g[i]) if(out_subtreesizes[*j] != -2) s += out_subtreesizes[*j];
				out_subtreesizes[i] = s;
			}else {
				s.push_back(i);
				each(j, g[i]) if(out_subtreesizes[*j] == -1) s.push_back(*j);
				out_subtreesizes[i] = -2;
			}
		}
	}
};

// Per-vertex value: current value and its coefficient for range-add
// (an add of z changes val by z * coef).
struct Val {
	mint val, coef;
	explicit Val(): val(), coef() { }
	explicit Val(mint val_, mint coef_): val(val_), coef(coef_) { }
};
// Aggregate over a path: sum of values and sum of coefficients.
struct PathSum {
	mint sum, coefsum;
	PathSum(): sum(), coefsum() { }
	explicit PathSum(const Val &val): sum(val.val), coefsum(val.coef) { }
	PathSum &operator+=(const PathSum &that) { sum += that.sum; coefsum += that.coefsum; return *this; }
	PathSum operator+(const PathSum &that) const { return PathSum(*this) += that; }
	// Sums here are direction-independent, so reversing is the identity.
	PathSum reverse() const { return *this; }
};
// Lazy tag: pending uniform add along a path segment.
struct PathAdd {
	mint add;
	PathAdd() { }
	explicit PathAdd(mint add_): add(add_) { }
	PathAdd &operator+=(const PathAdd &that) { add += that.add; return *this; }
	void addToVal(Val &val) const { val.val += add * val.coef; }
	void addToSum(PathSum &sum) const { sum.sum += add * sum.coefsum; }
};
// One node of the binary search tree built over a heavy path.
// pathSum aggregates the node's whole BST subtree; pathAdd is a lazy tag
// not yet pushed into val/pathSum/children.
struct Node {
	Node *parent;
	Node *pathLeft, *pathRight;
	Val val;
	PathSum pathSum;
	PathAdd pathAdd;
	Node(): parent(NULL), pathLeft(NULL), pathRight(NULL), val(), pathSum(), pathAdd() { }
	bool isPathRoot() const { return !parent; }
	// Subtree sum of p with its pending lazy tag applied (p may be NULL).
	static PathSum getPathSum(const Node *p) {
		if(!p) return PathSum();
		PathSum pathSum = p->pathSum;
		p->pathAdd.addToSum(pathSum);
		return pathSum;
	}
	static void addToPath(Node *p, const PathAdd &add) {
		if(p != NULL) p->pathAdd += add;
	}
	PathSum getSingletonPathSum() const { return PathSum(val); }
	// Push the lazy tag down to children and apply it to this node.
	void propagate() {
		if(pathLeft != NULL) pathLeft->pathAdd += pathAdd;
		if(pathRight != NULL) pathRight->pathAdd += pathAdd;
		pathAdd.addToVal(val);
		pathAdd.addToSum(pathSum);
		pathAdd = PathAdd();
	}
	// Recompute pathSum from children (assumes children are up to date).
	void update() {
		pathSum = getPathSum(pathLeft) + getSingletonPathSum() + getPathSum(pathRight);
	}
	// NOTE(review): memcmp over a struct compares padding bytes too; nodes all
	// come from the same value-initialized assign here, but this check is
	// fragile in general — confirm if reused elsewhere.
	bool debugCheckUpdated() const {
		Node tmp = *this;
		tmp.update();
		return memcmp(this, &tmp, sizeof(Node)) == 0;
	}
};

// HLD where each heavy path carries a weight-biased BST keyed by position,
// supporting path-add and path-sum between arbitrary vertices.
struct BiasedHeavyLightDecompositionPathOnly {
	vector<Node> nodes;
	vector<int> pathRoots;	//for each node: the root node (in the path tree) of the path it belongs to
	vector<int> subpathLeft, subpathRight;	//offset interval [left, right] of the subpath this node represents in its path tree
	vector<int> globalOrder;
	HeavyLightDecomposition hld;

	void build(const vector<vi> &g, int root, const vector<Val> &initVal) {
		hld.build(g, root);
		int n = g.size();
		nodes.assign(n, Node());
		// Subtree sizes via the HLD preorder (children appear after parents).
		vector<int> subtreeSize(n, 1);
		for(int ix = n-1; ix > 0; -- ix) {
			int i = hld.sortednodes[ix], p = hld.parentnodes[i];
			subtreeSize[p] += subtreeSize[i];
		}
		// childrenSize[v] = subtree size excluding the heavy child's subtree;
		// used as the BST weight so heavy vertices sit near the BST root.
		vector<int> childrenSize = subtreeSize;
		for(int ix = 1; ix < n; ++ ix) {
			int i = hld.sortednodes[ix], p = hld.parentnodes[i];
			if(hld.colors[i] == hld.colors[p]) childrenSize[p] -= subtreeSize[i];
		}
		buildPathTrees(childrenSize);
		getGlobalOrder(root);
		subpathLeft.resize(n); subpathRight.resize(n);
		// Initialize values bottom-up in BST order so update() sees fresh children.
		for(int ix = n-1; ix >= 0; -- ix) {
			int i = globalOrder[ix];
			Node *a = &nodes[i];
			a->val = initVal[i];
			a->update();
			subpathLeft[i] = a->pathLeft == NULL ? hld.positions[i] : subpathLeft[getNodeIndex(a->pathLeft)];
			subpathRight[i] = a->pathRight == NULL ? hld.positions[i] : subpathRight[getNodeIndex(a->pathRight)];
		}
	}

	Val getVal(int x) { propagatePath(&nodes[x]); return nodes[x].val; }
	void setVal(int x, const Val &val) { propagatePath(&nodes[x]); nodes[x].val = val; updatePath(&nodes[x]); }

	//node ancestor -> node decendant
	//ancestor must be an ancestor of decendant
	PathSum sumDecendingPath(int ancestor, int decendant) {
		Node *a = &nodes[decendant];
		int ancestorColor = hld.colors[ancestor];
		PathSum sum;
		// Climb heavy paths until reaching the ancestor's path.
		while(a != NULL && hld.colors[getNodeIndex(a)] != ancestorColor) {
			sum = sumHeavyPathFromHead(a) + sum;
			a = goUpToParentPath(a);
		}
		assert(a != NULL);
		return sumHeavyPath(&nodes[ancestor], a) + sum;
	}

	// Same traversal as sumDecendingPath, applying a lazy add instead.
	void addToDecendingPath(int ancestor, int decendant, const PathAdd &add) {
		Node *a = &nodes[decendant];
		int ancestorColor = hld.colors[ancestor];
		while(a != NULL && hld.colors[getNodeIndex(a)] != ancestorColor) {
			addToHeavyPathFromHead(a, add);
			a = goUpToParentPath(a);
		}
		assert(a != NULL);
		addToHeavyPath(&nodes[ancestor], a, add);
	}

	//x -> y
	PathSum sumPath(int x, int y) {
		int z = lowestCommonAncestor(x, y);
		PathSum sum = sumDecendingPath(z, x).reverse();
		if(y != z) {
			// Exclude z from the second leg so it is not counted twice.
			int child = findAncestorChild(z, y);
			assert(child != -1);
			sum += sumDecendingPath(child, y);
		}
		return sum;
	}

	//x -> y
	void addToPath(int x, int y, const PathAdd &add) {
		int z = lowestCommonAncestor(x, y);
		addToDecendingPath(z, x, add);
		if(y != z) {
			int child = findAncestorChild(z, y);
			assert(child != -1);
			addToDecendingPath(child, y, add);
		}
	}

	//kept here (public) because it is used by callers
	int lowestCommonAncestor(int x, int y) const {
		int cx, px, cy, py;
		hld.get(x, cx, px);
		hld.get(y, cy, py);
		// Lift the path whose head is deeper until both are on the same path.
		while(cx != cy) {
			if(hld.depths[*hld.nodesBegin(cx)] < hld.depths[*hld.nodesBegin(cy)])
				hld.go_up(cy, py);
			else
				hld.go_up(cx, px);
		}
		return hld.nodesBegin(cx)[min(px, py)];
	}

	//returns the direct child of `ancestor` that is an ancestor of `decendant`;
	//returns -1 if no such child exists.
	int findAncestorChild(int ancestor, int decendant) const {
		int ac, ap;
		int c, p;
		hld.get(ancestor, ac, ap);
		hld.get(decendant, c, p);
		int prevc = -1;
		while(c != ac) {
			prevc = c;
			if(!hld.go_up(c, p)) return -1;
		}
		if(prevc == -1 || ap != p) {
			// The child lies on ancestor's own heavy path.
			if(ap >= p) return -1;
			else return hld.nodesBegin(ac)[ap + 1];
		}else {
			// The child is the head of the path we climbed from.
			return hld.nodesBegin(prevc)[0];
		}
	}
private:
	int getNodeIndex(const Node *a) const { return static_cast<int>(a - &nodes[0]); }
	// Node of the vertex where this heavy path hangs off its parent path.
	Node *goUpToParentPath(const Node *a) {
		int c, p;
		hld.get(getNodeIndex(a), c, p);
		if(!hld.go_up(c, p)) return NULL;
		else return &nodes[hld.nodesBegin(c)[p]];
	}
	// Propagate lazy tags root-to-a along the BST parent chain.
	// The chain is reversed in place (pointer reversal) and restored, so no
	// extra memory is used.
	void propagatePath(Node *a) {
		Node *r = a, *q = a->parent;
		while(q != NULL) {
			Node *p = q;
			q = p->parent;
			p->parent = r;
			r = p;
		}
		while(r != a) {
			Node *c = r->parent;
			r->parent = q;
			q = r;
			r->propagate();
			r = c;
		}
		a->propagate();
	}
	// Recompute aggregates from a up to the BST root.
	void updatePath(Node *a) {
		while(a != NULL) { a->update(); a = a->parent; }
	}
	// Sum over positions [0, pos(a)] of a's heavy path.
	PathSum sumHeavyPathFromHead(Node *a) {
		propagatePath(a);
		PathSum sum;
		while(1) {
			sum = Node::getPathSum(a->pathLeft) + a->getSingletonPathSum() + sum;
			while(a->parent != NULL && a->parent->pathLeft == a) a = a->parent;
			if(a->parent == NULL) break;
			a = a->parent;
		}
		return sum;
	}
	// Sum over positions [pos(l), pos(r)] within one heavy path's BST.
	PathSum sumHeavyPath(Node *l, Node *r) {
		propagatePath(l); propagatePath(r);
		const Node *lca = findLowestCommonAncestorOnPathTree(l, r);
		assert(lca != NULL);
		PathSum leftSum, rightSum;
		while(l != lca) {
			leftSum = leftSum + l->getSingletonPathSum() + Node::getPathSum(l->pathRight);
			while(l->parent != lca && l->parent->pathRight == l) l = l->parent;
			l = l->parent;
		}
		while(r != lca) {
			rightSum = Node::getPathSum(r->pathLeft) + r->getSingletonPathSum() + rightSum;
			while(r->parent != lca && r->parent->pathLeft == r) r = r->parent;
			r = r->parent;
		}
		assert((l == lca || l == lca->pathLeft) && (r == lca || r == lca->pathRight));
		return leftSum + lca->getSingletonPathSum() + rightSum;
	}
	// Add over positions [0, pos(a)]; mirrors sumHeavyPathFromHead.
	void addToHeavyPathFromHead(Node *a, const PathAdd &add) {
		Node *orgA = a;
		propagatePath(a);
		while(1) {
			Node::addToPath(a->pathLeft, add);
			add.addToVal(a->val);
			while(a->parent != NULL && a->parent->pathLeft == a) a = a->parent;
			if(a->parent == NULL) break;
			a = a->parent;
		}
		updatePath(orgA);
	}
	// Add over positions [pos(l), pos(r)]; mirrors sumHeavyPath.
	void addToHeavyPath(Node *l, Node *r, const PathAdd &add) {
		Node *orgL = l, *orgR = r;
		propagatePath(l); propagatePath(r);
		Node *lca = findLowestCommonAncestorOnPathTree(l, r);
		assert(lca != NULL);
		while(l != lca) {
			Node::addToPath(l->pathRight, add);
			add.addToVal(l->val);
			while(l->parent != lca && l->parent->pathRight == l) l = l->parent;
			l = l->parent;
		}
		while(r != lca) {
			Node::addToPath(r->pathLeft, add);
			add.addToVal(r->val);
			while(r->parent != lca && r->parent->pathLeft == r) r = r->parent;
			r = r->parent;
		}
		assert((l == lca || l == lca->pathLeft) && (r == lca || r == lca->pathRight));
		add.addToVal(lca->val);
		updatePath(orgL); updatePath(orgR);
	}
	//assumes l and r belong to the same path tree
	Node *findLowestCommonAncestorOnPathTree(Node *l, Node *r) const {
		int lPos = hld.positions[getNodeIndex(l)];
		int rPos = hld.positions[getNodeIndex(r)];
		if(lPos > rPos) { swap(l, r); swap(lPos, rPos); }
		Node *a = l;
		// The first ancestor whose subpath interval covers rPos is the LCA.
		while(1) {
			if(rPos <= subpathRight[getNodeIndex(a)]) return a;
			a = a->parent;
		}
	}
	// Build one biased BST per heavy path, weighted by `sizes`.
	void buildPathTrees(const vector<int> &sizes) {
		vector<int> weights, childL, childR;
		pathRoots.resize(nodes.size());
		int C = hld.lengths.size();
		for(int c = 0; c < C; ++ c) {
			int len = hld.lengths[c];
			const int *path = hld.nodesBegin(c);
			weights.resize(len);
			for(int j = 0; j < len; ++ j) weights[j] = sizes[path[j]];
			int rootj = makeBiasedBinarySearchTree(weights, childL, childR);
			int rootNode = path[rootj];
			for(int j = 0; j < len; ++ j) pathRoots[path[j]] = rootNode;
			nodes[rootNode].parent = NULL;
			for(int j = 0; j < len; ++ j) {
				Node *a = &nodes[path[j]];
				Node *l = childL[j] == -1 ? NULL : &nodes[path[childL[j]]];
				Node *r = childR[j] == -1 ? NULL : &nodes[path[childR[j]]];
				if((a->pathLeft = l) != NULL) l->parent = a;
				if((a->pathRight = r) != NULL) r->parent = a;
			}
		}
	}
	//`weights` is destroyed (converted in place to prefix sums)
	int makeBiasedBinarySearchTree(vector<int> &weights, vector<int> &resL, vector<int> &resR) {
		int n = weights.size();
		weights.resize(n + 1);
		int sum = 0;
		for(int i = 0; i < n; ++ i) {
			int w = weights[i];
			weights[i] = sum;
			sum += w;
		}
		weights[n] = sum;
		resL.resize(n); resR.resize(n);
		return makeBiasedBinarySearchTreeRec(-1, 0, n, weights, resL, resR);
	}
	//With a doubling (exponential) search the cost could be kept at
	//log(size of the smaller side), making the whole build O(n). Not done here;
	//since this plain binary search is not called with arbitrary weights, it
	//should rarely be expensive anyway.
	int makeBiasedBinarySearchTreeRec(int p, int i, int j, const vector<int> &prefixSums, vector<int> &resL, vector<int> &resR) {
		if(i == j) return -1;
		//prefixSums[mid+1] - prefixSums[i] >= prefixSums[j] - prefixSums[mid]
		//prefixSums[mid] + prefixSums[mid+1] >= prefixSums[i] + prefixSums[j]
		int mid;
		if(i + 1 == j) {
			mid = i;
		}else {
			int t = prefixSums[i] + prefixSums[j];
			int l = i, u = j-1;
			while(u - l > 0) {
				int m = (l + u) / 2;
				if(prefixSums[m] + prefixSums[m+1] >= t) u = m;
				else l = m + 1;
			}
			mid = u;
		}
		assert(mid < j);
		resL[mid] = makeBiasedBinarySearchTreeRec(mid * 2 + 0, i, mid, prefixSums, resL, resR);
		resR[mid] = makeBiasedBinarySearchTreeRec(mid * 2 + 1, mid + 1, j, prefixSums, resL, resR);
		return mid;
	}
	// Pre-order over every path tree: parents before children, so a reverse
	// scan of globalOrder processes children first.
	void getGlobalOrder(int globalRoot) {
		globalOrder.clear();
		globalOrder.reserve(nodes.size());
		vector<const Node *> stk;
		int C = hld.lengths.size();
		for(int c = 0; c < C; ++ c) {
			stk.push_back(&nodes[pathRoots[hld.nodesBegin(c)[0]]]);
			while(!stk.empty()) {
				const Node *a = stk.back(); stk.pop_back();
				if(a == NULL) continue;
				globalOrder.push_back(getNodeIndex(a));
				stk.push_back(a->pathLeft);
				stk.push_back(a->pathRight);
			}
		}
		assert(globalOrder.size() == nodes.size());
	}
};

// Reference implementation: collects the i..t path into `path` by plain DFS.
// O(N) per call — for debugging/cross-checking only.
bool naivegetpath(int i, int p, int t, const vector<vi> &g, vi &path) {
	bool r = false;
	if(i == t) {
		r = true;
	}else {
		each(j, g[i]) if(*j != p) r = r || naivegetpath(*j, i, t, g, path);
	}
	if(r) path.push_back(i);
	return r;
}
#ifdef MY_LOCAL_RUN
#include "C:\Dropbox\backup\implements\Util\MyAssert.hpp"
#undef assert
#define assert my_assert
#endif
// Input: N, values S[1..N], coefficients C[1..N], N-1 tree edges, then Q
// queries: "0 X Y Z" adds Z*C[v] to every v on path X-Y; "1 X Y" prints the
// sum of values on path X-Y (mod 1e9+7).
int main() {
	int N;
	for(int iii = 0; ; ++ iii) {
		// scanf returns EOF (-1) at end of input; !~(-1) is true.
		if(!~scanf("%d", &N)) break;
//		N=rand()%10+1;
		vector<int> S(N), C(N);
		rep(i, N) {
			scanf("%d", &S[i]);
//			S[i]=rand()%100;
		}
		rep(i, N) {
			scanf("%d", &C[i]);
//			C[i]=rand()%100;
		}
		vector<Val> initVals(N);
		rep(i, N) initVals[i] = Val(S[i], C[i]);
		vector<vi> g(N);
		rep(i, N-1) {
			int A, B;
			scanf("%d%d", &A, &B), -- A, -- B;
//			A=i+1,B=rand()%(i+1);
			g[A].push_back(B);
			g[B].push_back(A);
		}
		BiasedHeavyLightDecompositionPathOnly bhld;
		bhld.build(g, 0, initVals);
		vector<mint> naiveval(all(S));
		int Q;
		scanf("%d", &Q);
//		Q=rand()%100+1;
		rep(ii, Q) {
			int ty;
			scanf("%d", &ty);
//			ty=rand()%2;
			if(ty == 0) {
				int X, Y, Z;
				scanf("%d%d%d", &X, &Y, &Z), -- X, -- Y;
//				X=rand()%N,Y=rand()%N,Z=rand()%100;
				bhld.addToPath(X, Y, PathAdd(Z));
				// NOTE(review): this naive O(N)-per-update bookkeeping (DFS path
				// walk + naiveval maintenance) is debug-only cross-check state
				// left enabled; it is the likely cause of the recorded TLE —
				// confirm and remove for submission.
				vi naivepath; naivegetpath(X, -1, Y, g, naivepath);
				each(j, naivepath) naiveval[*j] += mint(Z) * C[*j];
			}else {
				int X, Y;
				scanf("%d%d", &X, &Y), -- X, -- Y;
//				X=rand()%N,Y=rand()%N;
				PathSum pathSum = bhld.sumPath(X, Y);
				mint ans = pathSum.sum;
				printf("%d\n", ans.get());
//				vi naivepath; naivegetpath(X, -1, Y, g, naivepath);
//				mint naivesum, naivecoefsum;
//				each(j, naivepath) naivecoefsum += C[*j], naivesum += naiveval[*j];
//				if(ans.get() != naivesum.get())
//					cerr << ans.get() << " != " << naivesum.get() << endl;
			}
//			rep(i, N) bhld.getVal(i);
//			rep(i, N) bhld.setVal(i, bhld.getVal(i));
//			rep(i, N) assert(bhld.nodes[i].debugCheckUpdated());
//			rep(i, N) assert(bhld.getVal(i).val.x == naiveval[i].x);
		}
	}
	return 0;
}