結果
| 問題 | No.235 めぐるはめぐる (5) |
| コンテスト | |
| ユーザー | anta |
| 提出日時 | 2015-08-26 00:00:13 |
| 言語 | C++11(廃止可能性あり) (gcc 13.3.0) |
| 結果 | AC |
| 実行時間 | 594 ms / 10,000 ms |
| コード長 | 19,535 bytes |
| コンパイル時間 | 1,346 ms |
| コンパイル使用メモリ | 110,604 KB |
| 実行使用メモリ | 40,232 KB |
| 最終ジャッジ日時 | 2024-07-18 14:36:40 |
| 合計ジャッジ時間 | 4,333 ms |
|
| ジャッジサーバーID (参考情報) | judge1 / judge3 (要ログイン) |
| ファイルパターン | 結果 |
|---|---|
| other | AC * 3 |
コンパイルメッセージ
main.cpp: In function ‘int main()’:
main.cpp:677:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
677 | scanf("%d", &S[i]);
| ~~~~~^~~~~~~~~~~~~
main.cpp:684:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
684 | scanf("%d", &C[i]);
| ~~~~~^~~~~~~~~~~~~
main.cpp:696:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
696 | scanf("%d%d", &A, &B), -- A, -- B;
| ~~~~~^~~~~~~~~~~~~~~~
main.cpp:711:22: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
711 | scanf("%d", &Q);
| ~~~~~^~~~~~~~~~
main.cpp:718:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
718 | scanf("%d", &ty);
| ~~~~~^~~~~~~~~~~
main.cpp:725:38: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
725 | scanf("%d%d%d", &X, &Y, &Z), -- X, -- Y;
| ~~~~~^~~~~~~~~~~~~~~~~~~~~~
main.cpp:739:38: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
739 | scanf("%d%d", &X, &Y), -- X, -- Y;
| ~~~~~^~~~~~~~~~~~~~~~
ソースコード
#include <string>
#include <vector>
#include <algorithm>
#include <numeric>
#include <set>
#include <map>
#include <queue>
#include <iostream>
#include <sstream>
#include <cstdio>
#include <cmath>
#include <ctime>
#include <cstring>
#include <cctype>
#include <cassert>
#include <limits>
#include <functional>
#define rep(i,n) for(int (i)=0;(i)<(int)(n);++(i))
#define rer(i,l,u) for(int (i)=(int)(l);(i)<=(int)(u);++(i))
#define reu(i,l,u) for(int (i)=(int)(l);(i)<(int)(u);++(i))
#if defined(_MSC_VER) || __cplusplus > 199711L
#define aut(r,v) auto r = (v)
#else
#define aut(r,v) __typeof(v) r = (v)
#endif
#define each(it,o) for(aut(it, (o).begin()); it != (o).end(); ++ it)
#define all(o) (o).begin(), (o).end()
#define pb(x) push_back(x)
#define mp(x,y) make_pair((x),(y))
#define mset(m,v) memset(m,v,sizeof(m))
#define INF 0x3f3f3f3f
#define INFL 0x3f3f3f3f3f3f3f3fLL
using namespace std;
typedef vector<int> vi; typedef pair<int,int> pii; typedef vector<pair<int,int> > vpii; typedef long long ll;
template<typename T, typename U> inline void amin(T &x, U y) { if(y < x) x = y; }
template<typename T, typename U> inline void amax(T &x, U y) { if(x < y) x = y; }
#ifndef MY_LOCAL_RUN
#undef assert
#define assert(e)
#endif
template<int MOD>
struct ModInt {
static const int Mod = MOD;
unsigned x;
ModInt(): x(0) { }
ModInt(signed sig) { int sigt = sig % MOD; if(sigt < 0) sigt += MOD; x = sigt; }
ModInt(signed long long sig) { int sigt = sig % MOD; if(sigt < 0) sigt += MOD; x = sigt; }
int get() const { return (int)x; }
ModInt &operator+=(ModInt that) { if((x += that.x) >= MOD) x -= MOD; return *this; }
ModInt &operator-=(ModInt that) { if((x += MOD - that.x) >= MOD) x -= MOD; return *this; }
ModInt &operator*=(ModInt that) { x = (unsigned long long)x * that.x % MOD; return *this; }
ModInt operator+(ModInt that) const { return ModInt(*this) += that; }
ModInt operator-(ModInt that) const { return ModInt(*this) -= that; }
ModInt operator*(ModInt that) const { return ModInt(*this) *= that; }
};
typedef ModInt<1000000007> mint;
struct HeavyLightDecomposition {
vector<int> colors, positions; //Vertex -> Color, Vertex -> Offset
vector<int> lengths, parents, branches; //Color -> Int, Color -> Color, Color -> Offset
vector<int> parentnodes, depths; //Vertex -> Vertex, Vertex -> Int
//vector<FenwickTree>とかを避けて1次元にしたい時に使う
//sortednodesの[lefts[v], rights[v])はvのsubtreeとなっている
vector<int> sortednodes, offsets; //Index -> Vertex, Color -> Index
vector<int> lefts, rights; //Vertex -> Index
struct BuildDFSState {
int i, len, parent;
BuildDFSState() { }
BuildDFSState(int i_, int l, int p): i(i_), len(l), parent(p) { }
};
//両方の辺があってもいいし、親から子への辺だけでもよい
void build(const vector<vi> &g, int root) {
int n = g.size();
colors.assign(n, -1); positions.assign(n, -1);
lengths.clear(); parents.clear(); branches.clear();
parentnodes.assign(n, -1); depths.assign(n, -1);
sortednodes.clear(); offsets.clear();
lefts.assign(n, -1); rights.assign(n, -1);
vector<int> subtreesizes;
measure(g, root, subtreesizes);
typedef BuildDFSState State;
depths[root] = 0;
vector<State> s;
s.push_back(State(root, 0, -1));
while(!s.empty()) {
State t = s.back(); s.pop_back();
int i = t.i, len = t.len;
int index = sortednodes.size();
int color = lengths.size();
if(t.parent == -3) {
rights[i] = index;
continue;
}
if(t.parent != -2) {
assert(parents.size() == color);
parents.push_back(t.parent);
branches.push_back(len);
offsets.push_back(index);
len = 0;
}
colors[i] = color;
positions[i] = len;
lefts[i] = index;
sortednodes.push_back(i);
int maxsize = -1, maxj = -1;
each(j, g[i]) if(colors[*j] == -1) {
if(maxsize < subtreesizes[*j]) {
maxsize = subtreesizes[*j];
maxj = *j;
}
parentnodes[*j] = i;
depths[*j] = depths[i] + 1;
}
s.push_back(State(i, -1, -3));
if(maxj == -1) {
lengths.push_back(len + 1);
}else {
each(j, g[i]) if(colors[*j] == -1 && *j != maxj)
s.push_back(State(*j, len, color));
s.push_back(State(maxj, len + 1, -2));
}
}
}
void get(int v, int &c, int &p) const {
c = colors[v]; p = positions[v];
}
bool go_up(int &c, int &p) const {
p = branches[c]; c = parents[c];
return c != -1;
}
inline const int *nodesBegin(int c) const { return &sortednodes[0] + offsets[c]; }
inline const int *nodesEnd(int c) const { return &sortednodes[0] + (c+1 == offsets.size() ? sortednodes.size() : offsets[c+1]); }
private:
void measure(const vector<vi> &g, int root, vector<int> &out_subtreesizes) const {
out_subtreesizes.assign(g.size(), -1);
vector<int> s;
s.push_back(root);
while(!s.empty()) {
int i = s.back(); s.pop_back();
if(out_subtreesizes[i] == -2) {
int s = 1;
each(j, g[i]) if(out_subtreesizes[*j] != -2)
s += out_subtreesizes[*j];
out_subtreesizes[i] = s;
}else {
s.push_back(i);
each(j, g[i]) if(out_subtreesizes[*j] == -1)
s.push_back(*j);
out_subtreesizes[i] = -2;
}
}
}
};
struct Val {
mint val, coef;
explicit Val(): val(), coef() { }
explicit Val(mint val_, mint coef_): val(val_), coef(coef_) { }
};
// Aggregate over a set of vertices: the sum of their values and the sum of
// their coefficients.  Forms a commutative monoid under +.  reverse() is the
// identity because this aggregate does not depend on path orientation.
struct PathSum {
	mint sum, coefsum;
	PathSum(): sum(), coefsum() { }
	explicit PathSum(const Val &val): sum(val.val), coefsum(val.coef) { }
	PathSum &operator+=(const PathSum &rhs) {
		sum += rhs.sum;
		coefsum += rhs.coefsum;
		return *this;
	}
	PathSum operator+(const PathSum &rhs) const {
		PathSum res = *this;
		res += rhs;
		return res;
	}
	// Orientation flip is a no-op for this symmetric aggregate.
	PathSum reverse() const { return *this; }
};
struct PathAdd {
mint add;
PathAdd() { }
explicit PathAdd(mint add_): add(add_) { }
PathAdd &operator+=(const PathAdd &that) { add += that.add; return *this; }
void addToVal(Val &val) const { val.val += add * val.coef; }
void addToSum(PathSum &sum) const { sum.sum += add * sum.coefsum; }
};
// One vertex of a "path tree": a binary search tree built over a single heavy
// path, keyed by offset within the path.  Carries the vertex's own value, the
// aggregate PathSum of its whole BST subtree (a contiguous subpath), and a
// lazily propagated pending PathAdd covering that subtree.
struct Node {
Node *parent;
Node *pathLeft, *pathRight; // BST children within the heavy path
Val val;
PathSum pathSum; // aggregate of this node's BST subtree, excluding pathAdd
PathAdd pathAdd; // pending add for the whole subtree, not yet pushed down
Node(): parent(NULL), pathLeft(NULL), pathRight(NULL),
val(), pathSum(), pathAdd() { }
bool isPathRoot() const { return !parent; }
// Subtree aggregate of p with p's own pending add applied (NULL -> identity).
static PathSum getPathSum(const Node *p) {
if(!p) return PathSum();
PathSum pathSum = p->pathSum;
p->pathAdd.addToSum(pathSum);
return pathSum;
}
// Like getPathSum but returns only the value component as a bare mint.
static mint getPathSum2(const Node *p) {
if(!p) return mint();
return p->pathSum.sum + p->pathAdd.add * p->pathSum.coefsum;
}
// Queue `add` lazily on p's whole subtree (no-op for NULL).
static void addToPath(Node *p, const PathAdd &add) {
if(p != NULL)
p->pathAdd += add;
}
PathSum getSingletonPathSum() const {
return PathSum(val);
}
// Push the pending add one level down to the children and fold it into this
// node's own val and pathSum.
void propagate() {
if(pathAdd.add.x != 0) {
if(pathLeft != NULL) pathLeft->pathAdd += pathAdd;
if(pathRight != NULL) pathRight->pathAdd += pathAdd;
pathAdd.addToVal(val);
pathAdd.addToSum(pathSum);
pathAdd = PathAdd();
}
}
// Recompute the full aggregate (sum and coefsum) from the children.
void update() {
pathSum = getPathSum(pathLeft) + getSingletonPathSum() + getPathSum(pathRight);
}
// Cheaper variant recomputing only pathSum.sum; coefsum is unaffected by
// PathAdds, so it stays valid across add-only updates.
void update2() {
pathSum.sum = val.val;
if(pathLeft) pathSum.sum += pathLeft->pathSum.sum + pathLeft->pathSum.coefsum * pathLeft->pathAdd.add;
if(pathRight) pathSum.sum += pathRight->pathSum.sum + pathRight->pathSum.coefsum * pathRight->pathAdd.add;
}
// Debug-only invariant check: pathSum must agree with a fresh update()
// (compared bytewise, so all fields must match exactly).
bool debugCheckUpdated() const {
Node tmp = *this;
tmp.update();
return memcmp(this, &tmp, sizeof(Node)) == 0;
}
};
// Path add / path sum on a tree: heavy-light decomposition where each heavy
// path carries a weight-biased BST ("path tree") over its vertices.  The BSTs
// are biased by subtree size, which keeps the total work per root-to-leaf
// traversal low.  Only path (not subtree) operations are supported.
struct BiasedHeavyLightDecompositionPathOnly {
vector<Node> nodes;
vector<int> pathRoots; //for each node: the node at the root of its heavy path's path tree
vector<int> subpathLeft, subpathRight; //offset interval [left, right] of the subpath covered by a node's path-tree subtree
vector<int> globalOrder;
HeavyLightDecomposition hld;
void build(const vector<vi> &g, int root, const vector<Val> &initVal) {
hld.build(g, root);
int n = g.size();
nodes.assign(n, Node());
// Subtree sizes via one backward sweep over the preorder.
vector<int> subtreeSize(n, 1);
for(int ix = n-1; ix > 0; -- ix) {
int i = hld.sortednodes[ix], p = hld.parentnodes[i];
subtreeSize[p] += subtreeSize[i];
}
// childrenSize[v] = subtree size of v minus its heavy child's subtree:
// the weight used to bias v's position in its heavy path's BST.
vector<int> childrenSize = subtreeSize;
for(int ix = 1; ix < n; ++ ix) {
int i = hld.sortednodes[ix], p = hld.parentnodes[i];
if(hld.colors[i] == hld.colors[p])
childrenSize[p] -= subtreeSize[i];
}
buildPathTrees(childrenSize);
getGlobalOrder(root);
subpathLeft.resize(n);
subpathRight.resize(n);
// globalOrder lists each path tree top-down, so walking it backwards
// visits children before parents: aggregates and covered-offset intervals
// can be computed bottom-up in one pass.
for(int ix = n-1; ix >= 0; -- ix) {
int i = globalOrder[ix];
Node *a = &nodes[i];
a->val = initVal[i];
a->update();
subpathLeft[i] = a->pathLeft == NULL ? hld.positions[i] : subpathLeft[getNodeIndex(a->pathLeft)];
subpathRight[i] = a->pathRight == NULL ? hld.positions[i] : subpathRight[getNodeIndex(a->pathRight)];
}
}
// Read vertex x's value (forces pending adds above x down first).
Val getVal(int x) {
propagatePath(&nodes[x]);
return nodes[x].val;
}
// Overwrite vertex x's value and fix aggregates up to the path-tree root.
void setVal(int x, const Val &val) {
propagatePath(&nodes[x]);
nodes[x].val = val;
updatePath(&nodes[x]);
}
//node ancestor -> node decendant
//ancestor must be an ancestor of decendant in the tree
mint sumDecendingPath(int ancestor, int decendant) {
Node *a = &nodes[decendant];
int ancestorColor = hld.colors[ancestor];
mint sum;
// Climb heavy paths until reaching the ancestor's path, summing each
// traversed path from its head down to the entry vertex.
while(a != NULL && hld.colors[getNodeIndex(a)] != ancestorColor) {
sum += sumHeavyPathFromHead(a);
a = goUpToParentPath(a);
}
assert(a != NULL);
sum += sumHeavyPath(&nodes[ancestor], a);
return sum;
}
// Same traversal as sumDecendingPath, but applying `add` instead of summing.
void addToDecendingPath(int ancestor, int decendant, const PathAdd &add) {
Node *a = &nodes[decendant];
int ancestorColor = hld.colors[ancestor];
while(a != NULL && hld.colors[getNodeIndex(a)] != ancestorColor) {
addToHeavyPathFromHead(a, add);
a = goUpToParentPath(a);
}
assert(a != NULL);
addToHeavyPath(&nodes[ancestor], a, add);
}
//x -> y
// Sum over the full path x..y: split at the LCA z; the second leg starts at
// z's child towards y so z itself is counted exactly once.
mint sumPath(int x, int y) {
int z = lowestCommonAncestor(x, y);
mint sum = sumDecendingPath(z, x);
if(y != z) {
int child = findAncestorChild(z, y);
assert(child != -1);
sum += sumDecendingPath(child, y);
}
return sum;
}
//x -> y
// Add over the full path x..y, split at the LCA like sumPath.
void addToPath(int x, int y, const PathAdd &add) {
int z = lowestCommonAncestor(x, y);
addToDecendingPath(z, x, add);
if(y != z) {
int child = findAncestorChild(z, y);
assert(child != -1);
addToDecendingPath(child, y, add);
}
}
//kept public here because the methods above use it
// Standard HLD LCA: lift whichever current path has the deeper head until
// both sides share a color, then take the smaller offset.
int lowestCommonAncestor(int x, int y) const {
int cx, px, cy, py;
hld.get(x, cx, px);
hld.get(y, cy, py);
while(cx != cy) {
if(hld.depths[*hld.nodesBegin(cx)] < hld.depths[*hld.nodesBegin(cy)])
hld.go_up(cy, py);
else
hld.go_up(cx, px);
}
return hld.nodesBegin(cx)[min(px, py)];
}
//Returns the direct child of `ancestor` that is an ancestor of `decendant`.
//Returns -1 if no such child exists.
int findAncestorChild(int ancestor, int decendant) const {
int ac, ap;
int c, p;
hld.get(ancestor, ac, ap);
hld.get(decendant, c, p);
int prevc = -1;
while(c != ac) {
prevc = c;
if(!hld.go_up(c, p)) return -1;
}
if(prevc == -1 || ap != p) {
// Both on ancestor's path (or branch point elsewhere): the child is the
// next vertex along the heavy path, if decendant lies strictly below.
if(ap >= p)
return -1;
else
return hld.nodesBegin(ac)[ap + 1];
}else {
// decendant branched off exactly at ancestor: the child is the head of
// the last path climbed through.
return hld.nodesBegin(prevc)[0];
}
}
private:
// Node* -> vertex index, via pointer arithmetic into the nodes array.
int getNodeIndex(const Node *a) const {
return static_cast<int>(a - &nodes[0]);
}
// The vertex just above the head of a's heavy path, or NULL at the root path.
Node *goUpToParentPath(const Node *a) {
int c, p;
hld.get(getNodeIndex(a), c, p);
if(!hld.go_up(c, p))
return NULL;
else
return &nodes[hld.nodesBegin(c)[p]];
}
// Push pending adds down the BST from the root to a, top-down.  The first
// loop reverses the parent pointers along a's root chain; the second walks
// back down, restoring each pointer and calling propagate() in root-to-a
// order.  No extra memory, O(depth) time.
void propagatePath(Node *a) {
Node *r = a, *q = a->parent;
while(q != NULL) {
Node *p = q;
q = p->parent;
p->parent = r;
r = p;
}
while(r != a) {
Node *c = r->parent;
r->parent = q;
q = r;
r->propagate();
r = c;
}
a->propagate();
}
// Recompute full aggregates from a up to its path-tree root.
void updatePath(Node *a) {
while(a != NULL) {
a->update();
a = a->parent;
}
}
// As updatePath but only fixes pathSum.sum (see Node::update2).
void updatePath2(Node *a) {
while(a != NULL) {
a->update2();
a = a->parent;
}
}
// Sum of the subpath from the path head (offset 0) through a, inclusive:
// a standard BST prefix sum, counting a node and its left subtree whenever
// we arrive from a right child.
mint sumHeavyPathFromHead(Node *a) {
propagatePath(a);
mint sum;
while(1) {
sum += Node::getPathSum2(a->pathLeft);
sum += a->val.val;
while(a->parent != NULL && a->parent->pathLeft == a)
a = a->parent;
if(a->parent == NULL)
break;
a = a->parent;
}
return sum;
}
// Sum of the contiguous subpath [l, r] within one heavy path; callers pass
// l at an offset <= r's.  Works via the BST LCA: a suffix walk up the left
// arm plus a prefix walk up the right arm, plus the LCA itself.
mint sumHeavyPath(Node *l, Node *r) {
propagatePath(l);
propagatePath(r);
const Node *lca = findLowestCommonAncestorOnPathTree(l, r);
assert(lca != NULL);
mint sum;
while(l != lca) {
sum += l->val.val;
sum += Node::getPathSum2(l->pathRight);
while(l->parent != lca && l->parent->pathRight == l)
l = l->parent;
l = l->parent;
}
while(r != lca) {
sum += Node::getPathSum2(r->pathLeft);
sum += r->val.val;
while(r->parent != lca && r->parent->pathLeft == r)
r = r->parent;
r = r->parent;
}
assert((l == lca || l == lca->pathLeft) && (r == lca || r == lca->pathRight));
sum += lca->val.val;
return sum;
}
// Add over the prefix [head, a]: mirrors sumHeavyPathFromHead but queues the
// add lazily on whole subtrees, then repairs sums upward with updatePath2
// (no downward propagation needed for add-only changes).
void addToHeavyPathFromHead(Node *a, const PathAdd &add) {
Node *orgA = a;
// propagatePath(a);
while(1) {
Node::addToPath(a->pathLeft, add);
add.addToVal(a->val);
while(a->parent != NULL && a->parent->pathLeft == a)
a = a->parent;
if(a->parent == NULL)
break;
a = a->parent;
}
updatePath2(orgA);
}
// Add over the subpath [l, r]: mirrors sumHeavyPath with lazy subtree adds,
// then repairs sums upward from both starting points.
void addToHeavyPath(Node *l, Node *r, const PathAdd &add) {
Node *orgL = l, *orgR = r;
// propagatePath(l);
// propagatePath(r);
Node *lca = findLowestCommonAncestorOnPathTree(l, r);
assert(lca != NULL);
while(l != lca) {
Node::addToPath(l->pathRight, add);
add.addToVal(l->val);
while(l->parent != lca && l->parent->pathRight == l)
l = l->parent;
l = l->parent;
}
while(r != lca) {
Node::addToPath(r->pathLeft, add);
add.addToVal(r->val);
while(r->parent != lca && r->parent->pathLeft == r)
r = r->parent;
r = r->parent;
}
assert((l == lca || l == lca->pathLeft) && (r == lca || r == lca->pathRight));
add.addToVal(lca->val);
updatePath2(orgL);
updatePath2(orgR);
}
//assumes l and r belong to the same path tree
// The BST LCA is the deepest node whose covered offset interval contains
// both positions; found by climbing from the leftmost of the two.
Node *findLowestCommonAncestorOnPathTree(Node *l, Node *r) const {
int lPos = hld.positions[getNodeIndex(l)];
int rPos = hld.positions[getNodeIndex(r)];
if(lPos > rPos) {
swap(l, r);
swap(lPos, rPos);
}
Node *a = l;
while(1) {
if(rPos <= subpathRight[getNodeIndex(a)])
return a;
a = a->parent;
}
}
// Build one biased BST per heavy path, weighted by `sizes` (light-subtree
// weight per vertex), and wire up parent/child pointers.
void buildPathTrees(const vector<int> &sizes) {
vector<int> weights, childL, childR;
pathRoots.resize(nodes.size());
int C = hld.lengths.size();
for(int c = 0; c < C; ++ c) {
int len = hld.lengths[c];
const int *path = hld.nodesBegin(c);
weights.resize(len);
for(int j = 0; j < len; ++ j)
weights[j] = sizes[path[j]];
int rootj = makeBiasedBinarySearchTree(weights, childL, childR);
int rootNode = path[rootj];
for(int j = 0; j < len; ++ j)
pathRoots[path[j]] = rootNode;
nodes[rootNode].parent = NULL;
for(int j = 0; j < len; ++ j) {
Node *a = &nodes[path[j]];
Node *l = childL[j] == -1 ? NULL : &nodes[path[childL[j]]];
Node *r = childR[j] == -1 ? NULL : &nodes[path[childR[j]]];
if((a->pathLeft = l) != NULL)
l->parent = a;
if((a->pathRight = r) != NULL)
r->parent = a;
}
}
}
//weights is destroyed (overwritten in place with its prefix sums)
// Returns the root offset; children go to resL/resR (-1 = none).
int makeBiasedBinarySearchTree(vector<int> &weights, vector<int> &resL, vector<int> &resR) {
int n = weights.size();
weights.resize(n + 1);
int sum = 0;
for(int i = 0; i < n; ++ i) {
int w = weights[i];
weights[i] = sum;
sum += w;
}
weights[n] = sum;
resL.resize(n);
resR.resize(n);
return makeBiasedBinarySearchTreeRec(-1, 0, n, weights, resL, resR);
}
//With a doubling (galloping) binary search the per-split cost can be kept at
//log(size of the smaller side), making the whole construction O(n); that is
//not done here.  Even this plain binary search rarely costs much in practice,
//since it is not called with adversarial weight distributions.
// Picks as root the weight-median element of [i, j), recurses on both sides.
// (The first parameter p is never read.)
int makeBiasedBinarySearchTreeRec(int p, int i, int j, const vector<int> &prefixSums, vector<int> &resL, vector<int> &resR) {
if(i == j)
return -1;
//prefixSums[mid+1] - prefixSums[i] >= prefixSums[j] - prefixSums[mid]
//prefixSums[mid] + prefixSums[mid+1] >= prefixSums[i] + prefixSums[j]
int mid;
if(i + 1 == j) {
mid = i;
}else {
int t = prefixSums[i] + prefixSums[j];
int l = i, u = j-1;
while(u - l > 0) {
int m = (l + u) / 2;
if(prefixSums[m] + prefixSums[m+1] >= t)
u = m;
else
l = m + 1;
}
mid = u;
}
assert(mid < j);
resL[mid] = makeBiasedBinarySearchTreeRec(mid * 2 + 0, i, mid, prefixSums, resL, resR);
resR[mid] = makeBiasedBinarySearchTreeRec(mid * 2 + 1, mid + 1, j, prefixSums, resL, resR);
return mid;
}
// Collect all vertices, path tree by path tree, each tree listed top-down
// (every node before its BST children).  (globalRoot is never read.)
void getGlobalOrder(int globalRoot) {
globalOrder.clear();
globalOrder.reserve(nodes.size());
vector<const Node *> stk;
int C = hld.lengths.size();
for(int c = 0; c < C; ++ c) {
stk.push_back(&nodes[pathRoots[hld.nodesBegin(c)[0]]]);
while(!stk.empty()) {
const Node *a = stk.back(); stk.pop_back();
if(a == NULL) continue;
globalOrder.push_back(getNodeIndex(a));
stk.push_back(a->pathLeft);
stk.push_back(a->pathRight);
}
}
assert(globalOrder.size() == nodes.size());
}
};
bool naivegetpath(int i, int p, int t, const vector<vi> &g, vi &path) {
bool r = false;
if(i == t) {
r = true;
}else {
each(j, g[i]) if(*j != p)
r = r || naivegetpath(*j, i, t, g, path);
}
if(r)
path.push_back(i);
return r;
}
#ifdef MY_LOCAL_RUN
#include "C:\Dropbox\backup\implements\Util\MyAssert.hpp"
#undef assert
#define assert my_assert
#define TEST
#endif
// Entry point.  Processes datasets until EOF.  Each dataset:
//   N, S[1..N] (initial values), C[1..N] (coefficients), N-1 tree edges,
//   Q queries; query "0 X Y Z" adds Z*C[v] to every vertex v on the path
//   X..Y, query "1 X Y" prints the sum of values on the path X..Y mod 1e9+7.
// When TEST is defined, inputs are randomized and each answer is checked
// against a naive per-query recomputation instead of being printed.
// All scanf return values are checked (fixes -Wunused-result warnings);
// malformed/truncated input aborts with exit code 1 rather than proceeding
// on indeterminate values.
int main() {
	int N;
	for(int iii = 0; ; ++ iii) {
#ifndef TEST
		if(!~scanf("%d", &N)) break; // EOF ends the dataset loop
#else
		if(iii % 100 == 0) cerr << iii << "\r", cerr.flush();
		N=rand()%10+1;
#endif
		vector<int> S(N), C(N);
		rep(i, N) {
#ifndef TEST
			if(scanf("%d", &S[i]) != 1) return 1;
#else
			S[i]=rand()%100;
#endif
		}
		rep(i, N) {
#ifndef TEST
			if(scanf("%d", &C[i]) != 1) return 1;
#else
			C[i]=rand()%100;
#endif
		}
		vector<Val> initVals(N);
		rep(i, N)
			initVals[i] = Val(S[i], C[i]);
		// Read the tree; input is 1-based, vertices are stored 0-based.
		vector<vi> g(N);
		rep(i, N-1) {
			int A, B;
#ifndef TEST
			if(scanf("%d%d", &A, &B) != 2) return 1;
			-- A, -- B;
#else
			A=i+1,B=rand()%(i+1);
#endif
			g[A].push_back(B);
			g[B].push_back(A);
		}
		BiasedHeavyLightDecompositionPathOnly bhld;
		bhld.build(g, 0, initVals);
#ifdef TEST
		vector<mint> naiveval(all(S));
#endif
		int Q;
#ifndef TEST
		if(scanf("%d", &Q) != 1) return 1;
#else
		Q=rand()%100+1;
#endif
		rep(ii, Q) {
			int ty;
#ifndef TEST
			if(scanf("%d", &ty) != 1) return 1;
#else
			ty=rand()%2;
#endif
			if(ty == 0) {
				// Update: add Z (scaled by each vertex's coefficient) on path X..Y.
				int X, Y, Z;
#ifndef TEST
				if(scanf("%d%d%d", &X, &Y, &Z) != 3) return 1;
				-- X, -- Y;
#else
				X=rand()%N,Y=rand()%N,Z=rand()%100;
#endif
				bhld.addToPath(X, Y, PathAdd(Z));
#ifdef TEST
				vi naivepath; naivegetpath(X, -1, Y, g, naivepath);
				each(j, naivepath) naiveval[*j] += mint(Z) * C[*j];
#endif
			}else {
				// Query: sum of current values on path X..Y.
				int X, Y;
#ifndef TEST
				if(scanf("%d%d", &X, &Y) != 2) return 1;
				-- X, -- Y;
#else
				X=rand()%N,Y=rand()%N;
#endif
				mint ans = bhld.sumPath(X, Y);
#ifndef TEST
				printf("%d\n", ans.get());
#else
				vi naivepath; naivegetpath(X, -1, Y, g, naivepath);
				mint naivesum, naivecoefsum;
				each(j, naivepath) naivecoefsum += C[*j], naivesum += naiveval[*j];
				if(ans.get() != naivesum.get())
					cerr << ans.get() << " != " << naivesum.get() << endl;
#endif
			}
#ifdef TEST
			// rep(i, N) bhld.getVal(i);
			// rep(i, N) bhld.setVal(i, bhld.getVal(i));
			rep(i, N) assert(bhld.nodes[i].debugCheckUpdated());
			// rep(i, N) assert(bhld.getVal(i).val.x == naiveval[i].x);
#endif
		}
	}
	return 0;
}
anta