結果

問題 No.235 めぐるはめぐる (5)
ユーザー antaanta
提出日時 2015-08-26 00:41:38
言語 C++11
(gcc 11.4.0)
結果
AC  
実行時間 596 ms / 10,000 ms
コード長 18,229 bytes
コンパイル時間 1,356 ms
コンパイル使用メモリ 113,888 KB
実行使用メモリ 43,336 KB
最終ジャッジ日時 2024-07-18 14:38:09
合計ジャッジ時間 4,439 ms
ジャッジサーバーID
(参考情報)
judge3 / judge2
このコードへのチャレンジ
(要ログイン)

テストケース

テストケース表示
入力 結果 実行時間
実行使用メモリ
testcase_00 AC 596 ms
42,356 KB
testcase_01 AC 361 ms
43,336 KB
testcase_02 AC 458 ms
42,452 KB
権限があれば一括ダウンロードができます
コンパイルメッセージ
main.cpp: In function ‘int main()’:
main.cpp:607:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
  607 |                         scanf("%d", &S[i]);
      |                         ~~~~~^~~~~~~~~~~~~
main.cpp:614:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
  614 |                         scanf("%d", &C[i]);
      |                         ~~~~~^~~~~~~~~~~~~
main.cpp:626:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
  626 |                         scanf("%d%d", &A, &B), -- A, -- B;
      |                         ~~~~~^~~~~~~~~~~~~~~~
main.cpp:641:22: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
  641 |                 scanf("%d", &Q);
      |                 ~~~~~^~~~~~~~~~
main.cpp:648:30: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
  648 |                         scanf("%d", &ty);
      |                         ~~~~~^~~~~~~~~~~
main.cpp:655:38: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
  655 |                                 scanf("%d%d%d", &X, &Y, &Z), -- X, -- Y;
      |                                 ~~~~~^~~~~~~~~~~~~~~~~~~~~~
main.cpp:669:38: warning: ignoring return value of ‘int scanf(const char*, ...)’ declared with attribute ‘warn_unused_result’ [-Wunused-result]
  669 |                                 scanf("%d%d", &X, &Y), -- X, -- Y;
      |                                 ~~~~~^~~~~~~~~~~~~~~~

ソースコード

diff #

#include <string>
#include <vector>
#include <algorithm>
#include <numeric>
#include <set>
#include <map>
#include <queue>
#include <iostream>
#include <sstream>
#include <cstdio>
#include <cmath>
#include <ctime>
#include <cstring>
#include <cctype>
#include <cassert>
#include <limits>
#include <functional>
#define rep(i,n) for(int (i)=0;(i)<(int)(n);++(i))
#define rer(i,l,u) for(int (i)=(int)(l);(i)<=(int)(u);++(i))
#define reu(i,l,u) for(int (i)=(int)(l);(i)<(int)(u);++(i))
#if defined(_MSC_VER) || __cplusplus > 199711L
#define aut(r,v) auto r = (v)
#else
#define aut(r,v) __typeof(v) r = (v)
#endif
#define each(it,o) for(aut(it, (o).begin()); it != (o).end(); ++ it)
#define all(o) (o).begin(), (o).end()
#define pb(x) push_back(x)
#define mp(x,y) make_pair((x),(y))
#define mset(m,v) memset(m,v,sizeof(m))
#define INF 0x3f3f3f3f
#define INFL 0x3f3f3f3f3f3f3f3fLL
using namespace std;
typedef vector<int> vi; typedef pair<int,int> pii; typedef vector<pair<int,int> > vpii; typedef long long ll;
template<typename T, typename U> inline void amin(T &x, U y) { if(y < x) x = y; }
template<typename T, typename U> inline void amax(T &x, U y) { if(x < y) x = y; }

#ifndef MY_LOCAL_RUN
#undef assert
#define assert(e) 
#endif

// Modular arithmetic in Z/MOD. The representative is kept canonical in
// [0, MOD) at all times; constructors normalize possibly-negative inputs.
template<int MOD>
struct ModInt {
	static const int Mod = MOD;
	unsigned x;	// canonical residue in [0, MOD)

	ModInt(): x(0) { }
	ModInt(signed sig) {
		int r = sig % MOD;
		if(r < 0) r += MOD;
		x = r;
	}
	ModInt(signed long long sig) {
		int r = (int)(sig % MOD);
		if(r < 0) r += MOD;
		x = r;
	}
	int get() const { return static_cast<int>(x); }

	ModInt &operator+=(ModInt that) {
		x += that.x;
		if(x >= MOD) x -= MOD;
		return *this;
	}
	ModInt &operator-=(ModInt that) {
		x += MOD - that.x;
		if(x >= MOD) x -= MOD;
		return *this;
	}
	ModInt &operator*=(ModInt that) {
		// Widen to 64 bits before the product so it cannot overflow.
		x = (unsigned)((unsigned long long)x * that.x % MOD);
		return *this;
	}

	ModInt operator+(ModInt that) const { ModInt r = *this; r += that; return r; }
	ModInt operator-(ModInt that) const { ModInt r = *this; r -= that; return r; }
	ModInt operator*(ModInt that) const { ModInt r = *this; r *= that; return r; }
};
typedef ModInt<1000000007> mint;

// Schieber–Vishkin static LCA: O(N) preprocessing, O(1) per query.
// Input is a rooted tree described by its preorder sequence and parent array;
// all queries are answered with word-level bit tricks, so N must fit in a
// single machine Word (indices are stored 1-based in an unsigned).
class SchieberVishkinLCA {
public:
	typedef unsigned Word;
	typedef int Vertex;
private:

	// v & (-v): isolates the lowest set bit of v (0 maps to 0).
	static inline Word lowestOneBit(Word v) {
		return v & (~v+1);
	}
	// Smears the highest set bit downward; the final shift yields a mask of
	// all bits strictly below the highest set bit of v.
	static inline Word highestOneBitMask(Word v) {
		v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
		return v >> 1;
	}

	std::vector<Word> indices;			//Vertex -> index (1-based preorder number)
	std::vector<Word> maxHIndices;		//Vertex -> index ("inlabel": the index in v's subtree with the most trailing zeros)
	std::vector<Word> ancestorHeights;	//Vertex -> Word (bitset of inlabel heights seen on the root-to-v path)
	std::vector<Vertex> pathParents;	//index-1 -> Vertex (parent vertex of the head of each inlabel path)
public:

	// preorder: vertices in preorder; parents[v]: parent of v (never read for
	// root, since every loop below starts at ix = 1); root: the tree root.
	void build(const std::vector<Vertex> &preorder, const std::vector<Vertex> &parents, Vertex root) {
		Vertex N = static_cast<Vertex>(preorder.size());

		ancestorHeights.resize(N);
		maxHIndices.resize(N);
		indices.resize(N);
		pathParents.resize(N);

		// Assign each vertex its 1-based preorder index.
		for(Vertex ix = 0; ix < N; ++ ix)
			indices[preorder[ix]] = ix + 1;

		// Bottom-up sweep (reverse preorder): each vertex inherits the subtree
		// index with the deepest lowest set bit — the "inlabel" of its path.
		for(Vertex i = 0; i < N; ++ i)
			maxHIndices[i] = indices[i];
		for(Vertex ix = N-1; ix > 0; -- ix) {
			Vertex v = preorder[ix], parent = parents[v];
			if(lowestOneBit(maxHIndices[parent]) < lowestOneBit(maxHIndices[v]))
				maxHIndices[parent] = maxHIndices[v];
		}

		// Top-down sweep: accumulate the set of inlabel heights (lowest set
		// bits) encountered on the path from the root down to each vertex.
		ancestorHeights[root] = 0;
		for(Vertex ix = 1; ix < N; ++ ix) {
			Vertex v = preorder[ix], parent = parents[v];
			ancestorHeights[v] = ancestorHeights[parent] | lowestOneBit(maxHIndices[v]);
		}

		// For the head of each inlabel path, record the parent vertex so a
		// query can hop from one path to the next in O(1).
		pathParents[0] = root;
		for(Vertex ix = 1; ix < N; ++ ix) {
			Vertex v = preorder[ix], parent = parents[v];
			if(maxHIndices[v] != maxHIndices[parent])
				pathParents[indices[v]-1] = parent;
			else
				pathParents[indices[v]-1] = pathParents[indices[parent]-1];
		}
	}

	// Returns the lowest common ancestor of v and u in O(1).
	Vertex query(Vertex v, Vertex u) const {
		Word Iv = maxHIndices[v], Iu = maxHIndices[u];
		Word hIv = lowestOneBit(Iv), hIu = lowestOneBit(Iu);
		Word hbMask = highestOneBitMask((Iv ^ Iu) | hIv | hIu);
		// j = height of the inlabel path that contains the LCA.
		Word j = lowestOneBit(ancestorHeights[v] & ancestorHeights[u] & ~hbMask);
		// x, y: the ancestors of v resp. u lying on that path.
		Vertex x, y;
		if(j == hIv) x = v;
		else {
			Word kMask = highestOneBitMask(ancestorHeights[v] & (j-1));
			x = pathParents[(indices[v] & ~kMask | (kMask+1))-1];
		}
		if(j == hIu) y = u;
		else {
			Word kMask = highestOneBitMask(ancestorHeights[u] & (j-1));
			y = pathParents[(indices[u] & ~kMask | (kMask+1))-1];
		}
		// The shallower of the two (smaller preorder index) is the LCA.
		return indices[x] < indices[y] ? x : y;
	}
};

// Iterative heavy-light decomposition. Vertices are grouped into "colors"
// (heavy paths, chosen by largest subtree size); `positions` indexes a vertex
// within its path. Built without recursion via an explicit stack.
struct HeavyLightDecomposition {
	vector<int> colors, positions;	//Vertex -> Color, Vertex -> Offset
	vector<int> lengths, parents, branches;	//Color -> Int, Color -> Color, Color -> Offset
	vector<int> parentnodes, depths;	//Vertex -> Vertex, Vertex -> Int
	//Used when you want a single flat 1-D array instead of e.g. vector<FenwickTree>.
	//sortednodes[lefts[v], rights[v]) is exactly the subtree of v.
	vector<int> sortednodes, offsets;	//Index -> Vertex, Color -> Index
	vector<int> lefts, rights;	//Vertex -> Index

	// DFS stack entry. `parent` doubles as a tag: -2 = continue the current
	// heavy path, -3 = post-visit marker (record `rights`), any other value =
	// the parent color for a freshly started path.
	struct BuildDFSState {
		int i, len, parent;
		BuildDFSState() { }
		BuildDFSState(int i_, int l, int p): i(i_), len(l), parent(p) { }
	};

	//The graph may contain both directions of every edge, or only parent-to-child edges.
	void build(const vector<vi> &g, int root) {
		int n = g.size();

		colors.assign(n, -1); positions.assign(n, -1);
		lengths.clear(); parents.clear(); branches.clear();
		parentnodes.assign(n, -1); depths.assign(n, -1);

		sortednodes.clear(); offsets.clear();
		lefts.assign(n, -1); rights.assign(n, -1);

		vector<int> subtreesizes;
		measure(g, root, subtreesizes);

		typedef BuildDFSState State;
		depths[root] = 0;
		vector<State> s;
		s.push_back(State(root, 0, -1));
		while(!s.empty()) {
			State t = s.back(); s.pop_back();
			int i = t.i, len = t.len;
			int index = sortednodes.size();
			int color = lengths.size();

			// Post-visit: the whole subtree of i has been emitted.
			if(t.parent == -3) {
				rights[i] = index;
				continue;
			}

			// Starting a brand-new path (any tag except "continue current").
			if(t.parent != -2) {
				assert(parents.size() == color);
				parents.push_back(t.parent);
				branches.push_back(len);	// offset on the parent path where this one branches off
				offsets.push_back(index);
				len = 0;
			}
			colors[i] = color;
			positions[i] = len;

			lefts[i] = index;
			sortednodes.push_back(i);

			// Pick the heavy child (largest subtree among unvisited neighbors);
			// colors[*j] == -1 filters out the already-colored parent.
			int maxsize = -1, maxj = -1;
			each(j, g[i]) if(colors[*j] == -1) {
				if(maxsize < subtreesizes[*j]) {
					maxsize = subtreesizes[*j];
					maxj = *j;
				}
				parentnodes[*j] = i;
				depths[*j] = depths[i] + 1;
			}
			// Push the post-visit marker first so it pops after all children.
			s.push_back(State(i, -1, -3));
			if(maxj == -1) {
				// Leaf of its path: the path's length is now known.
				lengths.push_back(len + 1);
			}else {
				// Light children start new paths; the heavy child continues this one.
				each(j, g[i]) if(colors[*j] == -1 && *j != maxj)
					s.push_back(State(*j, len, color));
				s.push_back(State(maxj, len + 1, -2));
			}
		}
	}
	
	// Locate vertex v as (color, position) on its path.
	void get(int v, int &c, int &p) const {
		c = colors[v]; p = positions[v];
	}
	// Step from a path to its parent path; returns false at the root path.
	bool go_up(int &c, int &p) const {
		p = branches[c]; c = parents[c];
		return c != -1;
	}

	// Contiguous vertex range of path c inside sortednodes.
	inline const int *nodesBegin(int c) const { return &sortednodes[0] + offsets[c]; }
	inline const int *nodesEnd(int c) const { return &sortednodes[0] + (c+1 == offsets.size() ? sortednodes.size() : offsets[c+1]); }

private:
	// Iterative subtree-size computation. Sentinels in out_subtreesizes:
	// -1 = unvisited, -2 = expanded but size not yet computed.
	void measure(const vector<vi> &g, int root, vector<int> &out_subtreesizes) const {
		out_subtreesizes.assign(g.size(), -1);
		vector<int> s;
		s.push_back(root);
		while(!s.empty()) {
			int i = s.back(); s.pop_back();
			if(out_subtreesizes[i] == -2) {
				// Second visit: children are done. Note: this `s` intentionally
				// shadows the stack; the != -2 test skips i's own parent
				// (still marked -2 while i is being finished).
				int s = 1;
				each(j, g[i]) if(out_subtreesizes[*j] != -2)
					s += out_subtreesizes[*j];
				out_subtreesizes[i] = s;
			}else {
				// First visit: re-push i, then push unvisited neighbors.
				s.push_back(i);
				each(j, g[i]) if(out_subtreesizes[*j] == -1)
					s.push_back(*j);
				out_subtreesizes[i] = -2;
			}
		}
	}
};

// Per-vertex payload: current value and the coefficient that scales path
// additions (an add of Z applies val += Z * coef at this vertex).
struct Val {
	mint val, coef;

	explicit Val(): val(), coef() { }
	explicit Val(mint val_, mint coef_) {
		val = val_;
		coef = coef_;
	}
};

// Aggregate over a set of vertices: total value and total coefficient.
// The coefficient total is what a lazy PathAdd multiplies into the value sum.
struct PathSum {
	mint sum, coefsum;

	PathSum(): sum(), coefsum() { }

	// Aggregate of a single vertex.
	explicit PathSum(const Val &val): sum(val.val), coefsum(val.coef) { }

	PathSum &operator+=(const PathSum &that) {
		coefsum += that.coefsum;
		sum += that.sum;
		return *this;
	}
	PathSum operator+(const PathSum &that) const {
		PathSum result = *this;
		result += that;
		return result;
	}

	// Sums are direction-independent, so reversing a path is the identity.
	PathSum reverse() const {
		return *this;
	}
};

struct PathAdd {
	mint add;
	PathAdd() { }
	explicit PathAdd(mint add_): add(add_) { }
	PathAdd &operator+=(const PathAdd &that) { add += that.add; return *this; }
	void addToVal(Val &val) const { val.val += add * val.coef; }
	void addToSum(PathSum &sum) const { sum.sum += add * sum.coefsum; }
};

// One vertex of the biased path trees: a binary-search-tree node over the
// vertices of one heavy path, with lazy additive propagation.
// `parent` is either the BST parent within the same path, or — for a path
// root — the attachment vertex on the parent path (see isPathRoot()).
struct Node {
	Node *parent;
	Node *pathLeft, *pathRight;

	Val val;		// this vertex's own (value, coefficient)
	PathSum pathSum;	// aggregate of this BST subtree, WITHOUT the pending pathAdd
	PathAdd pathAdd;	// lazy addition pending for this whole BST subtree

	Node(): parent(NULL), pathLeft(NULL), pathRight(NULL),
		val(), pathSum(), pathAdd() { }

	// True when `parent` is a cross-path link rather than a BST edge.
	bool isPathRoot() const { return !parent || (parent->pathLeft != this && parent->pathRight != this); }

	// Subtree aggregate of p with its pending lazy add folded in (NULL -> empty).
	static PathSum getPathSum(const Node *p) {
		if(!p) return PathSum();
		PathSum pathSum = p->pathSum;
		p->pathAdd.addToSum(pathSum);
		return pathSum;
	}

	// Like getPathSum, but returns only the value component.
	static mint getPathSum2(const Node *p) {
		if(!p) return mint();
		return p->pathSum.sum + p->pathAdd.add * p->pathSum.coefsum;
	}

	// Lazily add to p's entire subtree (no-op on NULL).
	static void addToPath(Node *p, const PathAdd &add) {
		if(p != NULL)
			p->pathAdd += add;
	}

	PathSum getSingletonPathSum() const {
		return PathSum(val);
	}

	// Push the pending addition one level down and apply it locally,
	// restoring the invariant that pathSum/val are up to date at this node.
	void propagate() {
		if(pathAdd.add.x != 0) {
			if(pathLeft != NULL) pathLeft->pathAdd += pathAdd;
			if(pathRight != NULL) pathRight->pathAdd += pathAdd;
			pathAdd.addToVal(val);
			pathAdd.addToSum(pathSum);
			pathAdd = PathAdd();
		}
	}

	// Recompute pathSum from children and self (children's lazies folded in).
	void update() {
		pathSum = getPathSum(pathLeft) + getSingletonPathSum() + getPathSum(pathRight);
	}

	// Debug helper: verify pathSum is consistent with the children.
	// NOTE(review): memcmp compares raw bytes including any struct padding —
	// presumably fine because tmp is a member-wise copy of *this, but confirm
	// if this ever misfires on a padded build.
	bool debugCheckUpdated() const {
		Node tmp = *this;
		tmp.update();
		return memcmp(this, &tmp, sizeof(Node)) == 0;
	}

};

// Heavy-light decomposition where each heavy path carries a *biased* binary
// search tree (vertices with heavier hanging subtrees sit near the BST root),
// supporting path-add (val += Z * coef per vertex) and path-sum queries.
struct BiasedHeavyLightDecompositionPathOnly {
	vector<Node> nodes;
	vector<int> pathRoots;	//for each node, the node at the root of the path tree of the path it belongs to
	vector<int> subpathLeft, subpathRight;	//offset interval [left, right] of the subpath this node represents within its path tree
	vector<int> globalOrder;
	HeavyLightDecomposition hld;
	SchieberVishkinLCA lca;

	// Build from adjacency lists g, the root, and per-vertex initial (val, coef).
	void build(const vector<vi> &g, int root, const vector<Val> &initVal) {
		hld.build(g, root);

		int n = g.size();
		nodes.assign(n, Node());

		// Subtree sizes via one reverse sweep over the HLD preorder.
		vector<int> subtreeSize(n, 1);
		for(int ix = n-1; ix > 0; -- ix) {
			int i = hld.sortednodes[ix], p = hld.parentnodes[i];
			subtreeSize[p] += subtreeSize[i];
		}

		// childrenSize[i]: i's subtree minus its heavy child's subtree, i.e.
		// the weight hanging off i — this is the biasing weight per vertex.
		vector<int> childrenSize = subtreeSize;
		for(int ix = 1; ix < n; ++ ix) {
			int i = hld.sortednodes[ix], p = hld.parentnodes[i];
			if(hld.colors[i] == hld.colors[p])
				childrenSize[p] -= subtreeSize[i];
		}

		buildPathTrees(childrenSize);

		getGlobalOrder(root);

		// Initialize bottom-up (reverse global order) so update() always sees
		// already-updated children; also record each node's subpath interval
		// and link every path-tree root to its attachment on the parent path.
		subpathLeft.resize(n);
		subpathRight.resize(n);
		for(int ix = n-1; ix >= 0; -- ix) {
			int i = globalOrder[ix];
			Node *a = &nodes[i];
			a->val = initVal[i];
			a->update();
			subpathLeft[i] = a->pathLeft == NULL ? hld.positions[i] : subpathLeft[getNodeIndex(a->pathLeft)];
			subpathRight[i] = a->pathRight == NULL ? hld.positions[i] : subpathRight[getNodeIndex(a->pathRight)];

			if(pathRoots[i] == i) {
				int c, p;
				hld.get(i, c, p);
				if(hld.go_up(c, p)) {
					assert(a->parent == NULL);
					a->parent = &nodes[hld.nodesBegin(c)[p]];
				}
			}
		}

		lca.build(hld.sortednodes, hld.parentnodes, root);
	}

	//x -> y
	// Sum of val over all vertices on the x..y path (inclusive).
	mint sumPath(int x, int y) {
		// Flush pending lazy adds on both access chains first.
		globalPropagate(&nodes[x]);
		globalPropagate(&nodes[y]);
		int z = lca.query(x, y);
		const Node *ceiling = nodes[pathRoots[z]].parent;
		// Inclusion-exclusion on root-directed chains:
		// (x->ceiling) + (y->ceiling) - 2*(z->ceiling) + z itself.
		mint sum;
		sum += sumPathFromRoot(x, ceiling);
		sum += sumPathFromRoot(y, ceiling);
		sum -= sumPathFromRoot(z, ceiling) * 2;
		sum += nodes[z].val.val;
		return sum;
	}

	// Sum of val from x upward until (excluding) `ceiling`, walking BST left
	// spines and cross-path links. Assumes lazies above x were propagated.
	mint sumPathFromRoot(int x, const Node *ceiling) const {
		const Node *a = &nodes[x];
		mint sum;
		while(a != ceiling) {
			sum += Node::getPathSum2(a->pathLeft);	// everything before a on this subpath
			sum += a->val.val;
			// Climb out of the left spine, then step to the BST parent
			// (or to the parent path when a is a path-tree root).
			while(a->parent != NULL && a->parent->pathLeft == a)
				a = a->parent;
			a = a->parent;
		}
		return sum;
	}

	//x -> y
	// Apply `add` (val += add.add * coef) to every vertex on the x..y path.
	void addToPath(int x, int y, const PathAdd &add) {
		int z = lca.query(x, y);
		const Node *ceiling = nodes[pathRoots[z]].parent;
		addToPathFromRoot(x, add, ceiling);
		addToPathFromRoot(y, add, ceiling);
		add.addToVal(nodes[z].val);
		// The two chains both covered z..ceiling; subtract one copy twice
		// (z itself was handled by the explicit addToVal above).
		addToPathFromRoot(z, PathAdd(add.add * -2), ceiling);
	}

	// Apply `add` to the chain from x up to (excluding) `ceiling`, then
	// refresh cached sums along the climbed chain.
	void addToPathFromRoot(int x, const PathAdd &add, const Node *ceiling) {
		Node *a = &nodes[x];
		while(a != ceiling) {
			Node::addToPath(a->pathLeft, add);	// lazy-tag everything before a
			add.addToVal(a->val);
			while(a->parent != NULL && a->parent->pathLeft == a)
				a = a->parent;
			a = a->parent;
		}
		globalUpdate(&nodes[x]);
	}
private:
	// Index of a node via pointer arithmetic into the contiguous `nodes`.
	int getNodeIndex(const Node *a) const {
		return static_cast<int>(a - &nodes[0]);
	}

	// Node on the parent path to which a's path attaches (NULL at the global root).
	Node *goUpToParentPath(const Node *a) {
		int c, p;
		hld.get(getNodeIndex(a), c, p);
		if(!hld.go_up(c, p))
			return NULL;
		else
			return &nodes[hld.nodesBegin(c)[p]];
	}

	// Push pending lazy adds top-down along the root-to-a chain.
	// The parent chain is first reversed in place (pointer reversal), then
	// walked back down calling propagate(), restoring every parent pointer.
	void globalPropagate(Node *a) {
		Node *r = a, *q = a->parent;
		while(q != NULL) {
			Node *p = q;
			q = p->parent;
			p->parent = r;
			r = p;
		}
		while(r != a) {
			Node *c = r->parent;
			r->parent = q;
			q = r;
			r->propagate();
			r = c;
		}
		a->propagate();
	}

	// Recompute cached sums from a up to the top of its chain.
	void globalUpdate(Node *a) {
		while(a != NULL) {
			a->update();
			a = a->parent;
		}
	}

	// For every heavy path, build a biased BST over its vertices, weighted by
	// `sizes`, and wire up pathLeft/pathRight/parent pointers and pathRoots.
	void buildPathTrees(const vector<int> &sizes) {
		vector<int> weights, childL, childR;
		pathRoots.resize(nodes.size());

		int C = hld.lengths.size();
		for(int c = 0; c < C; ++ c) {
			int len = hld.lengths[c];
			const int *path = hld.nodesBegin(c);
			weights.resize(len);
			for(int j = 0; j < len; ++ j)
				weights[j] = sizes[path[j]];
			int rootj = makeBiasedBinarySearchTree(weights, childL, childR);
			int rootNode = path[rootj];
			for(int j = 0; j < len; ++ j)
				pathRoots[path[j]] = rootNode;

			nodes[rootNode].parent = NULL;
			for(int j = 0; j < len; ++ j) {
				Node *a = &nodes[path[j]];
				Node *l = childL[j] == -1 ? NULL : &nodes[path[childL[j]]];
				Node *r = childR[j] == -1 ? NULL : &nodes[path[childR[j]]];
				if((a->pathLeft = l) != NULL)
					l->parent = a;
				if((a->pathRight = r) != NULL)
					r->parent = a;
			}
		}
	}

	//weights is destroyed (overwritten in place with its prefix sums)
	int makeBiasedBinarySearchTree(vector<int> &weights, vector<int> &resL, vector<int> &resR) {
		int n = weights.size();
		weights.resize(n + 1);
		int sum = 0;
		for(int i = 0; i < n; ++ i) {
			int w = weights[i];
			weights[i] = sum;
			sum += w;
		}
		weights[n] = sum;
		resL.resize(n);
		resR.resize(n);
		return makeBiasedBinarySearchTreeRec(-1, 0, n, weights, resL, resR);
	}

	//With a doubling (galloping) binary search from both ends, each split could cost only
	//log(size of the smaller side), making the whole build O(n). That is not done here;
	//since this is not called with arbitrary weights, the plain binary search below is
	//unlikely to cost much in practice.
	// Picks mid so the two weight halves balance, recurses on both sides.
	// (Parameter p is accepted but never used in the body.)
	int makeBiasedBinarySearchTreeRec(int p, int i, int j, const vector<int> &prefixSums, vector<int> &resL, vector<int> &resR) {
		if(i == j)
			return -1;
		//prefixSums[mid+1] - prefixSums[i] >= prefixSums[j] - prefixSums[mid]
		//prefixSums[mid] + prefixSums[mid+1] >= prefixSums[i] + prefixSums[j]
		int mid;
		if(i + 1 == j) {
			mid = i;
		}else {
			int t = prefixSums[i] + prefixSums[j];
			int l = i, u = j-1;
			while(u - l > 0) {
				int m = (l + u) / 2;
				if(prefixSums[m] + prefixSums[m+1] >= t)
					u = m;
				else
					l = m + 1;
			}
			mid = u;
		}
		assert(mid < j);
		resL[mid] = makeBiasedBinarySearchTreeRec(mid * 2 + 0, i, mid, prefixSums, resL, resR);
		resR[mid] = makeBiasedBinarySearchTreeRec(mid * 2 + 1, mid + 1, j, prefixSums, resL, resR);
		return mid;
	}

	// Preorder over each path tree, concatenated path by path. build() walks
	// this order in reverse so children are initialized before their parents.
	void getGlobalOrder(int globalRoot) {
		globalOrder.clear();
		globalOrder.reserve(nodes.size());
		vector<const Node *> stk;
		int C = hld.lengths.size();
		for(int c = 0; c < C; ++ c) {
			stk.push_back(&nodes[pathRoots[hld.nodesBegin(c)[0]]]);
			while(!stk.empty()) {
				const Node *a = stk.back(); stk.pop_back();
				if(a == NULL) continue;
				globalOrder.push_back(getNodeIndex(a));
			
				stk.push_back(a->pathLeft);
				stk.push_back(a->pathRight);
			}
		}
		assert(globalOrder.size() == nodes.size());
	}

};

// Brute-force path finder used by the TEST harness: DFS from i (parent p)
// toward target t; on success appends the path's vertices to `path` in
// t -> ... -> i order and returns true.
bool naivegetpath(int i, int p, int t, const vector<vi> &g, vi &path) {
	bool found = (i == t);
	if(!found) {
		for(int nxt : g[i]) {
			if(nxt == p) continue;
			// Short-circuit: once found, remaining siblings are not explored.
			found = found || naivegetpath(nxt, i, t, g, path);
		}
	}
	if(found)
		path.push_back(i);
	return found;
}



#ifdef MY_LOCAL_RUN
#include "C:\Dropbox\backup\implements\Util\MyAssert.hpp"
#undef assert
#define assert my_assert
#define TEST
#endif

// yukicoder No.235: tree with per-vertex value S and coefficient C.
// Query 0: for every vertex v on path X..Y do S[v] += Z * C[v].
// Query 1: print the sum of S over path X..Y (mod 1e9+7).
// Reads test cases until EOF. With -DTEST, generates random cases and
// cross-checks the biased HLD against the naive path walker instead.
// All scanf results are now checked: this guards against truncated input
// and silences the -Wunused-result warnings from the judge's compiler.
int main() {
	int N;
	for(int iii = 0; ; ++ iii) {
#ifndef TEST
		if(!~scanf("%d", &N)) break;	// EOF -> done
#else
		if(iii % 100 == 0) cerr << iii << "\r", cerr.flush();
		N=rand()%10+1;
#endif

		vector<int> S(N), C(N);
		rep(i, N) {
#ifndef TEST
			if(scanf("%d", &S[i]) != 1) return 1;
#else
			S[i]=rand()%100;
#endif
		}
		rep(i, N) {
#ifndef TEST
			if(scanf("%d", &C[i]) != 1) return 1;
#else
			C[i]=rand()%100;
#endif
		}
		vector<Val> initVals(N);
		rep(i, N)
			initVals[i] = Val(S[i], C[i]);
		vector<vi> g(N);
		rep(i, N-1) {
			int A, B;
#ifndef TEST
			if(scanf("%d%d", &A, &B) != 2) return 1;
			-- A, -- B;	// 1-based input -> 0-based vertices
#else
			A=i+1,B=rand()%(i+1);
#endif
			g[A].push_back(B);
			g[B].push_back(A);
		}
		BiasedHeavyLightDecompositionPathOnly bhld;
		bhld.build(g, 0, initVals);

#ifdef TEST
		// Reference values maintained naively for cross-checking.
		vector<mint> naiveval(all(S));
#endif
		int Q;
#ifndef TEST
		if(scanf("%d", &Q) != 1) return 1;
#else
		Q=rand()%100+1;
#endif
		rep(ii, Q) {
			int ty;
#ifndef TEST
			if(scanf("%d", &ty) != 1) return 1;
#else
			ty=rand()%2;
#endif
			if(ty == 0) {
				// Type 0: add Z (scaled by each vertex's C) along path X..Y.
				int X, Y, Z;
#ifndef TEST
				if(scanf("%d%d%d", &X, &Y, &Z) != 3) return 1;
				-- X, -- Y;
#else
				X=rand()%N,Y=rand()%N,Z=rand()%100;
#endif

				bhld.addToPath(X, Y, PathAdd(Z));

#ifdef TEST
				vi naivepath; naivegetpath(X, -1, Y, g, naivepath);
				each(j, naivepath) naiveval[*j] += mint(Z) * C[*j];
#endif
			}else {
				// Type 1: sum of values along path X..Y.
				int X, Y;
#ifndef TEST
				if(scanf("%d%d", &X, &Y) != 2) return 1;
				-- X, -- Y;
#else
				X=rand()%N,Y=rand()%N;
#endif
				mint ans = bhld.sumPath(X, Y);
#ifndef TEST
				printf("%d\n", ans.get());
#else

				vi naivepath; naivegetpath(X, -1, Y, g, naivepath);
				mint naivesum, naivecoefsum;
				each(j, naivepath) naivecoefsum += C[*j], naivesum += naiveval[*j];
				if(ans.get() != naivesum.get())
					cerr << ans.get() << " != " << naivesum.get() << endl;
#endif
			}
#ifdef TEST
//			rep(i, N) bhld.getVal(i);
//			rep(i, N) bhld.setVal(i, bhld.getVal(i));
			rep(i, N) assert(bhld.nodes[i].debugCheckUpdated());
//			rep(i, N) assert(bhld.getVal(i).val.x == naiveval[i].x);
#endif
		}
	}
	return 0;
}
0