// https://github.com/akiradeveloper/rust-comp-snippets
#[allow(unused_imports)]
use std::cmp::{max, min, Ordering};
#[allow(unused_imports)]
use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque};
#[allow(unused_imports)]
use std::iter::FromIterator;

/// `chmax!(x, a, b, ...)` — assign `x = max(x, v)` for each listed value.
#[macro_export]
macro_rules! chmax {
    ($x:expr, $($v:expr),+) => {
        $( $x = std::cmp::max($x, $v); )+
    };
}

/// `chmin!(x, a, b, ...)` — assign `x = min(x, v)` for each listed value.
#[macro_export]
macro_rules! chmin {
    ($x:expr, $($v:expr),+) => {
        $( $x = std::cmp::min($x, $v); )+
    };
}

/// Variadic maximum: `max!(a, b, c)`.
#[macro_export]
macro_rules! max {
    ($x:expr) => ($x);
    ($x:expr, $($xs:expr),+) => {
        std::cmp::max($x, max!($($xs),+))
    };
}

/// Variadic minimum: `min!(a, b, c)`.
#[macro_export]
macro_rules! min {
    ($x:expr) => ($x);
    ($x:expr, $($xs:expr),+) => {
        std::cmp::min($x, min!($($xs),+))
    };
}

/// Multi-dimensional vec: `dvec!(init; d1, d2, ...)` builds nested `Vec`s.
#[macro_export]
macro_rules! dvec {
    ($t:expr; $len:expr) => {
        vec![$t; $len]
    };
    ($t:expr; $len:expr, $($rest:expr),*) => {
        vec![dvec!($t; $($rest),*); $len]
    };
}

/// C-style `for` loop: `cfor!(init; cond; step; { body })`.
/// The inner `loop`/`_continue` dance makes `break`/`continue` inside the
/// body behave like they would in a real C `for`.
#[macro_export]
macro_rules! cfor {
    (; $($rest:tt)*) => {
        cfor!((); $($rest)*)
    };
    ($($init:stmt),+; ; $($rest:tt)*) => {
        cfor!($($init),+; !false; $($rest)*)
    };
    ($($init:stmt),+; $cond:expr; ; $body:block) => {
        cfor!{$($init),+; $cond; (); $body}
    };
    ($($init:stmt),+; $cond:expr; $($step:expr),+; $body:block) => {
        {
            $($init;)+
            while $cond {
                let mut _first = true;
                let mut _continue = false;
                loop {
                    if !_first { _continue = true; break }
                    _first = false;
                    $body
                }
                if !_continue { break }
                $($step;)+
            }
        }
    };
}

// main
#[allow(unused_imports)]
use std::io::{stdin, stdout, BufWriter, Write};

/// Competitive-programming input macro.
/// Forms: `input!{source = "…", a: usize, v: [i64; n]}`,
/// `input!{parser = p, …}`, `input!{new_stdin_parser = p, …}`,
/// or plain `input!{…}` which reads from locked stdin.
#[macro_export]
macro_rules! input {
    (source = $s:expr, $($r:tt)*) => {
        let mut parser = Parser::from_str($s);
        input_inner!{parser, $($r)*}
    };
    (parser = $parser:ident, $($r:tt)*) => {
        input_inner!{$parser, $($r)*}
    };
    (new_stdin_parser = $parser:ident, $($r:tt)*) => {
        let stdin = std::io::stdin();
        let reader = std::io::BufReader::new(stdin.lock());
        let mut $parser = Parser::new(reader);
        input_inner!{$parser, $($r)*}
    };
    ($($r:tt)*) => {
        input!{new_stdin_parser = parser, $($r)*}
    };
}

/// Token-munching helper for `input!`: binds `$var = read_value!(…, $t)`
/// for each `name: type` pair.
#[macro_export]
macro_rules! input_inner {
    ($parser:ident) => {};
    ($parser:ident,) => {};
    ($parser:ident, $var:ident : $t:tt $($r:tt)*) => {
        let $var = read_value!($parser, $t);
        input_inner!{$parser $($r)*}
    };
}

/// Reads one value of the requested shape from the parser.
/// Supports tuples `(t1, t2)`, arrays `[t; len]`, `chars` (→ `Vec<char>`),
/// `usize1` (1-origin → 0-origin), and any `FromStr` type.
#[macro_export]
macro_rules! read_value {
    ($parser:ident, ($($t:tt),*)) => {
        ($(read_value!($parser, $t)),*)
    };
    ($parser:ident, [$t:tt; $len:expr]) => {
        (0..$len).map(|_| read_value!($parser, $t)).collect::<Vec<_>>()
    };
    ($parser:ident, chars) => {
        read_value!($parser, String).chars().collect::<Vec<char>>()
    };
    ($parser:ident, usize1) => {
        read_value!($parser, usize) - 1
    };
    ($parser:ident, $t:ty) => {
        $parser.next::<$t>().expect("Parse error")
    };
}

use std::io;
use std::io::BufRead;
use std::str;

/// Whitespace-separated token reader over any `BufRead` source.
/// Bytes <= 0x20 (space, newline, etc.) are treated as separators.
pub struct Parser<R> {
    reader: R,
    buf: Vec<u8>,
    pos: usize,
}

impl Parser<io::Empty> {
    /// Parser over an in-memory string (reader is an empty sink; the whole
    /// string becomes the initial buffer).
    pub fn from_str(s: &str) -> Parser<io::Empty> {
        Parser {
            reader: io::empty(),
            buf: s.as_bytes().to_vec(),
            pos: 0,
        }
    }
}

impl<R: BufRead> Parser<R> {
    pub fn new(reader: R) -> Parser<R> {
        Parser {
            reader,
            buf: vec![],
            pos: 0,
        }
    }

    /// Refill the internal buffer from the reader. Keeps pulling chunks until
    /// a chunk ends on a separator byte (<= 0x20), so a token is never split
    /// across refills; stops on EOF (empty fill_buf).
    pub fn update_buf(&mut self) {
        self.buf.clear();
        self.pos = 0;
        loop {
            let (len, complete) = {
                let buf2 = self.reader.fill_buf().unwrap();
                self.buf.extend_from_slice(buf2);
                let len = buf2.len();
                if len == 0 {
                    break;
                }
                (len, buf2[len - 1] <= 0x20)
            };
            self.reader.consume(len);
            if complete {
                break;
            }
        }
    }

    /// Parse the next whitespace-delimited token as `T`.
    /// NOTE(review): loops forever on exhausted input — callers are expected
    /// to know how many tokens exist (standard for contest I/O).
    pub fn next<T: str::FromStr>(&mut self) -> Result<T, T::Err> {
        loop {
            // Skip leading separators, then scan the token.
            let mut begin = self.pos;
            while begin < self.buf.len() && (self.buf[begin] <= 0x20) {
                begin += 1;
            }
            let mut end = begin;
            while end < self.buf.len() && (self.buf[end] > 0x20) {
                end += 1;
            }
            if begin != self.buf.len() {
                self.pos = end;
                return str::from_utf8(&self.buf[begin..end]).unwrap().parse::<T>();
            } else {
                self.update_buf();
            }
        }
    }
}

/// `debug!(a, b)` — eprint each expression as `name = value, `.
#[allow(unused_macros)]
macro_rules! debug {
    ($($a:expr),*) => {
        eprintln!(concat!($(stringify!($a), " = {:?}, "),*), $($a),*);
    }
}

// https://github.com/hatoo/competitive-rust-snippets
const BIG_STACK_SIZE: bool = true;

#[allow(dead_code)]
fn main() {
    use std::thread;
    // Deep recursion (DFS on large trees) needs a bigger stack than the
    // default main-thread stack on some judges; run solve() in a worker.
    if BIG_STACK_SIZE {
        thread::Builder::new()
            .stack_size(32 * 1024 * 1024)
            .name("solve".into())
            .spawn(solve)
            .unwrap()
            .join()
            .unwrap();
    } else {
        solve();
    }
}

fn solve() {
    let out = stdout();
    let mut out = BufWriter::new(out.lock());
    input! {
        n: usize, q: usize,
        ab: [(usize, usize); n - 1],
        px: [(usize, u64); q],
    }
    let mut t = HLDecomposition::new(n);
    for (a, b) in ab {
        t.connect(a - 1, b - 1);
    }
    t.build(0);
    // Each query adds x to every vertex in p's subtree; the running answer
    // therefore grows by subtree_size(p) * x per query.
    let mut sum = 0;
    for (p, x) in px {
        let m = t.subcnt[p - 1];
        sum += m as u64 * x;
        writeln!(out, "{}", sum).unwrap();
    }
}

/// Heavy-light decomposition: splits the tree into a set of heavy paths.
/// Laying the tree out linearly in "virtual id" (vid) order makes
/// array-based structures (e.g. segment trees) applicable to tree paths.
/// Vertices and edges are assigned vids; an edge is identified by the vid
/// of its child endpoint. A query (u, v) is answered by enumerating the
/// vid ranges covering the tree path between u and v.
///
/// Construction: O(N)
struct HLDecomposition {
    n: usize,
    g: Vec<Vec<usize>>,
    // subcnt[v] = size of the subtree rooted at v (set by build()).
    subcnt: Vec<usize>,
    depth: Vec<usize>,
    pub par: Vec<Option<usize>>,
    // heavy_next[v] = heavy child of v, if any.
    heavy_next: Vec<Option<usize>>,
    // heavy_head[v] = topmost vertex of v's heavy path.
    heavy_head: Vec<usize>,
    real_to_virt: Vec<usize>,
    pub virt_to_real: Vec<usize>,
}

impl HLDecomposition {
    pub fn new(n: usize) -> Self {
        HLDecomposition {
            n,
            g: vec![vec![]; n],
            subcnt: vec![0; n],
            depth: vec![0; n],
            par: vec![None; n],
            heavy_next: vec![None; n],
            heavy_head: vec![n; n],
            real_to_virt: vec![n; n],
            virt_to_real: vec![n; n],
        }
    }

    /// Add an undirected edge (u, v). Call before build().
    pub fn connect(&mut self, u: usize, v: usize) {
        self.g[u].push(v);
        self.g[v].push(u);
    }

    /// O(N)
    pub fn build(&mut self, root: usize) {
        self.dfs1(root); // subtree sizes, depth, parents
        self.dfs2(root); // heavy children
        self.bfs(root); // vid numbering and heavy-path heads
    }

    fn dfs1(&mut self, root: usize) {
        self.depth[root] = 0;
        self.par[root] = None;
        self.dfs1_sub(root, None);
    }

    /// Returns the subtree size of u; fills depth/par/subcnt along the way.
    fn dfs1_sub(&mut self, u: usize, par: Option<usize>) -> usize {
        let mut cnt = 1;
        for v in self.g[u].clone() {
            if Some(v) == par {
                continue;
            }
            self.depth[v] = self.depth[u] + 1;
            self.par[v] = Some(u);
            cnt += self.dfs1_sub(v, Some(u));
        }
        self.subcnt[u] = cnt;
        cnt
    }

    fn dfs2(&mut self, root: usize) {
        self.dfs2_sub(root, None);
    }

    /// Pick the child with the largest subtree as the heavy child and recurse
    /// down the heavy chain first, then the light children.
    fn dfs2_sub(&mut self, u: usize, par: Option<usize>) {
        let mut maxv = 0;
        let mut heavy_next = None;
        let cld = self.g[u].clone();
        for &v in &cld {
            if Some(v) == par {
                continue;
            }
            if self.subcnt[v] > maxv {
                maxv = self.subcnt[v];
                heavy_next = Some(v);
            }
        }
        if let Some(hn) = heavy_next {
            self.heavy_next[u] = Some(hn);
            self.dfs2_sub(hn, Some(u));
        }
        for &v in &cld {
            if Some(v) == par || Some(v) == heavy_next {
                continue;
            }
            self.dfs2_sub(v, Some(u));
        }
    }

    /// Assign consecutive vids along each heavy path; light children are
    /// queued as heads of new paths.
    fn bfs(&mut self, root: usize) {
        let mut cur_virt_id = 0;
        let mut q = VecDeque::new();
        q.push_back(root);
        while let Some(h) = q.pop_front() {
            let mut cur0 = Some(h);
            while cur0.is_some() {
                let cur = cur0.unwrap();
                self.real_to_virt[cur] = cur_virt_id;
                self.virt_to_real[cur_virt_id] = cur;
                cur_virt_id += 1;
                self.heavy_head[cur] = h;
                for v in self.g[cur].clone() {
                    if Some(v) == self.par[cur] || Some(v) == self.heavy_next[cur] {
                        continue;
                    }
                    q.push_back(v);
                }
                cur0 = self.heavy_next[cur];
            }
        }
    }

    /// O(log N)
    pub fn lca(&self, u: usize, v: usize) -> usize {
        let mut l = u;
        let mut r = v;
        loop {
            // Keep l on the path whose head has the smaller vid.
            if self.real_to_virt[l] > self.real_to_virt[r] {
                std::mem::swap(&mut l, &mut r);
            }
            if self.heavy_head[l] == self.heavy_head[r] {
                return l;
            }
            r = self.par[self.heavy_head[r]].unwrap();
        }
    }

    /// O(N) — all heavy paths as inclusive vid ranges (vertex-based).
    pub fn vertex_decomposition(&self) -> Vec<(usize, usize)> {
        let mut vhead = vec![self.n; self.n];
        for i in 0..self.n {
            vhead[i] = self.real_to_virt[self.heavy_head[i]];
        }
        // Count path lengths by head vid; each path is a contiguous vid run.
        let mut hs = std::collections::HashMap::new();
        for x in vhead {
            *hs.entry(x).or_insert(0) += 1;
        }
        let mut res = vec![];
        for (k, v) in hs {
            res.push((k, k + v - 1));
        }
        res
    }

    /// O(N) — like vertex_decomposition but edge-based: the root (vid 0)
    /// has no parent edge, so that path starts at vid 1.
    pub fn edge_decomposition(&self) -> Vec<(usize, usize)> {
        let parts = self.vertex_decomposition();
        let mut res = vec![];
        for (u, v) in parts {
            let u = if u == 0 { 1 } else { u };
            res.push((u, v));
        }
        res
    }

    /// Inclusive vid ranges covering all VERTICES on the u-v path.
    pub fn vertex_decomposition_between(&self, u: usize, v: usize) -> Vec<(usize, usize)> {
        let mut res = vec![];
        let mut l = u;
        let mut r = v;
        loop {
            if self.real_to_virt[l] > self.real_to_virt[r] {
                std::mem::swap(&mut l, &mut r);
            }
            let p = (
                std::cmp::max(self.real_to_virt[self.heavy_head[r]], self.real_to_virt[l]),
                self.real_to_virt[r],
            );
            res.push(p);
            if self.heavy_head[l] != self.heavy_head[r] {
                r = self.par[self.heavy_head[r]].unwrap();
            } else {
                break;
            }
        }
        res
    }

    /// Inclusive vid ranges covering all EDGES on the u-v path
    /// (an edge is identified by its child vertex's vid).
    pub fn edge_decomposition_between(&self, u: usize, v: usize) -> Vec<(usize, usize)> {
        let mut res = vec![];
        let mut l = u;
        let mut r = v;
        loop {
            if self.real_to_virt[l] > self.real_to_virt[r] {
                std::mem::swap(&mut l, &mut r);
            }
            if self.heavy_head[l] != self.heavy_head[r] {
                let p = (self.real_to_virt[self.heavy_head[r]], self.real_to_virt[r]);
                res.push(p);
                r = self.par[self.heavy_head[r]].unwrap();
            } else {
                if l != r {
                    // Same heavy path: exclude the LCA vertex itself.
                    let p = (self.real_to_virt[l] + 1, self.real_to_virt[r]);
                    res.push(p);
                }
                break;
            }
        }
        res
    }

    /// Number of edges on the u-v path.
    pub fn distance(&self, u: usize, v: usize) -> usize {
        self.depth[u] + self.depth[v] - 2 * self.depth[self.lca(u, v)]
    }
}