#[doc = " https://github.com/akiradeveloper/rust-comp-snippets"] #[allow(unused_imports)] use std::cmp::{max, min, Ordering}; #[allow(unused_imports)] use std::collections::{BTreeMap, BTreeSet, BinaryHeap, HashMap, HashSet, VecDeque}; #[allow(unused_imports)] use std::iter::FromIterator; #[macro_export] macro_rules ! chmax { ( $ x : expr , $ ( $ v : expr ) ,+ ) => { $ ( $ x = std :: cmp :: max ( $ x ,$ v ) ; ) + } ; } #[macro_export] macro_rules ! chmin { ( $ x : expr , $ ( $ v : expr ) ,+ ) => { $ ( $ x = std :: cmp :: min ( $ x ,$ v ) ; ) + } ; } #[macro_export] macro_rules ! max { ( $ x : expr ) => ( $ x ) ; ( $ x : expr , $ ( $ xs : expr ) ,+ ) => { std :: cmp :: max ( $ x , max ! ( $ ( $ xs ) ,+ ) ) } ; } #[macro_export] macro_rules ! min { ( $ x : expr ) => ( $ x ) ; ( $ x : expr , $ ( $ xs : expr ) ,+ ) => { std :: cmp :: min ( $ x , min ! ( $ ( $ xs ) ,+ ) ) } ; } #[macro_export] macro_rules ! dvec { ( $ t : expr ; $ len : expr ) => { vec ! [ $ t ; $ len ] } ; ( $ t : expr ; $ len : expr , $ ( $ rest : expr ) ,* ) => { vec ! [ dvec ! ( $ t ; $ ( $ rest ) ,* ) ; $ len ] } ; } #[macro_export] macro_rules ! cfor { ( ; $ ( $ rest : tt ) * ) => { cfor ! ( ( ) ; $ ( $ rest ) * ) } ; ( $ ( $ init : stmt ) ,+; ; $ ( $ rest : tt ) * ) => { cfor ! ( $ ( $ init ) ,+; ! false ; $ ( $ rest ) * ) } ; ( $ ( $ init : stmt ) ,+; $ cond : expr ; ; $ body : block ) => { cfor ! { $ ( $ init ) ,+; $ cond ; ( ) ; $ body } } ; ( $ ( $ init : stmt ) ,+; $ cond : expr ; $ ( $ step : expr ) ,+; $ body : block ) => { { $ ( $ init ; ) + while $ cond { let mut _first = true ; let mut _continue = false ; loop { if ! _first { _continue = true ; break } _first = false ; $ body } if ! _continue { break } $ ( $ step ; ) + } } } ; } #[doc = " main"] #[allow(unused_imports)] use std::io::{stdin, stdout, BufWriter, Write}; #[macro_export] macro_rules ! input { ( source = $ s : expr , $ ( $ r : tt ) * ) => { let mut parser = Parser :: from_str ( $ s ) ; input_inner ! { parser , $ ( $ r ) * } } ; ( parser = $ parser : ident , $ ( $ r : tt ) * ) => { input_inner ! { $ parser , $ ( $ r ) * } } ; ( new_stdin_parser = $ parser : ident , $ ( $ r : tt ) * ) => { let stdin = std :: io :: stdin ( ) ; let reader = std :: io :: BufReader :: new ( stdin . lock ( ) ) ; let mut $ parser = Parser :: new ( reader ) ; input_inner ! { $ parser , $ ( $ r ) * } } ; ( $ ( $ r : tt ) * ) => { input ! { new_stdin_parser = parser , $ ( $ r ) * } } ; } #[macro_export] macro_rules ! input_inner { ( $ parser : ident ) => { } ; ( $ parser : ident , ) => { } ; ( $ parser : ident , $ var : ident : $ t : tt $ ( $ r : tt ) * ) => { let $ var = read_value ! ( $ parser , $ t ) ; input_inner ! { $ parser $ ( $ r ) * } } ; } #[macro_export] macro_rules ! read_value { ( $ parser : ident , ( $ ( $ t : tt ) ,* ) ) => { ( $ ( read_value ! ( $ parser , $ t ) ) ,* ) } ; ( $ parser : ident , [ $ t : tt ; $ len : expr ] ) => { ( 0 ..$ len ) . map ( | _ | read_value ! ( $ parser , $ t ) ) . collect ::< Vec < _ >> ( ) } ; ( $ parser : ident , chars ) => { read_value ! ( $ parser , String ) . chars ( ) . collect ::< Vec < char >> ( ) } ; ( $ parser : ident , usize1 ) => { read_value ! ( $ parser , usize ) - 1 } ; ( $ parser : ident , $ t : ty ) => { $ parser . next ::<$ t > ( ) . 
expect ( "Parse error" ) } ; } use std::io; use std::io::BufRead; use std::str; pub struct Parser { reader: R, buf: Vec, pos: usize, } impl Parser { pub fn from_str(s: &str) -> Parser { Parser { reader: io::empty(), buf: s.as_bytes().to_vec(), pos: 0, } } } impl Parser { pub fn new(reader: R) -> Parser { Parser { reader: reader, buf: vec![], pos: 0, } } pub fn update_buf(&mut self) { self.buf.clear(); self.pos = 0; loop { let (len, complete) = { let buf2 = self.reader.fill_buf().unwrap(); self.buf.extend_from_slice(buf2); let len = buf2.len(); if len == 0 { break; } (len, buf2[len - 1] <= 0x20) }; self.reader.consume(len); if complete { break; } } } pub fn next(&mut self) -> Result { loop { let mut begin = self.pos; while begin < self.buf.len() && (self.buf[begin] <= 0x20) { begin += 1; } let mut end = begin; while end < self.buf.len() && (self.buf[end] > 0x20) { end += 1; } if begin != self.buf.len() { self.pos = end; return str::from_utf8(&self.buf[begin..end]).unwrap().parse::(); } else { self.update_buf(); } } } } #[allow(unused_macros)] macro_rules ! debug { ( $ ( $ a : expr ) ,* ) => { eprintln ! ( concat ! ( $ ( stringify ! ( $ a ) , " = {:?}, " ) ,* ) , $ ( $ a ) ,* ) ; } } #[doc = " https://github.com/hatoo/competitive-rust-snippets"] const BIG_STACK_SIZE: bool = true; #[allow(dead_code)] fn main() { use std::thread; if BIG_STACK_SIZE { thread::Builder::new() .stack_size(32 * 1024 * 1024) .name("solve".into()) .spawn(solve) .unwrap() .join() .unwrap(); } else { solve(); } } enum Q { Q1(i64,i64,i64), Q2(i64,i64), } struct Tri { x0:i64,y0:i64, x1:i64,y1:i64, x2:i64,y2:i64, } impl Tri { fn area(&self) -> i64 { let a = (self.x1-self.x0, self.y1-self.y0); let b = (self.x2-self.x0, self.y2-self.y0); i64::abs(a.0*b.1 - a.1*b.0) } fn left(&self) -> i64 { let mut v = 1<<60; chmin!(v,self.x0); chmin!(v,self.x1); chmin!(v,self.x2); v } fn right(&self) -> i64 { let mut v = 0; chmax!(v,self.x0); chmax!(v,self.x1); chmax!(v,self.x2); v } } fn solve() { let out = stdout(); let mut out = BufWriter::new(out.lock()); input!{ new_stdin_parser=parser, n:usize,q:usize, tris:[(i64,i64,i64,i64,i64,i64);n], } let mut T = vec![]; for tri in tris { let tri = Tri { x0:tri.0, y0:tri.1, x1:tri.2, y1:tri.3, x2:tri.4, y2:tri.5, }; T.push((tri.left(), tri.right(), tri.area())); } let mut Q = vec![]; for i in 0..q { input!{ parser=parser, ty:usize, } if ty == 1 { input!{ parser=parser, tri:(i64,i64,i64,i64,i64,i64), } let tri = Tri { x0:tri.0, y0:tri.1, x1:tri.2, y1:tri.3, x2:tri.4, y2:tri.5, }; let q = Q::Q1(tri.left(), tri.right(), tri.area()); Q.push(q); } else { input!{ parser=parser, l:i64,r:i64, } let q = Q::Q2(l,r); Q.push(q); } } let mut X = vec![]; let mut Y = vec![]; for t in &T { X.push(t.0); Y.push(t.1); } for q in &Q { match q { Q::Q1(l,r,x) => { X.push(*l); Y.push(*r); }, Q::Q2(l,r) => { X.push(*l); Y.push(*r); } } } let xcomp = CoordCompression::new(&X, 0); let ycomp = CoordCompression::new(&Y, 0); let mut xy = vec![vec![]; xcomp.comp.len()]; for t in &T { let i = xcomp.compress(t.0); let j = ycomp.compress(t.1); xy[i].push(j); } for q in &Q { match q { Q::Q1(l,r,x) => { let i = xcomp.compress(*l); let j = ycomp.compress(*r); xy[i].push(j); }, Q::Q2(l,r) => { let i = xcomp.compress(*l); let j = ycomp.compress(*r); xy[i].push(j); } } } let n = xy.len(); let mut seg: SEG2d = SEG2d::new(xy); for t in &T { let i = xcomp.compress(t.0); let j = ycomp.compress(t.1); seg.update(i,j,t.2); } for q in &Q { match q { Q::Q1(l,r,x) => { let i = xcomp.compress(*l); let j = ycomp.compress(*r); seg.update(i,j,*x); 
#[allow(non_snake_case)]
fn solve() {
    let out = stdout();
    let mut out = BufWriter::new(out.lock());
    input! {
        new_stdin_parser = parser,
        n: usize, q: usize,
        tris: [(i64, i64, i64, i64, i64, i64); n],
    }
    // Summarize each initial triangle as (leftmost x, rightmost x, twice its area).
    let mut T = vec![];
    for tri in tris {
        let tri = Tri { x0: tri.0, y0: tri.1, x1: tri.2, y1: tri.3, x2: tri.4, y2: tri.5 };
        T.push((tri.left(), tri.right(), tri.area()));
    }
    // Read the queries.
    let mut Q = vec![];
    for _ in 0..q {
        input! {
            parser = parser,
            ty: usize,
        }
        if ty == 1 {
            input! {
                parser = parser,
                tri: (i64, i64, i64, i64, i64, i64),
            }
            let tri = Tri { x0: tri.0, y0: tri.1, x1: tri.2, y1: tri.3, x2: tri.4, y2: tri.5 };
            Q.push(Q::Q1(tri.left(), tri.right(), tri.area()));
        } else {
            input! {
                parser = parser,
                l: i64, r: i64,
            }
            Q.push(Q::Q2(l, r));
        }
    }
    // Coordinate-compress every left value (X) and right value (Y) that can appear.
    let mut X = vec![];
    let mut Y = vec![];
    for t in &T {
        X.push(t.0);
        Y.push(t.1);
    }
    for q in &Q {
        match q {
            Q::Q1(l, r, _) => {
                X.push(*l);
                Y.push(*r);
            }
            Q::Q2(l, r) => {
                X.push(*l);
                Y.push(*r);
            }
        }
    }
    let xcomp = CoordCompression::new(&X, 0);
    let ycomp = CoordCompression::new(&Y, 0);
    // Register every (left, right) point that will ever be touched, so that
    // SEG2d can allocate its per-node y-index up front.
    let mut xy = vec![vec![]; xcomp.comp.len()];
    for t in &T {
        let i = xcomp.compress(t.0);
        let j = ycomp.compress(t.1);
        xy[i].push(j);
    }
    for q in &Q {
        match q {
            Q::Q1(l, r, _) => {
                let i = xcomp.compress(*l);
                let j = ycomp.compress(*r);
                xy[i].push(j);
            }
            Q::Q2(l, r) => {
                let i = xcomp.compress(*l);
                let j = ycomp.compress(*r);
                xy[i].push(j);
            }
        }
    }
    let n = xy.len();
    let mut seg: SEG2d<MAX> = SEG2d::new(xy);
    // Seed the structure with the initial triangles: key (left, right), value doubled area.
    for t in &T {
        let i = xcomp.compress(t.0);
        let j = ycomp.compress(t.1);
        seg.update(i, j, t.2);
    }
    for q in &Q {
        match q {
            Q::Q1(l, r, x) => {
                let i = xcomp.compress(*l);
                let j = ycomp.compress(*r);
                seg.update(i, j, *x);
            }
            Q::Q2(l, r) => {
                // Largest area among triangles with left >= l and right <= r.
                let i = xcomp.compress(*l);
                let j = ycomp.compress(*r);
                let maxv = seg.query(i, n, 0, j + 1);
                writeln!(out, "{}", maxv).unwrap();
            }
        }
    }
}

/// 2D segment tree (segment tree of segment trees) over points given up front.
pub struct SEG2d<M: Monoid> {
    tree: SEGTree,
    segs: Vec<SEG<M>>,
    index: Vec<Vec<usize>>,
}

impl<M: Monoid> SEG2d<M> {
    /// `xy[x]` lists every y that may later be passed to `update` in column x.
    pub fn new(xy: Vec<Vec<usize>>) -> Self {
        // Helper monoid: sorted, deduplicated union of index sets.
        struct SetAdd {}
        impl Monoid for SetAdd {
            type T = Vec<usize>;
            fn id() -> Self::T {
                vec![]
            }
            fn op(a: &Self::T, b: &Self::T) -> Self::T {
                let mut c = vec![];
                for &x in a {
                    c.push(x);
                }
                for &x in b {
                    c.push(x);
                }
                c.sort();
                c.dedup();
                c
            }
        }
        let n = xy.len();
        let tree = SEGTree::new(n);
        let mut s: SEG<SetAdd> = SEG::new(n);
        for i in 0..n {
            let mut y = xy[i].clone();
            y.sort();
            y.dedup();
            s.update(i, y);
        }
        // index[k] = sorted y coordinates covered by tree node k.
        let index: Vec<Vec<usize>> = s.buf;
        let mut segs = vec![];
        for ii in &index {
            let s: SEG<M> = SEG::new(ii.len());
            segs.push(s);
        }
        Self { tree, index, segs }
    }
    /// Complexity: O(log H * log W)
    pub fn update(&mut self, x: usize, y: usize, v: M::T) {
        let nodes = self.tree.update_nodes(x);
        for node in nodes {
            match node {
                SEGNode::Leaf { k } => {
                    let i = self.index[k].binary_search(&y).unwrap();
                    self.segs[k].update(i, v.clone());
                }
                SEGNode::Branch { k, l, r } => {
                    // Recompute the value at (k, y) from the two children.
                    let mut v = M::id();
                    if let Ok(il) = self.index[l].binary_search(&y) {
                        let vl = self.segs[l].get(il);
                        v = M::op(&v, &vl);
                    }
                    if let Ok(ir) = self.index[r].binary_search(&y) {
                        let vr = self.segs[r].get(ir);
                        v = M::op(&v, &vr);
                    }
                    let i = self.index[k].binary_search(&y).unwrap();
                    self.segs[k].update(i, v);
                }
            }
        }
    }
    /// Query over the rectangle [x0,x1) x [y0,y1).
    /// Complexity: O(log H * log W)
    pub fn query(&self, x0: usize, x1: usize, y0: usize, y1: usize) -> M::T {
        let nodes = self.tree.query_nodes(x0, x1);
        let mut ans = M::id();
        for k in nodes {
            let l = self.index[k].lower_bound(&y0);
            let r = self.index[k].lower_bound(&y1);
            let v = self.segs[k].query(l, r);
            ans = M::op(&ans, &v);
        }
        ans
    }
}

#[derive(PartialEq, Debug)]
pub enum SEGNode {
    Leaf { k: usize },
    Branch { k: usize, l: usize, r: usize },
}

/// Index arithmetic of a segment tree: which nodes to touch, without storing values.
pub struct SEGTree {
    /// Number of leaves (rounded up to a power of two).
    pub n: usize,
}

impl SEGTree {
    pub fn new(n: usize) -> SEGTree {
        let n = n.next_power_of_two();
        Self { n }
    }
    /// Nodes to recompute (leaf first, then ancestors up to the root) when leaf i changes.
    pub fn update_nodes(&self, i: usize) -> Vec<SEGNode> {
        use SEGNode::*;
        let mut i = i + self.n;
        let mut res = vec![Leaf { k: i }];
        while i > 1 {
            i >>= 1;
            res.push(Branch { k: i, l: i * 2, r: i * 2 + 1 });
        }
        res
    }
    /// Nodes whose ranges exactly cover [l,r).
    pub fn query_nodes(&self, l: usize, r: usize) -> Vec<usize> {
        let mut ret = vec![];
        let mut l = l + self.n;
        let mut r = r + self.n;
        while l < r {
            if l & 1 > 0 {
                ret.push(l);
                l += 1;
            }
            if r & 1 > 0 {
                r -= 1;
                ret.push(r);
            }
            l >>= 1;
            r >>= 1;
        }
        ret.sort();
        ret
    }
}

/// Max monoid over i64 with -1 as the identity (values are assumed non-negative).
struct MAX;
impl Monoid for MAX {
    type T = i64;
    fn id() -> Self::T {
        -1
    }
    fn op(a: &Self::T, b: &Self::T) -> Self::T {
        std::cmp::max(*a, *b)
    }
}
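// Illustrative usage sketch (not from the original submission): how SEGTree
// decomposes ranges into node indices, and how SEG2d<MAX> answers rectangle-max
// queries, assuming every point passed to `update` was registered in `new`.
// All concrete coordinates and values below are arbitrary.
#[cfg(test)]
mod seg2d_usage {
    use super::*;

    #[test]
    fn segtree_nodes() {
        // 3 leaves are rounded up to 4; leaves live at node indices 4..8.
        let tree = SEGTree::new(3);
        assert_eq!(tree.n, 4);
        // The whole range [0,4) is covered by the root alone.
        assert_eq!(tree.query_nodes(0, 4), vec![1]);
        // [1,3) is covered by the two leaves 5 and 6.
        assert_eq!(tree.query_nodes(1, 3), vec![5, 6]);
        // Updating leaf 0 touches its leaf node and every ancestor up to the root.
        assert_eq!(
            tree.update_nodes(0),
            vec![
                SEGNode::Leaf { k: 4 },
                SEGNode::Branch { k: 2, l: 4, r: 5 },
                SEGNode::Branch { k: 1, l: 2, r: 3 },
            ]
        );
    }

    #[test]
    fn seg2d_rectangle_max() {
        // Column x -> the y coordinates that will ever be updated in that column.
        let xy = vec![vec![0, 2], vec![1], vec![0]];
        let mut seg: SEG2d<MAX> = SEG2d::new(xy);
        seg.update(0, 0, 10);
        seg.update(0, 2, 7);
        seg.update(1, 1, 3);
        seg.update(2, 0, 8);
        // Max over x in [0,3), y in [0,3): all four points.
        assert_eq!(seg.query(0, 3, 0, 3), 10);
        // Max over x in [1,3), y in [0,2): points (1,1)=3 and (2,0)=8.
        assert_eq!(seg.query(1, 3, 0, 2), 8);
        // A rectangle containing no points yields the identity of MAX, i.e. -1.
        assert_eq!(seg.query(2, 3, 1, 3), -1);
    }
}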
/// Generalization of a Fenwick tree.
///
/// Every node initially holds the identity `id()`.
/// `get(i)`: returns `a[i]`
/// `update(i, x)`: sets `a[i] = x`
/// `query(l, r)`: `op` folded over the nodes covering `[l, r)`
#[allow(dead_code)]
pub trait Monoid {
    type T: Clone + std::fmt::Debug;
    fn id() -> Self::T;
    fn op(a: &Self::T, b: &Self::T) -> Self::T;
}

/// Segment tree over a monoid M (point update, range query).
#[allow(dead_code)]
pub struct SEG<M: Monoid> {
    pub n: usize,
    pub buf: Vec<M::T>,
}

impl<M: Monoid> SEG<M> {
    #[allow(dead_code)]
    pub fn new(n: usize) -> SEG<M> {
        // Round the number of leaves up to a power of two.
        let mut m = 1;
        while m < n {
            m *= 2;
        }
        SEG {
            n: m,
            buf: vec![M::id(); 2 * m],
        }
    }
    #[allow(dead_code)]
    pub fn update(&mut self, k: usize, a: M::T) {
        let mut k = k + self.n;
        self.buf[k] = a;
        while k > 1 {
            k >>= 1;
            self.buf[k] = M::op(&self.buf[k * 2], &self.buf[k * 2 + 1]);
        }
    }
    #[allow(dead_code)]
    pub fn get(&self, k: usize) -> M::T {
        self.buf[k + self.n].clone()
    }
    pub fn do_query(&self, a: usize, b: usize, k: usize, l: usize, r: usize) -> M::T {
        if r <= a || b <= l {
            // Node range [l,r) is disjoint from the query range [a,b).
            return M::id();
        }
        if a <= l && r <= b {
            // Node range is fully contained in the query range.
            self.buf[k].clone()
        } else {
            let vl = self.do_query(a, b, k * 2, l, (l + r) / 2);
            let vr = self.do_query(a, b, k * 2 + 1, (l + r) / 2, r);
            M::op(&vl, &vr)
        }
    }
    /// Fold of `op` over [a,b).
    #[allow(dead_code)]
    pub fn query(&self, a: usize, b: usize) -> M::T {
        self.do_query(a, b, 1, 0, self.n)
    }
}

/// Maps values to dense indices starting at `start`, and back.
struct CoordCompression {
    comp: std::collections::HashMap<i64, usize>,
    dcmp: std::collections::HashMap<usize, i64>,
}

impl CoordCompression {
    pub fn new(xs: &[i64], start: usize) -> CoordCompression {
        let mut xs = xs.to_owned();
        xs.sort();
        let mut comp = std::collections::HashMap::new();
        let mut dcmp = std::collections::HashMap::new();
        let mut acc = start;
        for x in xs {
            if comp.contains_key(&x) {
                continue;
            }
            comp.insert(x, acc);
            dcmp.insert(acc, x);
            acc += 1;
        }
        CoordCompression { comp, dcmp }
    }
    pub fn compress(&self, x: i64) -> usize {
        *self.comp.get(&x).unwrap()
    }
    #[allow(dead_code)]
    pub fn decompress(&self, x: usize) -> i64 {
        *self.dcmp.get(&x).unwrap()
    }
}

/// Equivalent to std::lower_bound and std::upper_bound in C++.
pub trait LowerBound<T> {
    fn lower_bound(&self, x: &T) -> usize;
    fn upper_bound(&self, x: &T) -> usize;
}

impl<T: Ord> LowerBound<T> for [T] {
    /// First index i with self[i] >= x (the slice must be sorted).
    fn lower_bound(&self, x: &T) -> usize {
        let mut low = 0;
        let mut high = self.len();
        while low != high {
            let mid = (low + high) / 2;
            match self[mid].cmp(x) {
                Ordering::Less => {
                    low = mid + 1;
                }
                Ordering::Equal | Ordering::Greater => {
                    high = mid;
                }
            }
        }
        low
    }
    /// First index i with self[i] > x (the slice must be sorted).
    fn upper_bound(&self, x: &T) -> usize {
        let mut low = 0;
        let mut high = self.len();
        while low != high {
            let mid = (low + high) / 2;
            match self[mid].cmp(x) {
                Ordering::Less | Ordering::Equal => {
                    low = mid + 1;
                }
                Ordering::Greater => {
                    high = mid;
                }
            }
        }
        low
    }
}
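// Illustrative usage sketch (not from the original submission): the 1D building
// blocks used by the solver, assuming only the definitions above. The concrete
// values are arbitrary.
#[cfg(test)]
mod building_block_usage {
    use super::*;

    #[test]
    fn seg_range_max() {
        // Point-update / range-max segment tree over the MAX monoid.
        let mut seg: SEG<MAX> = SEG::new(5);
        for (i, &v) in [3i64, 1, 4, 1, 5].iter().enumerate() {
            seg.update(i, v);
        }
        assert_eq!(seg.query(0, 5), 5);
        assert_eq!(seg.query(0, 3), 4);
        assert_eq!(seg.query(1, 2), 1);
    }

    #[test]
    fn coord_compression_and_bounds() {
        // Values are mapped to dense ranks starting at 0.
        let cc = CoordCompression::new(&[5, -2, 5, 7], 0);
        assert_eq!(cc.compress(-2), 0);
        assert_eq!(cc.compress(5), 1);
        assert_eq!(cc.compress(7), 2);
        assert_eq!(cc.decompress(1), 5);

        // lower_bound / upper_bound behave like their C++ counterparts.
        let xs = vec![1usize, 2, 2, 4];
        assert_eq!(xs.lower_bound(&2), 1);
        assert_eq!(xs.upper_bound(&2), 3);
        assert_eq!(xs.lower_bound(&5), 4);
    }
}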