Result

Problem: No.798 コレクション
User: koyumeishi
Submitted at: 2019-03-15 22:43:57
Language: C++14 (gcc 12.3.0 + boost 1.83.0)
Result: TLE
Run time: -
Code size: 22,109 bytes
Compile time: 2,635 ms
Compile memory: 150,840 KB
Run memory: 226,784 KB
Last judged: 2023-09-14 13:55:50
Total judge time: 6,471 ms
Judge server ID (reference): judge12 / judge11

Test cases

Input         Result  Run time  Memory
testcase_00   AC      1 ms      4,380 KB
testcase_01   AC      2 ms      4,376 KB
testcase_02   AC      2 ms      4,380 KB
testcase_03   AC      2 ms      4,380 KB
testcase_04   AC      2 ms      4,376 KB
testcase_05   AC      1 ms      4,376 KB
testcase_06   AC      1 ms      4,376 KB
testcase_07   AC      2 ms      4,376 KB
testcase_08   AC      2 ms      4,376 KB
testcase_09   AC      2 ms      4,380 KB
testcase_10   AC      2 ms      4,376 KB
testcase_11   AC      2 ms      4,380 KB
testcase_12   AC      2 ms      4,376 KB
testcase_13   TLE     -         -
testcase_14   --      -         -
testcase_15   --      -         -
testcase_16   --      -         -
testcase_17   --      -         -
testcase_18   --      -         -
testcase_19   --      -         -
testcase_20   --      -         -
testcase_21   --      -         -
testcase_22   --      -         -
testcase_23   --      -         -
testcase_24   --      -         -
testcase_25   --      -         -

Source code


#include <iostream>
#include <vector>
#include <cstdio>
#include <sstream>
#include <map>
#include <string>
#include <algorithm>
#include <queue>
#include <cmath>
#include <functional>
#include <set>
#include <ctime>
#include <random>
#include <chrono>
#include <cassert>
#include <tuple>
#include <utility>
using namespace std;

namespace {
  using Integer = long long; //__int128;
  template<class T, class S> istream& operator >> (istream& is, pair<T,S>& p){return is >> p.first >> p.second;}
  template<class T> istream& operator >> (istream& is, vector<T>& vec){for(T& val: vec) is >> val; return is;}
  template<class T> istream& operator ,  (istream& is, T& val){ return is >> val;}
  template<class T, class S> ostream& operator << (ostream& os, const pair<T,S>& p){return os << p.first << " " << p.second;}
  template<class T> ostream& operator << (ostream& os, const vector<T>& vec){for(size_t i=0; i<vec.size(); i++) os << vec[i] << (i==vec.size()-1?"":" "); return os;}
  template<class T> ostream& operator ,  (ostream& os, const T& val){ return os << " " << val;}

  template<class H> void print(const H& head){ cout << head; }
  template<class H, class ... T> void print(const H& head, const T& ... tail){ cout << head << " "; print(tail...); }
  template<class ... T> void println(const T& ... values){ print(values...); cout << endl; }

  template<class H> void eprint(const H& head){ cerr << head; }
  template<class H, class ... T> void eprint(const H& head, const T& ... tail){ cerr << head << " "; eprint(tail...); }
  template<class ... T> void eprintln(const T& ... values){ eprint(values...); cerr << endl; }

  class range{ Integer start_, end_, step_; public: struct range_iterator{ Integer val, step_; range_iterator(Integer v, Integer step) : val(v), step_(step) {} Integer operator * (){return val;} void operator ++ (){val += step_;} bool operator != (range_iterator& x){return step_ > 0 ? val < x.val : val > x.val;} }; range(Integer len) : start_(0), end_(len), step_(1) {} range(Integer start, Integer end) : start_(start), end_(end), step_(1) {} range(Integer start, Integer end, Integer step) : start_(start), end_(end), step_(step) {} range_iterator begin(){ return range_iterator(start_, step_); } range_iterator   end(){ return range_iterator(  end_, step_); } };

  inline string operator "" _s (const char* str, size_t size){ return move(string(str)); }
  constexpr Integer my_pow(Integer x, Integer k, Integer z=1){return k==0 ? z : k==1 ? z*x : (k&1) ? my_pow(x*x,k>>1,z*x) : my_pow(x*x,k>>1,z);}
  constexpr Integer my_pow_mod(Integer x, Integer k, Integer M, Integer z=1){return k==0 ? z%M : k==1 ? z*x%M : (k&1) ? my_pow_mod(x*x%M,k>>1,M,z*x%M) : my_pow_mod(x*x%M,k>>1,M,z);}
  constexpr unsigned long long operator "" _ten (unsigned long long value){ return my_pow(10,value); }

  inline int k_bit(Integer x, int k){return (x>>k)&1;} //0-indexed

  mt19937 mt(chrono::duration_cast<chrono::nanoseconds>(chrono::steady_clock::now().time_since_epoch()).count());

  template<class T> string join(const vector<T>& v, const string& sep){ stringstream ss; for(size_t i=0; i<v.size(); i++){ if(i>0) ss << sep; ss << v[i]; } return ss.str(); }

  inline string operator * (string s, int k){ string ret; while(k){ if(k&1) ret += s; s += s; k >>= 1; } return ret; }
}
constexpr long long mod = 9_ten + 7;

// min cost flow 
// https://min-25.hatenablog.com/entry/2017/08/12/162908
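//
// Rough map of the code below (my summary, loosely based on the linked
// article): MinimumCostCirculation solves a min cost circulation with
// lower/upper bounds by cost scaling. Feasibility is checked once with Dinic
// max-flow; then, while the residual graph still contains a negative cycle,
// YoungTarjanOrlinMCR computes the (integral) minimum mean cycle value, which
// supplies eps and vertex potentials for a push/relabel refine() pass.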

#define _rep(_1, _2, _3, _4, name, ...) name
#define rep2(i, n) rep3(i, 0, n)
#define rep3(i, a, b) rep4(i, a, b, 1)
#define rep4(i, a, b, c) for (int i = int(a); i < int(b); i += int(c))
#define rep(...) _rep(__VA_ARGS__, rep4, rep3, rep2, _)(__VA_ARGS__)

using namespace std;

using i64 = long long;

template <typename T>
struct BinaryHeap {
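  // Array-based binary min-heap (1-indexed) keyed by `data`, with indices[id]
  // tracking where each id currently sits so that update()/erase() by id run
  // in O(log n). Used below as the priority queue over per-vertex best ratios.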
  struct node {
    bool operator < (const node& rhs) const { return data < rhs.data; }
    T data;
    int id;
  };
  BinaryHeap(int N) : size(0) {
    nodes = new node[N + 1];
    indices = new int[N];
  }
  ~BinaryHeap() {
    delete [] nodes;
    delete [] indices;
  }
  void swap_node(int a, int b) {
    swap(nodes[a], nodes[b]);
    indices[nodes[a].id] = a;
    indices[nodes[b].id] = b;
  }
  void down_heap(int pos) {
    for (int k = pos, nk = k; 2 * k <= size; k = nk) {
      if (nodes[2 * k] < nodes[nk]) nk = 2 * k;
      if (2 * k + 1 <= size && nodes[2 * k + 1] < nodes[nk]) nk = 2 * k + 1;
      if (nk == k) break;
      swap_node(k, nk);
    }
  }
  void up_heap(int pos) {
    for (int k = pos; k > 1 && nodes[k] < nodes[k >> 1]; k >>= 1) {
      swap_node(k, k >> 1);
    }
  }
  node get() const {
    assert(size >= 1);
    return nodes[1];
  }
  void pop(int pos=1) {
    nodes[pos] = nodes[size--];
    indices[nodes[pos].id] = pos;
    down_heap(pos);
  }
  void erase(int id) {
    pop(indices[id]);
  }
  void update(int id, T v) {
    bool up = (v <= nodes[indices[id]].data);
    nodes[indices[id]].data = v;
    if (up) up_heap(indices[id]);
    else down_heap(indices[id]);
  }
  void push(int id, T v) {
    indices[id] = ++size;
    nodes[size] = {v, id};
    up_heap(size);
  }
  void valid() {
    bool ok = true;
    rep(i, 1, size + 1) {
      if (2 * i + 0 <= size) ok &= nodes[i].data <= nodes[2 * i].data;
      if (2 * i + 1 <= size) ok &= nodes[i].data <= nodes[2 * i + 1].data;
    }
    assert(ok);
  }

  int size;
  node* nodes;
  int* indices;
};

template <typename input_edge_t, typename weight1_t, typename weight1_sum_t>
struct YoungTarjanOrlinMCR {
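  // Minimum cycle ratio computation in the style of Young-Tarjan-Orlin:
  // maintain a parametric shortest-path tree rooted at a virtual vertex N and
  // repeatedly apply the tree update suggested by the edge with the smallest
  // cycle ratio, until that edge closes a cycle inside the tree (giving the
  // minimum ratio) or no candidate remains. Here weight2() is always 1, so the
  // ratio is the mean edge cost of a cycle; with use_integral_ratio=true the
  // ratio is floored and weight1 ends up holding matching vertex potentials.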
  using weight2_t = int;
  using weight2_sum_t = int;
  using ratio_t = double;
  static constexpr ratio_t inf = 9e18;

  struct node {
    node() : par(-1), prev(-1), next(-1), head(-1), eid(-1) {}
    int par, prev, next, head, eid;
  };
  
  struct edge {
    int from, to;
    weight1_t w1;
    // weight2_t w2;
    weight1_t weight1() const { return w1; }
    weight2_t weight2() const { return 1; }
  };

  YoungTarjanOrlinMCR(int N, const vector<input_edge_t>& E) 
      : N(N), ofs(N + 1), rofs(N + 1), edges(E.size()), redges(E.size()) {
    nodes = new node[N + 1];
    for (auto& e : E) ofs[e.from + 1] += 1, rofs[e.to + 1] += 1;
    rep(i, 1, N + 1) ofs[i] += ofs[i - 1];
    rep(i, 1, N + 1) rofs[i] += rofs[i - 1];
    rep(i, E.size()) {
      auto e = E[i];
      redges[rofs[e.to]++] = ofs[e.from];
      edges[ofs[e.from]++] = {e.from, e.to, e.weight1()};
    }
    rep(i, N) ofs[N - i] = ofs[N - 1 - i];
    rep(i, N) rofs[N - i] = rofs[N - 1 - i];
    ofs[0] = rofs[0] = 0;
  }
  
  ~YoungTarjanOrlinMCR() {
    delete [] nodes;
  }

  // node operations

  void link(int u, int v, int eid) {
    if (nodes[u].head < 0) {
      nodes[u].head = v;
    } else {
      int w = nodes[u].head;
      nodes[u].head = nodes[w].prev = v;
      nodes[v].next = w;
    }
    nodes[v].par = u; 
    nodes[v].eid = eid;
  }
  
  void cut(int u, int v) {
    int& uh = nodes[u].head;
    int& vp = nodes[v].prev, &vn = nodes[v].next;
    if (uh == v) {
      if (vn < 0) uh = -1;
      else uh = vn, nodes[vn].prev = -1;
    } else {
      if (vn < 0) nodes[vp].next = -1;
      else nodes[vp].next = vn, nodes[vn].prev = vp;
    }
    vp = vn = -1;
  }

  void cut_and_link(int u, int v, int eid) {
    assert(nodes[v].par >= 0);
    cut(nodes[v].par, v);
    link(u, v, eid);
  }
  
  int tabulate_subtree_nodes(int u, int size, vector<int>& vs, vector<bool>& has) const {
    has[vs[size++] = u] = true;
    for (int v = nodes[u].head; v >= 0; v = nodes[v].next) {
      size = tabulate_subtree_nodes(v, size, vs, has);
    }
    return size;
  }

  void print_subtree(int root, int indent=0) const {
    rep(i, indent) putchar(' ');
    printf("%d\n", root);
    for (int v = nodes[root].head; v >= 0; v = nodes[v].next) {
      print_subtree(v, indent + 1);
    }
  }

  // 

  void trace_back_to_floored_ratio(weight1_t floored_ratio, vector< pair<ratio_t, int> >& update_log) {
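    // Roll the shortest-path tree back to the last state whose ratio was at
    // most the floored (integral) ratio, by re-attaching each moved vertex to
    // the parent edge recorded in update_log.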
    for (int i = update_log.size() - 1; i >= 0; --i) {
      if (update_log[i].first <= floored_ratio) break;
      int eid = update_log[i].second;
      int u = (eid < 0) ? N : edges[eid].from;
      int v = (eid < 0) ? ~eid : edges[eid].to;
      cut_and_link(u, v, eid);
    }
  }

  void calc_potential(int u, weight1_t lambda) {
    for (int v = nodes[u].head; v >= 0; v = nodes[v].next) {
      int eid = nodes[v].eid;
      weight1[v] = weight1[u] + (eid < 0 ? 0 : edges[eid].weight1());
      weight1[v] -= (eid < 0 ? 1 : edges[eid].weight2()) * lambda;
      calc_potential(v, lambda);
    }
  }

  ratio_t cycle_ratio(int eid) const {
    auto& e = edges[eid];
    int u = e.from, v = e.to;
    auto delta_w2 = weight2[u] + e.weight2() - weight2[v];
    if (delta_w2 <= 0) return inf;
    auto delta_w1 = weight1[u] + e.weight1() - weight1[v];
    return ratio_t(delta_w1) / delta_w2;
  }

  void verify_potential(const weight1_t ratio) const {
    rep(u, N) rep(eid, ofs[u], ofs[u + 1]) {
      auto d = weight1[u] + edges[eid].weight1() - edges[eid].weight2() * ratio;
      assert(d >= weight1[edges[eid].to]);
    }
  }

  void reconstruct_sp_tree(int eid, vector<int>& subtree_nodes, int subtree_size) {
    auto& e = edges[eid];
    int u = e.from, v = e.to;
    auto dw1 = weight1[u] + e.weight1() - weight1[v];
    auto dw2 = weight2[u] + e.weight2() - weight2[v];
    rep(i, subtree_size) {
      int u = subtree_nodes[i];
      weight1[u] += dw1, weight2[u] += dw2;
    }
    cut_and_link(u, v, eid);
  }

  ratio_t minimum_cycle_ratio(bool use_integral_ratio=false) {
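    // Start from the star tree (every vertex linked under the virtual root N)
    // and repeatedly re-attach the subtree of the vertex whose best incoming
    // edge has the smallest ratio; stop when that edge would create a cycle
    // within its own subtree, or when no finite-ratio candidate is left.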
    const int root = N;
    rep(i, N + 1) nodes[i] = node();
    rep(i, N) link(root, i, ~i);
    
    weight1.assign(N + 1, 0);
    weight2.assign(N + 1, 1); weight2[root] = 0;
    vector<int> best_eid(N + 1, -1);
    vector<ratio_t> min_ratios(edges.size(), inf);

    rep(u, N) rep(eid, ofs[u], ofs[u + 1]) {
      int v = edges[eid].to;
      min_ratios[eid] = cycle_ratio(eid);
      if (best_eid[v] < 0 || min_ratios[eid] < min_ratios[best_eid[v]]) {
        best_eid[v] = eid;
      }
    }
    auto heap = BinaryHeap<ratio_t>(N + 1); heap.push(N, inf);
    rep(v, N) if (best_eid[v] >= 0) heap.push(v, min_ratios[best_eid[v]]);

    vector<bool> subtree_has(N);
    vector<int> subtree_nodes(N);
    vector< pair<ratio_t, int> > change_log;

    auto curr_ratio = inf;
    while (1) {
      auto node = heap.get();
      curr_ratio = node.data;

      if (curr_ratio == inf) break;

      auto& e = edges[best_eid[node.id]];
      int subtree_root = e.to; 
      int subtree_size = tabulate_subtree_nodes(subtree_root, 0, subtree_nodes, subtree_has);
      if (subtree_has[e.from]) break;

      change_log.emplace_back(curr_ratio, nodes[subtree_root].eid);
      reconstruct_sp_tree(best_eid[subtree_root], subtree_nodes, subtree_size);

      auto update_ratio = [&](int v, int eid) {
        auto best_ratio = min_ratios[best_eid[v]];
        if ((min_ratios[eid] = cycle_ratio(eid)) < best_ratio) {
          heap.update(v, min_ratios[best_eid[v] = eid]);
        }
      };

      rep(i, subtree_size) {
        int u = subtree_nodes[i];
        // u -> v (v is not in the subtree)
        rep(eid, ofs[u], ofs[u + 1]) {
          int v = edges[eid].to;
          if (!subtree_has[v]) update_ratio(v, eid);
        }
        // u <- v 
        if (!subtree_has[edges[best_eid[u]].from]) {
          // check all the neighbors of u.
          min_ratios[best_eid[u]] = inf;
          rep(reid, rofs[u], rofs[u + 1]) update_ratio(u, redges[reid]);
          heap.update(u, min_ratios[best_eid[u]]); 
        } else {
          // check the neighbors of u that are not in the subtree.
          rep(reid, rofs[u], rofs[u + 1]) {
            auto eid = redges[reid];
            if (subtree_has[edges[eid].from]) continue;
            update_ratio(u, eid);
          }
        }
      }
      rep(i, subtree_size) subtree_has[subtree_nodes[i]] = false;
    }
    if (use_integral_ratio) {
      weight1_t floored_ratio = -ceil(-curr_ratio);
      trace_back_to_floored_ratio(floored_ratio, change_log);
      calc_potential(N, floored_ratio);
      // verify_potential(floored_ratio);
      return floored_ratio;
    } else {
      return curr_ratio;
    }
  }

  int N;
  node* nodes;
  vector<int> ofs, rofs;
  vector<edge> edges;
  vector<weight1_sum_t> weight1;
  vector<weight2_sum_t> weight2;
  vector<int> redges;
};

namespace circulation {

using cap_t = int;
using cap_sum_t = i64;

using cost_t = i64;     // should hold |V| * C
using cost_sum_t = i64; // should hold |V|^2 * C (< 2^63)

class Dinic {
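  // Plain Dinic max-flow (BFS level graph + DFS blocking flow). Only used by
  // has_feasible_circulation() to saturate the lower bounds.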
  using cap_t = cap_sum_t;
  static const cap_t inf = 9e18;

  struct edge {
    int to, rev;
    cap_t cap;
    int eid;
  };

public:
  Dinic() {}
  Dinic(int N) : N(N), edges(N) {}

  void add_directed_edge(int u, int v, cap_t cap, int eid) {
    edges[u].push_back({v, int(edges[v].size()), cap, eid});
    edges[v].push_back({u, int(edges[u].size() - 1), 0, -1});
  }

  bool construct(int s, int t, int* que) {
    dist.assign(N, -1); dist[s] = 0;
    que[0] = s;
    for (int qh = 0, qt = 1; qh < qt; ) {
      int u = que[qh++];
      if (u == t) break;
      for (auto& e : edges[u]) if (dist[e.to] < 0 && e.cap > 0) {
        dist[e.to] = dist[u] + 1;
        que[qt++] = e.to;
      }
    }
    return dist[t] >= 0;
  }

  // seems faster
  cap_t block_flow(int u, int t, cap_t f) {
    if (u == t) return f;
    for (int& ei = last[u]; ei < int(edges[u].size()); ++ei) {
      auto& e = edges[u][ei];
      if (e.cap == 0 || dist[e.to] <= dist[u]) continue;
      cap_t aug = block_flow(e.to, t, min(f, e.cap));
      if (aug == 0) continue;
      e.cap -= aug; edges[e.to][e.rev].cap += aug;
      f -= aug;
      return aug;
    }
    return 0;
  }

  cap_sum_t block_flow_all(int u, int t, cap_t f) {
    if (u == t) return f;
    cap_sum_t ret = 0;
    for (int& ei = last[u]; ei < int(edges[u].size()); ++ei) {
      auto& e = edges[u][ei];
      if (e.cap == 0 || dist[e.to] <= dist[u]) continue;
      cap_t aug = block_flow(e.to, t, min(f, e.cap));
      if (aug == 0) continue;
      e.cap -= aug; edges[e.to][e.rev].cap += aug;
      f -= aug; ret += aug;
      if (f == 0) break;
    }
    return ret;
  }

  cap_sum_t maximum_flow(int s, int t) {
    if (s == t) return 0;
    cap_sum_t flow = 0;
    vector<int> que(N);
    last.resize(N);
    while (construct(s, t, que.data())) {
      fill_n(last.begin(), N, 0);
      // flow += block_flow_all(s, t, inf);
      for (cap_t f; (f = block_flow(s, t, inf)) > 0; flow += f);
    }
    return flow;
  }

  int N;
  vector< vector<edge> > edges;

private:
  vector<int> last, dist;
};

class MinimumCostCirculation {
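  // Minimum cost circulation with per-edge bounds [b, c], solved by cost
  // scaling: costs are multiplied by cost_multiplier (> |V|), the graph is
  // antisymmetrized, and refine() is run with eps taken from the minimum mean
  // cycle of the current residual graph until no negative cycle remains.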
  static constexpr cost_t inf = 9e18;
  struct edge {
    bool operator < (const edge& rhs) const {
      return from < rhs.from || (from == rhs.from && to < rhs.to);
    }
    cost_t weight1() const {
      return cost;
    }
    int from, to;
    cap_t b, c, flow;
    cost_t cost;
    int rev;
  };

public:
  MinimumCostCirculation(int N, bool avoid_parallel_edges=false) : 
    N(N), avoid_parallel_edges(avoid_parallel_edges) {}

  void add_directed_edge(int u, int v, cap_t b, cap_t c, cost_t cost) {
    assert(u != v);
    bool rev = 0;
    if (avoid_parallel_edges) {
      rev = u > v;
      if (rev) swap(u, v);
    }
    edges.push_back({u, v, b, c, 0, cost, rev});
  }

  void convert_to_simple_graph() {
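    // Make the graph simple: whenever several edges share the same endpoints,
    // reroute each duplicate through a freshly added vertex N (recording the
    // original endpoints in original_edges).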
    sort(edges.begin(), edges.end());
    int edge_count = edges.size();
    for (int ei = 0; ei < edge_count; ) {
      int u = edges[ei].from, v = edges[ei].to;
      if (edges[ei].rev) swap(edges[ei].from, edges[ei].to);
      for (ei += 1; ei < edge_count && edges[ei].from == u && edges[ei].to == v; ++ei) {
        int s = u, t = v;
        if (edges[ei].rev) swap(s, t);
        auto b = edges[ei].b, c = edges[ei].c;
        edges[ei] = {s, N, b, c, 0, edges[ei].cost, 0};
        edges.push_back({N, t, b, c, 0, 0, 0});
        original_edges.push_back({s, t});
        ++N;
      }
    }
  }

  bool has_feasible_circulation() {
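    // Standard lower-bound reduction: give every edge capacity c - b, route
    // the mandatory b units through a super source N and super sink N + 1,
    // and require the max flow to saturate them; the Dinic flows then seed a
    // feasible circulation in `flow`.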
    auto dinic = Dinic(N + 2);
    vector<cap_sum_t> capacity(2 * N, 0);
    rep(ei, edges.size()) {
      auto& e = edges[ei];
      int u = e.from, v = e.to;
      dinic.add_directed_edge(u, v, e.c - e.b, ei);
      if (e.b > 0) capacity[v] += e.b, capacity[u + N] += e.b;
      else capacity[u] += -e.b, capacity[v + N] += -e.b;
    }
    rep(i, N) dinic.add_directed_edge(N, i, capacity[i], -1);
    rep(i, N) dinic.add_directed_edge(i, N + 1, capacity[i + N], -1);
    auto max_flow = dinic.maximum_flow(N, N + 1);
    cap_sum_t s = 0;
    rep(i, N) s += capacity[i];
    if (max_flow < s) return false;
    rep(u, dinic.N) for (auto& e : dinic.edges[u]) if (e.eid >= 0) {
      edges[e.eid].flow = edges[e.eid].c - e.cap;
    }
    return true;
  }

  void antisymmetrize() {
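    // Store edges in CSR order by tail (`ofs`) and add, for every edge, its
    // reversed copy with negated bounds, flow and cost; the `rev` indices make
    // the two halves of each residual pair reachable from one another.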
    vector<edge> curr_edges = edges;
    ofs.assign(N + 1, 0);
    edges.resize(2 * curr_edges.size());
    for (auto& e : curr_edges) ofs[e.from + 1]++, ofs[e.to + 1]++;
    rep(i, N) ofs[i + 1] += ofs[i];
    for (auto& e : curr_edges) {
      e.rev = ofs[e.to]; edges[ofs[e.from]] = e; 
      edges[ofs[e.to]++] = {e.to, e.from, -e.c, -e.b, -e.flow, -e.cost, ofs[e.from]++};
    }
    rep(i, N) ofs[N - i] = ofs[N - 1 - i];
    ofs[0] = 0;
  }

  vector<edge> residual_graph(const vector<edge>& edges) {
    vector<edge> residual;
    for (auto& e : edges) if (e.c > e.flow) residual.push_back(e);
    return residual;
  }

  void enlarge_edge_cost(const int multiplier) {
    for (auto& e : edges) e.cost *= multiplier;
  }

  void refine(const cost_t eps, vector<cost_sum_t>& potential) {
    /*
    complexity: O(|V|^2 |E|)
     - relabel: O(|V|^2)
     - saturating push: O(|V||E|)
     - non-saturating push: O(|V|^2 |E|)
    */
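    // Convert stored flows into residual capacities, saturating every arc with
    // negative reduced cost, then push excess along admissible arcs; a vertex
    // with leftover excess but no admissible arc is relabelled (its potential
    // drops by delta + eps). On termination the circulation is eps-optimal
    // with respect to `potential`.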

    assert(eps >= 1);
    auto cost_p = [&](const edge& e) {
      return e.cost + potential[e.from] - potential[e.to];
    };

    // e.flow := residue
    for (auto& e : edges) {
      if (cost_p(e) < 0) e.flow = 0, edges[e.rev].flow = e.c - e.b;
      else if (cost_p(e) == 0) e.flow = e.c - e.flow;
    }

    vector<cap_sum_t> excess(N, 0);
    for (auto& e : edges) excess[e.to] += e.c - e.flow;

    // practically, stack<int> seems better than queue<int>.
    vector<int> admissible_vertices; admissible_vertices.reserve(N);
    rep(u, N) if (excess[u] > 0) admissible_vertices.push_back(u);

    auto residue = [&](const edge& e) {
      return e.flow;
    };
    auto push = [&](int u, edge& e, cap_t df) {
      e.flow -= df; edges[e.rev].flow += df;
      excess[e.to] += df; excess[u] -= df;
      if (excess[e.to] > 0 && excess[e.to] - df <= 0) {
        admissible_vertices.push_back(e.to);
      }
    };
    auto relabel = [&](int u, cost_t delta) {
      potential[u] -= delta + eps;
    };

    // simpler version of push look-ahead.
    auto relabel_in_advance = [&](int u) {
      if (excess[u] != 0) return false;
      auto delta = inf;
      rep(ei, ofs[u], ofs[u + 1]) {
        auto& e = edges[ei];
        if (residue(e) > 0) {
          if (cost_p(e) < 0) return false;
          else delta = min(delta, cost_p(e));
        }
      }
      relabel(u, delta);
      return true;
    };

    while (!admissible_vertices.empty()) {
      auto u = admissible_vertices.back(); admissible_vertices.pop_back();
      bool rel = true;
      auto delta = inf;
      rep(ei, ofs[u], ofs[u + 1]) {
        auto& e = edges[ei];
        if (residue(e) > 0) {
          if (cost_p(e) < 0) {
            if (relabel_in_advance(e.to)) {
              --ei; continue;
            }
            cap_t df = min(excess[u], cap_sum_t(residue(e)));
            push(u, e, df);
            if (excess[u]) continue;
            rel = false;
            break;
          } else delta = min(delta, cost_p(e));
        }
      }
      if (rel) relabel(u, delta), admissible_vertices.push_back(u);
    }

    // e.flow := (real) flow
    for (auto& e : edges) e.flow = e.c - e.flow;

    // for (auto& e : edges) assert(e.b <= e.flow && e.flow <= e.c);
    // rep(i, N) assert(excess[i] == 0);
  }

  pair<bool, cost_sum_t> minimum_cost_circulation() {
    /* 
    complexity: O(|V|^2 |E| \log(|V|C))
    (Practically, it runs much faster.)
    */
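    // The loop stops once the residual graph has no negative cycle (eps <= 0),
    // which is the optimality condition for a circulation. The answer divides
    // the scaled costs back by cost_multiplier and halves the sum, since
    // antisymmetrize() stores every edge together with its negated reverse.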

    assert(edges.size() > 0);
    if (avoid_parallel_edges) convert_to_simple_graph();
    if (!has_feasible_circulation()) return {false, -1};
    const int cost_multiplier = 2 << __lg(N); // should be > |V|
    enlarge_edge_cost(cost_multiplier);
    antisymmetrize();
    while (1) {
      auto yto = YoungTarjanOrlinMCR<edge, cost_t, cost_sum_t>(N, residual_graph(edges));
      cost_t eps = -yto.minimum_cycle_ratio(true); // integral ratio
      if (eps <= 0) break;
      // auto time_beg = clock();
      refine(eps / 2, yto.weight1);
      // fprintf(stderr, "refine: |V| = %d, |E| = %d, eps = %lld, elapsed: %.6f sec.\n", 
      //   N, int(edges.size()), eps, double(clock() - time_beg) / CLOCKS_PER_SEC);
    }
    cost_sum_t ret = 0;
    for (auto& e : edges) ret += (e.cost / cost_multiplier) * e.flow;
    return {true, ret / 2};
  }
  
private:
  int N;
  vector< pair<int, int> > original_edges;
  vector<int> ofs;
  vector<edge> edges;
  bool avoid_parallel_edges;
};

} // namespace circulation

int main(){
  int n;
  cin >> n;

  vector<pair<int,int>> v(n);
  cin >> v;

  // Min_Cost_Flow<long long> f(n + n + 1 + 2, 1000000000);
  // MCC f(n+n+3);
  circulation::MinimumCostCirculation f(n+n+3);
  int source = n+n+1;
  int sink = source+1;
  int odd = n+n;
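  // Reading of the construction below: the fixed-capacity edges (sink->source
  // at n, each position n+j -> sink at 1, odd -> sink at n/3) force every item
  // i to carry exactly one unit, either into `odd` (net cost 0, since
  // -v[i].first cancels the source->i cost) or into some position n+j at cost
  // v[i].first + v[i].second * j.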
  // f.add_edge(odd, sink, n/3, 0);
  f.add_directed_edge(odd, sink, n/3, n/3, 0);
  for(int i=0; i<n; i++){
    // f.add_edge(source, i, 1, v[i].first);
    f.add_directed_edge(source, i, 0, 1, v[i].first);
    for(int j=0; j<n-n/3; j++){
      // f.add_edge(i, n+j, 1, v[i].second * j);
      f.add_directed_edge(i, n+j, 0, 1, v[i].second * j);
    }
    // f.add_edge(i, odd, 1, -v[i].first);
    f.add_directed_edge(i, odd, 0, 1, -v[i].first);
  }
  for(int i=0; i<n-n/3; i++){
    // f.add_edge(n+i, sink, 1, 0);
    f.add_directed_edge(n+i, sink, 1, 1, 0);
  }

  f.add_directed_edge(sink, source, n, n, 0);

  // long long cost = f.min_cost_flow(source, sink, n);
  long long cost = f.minimum_cost_circulation().second;
  println(cost);

  return 0;
}