結果
問題 | No.924 紲星 |
ユーザー | hitonanode |
提出日時 | 2019-11-09 18:15:32 |
言語 | C++14 (gcc 12.3.0 + boost 1.83.0) |
結果 |
TLE
|
実行時間 | - |
コード長 | 5,370 bytes |
コンパイル時間 | 2,327 ms |
コンパイル使用メモリ | 187,200 KB |
実行使用メモリ | 34,944 KB |
最終ジャッジ日時 | 2024-09-15 04:42:13 |
合計ジャッジ時間 | 8,431 ms |
ジャッジサーバーID (参考情報) |
judge1 / judge5 |
(要ログイン)
テストケース
テストケース表示入力 | 結果 | 実行時間 実行使用メモリ |
---|---|---|
testcase_00 | AC | 2 ms
10,624 KB |
testcase_01 | AC | 2 ms
5,376 KB |
testcase_02 | AC | 2 ms
5,376 KB |
testcase_03 | AC | 9 ms
5,376 KB |
testcase_04 | AC | 4 ms
5,376 KB |
testcase_05 | AC | 11 ms
5,376 KB |
testcase_06 | AC | 9 ms
5,376 KB |
testcase_07 | AC | 5 ms
5,376 KB |
testcase_08 | TLE | - |
testcase_09 | -- | - |
testcase_10 | -- | - |
testcase_11 | -- | - |
testcase_12 | -- | - |
testcase_13 | -- | - |
testcase_14 | -- | - |
testcase_15 | -- | - |
testcase_16 | -- | - |
testcase_17 | -- | - |
testcase_18 | -- | - |
ソースコード
#include <bits/stdc++.h> using namespace std; #pragma GCC optimize("O3", "unroll-loops") #pragma GCC target("avx") using lint = long long int; using pint = pair<int, int>; using plint = pair<lint, lint>; struct fast_ios { fast_ios(){ cin.tie(0); ios::sync_with_stdio(false); cout << fixed << setprecision(20); }; } fast_ios_; #define ALL(x) (x).begin(), (x).end() #define SZ(x) ((lint)(x).size()) #define POW2(n) (1LL << (n)) #define FOR(i, begin, end) for(int i=(begin),i##_end_=(end);i<i##_end_;i++) #define IFOR(i, begin, end) for(int i=(end)-1,i##_begin_=(begin);i>=i##_begin_;i--) #define REP(i, n) FOR(i,0,n) #define IREP(i, n) IFOR(i,0,n) template<typename T> void ndarray(vector<T> &vec, int len) { vec.resize(len); } template<typename T, typename... Args> void ndarray(vector<T> &vec, int len, Args... args) { vec.resize(len); for (auto &v : vec) ndarray(v, args...); } template<typename T> bool mmax(T &m, const T q) { if (m < q) {m = q; return true;} else return false; } template<typename T> bool mmin(T &m, const T q) { if (m > q) {m = q; return true;} else return false; } template<typename T1, typename T2> pair<T1, T2> operator+(const pair<T1, T2> &l, const pair<T1, T2> &r) { return make_pair(l.first + r.first, l.second + r.second); } template<typename T1, typename T2> pair<T1, T2> operator-(const pair<T1, T2> &l, const pair<T1, T2> &r) { return make_pair(l.first - r.first, l.second - r.second); } template<typename T> istream &operator>>(istream &is, vector<T> &vec){ for (auto &v : vec) is >> v; return is; } ///// This part below is only for debug, not used ///// template<typename T> ostream &operator<<(ostream &os, const vector<T> &vec){ os << "["; for (auto v : vec) os << v << ","; os << "]"; return os; } template<typename T> ostream &operator<<(ostream &os, const deque<T> &vec){ os << "deq["; for (auto v : vec) os << v << ","; os << "]"; return os; } template<typename T> ostream &operator<<(ostream &os, const set<T> &vec){ os << "{"; for (auto v : vec) os << v << ","; os << "}"; return os; } template<typename T> ostream &operator<<(ostream &os, const unordered_set<T> &vec){ os << "{"; for (auto v : vec) os << v << ","; os << "}"; return os; } template<typename T> ostream &operator<<(ostream &os, const multiset<T> &vec){ os << "{"; for (auto v : vec) os << v << ","; os << "}"; return os; } template<typename T> ostream &operator<<(ostream &os, const unordered_multiset<T> &vec){ os << "{"; for (auto v : vec) os << v << ","; os << "}"; return os; } template<typename T1, typename T2> ostream &operator<<(ostream &os, const pair<T1, T2> &pa){ os << "(" << pa.first << "," << pa.second << ")"; return os; } template<typename TK, typename TV> ostream &operator<<(ostream &os, const map<TK, TV> &mp){ os << "{"; for (auto v : mp) os << v.first << "=>" << v.second << ","; os << "}"; return os; } template<typename TK, typename TV> ostream &operator<<(ostream &os, const unordered_map<TK, TV> &mp){ os << "{"; for (auto v : mp) os << v.first << "=>" << v.second << ","; os << "}"; return os; } #define dbg(x) cerr << #x << " = " << (x) << " (L" << __LINE__ << ") " << __FILE__ << endl; ///// END ///// vector<lint> A, Aacc; // 1-indexed BIT (i : [1, len]) int N, Q; struct BIT { using T = lint; int len; vector<T> val; BIT(int num) : len(num), val(num + 1) {} BIT() : BIT(0) {} void reset() { fill(val.begin(), val.end(), 0); } void add(int pos, T v) { while (pos > 0 and pos <= len) val[pos] += v, pos += pos & -pos; } T sum(int pos) const // (0, pos] { T res = 0; while (pos > 0) res += val[pos], pos -= pos & -pos; return res; } }; template<typename T> ostream &operator<<(ostream &os, const BIT &b){ T prv = 0; os << "["; REP(i, b.len) { T nxt = b.sum(i + 1); os << nxt - prv << ","; prv = nxt; } os << "]"; return os; } BIT cou, tot; vector<int> L, R; vector<plint> solve(const vector<lint> &c) { cou = BIT(N + 1); tot = BIT(N + 1); vector<plint> ret(Q); priority_queue<plint, vector<plint>, greater<plint>> pq, as; REP(i, Q) pq.emplace(c[i], i); REP(i, N) as.emplace(A[i], i); while (!pq.empty()) { lint q, i; tie(q, i) = pq.top(); while (!as.empty() and as.top().first < q) { lint i, val; tie(val, i) = as.top(); as.pop(); cou.add(i + 1, 1); tot.add(i + 1, val); } pq.pop(); ret[i].first = cou.sum(R[i]) - cou.sum(L[i]); ret[i].second = tot.sum(R[i]) - tot.sum(L[i]); } return ret; } int main() { cin >> N >> Q; A.resize(N); cin >> A; Aacc.resize(N + 1); REP(i, N) Aacc[i + 1] = Aacc[i] + A[i]; L.resize(Q); R.resize(Q); REP(i, Q) cin >> L[i] >> R[i]; REP(i, Q) L[i]--; vector<lint> lo(Q, -1e9 - 10), hi(Q, 1e9 + 10); REP(_, 40) { vector<lint> c(Q); REP(i, Q) c[i] = (lo[i] + hi[i]) / 2; vector<plint> ret = solve(c); REP(i, Q) { if (ret[i].first <= (R[i] - L[i]) / 2) lo[i] = c[i]; else hi[i] = c[i]; } } vector<plint> ret = solve(lo); REP(i, Q) { lint m = lo[i]; lint n = R[i] - L[i]; lint T = Aacc[R[i]] - Aacc[L[i]]; printf("%lld\n", (T - ret[i].second - (n - ret[i].first) * m) + ret[i].first * m - ret[i].second); } }