Result

| Problem | No.2026 Yet Another Knapsack Problem |
|---|---|
| Contest | |
| User | qwewe |
| Submission Time | 2025-05-14 13:18:46 |
| Language | C++17 (gcc 13.3.0 + boost 1.87.0) |
| Result | TLE |
| Run Time | - |
| Code Length | 11,363 bytes |
| Compile Time | 1,138 ms |
| Compile Memory | 113,868 KB |
| Run Memory | 13,064 KB |
| Last Judged | 2025-05-14 13:19:51 |
| Total Judge Time | 15,708 ms |
| Judge Server ID (reference) | judge5 / judge1 |

| File Pattern | Result |
|---|---|
| sample | AC * 3 |
| other | AC * 35, TLE * 1, -- * 6 |
Source Code
#include <iostream>
#include <vector>
#include <deque>
#include <cmath>
#include <algorithm>
using namespace std;
// Use long long for values and DP results to handle potentially large sums.
typedef long long ll;
// Define a constant for negative infinity.
// Needs to be significantly smaller than any possible achievable value.
// Max possible value sum is ~ N * 10^9 = 2.5 * 10^12.
// Min possible value sum is ~ N * (-10^9) = -2.5 * 10^12.
// Using -4e18 ensures it's far below any possible sum and avoids overflow issues
// during addition/subtraction with values up to 10^9.
const ll INF = -4e18;
/**
* @brief Updates the DP table for a given item type using monotonic queue optimization.
*
* This function implements the core logic for the bounded knapsack variation required by the problem.
* It updates a DP table `dp[k][w]` representing the maximum value achievable using exactly `k` items
* with a total weight of exactly `w`. The update considers adding `j` items (where 0 <= j <= c)
* of the current type (weight `item_idx`, value `v`).
* The monotonic queue optimization processes states efficiently along diagonals in the (k, q) space,
* where w = q * item_idx + r, achieving O(N^2) complexity per item type for the full DP table update.
*
* @param N_weights The maximum total weight allowed (problem constraint N).
* @param item_idx The index (and weight) of the item type. Must be > 0.
* @param c The count of available items of this type.
* @param v The value of one item of this type.
* @param dp The DP table (passed by reference, will be updated). dp[k][w] stores max value for k items, weight w.
* The dimensions of dp are assumed to be (max_k + 1) x (N_weights + 1).
* @param max_k The maximum number of items (k) to consider in the DP states. This limits the rows updated/read.
* Used to optimize complexity by splitting items into small/large weight categories.
*/
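// In recurrence form, the update this routine performs for each state (k, w) with w = q * w_item + r is
//     dp_next[k][w] = max( dp[k][w],  max_{1 <= j <= c} ( dp[k - j][w - j * w_item] + j * v ) ),
// where unreachable or out-of-range previous states are ignored. Substituting k' = k - j turns the
// inner maximum into a sliding-window maximum over k' in [k - c, k - 1] along the same diagonal:
//     dp_next[k][w] = max( dp[k][w],  k * v + max_{k'} ( dp[k'][w - (k - k') * w_item] - k' * v ) ),
// and Y_{k'} = dp[k'][.] - k' * v is exactly the quantity the deque keeps in decreasing order.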
void update_dp_restricted(int N_weights, int item_idx, int c, ll v, vector<vector<ll>>& dp, int max_k) {
int w_item = item_idx;
// Basic check: item weight must be positive.
if (w_item <= 0) return;
// Create a copy of the DP table to read old values from. Updates are written to dp_next.
// This prevents using partially updated values within the same iteration for item i.
vector<vector<ll>> dp_next = dp;
// Iterate over all possible remainders modulo item weight. This partitions states into groups.
for (int r = 0; r < w_item; ++r) {
// Iterate over all relevant diagonals defined by d = k - q.
// k represents item count, q represents number of full item_idx weights in total weight w (w = q*w_item + r).
// The range of d ensures all relevant (k, q) pairs are covered.
int max_q_overall = (N_weights - r) / w_item; // Max possible q for this remainder r.
// Minimum k is 0, max q -> min d = -max_q. Maximum k is max_k, min q=0 -> max d = max_k.
for (int d = -max_q_overall; d <= max_k; ++d) {
// Monotonic queue stores pairs {k', Y_k'} optimizing the sliding window maximum calculation.
// Y_k' = dp[k'][q' * w_item + r] - k' * v, represents the value contribution adjusted for queue comparison.
deque<pair<int, ll>> mono_q;
// Iterate through states (k, w) along the current diagonal d = k - q by increasing q.
int max_q = (N_weights - r) / w_item; // Max q for current remainder r.
for (int q = 0; q <= max_q; ++q) {
int k = q + d; // Calculate corresponding item count k.
int w = q * w_item + r; // Calculate corresponding total weight w.
// Check if k is within the valid range [0, max_k] for this DP update phase.
if (k < 0 || k > max_k) {
continue; // Skip if k is out of the bounds relevant to this phase.
}
// w is guaranteed <= N_weights because q <= max_q.
// Step 1: Maintain the sliding window. Remove elements from the front of the queue
// whose index k' has dropped below k - c (reaching them would require more than c copies).
while (!mono_q.empty() && mono_q.front().first < k - c) {
mono_q.pop_front();
}
// Step 2: Calculate the maximum value contribution from adding j >= 1 items of the current type.
ll V_max = INF; // Initialize max value contribution to negative infinity.
if (!mono_q.empty()) {
// The front element of the queue holds the maximum Y_k' value within the window.
// The total value for state (k, w) derived from this best previous state k' is Y_{k'_front} + k * v.
V_max = mono_q.front().second + (ll)k * v;
}
// Step 3: Update the DP state dp_next[k][w].
// The new value is the maximum of:
// a) The value without using any more items of type i (dp[k][w], stored initially in dp_next[k][w]).
// b) The value V_max obtained by using the optimal number (>=1) of items of type i.
if (V_max > INF) { // Only update if V_max represents a valid reachable state.
dp_next[k][w] = max(dp_next[k][w], V_max);
}
// Step 4: Prepare the value Y_k = dp[k][w] - k * v associated with the current state (k, w)
// for potential insertion into the monotonic queue. Read from the *original* dp table.
ll current_Y = INF;
// Ensure indices k, w are within bounds of the dp table ((max_k + 1) x (N_weights + 1)) before reading.
if (k >= 0 && k < dp.size() && w >= 0 && w < dp[k].size() && dp[k][w] > INF) {
current_Y = dp[k][w] - (ll)k * v;
}
// Step 5: Add the current state {k, current_Y} to the back of the queue.
// Maintain the monotonic property (decreasing Y values) by removing elements from the back
// that are less than or equal to current_Y.
if (current_Y > INF) { // Only add reachable states.
while (!mono_q.empty() && mono_q.back().second <= current_Y) {
mono_q.pop_back();
}
mono_q.push_back({k, current_Y});
}
}
}
}
// After processing all remainders and diagonals for the current item type, update the main DP table.
dp = dp_next;
}
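// --- Reference sketch (illustrative only; not called anywhere) ---
// A minimal, unoptimized version of the same update, under a hypothetical name
// `update_dp_reference`: it applies the bounded-knapsack recurrence directly with an O(c)
// inner loop instead of the monotonic deque. It is included only to document what the
// optimized routine above computes; the submitted solution does not use it.
void update_dp_reference(int N_weights, int item_idx, int c, ll v, vector<vector<ll>>& dp, int max_k) {
    if (item_idx <= 0) return;
    // Read old values from dp, write updated values into dp_next (same convention as above).
    vector<vector<ll>> dp_next = dp;
    for (int k = 0; k <= max_k; ++k) {
        for (int w = 0; w <= N_weights; ++w) {
            // Try every feasible count j of the current item type explicitly.
            for (int j = 1; j <= c && j <= k && (ll)j * item_idx <= w; ++j) {
                if (dp[k - j][w - j * item_idx] > INF) {
                    dp_next[k][w] = max(dp_next[k][w], dp[k - j][w - j * item_idx] + (ll)j * v);
                }
            }
        }
    }
    dp = dp_next;
}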
// Wrapper function for the general DP update case (used for small items).
// This calls the restricted update function with max_k set to N, effectively using the full DP table.
void update_dp_general(int N, int item_idx, int c, ll v, vector<vector<ll>>& dp) {
// The dp table here is (N+1) x (N+1). max_k should be N.
update_dp_restricted(N, item_idx, c, v, dp, N);
}
int main() {
// Faster I/O operations
ios_base::sync_with_stdio(false);
cin.tie(NULL);
int N; // Number of item types and max weight constraint
cin >> N;
// Store item data: counts and values for each type i=1..N
vector<pair<int, ll>> items_data(N + 1);
// Meta-data to categorize items based on weight for optimization strategy
vector<pair<int, int>> small_items_meta, large_items_meta; // Stores pairs {item_idx, count}
// Determine the threshold for small/large weights, typically sqrt(N)
int sqrtN_floor = floor(sqrt((double)N));
// Read input and categorize items into small (weight <= sqrtN) and large (weight > sqrtN) groups
for (int i = 1; i <= N; ++i) {
cin >> items_data[i].first >> items_data[i].second;
if (i <= 0) continue; // Should not happen based on constraints, but safe check.
if (i <= sqrtN_floor) {
small_items_meta.push_back({i, items_data[i].first});
} else {
large_items_meta.push_back({i, items_data[i].first});
}
}
// --- Phase 1: Process large weight items ---
// Use a restricted DP table dp_L limited to k <= sqrtN_floor.
// Each large item has weight >= sqrtN_floor + 1, so any selection of total weight <= N contains
// at most N / (sqrtN_floor + 1) <= sqrtN_floor large items; restricting the item count to
// k <= sqrtN_floor therefore loses no reachable states.
// dp_L[k][w] stores the max value using exactly k items (from large types) with total weight exactly w.
// Size is (sqrtN_floor + 1) rows, (N + 1) columns.
vector<vector<ll>> dp_L(sqrtN_floor + 1, vector<ll>(N + 1, INF));
// Initialize base case: 0 items, 0 weight, 0 value. Check if sqrtN_floor is valid index.
if (sqrtN_floor >= 0) {
dp_L[0][0] = 0;
}
// Apply DP updates for each large item type using the restricted DP table and k limit.
for (const auto& p : large_items_meta) {
int item_idx = p.first;
int count = p.second;
ll value = items_data[item_idx].second;
update_dp_restricted(N, item_idx, count, value, dp_L, sqrtN_floor);
}
// --- Phase 2: Process small weight items ---
// Small items (weight <= sqrtN_floor) can contribute up to N items in total (e.g. N copies of
// weight 1), so the item count k must range over the full 0..N here: use the full (N+1) x (N+1) table.
vector<vector<ll>> dp(N + 1, vector<ll>(N + 1, INF));
// Initialize the full DP table using the results from the large items phase (dp_L).
for (int k = 0; k <= sqrtN_floor; ++k) {
// Ensure k is within bounds of the target dp table
if (k >= dp.size()) continue;
for (int w = 0; w <= N; ++w) {
// Ensure w is within bounds of the target dp table row
if (w >= dp[k].size()) continue;
// Copy reachable states from dp_L to the full dp table. Check bounds of dp_L.
if (k < dp_L.size() && w < dp_L[k].size() && dp_L[k][w] > INF) {
dp[k][w] = dp_L[k][w];
}
}
}
// Explicitly re-assert the base case: 0 items, 0 weight, 0 value.
// (Normally inherited from dp_L[0][0]; the constraints guarantee N >= 1, so dp is at least 2 x 2.)
dp[0][0] = 0;
// Apply DP updates for each small item type using the full DP table.
for (const auto& p : small_items_meta) {
int item_idx = p.first;
int count = p.second;
ll value = items_data[item_idx].second;
// Use the general update function which internally calls restricted update with max_k = N.
update_dp_general(N, item_idx, count, value, dp);
}
// --- Final step: Compute and output answers ---
// The final DP table `dp[k][w]` contains max value for exactly k items, exactly weight w.
// We need max value for exactly k items, weight <= N.
vector<ll> final_ans(N + 1, INF);
for (int k = 1; k <= N; ++k) {
ll current_max_val = INF;
// Find max(dp[k][w]) over all possible weights w from 0 to N.
for (int w = 0; w <= N; ++w) {
// Check bounds before accessing dp table elements
if (k < dp.size() && w < dp[k].size()) {
current_max_val = max(current_max_val, dp[k][w]);
}
}
// The problem guarantees that a solution exists for each k, so current_max_val should be > INF.
final_ans[k] = current_max_val;
}
// Output the results for k=1 to N.
for (int k = 1; k <= N; ++k) {
cout << final_ans[k] << "\n";
}
return 0;
}