結果
| 問題 |
No.1867 Partitions and Inversions
|
| コンテスト | |
| ユーザー |
qwewe
|
| 提出日時 | 2025-05-14 13:26:30 |
| 言語 | PyPy3 (7.3.15) |
| 結果 |
TLE
|
| 実行時間 | - |
| コード長 | 6,274 bytes |
| コンパイル時間 | 168 ms |
| コンパイル使用メモリ | 82,244 KB |
| 実行使用メモリ | 60,644 KB |
| 最終ジャッジ日時 | 2025-05-14 13:27:26 |
| 合計ジャッジ時間 | 6,851 ms |
|
ジャッジサーバーID (参考情報) |
judge4 / judge5 |
(要ログイン)
| ファイルパターン | 結果 |
|---|---|
| sample | -- * 3 |
| other | AC * 2 TLE * 1 -- * 62 |
ソースコード
import sys
def solve():
    """Read N and a permutation P from stdin, run the partition DP, print dp[N][N].

    dp[s][j] = minimum number of "cross" inversions when the prefix P[0:j] is
    split into s contiguous segments and each segment is sorted individually.
    A cross inversion is a pair (a, b) with a in an earlier segment than b and
    P[a] > P[b]; within-segment inversions vanish because segments are sorted.

    Performance fix vs. the original submission (which TLE'd): the original
    re-sorted every candidate segment and ran bisect over the whole prefix for
    every DP transition.  Here the full cost table is precomputed once in
    O(N^2), so each transition is an O(1) lookup and the DP is a plain O(N^3).

    NOTE(review): the original printed dp[s][N] using the *stale* loop variable
    s, which equals N after the loop — i.e. it printed dp[N][N].  That exact
    behavior is preserved (and made explicit) below; confirm against the
    problem statement whether per-s answers were actually intended.
    """
    N = int(sys.stdin.readline())
    P = list(map(int, sys.stdin.readline().split()))
    INF = float('inf')

    # --- O(N^2) precomputation of segment costs -----------------------------
    # cost[p][j] = #{(a, b) : a < p <= b < j and P[a] > P[b]}
    #            = cross inversions between prefix P[0:p] and segment P[p:j].
    # Built row by row: g[b] holds #{a < p : P[a] > P[b]} for the current p,
    # and cost[p][j] = cost[p][j-1] + g[j-1].  Equivalent to the original
    # sum-of-bisect_left values (P is a permutation, so "<" vs "<=" ties
    # cannot occur), but computed once instead of per transition.
    cost = [[0] * (N + 1) for _ in range(N + 1)]
    g = [0] * N  # g[b] = count of indices a < p with P[a] > P[b]
    for p in range(N + 1):
        row = cost[p]
        acc = 0
        for j in range(p + 1, N + 1):
            acc += g[j - 1]
            row[j] = acc
        if p < N:
            # Fold P[p] into g so g describes prefixes of length p + 1 next.
            x = P[p]
            for b in range(N):
                if x > P[b]:
                    g[b] += 1

    # --- DP over the number of segments -------------------------------------
    # Rolling 1-D layers: dp_prev is the dp[s-1][.] row, dp_cur is dp[s][.].
    dp_prev = [INF] * (N + 1)
    dp_prev[0] = 0  # zero elements, zero segments, zero inversions
    for s in range(1, N + 1):
        dp_cur = [INF] * (N + 1)
        for j in range(s, N + 1):  # need at least s elements for s segments
            best = INF
            for p in range(s - 1, j):  # p = split point: last segment is P[p:j]
                v = dp_prev[p]
                if v == INF:
                    continue
                cand = v + cost[p][j]
                if cand < best:
                    best = cand
            dp_cur[j] = best
        dp_prev = dp_cur

    # dp_prev is now the s == N layer, matching the original's (stale-variable)
    # print of dp[s][N] == dp[N][N], i.e. the inversion count of P itself.
    sys.stdout.write(str(dp_prev[N]) + "\n")
if __name__ == "__main__":
    # Run only when executed as a script: importing this module no longer
    # triggers the stdin-reading solver as an import-time side effect.
    solve()
qwewe