Result

| Item | Value |
|---|---|
| Problem | No.2146 2 Pows |
| Contest | |
| User | qwewe |
| Submitted at | 2025-05-14 13:03:45 |
| Language | PyPy3 (7.3.15) |
| Result | WA |
| Execution time | - |
| Code length | 7,034 bytes |
| Compile time | 167 ms |
| Compile memory | 82,576 KB |
| Runtime memory | 53,376 KB |
| Last judged at | 2025-05-14 13:05:16 |
| Total judge time | 6,072 ms |
| Judge server ID (reference) | judge4 / judge1 |
| File pattern | Result |
|---|---|
| sample | AC * 2 |
| other | AC * 2 WA * 1 TLE * 1 -- * 28 |
Source code
```python
import sys

# Define the main function to solve the problem
def solve():
    # Read input values for N, A, B, C
    N, A, B, C = map(int, sys.stdin.readline().split())

    # Initialize a list to store the minimum cost for each residue k (0 to N-1).
    # float('inf') represents infinity, useful for taking minimums.
    ans = [float('inf')] * N

    # Convert A, B, C to Python's arbitrary-precision integers.
    # This is important because costs can exceed standard 64-bit integer limits.
    A = int(A)
    B = int(B)
    C = int(C)

    # --- Case distinction based on A vs C ---
    # The optimal strategy depends on the relative costs of adding elements (A)
    # versus increasing the maximum exponent (C).
    if A < C:
        # Case 1: A < C
        # It is cheaper to use multiple copies of smaller powers of 2 than larger ones.
        # Specifically, replacing {2^{i+1}} with {2^i, 2^i} decreases the cost by C - A > 0.
        # Repeated application suggests the optimal representation uses only 1s (2^0).
        # For a target sum K, the multiset S = {1, 1, ..., 1} (K copies) is optimal.
        # Cost for this S: x = K, y = 1 (only 2^0 is used), z = 0, so cost = A*K + B*1 + C*0 = A*K + B.
        # We need the minimum of A*K + B over all K such that K = k + m*N > 0,
        # so we simply minimize K.

        # For k = 0: the smallest positive K is N (when m = 1).
        cost_k0 = A * N + B
        ans[0] = cost_k0

        # For k = 1 to N-1: the smallest positive K is k itself (when m = 0).
        for k in range(1, N):
            cost_k = A * k + B
            ans[k] = cost_k
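        # Worked example (hypothetical values): N = 5, A = 2, B = 3, C = 10 (so A < C):
        #   k = 0 -> smallest positive K is N = 5, cost = 2*5 + 3 = 13
        #   k = 3 -> smallest positive K is 3,     cost = 2*3 + 3 = 9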
    else:  # Case 2: A >= C
        # It is generally at least as cheap to use larger powers of 2 instead of multiple
        # copies of smaller ones (e.g., {2^{i+1}} is preferred over {2^i, 2^i} because A >= C).
        # This suggests the optimal representation for a fixed sum K is its standard binary form.
        # Let K = sum(b_i * 2^i) where each b_i is 0 or 1.
        # The multiset S consists of 2^i for each b_i = 1, so
        #   x = number of set bits (popcount) = sum(b_i),
        #   y = number of distinct elements = popcount,
        #   z = floor(log2(K)) = maximum exponent i such that b_i = 1.
        # Cost f(K) = A*popcount(K) + B*popcount(K) + C*floor(log2(K))
        #           = (A + B) * popcount(K) + C * floor(log2(K)).
        # We need to minimize f(K) over all K such that K = k + m*N > 0.

        # Helper function to calculate the cost f(K) for a given K
        def calculate_cost(K):
            # K must be positive because S is non-empty.
            if K <= 0:
                return float('inf')
            # Popcount (number of set bits); bin(K).count('1') works across Python versions.
            pc = bin(K).count('1')
            # floor(log2(K)) via bit_length(): for K > 0, it equals K.bit_length() - 1.
            log2_val = K.bit_length() - 1
            # Compute the cost using (potentially large) Python integers.
            cost = (A + B) * pc + C * log2_val
            return cost
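        # Worked example: K = 5 = 0b101  -> pc = 2, log2_val = 2, f(5) = 2*(A + B) + 2*C;
        #                 K = 8 = 0b1000 -> pc = 1, log2_val = 3, f(8) = (A + B) + 3*C.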
        # -- Optimization 1: check candidates of the form K = 2^p --
        # If K is a power of 2, K = 2^p, then popcount = 1 and the cost is (A + B) + C*p,
        # which is often low.
        # Precompute the minimal exponent p for each residue k (1 to N-1) such that 2^p = k mod N.
        first_p = {}  # Dictionary mapping {residue: minimal p}
        current_pow_2_mod_N = 1  # Start with 2^0 = 1
        # Iterate p from 0 up to N. The multiplicative order of 2 mod N is at most N,
        # so this covers all reachable residues.
        for p in range(N + 1):
            k_res = current_pow_2_mod_N
            # If this residue is non-zero and seen for the first time, store p.
            if k_res != 0 and k_res not in first_p:
                first_p[k_res] = p
            # Calculate the next power of 2 modulo N
            current_pow_2_mod_N = (current_pow_2_mod_N * 2) % N

        # Update the minimum costs for k = 1..N-1 using these K = 2^p candidates.
        for k in range(1, N):
            if k in first_p:
                pk = first_p[k]
                # Cost for K = 2^pk
                cost_k_pow2 = (A + B) + C * pk
                ans[k] = min(ans[k], cost_k_pow2)
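        # Example: N = 7 -> 2^0, 2^1, 2^2 are 1, 2, 4 (mod 7) and then the cycle repeats,
        # so first_p = {1: 0, 2: 1, 4: 2}; residues 3, 5, 6 get no K = 2^p candidate here.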
        # -- Optimization 2: check candidates of the form K = 2^p + 1 for k = 0 --
        # Another form with low popcount (2) is K = 2^p + 2^q.
        # For k = 0 we need K = 0 mod N; a relevant simple form is K = 2^p + 1 (where q = 0).
        # We need 2^p + 1 = 0 mod N, which means 2^p = -1 mod N.
        # Find the smallest positive exponent d such that 2^d = -1 mod N.
        min_d = -1  # Sentinel value if no such d exists
        current_pow_2_mod_N = 1  # Start with 2^0 = 1
        target_neg_1 = (N - 1) % N  # -1 mod N, computed correctly (it is 0 when N = 1)
        # Iterate d from 1 up to N.
        for d in range(1, N + 1):
            # Compute 2^d mod N
            current_pow_2_mod_N = (current_pow_2_mod_N * 2) % N
            # Check whether it equals -1 mod N
            if current_pow_2_mod_N == target_neg_1:
                min_d = d  # Found the smallest positive d
                break
        # If such a d was found, calculate the cost for K = 2^d + 1 and update ans[0].
        if min_d != -1:
            # For K = 2^min_d + 1: popcount = 2, maximum exponent = min_d
            cost_k0_pow2sum = 2 * (A + B) + C * min_d
            ans[0] = min(ans[0], cost_k0_pow2sum)
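        # Example: N = 5 -> 2^2 = 4 = -1 (mod 5), so min_d = 2 and K = 2^2 + 1 = 5,
        #          with cost 2*(A + B) + 2*C.
        #          N = 7 -> powers of 2 mod 7 cycle through {1, 2, 4}, so min_d stays -1.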
        # -- Main exploration: check K = k + m*N for small m --
        # Explore values K = k + m*N for m from 0 up to a limit M.
        # M = 64 is chosen heuristically: powers of 2 up to 2^64 cover standard integer types,
        # and the values of K generated this way seem sufficient based on the examples and
        # typical problem constraints.
        M = 64
        # Iterate through m from 0 to M
        for m in range(M + 1):
            # Consider residues k from 1 to N-1
            for k in range(1, N):
                # Calculate K = k + m*N (Python integers handle arbitrary size)
                K = k + m * N
                # Calculate the cost f(K)
                cost = calculate_cost(K)
                # Update the minimum cost found for residue k
                ans[k] = min(ans[k], cost)
            # Consider the k = 0 case:
            # we need K = t*N with t >= 1 (since K must be positive).
            # Let t = m + 1, so K = (m + 1) * N.
            K = (m + 1) * N
            # Calculate the cost f(K)
            cost = calculate_cost(K)
            # Update the minimum cost found for residue 0
            ans[0] = min(ans[0], cost)
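        # Example: N = 3 -> for k = 2 this checks K in {2, 5, 8, ..., 2 + 64*3};
        #          for k = 0 it checks K in {3, 6, ..., 65*3}.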
    # Print the final minimum costs stored in the ans list
    for res in ans:
        print(res)

# Execute the solve function when the script is run
solve()
```
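
The candidate sums explored above (binary representations, K = 2^p, K = 2^p + 1, and K = k + m*N for m ≤ 64) are heuristic rather than provably exhaustive. For very small parameters, a brute force over small multisets of powers of two can serve as a reference. The sketch below is illustrative only and assumes the cost model described in the solution's comments (cost = A*x + B*y + C*z, where x is the multiset size, y the number of distinct powers, z the largest exponent used, and the sum must be congruent to k mod N); the bounds `max_exp` and `max_count` are arbitrary choices for tiny checks, not values taken from the problem.

```python
from itertools import product

def brute_force(N, A, B, C, max_exp=4, max_count=3):
    """Reference answers for tiny inputs, under the cost model assumed above."""
    best = [float('inf')] * N
    # Enumerate multisets as count vectors (c_0, ..., c_max_exp): c_i copies of 2^i.
    for counts in product(range(max_count + 1), repeat=max_exp + 1):
        if sum(counts) == 0:
            continue  # the multiset S must be non-empty
        total = sum(c * (1 << i) for i, c in enumerate(counts))
        x = sum(counts)                                    # total number of elements
        y = sum(1 for c in counts if c > 0)                # number of distinct powers
        z = max(i for i, c in enumerate(counts) if c > 0)  # largest exponent used
        best[total % N] = min(best[total % N], A * x + B * y + C * z)
    return best

# Hypothetical usage: compare against the submitted solution's output for small N.
# print(brute_force(3, 1, 1, 1))
```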