Result

| Item | Value |
|---|---|
| Problem | No.5022 XOR Printer |
| Contest | |
| User | prussian_coder |
| Submitted at | 2025-07-15 10:14:08 |
| Language | PyPy3 (7.3.15) |
| Result | TLE |
| Execution time | - |
| Code length | 7,379 bytes |
| Compile time | 257 ms |
| Compile memory | 82,100 KB |
| Runtime memory | 256,396 KB |
| Score | 3,863,242,447 |
| Last judged at | 2025-07-26 12:42:13 |
| Total judge time | 99,404 ms |
| Judge server ID (reference) | judge3 / judge5 |

| File pattern | Result |
|---|---|
| other | AC * 37 TLE * 13 |

Source code
## Beam search solution
from __future__ import annotations
import copy
import heapq
import random
import sys
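# Read the board size N and the turn limit T, then flatten the N x N grid into a 1D list.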
N, T = map(int, input().split())
N2 = N * N
board: list[int] = []
for _ in range(N):
    board.extend(list(map(int, input().split())))
def to_2d(position: int) -> tuple[int, int]:
    """Convert 1D position to 2D coordinates."""
    return position // N, position % N

def to_1d(x: int, y: int) -> int:
    """Convert 2D coordinates to 1D position."""
    return x * N + y

def manhattan_distance(pos1: int, pos2: int) -> int:
    """Calculate Manhattan distance between two positions."""
    x1, y1 = to_2d(pos1)
    x2, y2 = to_2d(pos2)
    return abs(x1 - x2) + abs(y1 - y2)
def generate_move_commands(start: int, end: int) -> list[str]:
    """Generate movement commands from start to end position.

    Args:
        start: Starting position (1D index)
        end: Ending position (1D index)

    Returns:
        List of movement commands ('U', 'D', 'L', 'R')
    """
    commands: list[str] = []
    x1, y1 = to_2d(start)
    x2, y2 = to_2d(end)
    current_x, current_y = x1, y1
    # Move vertically first
    while current_x < x2:
        commands.append("D")
        current_x += 1
    while current_x > x2:
        commands.append("U")
        current_x -= 1
    # Move horizontally
    while current_y < y2:
        commands.append("R")
        current_y += 1
    while current_y > y2:
        commands.append("L")
        current_y -= 1
    return commands
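# Precompute, for every cell, the set of cells within Manhattan distance 8;
# these are the only candidate targets considered from a given position.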
NEAR_POS = [set() for _ in range(N2)]
for v1 in range(N2):
    for v2 in range(N2):
        if manhattan_distance(v1, v2) <= 8:
            NEAR_POS[v1].add(v2)
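# A search node: the board, the held value s, the cursor position, the turn
# count, the current score (sum of the board), and links back to the previous
# state/action so the command sequence can be reconstructed at the end.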
class State:
    def __init__(
        self,
        board: list[int],
        s: int,
        pos: int,
        turn: int,
        score: int,
        prev_state: State | None = None,
        prev_action: tuple[str, int] | None = None,
    ) -> None:
        self.s = s
        self.board = board
        self.pos = pos
        self.turn = turn
        self.score = score
        self.prev_state = prev_state
        self.prev_action = prev_action

    def __copy__(self) -> State:
        return State(
            board=self.board[:],
            s=self.s,
            pos=self.pos,
            turn=self.turn,
            score=self.score,
            prev_state=self,
            prev_action=None,
        )

    def __eq__(self, other: State) -> bool:
        return self.score == other.score

    def __ne__(self, other: State) -> bool:
        return not self.__eq__(other)

    def __lt__(self, other: State) -> bool:
        return self.score < other.score

    def __le__(self, other: State) -> bool:
        return self.score <= other.score

    def __gt__(self, other: State) -> bool:
        return self.score > other.score

    def __ge__(self, other: State) -> bool:
        return self.score >= other.score
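    # is_valid: keep a state only if the bitwise AND over all board cells is
    # zero (i.e. no bit is set in every cell), or if the state is already done.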
    def is_done(self) -> bool:
        return self.turn == T

    def is_valid(self) -> bool:
        if self.is_done():
            return True
        b = (1 << 19) - 1
        for v in self.board:
            b &= v
        if b == 0:
            return True
        return False

    def update_score(self) -> None:
        self.score = sum(self.board)

    def get_next_moves(self) -> list[tuple[str, int]]:
        next_moves = []
        for v2 in NEAR_POS[self.pos]:
            next_moves.append(("W", v2))
            next_moves.append(("C", v2))
        return next_moves

    def get_next_turn(self, pos: int) -> int:
        dist = manhattan_distance(self.pos, pos)
        return self.turn + dist + 1

    def calc_next_score(self, key: str, pos: int) -> int:
        if key == "C":
            return self.score
        new_v = self.board[pos] ^ self.s
        return self.score + new_v - self.board[pos]
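    # Apply one action: "C" copies board[pos] into s (s ^= board[pos]), while
    # "W" writes s into the cell (board[pos] ^= s) and updates the score.
    # Either way the turn advances by the walking distance plus one.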
    def process_move(self, key: str, pos: int) -> State:
        new_state = copy.copy(self)
        if key == "C":
            new_state.s ^= self.board[pos]
            new_state.turn = self.get_next_turn(pos)
        else:
            new_state.score = self.calc_next_score(key, pos)
            new_state.turn = self.get_next_turn(pos)
            new_state.board[pos] ^= new_state.s
        new_state.pos = pos
        new_state.prev_action = (key, self.pos)
        return new_state
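    # Finishing sequence: walk to the cell with the minimum value and play
    # "W", "C", "W", which leaves the current s in that cell.  last_score is
    # the resulting board sum (0 if the sequence does not fit in the remaining
    # turns); last_move emits the corresponding commands.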
    def last_score(self) -> int:
        min_v = min(self.board)
        min_pos = self.board.index(min_v)
        dist = manhattan_distance(min_pos, self.pos)
        if self.turn + dist + 3 >= T:
            return 0
        score = sum(self.board) + self.s - min_v
        return score

    def last_move(self) -> list[str]:
        min_v = min(self.board)
        min_pos = self.board.index(min_v)
        return generate_move_commands(self.pos, min_pos) + ["W", "C", "W"]
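# Beam search over turns.  Because one action costs at most 9 turns (distance
# <= 8 plus the action itself), states are bucketed into a ring buffer of 10
# turn slots; each slot keeps at most beam_width states in a min-heap keyed
# by score.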
class BeamSearchSolver:
    def __init__(self) -> None:
        self.beam_width = 80
        self.state_que_size = 10

    def process(self, board: list[int]) -> State:
        states: list[list[State]] = [[] for _ in range(self.state_que_size)]
        initial_state = State(
            board,
            s=0,
            pos=0,
            turn=0,
            score=sum(board),
        )
        states[0].append(initial_state)
        best_score = 0
        best_state = None
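        # Expand each turn slot in order: deduplicate by (score, s), generate
        # candidate actions, and keep only non-worsening, valid successors in
        # the target slot's heap.  In the last 30 turns, also evaluate the
        # finishing sequence and remember the best final state.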
        for turn in range(T):
            score_seen = set()
            for state in states[turn % self.state_que_size]:
                current_score = state.score
                current_s = state.s
                if (current_score, current_s) in score_seen:
                    continue
                score_seen.add((current_score, current_s))
                next_moves = state.get_next_moves()
                for key, pos in next_moves:
                    next_turn = state.get_next_turn(pos)
                    next_t = next_turn % self.state_que_size
                    if next_turn > T:
                        continue
                    score = state.calc_next_score(key, pos)
                    if score < state.score:
                        continue
                    if len(states[next_t]) < self.beam_width:
                        new_state = state.process_move(key, pos)
                        if new_state.is_valid():
                            heapq.heappush(states[next_t], new_state)
                    elif score > states[next_t][0].score:
                        new_state = state.process_move(key, pos)
                        if new_state.is_valid():
                            heapq.heappop(states[next_t])
                            heapq.heappush(states[next_t], new_state)
            if turn >= T - 30:
                for state in states[turn % self.state_que_size]:
                    final_score = state.last_score()
                    if final_score > best_score:
                        best_score = final_score
                        best_state = state
            states[turn % self.state_que_size] = []
        return best_state
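# Run the solver, then turn the best final state back into a command list;
# the finishing moves from last_move() are appended after the replayed path.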
solver = BeamSearchSolver()
best_state = solver.process(board)
last_actions = best_state.last_move()
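# Walk the prev_state chain backwards: for each step append the action key and
# then the movement commands in reverse; reversing the whole list afterwards
# restores forward order (moves first, then the action).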
actions = []
while best_state.prev_state is not None:
    pos = best_state.pos
    key, last_pos = best_state.prev_action
    actions.append(key)
    actions.extend(generate_move_commands(last_pos, pos)[::-1])
    best_state = best_state.prev_state
actions.reverse()
actions.extend(last_actions)
for a in actions:
    print(a)