結果
| 問題 |
No.1744 Selfish Spies 1 (à la Princess' Perfectionism)
|
| コンテスト | |
| ユーザー |
qwewe
|
| 提出日時 | 2025-05-14 13:04:15 |
| 言語 | PyPy3 (7.3.15) |
| 結果 |
WA
|
| 実行時間 | - |
| コード長 | 10,749 bytes |
| コンパイル時間 | 178 ms |
| コンパイル使用メモリ | 82,112 KB |
| 実行使用メモリ | 109,320 KB |
| 最終ジャッジ日時 | 2025-05-14 13:06:09 |
| 合計ジャッジ時間 | 6,609 ms |
|
ジャッジサーバーID (参考情報) |
judge5 / judge2 |
(要ログイン)
| ファイルパターン | 結果 |
|---|---|
| other | AC * 21 WA * 18 |
ソースコード
import sys
# Increase recursion depth limit for deep DFS paths in Tarjan's algorithm or Dinic's DFS.
# The default limit (usually 1000) might be insufficient for large graphs.
# Set a higher limit like 200000, which should be safe for typical contest constraints.
try:
# Set a significantly higher recursion depth limit.
# N, M <= 500, N+M <= 1000. L <= 100000.
# Max depth could be around N+M for DFS/SCC.
# Let's set it high enough to be safe.
sys.setrecursionlimit(200000)
except Exception:
# Ignore if setting limit fails (e.g., due to permissions or system restrictions)
pass
# Use faster I/O
input = sys.stdin.readline
def solve():
N, M, L = map(int, input().split())
# Store edges with their original 0-based index to preserve output order.
# Edges are stored as ((Spy, Task), original_index)
edges_list_with_orig_idx = []
for i in range(L):
S, T = map(int, input().split())
# Use 1-based indexing for spies S and tasks T as given in problem.
edges_list_with_orig_idx.append(((S, T), i))
# Setup for Max Flow (Dinic's Algorithm) to find maximum bipartite matching.
# Node mapping:
# Source: 0
# Spies: 1 to N
# Tasks: N+1 to N+M
# Sink: N+M+1
source = 0
sink = N + M + 1
num_nodes = N + M + 2 # Total nodes in the flow network
# Adjacency list representation for the flow graph.
# Each entry stores: [to_node, capacity, reverse_edge_index]
graph = [[] for _ in range(num_nodes)]
# Helper function to add an edge and its residual counterpart.
def add_edge(u, v, cap):
# Forward edge
graph[u].append([v, cap, len(graph[v])])
# Residual edge (backward edge with 0 initial capacity)
graph[v].append([u, 0, len(graph[u]) - 1])
# Add edges from source to all spy nodes with capacity 1.
for i in range(1, N + 1):
add_edge(source, i, 1)
# Add edges from all task nodes to sink with capacity 1.
for i in range(N + 1, N + M + 1):
add_edge(i, sink, 1)
# Add edges between spies and tasks based on the input list L.
# Capacity is 1, representing that each spy-task assignment can be used at most once.
for i in range(len(edges_list_with_orig_idx)):
(S, T), original_idx = edges_list_with_orig_idx[i]
u = S # Spy node index
v = N + T # Task node index
add_edge(u, v, 1)
# Dinic BFS phase: build level graph.
level = [-1] * num_nodes # Stores distance from source
def bfs():
# Reset levels for all nodes
for i in range(num_nodes): level[i] = -1
level[source] = 0 # Source is at level 0
# Standard BFS implementation
q = [source]
head = 0
while head < len(q):
u = q[head]; head += 1
for i in range(len(graph[u])):
v, cap, _ = graph[u][i]
# Explore edge if it has capacity and destination node hasn't been visited
if cap > 0 and level[v] < 0:
level[v] = level[u] + 1 # Assign level
q.append(v) # Add to queue
# Return True if sink is reachable, False otherwise
return level[sink] != -1
# Dinic DFS phase: find augmenting paths in the level graph.
# Uses iterator `iter_nodes` to avoid re-exploring dead ends within the same phase.
iter_nodes = [0] * num_nodes
def dfs(u, flow):
# Base case: reached the sink
if u == sink: return flow
# Iterate through edges using the iterator pointer
while iter_nodes[u] < len(graph[u]):
i = iter_nodes[u]
v, cap, rev = graph[u][i]
# Check if edge has capacity and leads to a node in the next level
if cap > 0 and level[u] < level[v]:
# Recursively find flow downstream
pushed_flow = dfs(v, min(flow, cap))
if pushed_flow > 0:
# Augment flow: decrease capacity of forward edge, increase capacity of backward edge
graph[u][i][1] -= pushed_flow
graph[v][rev][1] += pushed_flow
return pushed_flow # Return the amount of flow pushed
# Move iterator to the next edge if current edge didn't yield flow
iter_nodes[u] += 1
return 0 # No path found from this node
# Main loop of Dinic's algorithm
max_matching_size = 0
while bfs(): # While sink is reachable (augmenting paths might exist)
# Reset DFS iterators for the new level graph
iter_nodes = [0] * num_nodes
while True:
# Push flow along augmenting paths found by DFS
f = dfs(source, float('inf'))
if f == 0: break # No more augmenting paths in this level graph
max_matching_size += f # Accumulate total flow
# After max flow computation, identify edges belonging to the maximum matching M*.
# An edge (S, T) is in M* if flow of 1 was sent from S to N+T.
# This corresponds to the residual capacity of edge S -> N+T being 0.
match_edges_set = set()
for S in range(1, N + 1):
u = S # Spy node
# Check edges originating from spy node S
for i in range(len(graph[u])):
edge_data = graph[u][i]
v, cap, rev_idx = edge_data
# Check if it's an edge to a task node
if N + 1 <= v <= N + M:
# If residual capacity is 0, means flow=1 passed (since initial capacity was 1).
if graph[u][i][1] == 0:
T = v - N # Get original task ID
match_edges_set.add((S, T))
break # Each spy can be matched at most once, move to next spy.
# Construct the directed graph D required for SCC analysis.
# Nodes in D correspond to spies (1..N) and tasks (N+1..N+M).
scc_graph_adj = [[] for _ in range(N + M + 1)] # Using 1-based indexing for consistency
for (S, T), _ in edges_list_with_orig_idx:
u = S # Spy node index
v = N + T # Task node index (shifted)
if (S, T) in match_edges_set:
# If (S, T) is in the matching M*, add edge from task node v to spy node u in D.
scc_graph_adj[v].append(u)
else:
# If (S, T) is not in M*, add edge from spy node u to task node v in D.
scc_graph_adj[u].append(v)
# Find Strongly Connected Components (SCCs) using Tarjan's algorithm.
visited = [False] * (N + M + 1) # Track visited nodes
stack = [] # Stack for Tarjan's algorithm
ids = [-1] * (N + M + 1) # Discovery time assigned by DFS
low = [-1] * (N + M + 1) # Lowest discovery time reachable (including self)
onStack = [False] * (N + M + 1) # Track nodes currently in the recursion stack
timer = 0 # Global timer for discovery times
scc_count = 0 # Counter for SCCs found
scc_ids = [-1] * (N + M + 1) # Stores the SCC ID for each node
# Standard Tarjan's DFS implementation
def tarjan_dfs(at):
nonlocal timer, scc_count
visited[at] = True
ids[at] = low[at] = timer # Initialize discovery time and low-link value
timer += 1
stack.append(at) # Push node onto stack
onStack[at] = True
# Explore neighbors
for to in scc_graph_adj[at]:
if not visited[to]: # If neighbor not visited, recurse
tarjan_dfs(to)
# After recursion returns, update low-link value of 'at' based on 'to'
# If 'to' can reach an ancestor node with lower ID, 'at' can too via 'to'.
low[at] = min(low[at], low[to])
elif onStack[to]: # If neighbor 'to' is visited AND on stack, it's a back-edge or cross-edge to an ancestor/node currently in stack path.
# Update low-link based on the discovery time ('ids') of 'to'.
low[at] = min(low[at], ids[to])
# If 'at' is the root of an SCC (its low-link value is its own discovery time)
if ids[at] == low[at]:
# Pop nodes from stack until 'at' is popped. All popped nodes form an SCC.
while stack:
node = stack.pop()
onStack[node] = False # Mark node as popped
scc_ids[node] = scc_count # Assign SCC ID
if node == at: break # Stop when root 'at' is popped
scc_count += 1 # Increment SCC counter
# Run Tarjan's starting from each unvisited node to find all SCCs.
for i in range(1, N + M + 1): # Iterate through all potential nodes (spies 1..N, tasks N+1..N+M)
if not visited[i]:
tarjan_dfs(i)
# Process queries based on the original indices to maintain output order.
results = [''] * L # Array to store results, indexed by original order
for k in range(len(edges_list_with_orig_idx)):
(S, T), original_idx = edges_list_with_orig_idx[k]
# Check if the edge (S, T) is part of the maximum matching M* found.
if (S, T) not in match_edges_set:
# If an edge is NOT in M*, it means there exists at least one maximum matching (M*)
# that does not use this edge. Therefore, it's possible to achieve the maximum number
# of assignments without using (S, T). The answer is "Yes".
results[original_idx] = "Yes"
else:
# If the edge (S, T) IS in M*, we need to check if it's critical.
# An edge (S, T) in M* is critical if and only if S and T belong to different SCCs in graph D.
# If they are in the same SCC, the edge is part of an alternating cycle and can be swapped out,
# meaning another maximum matching exists without this edge.
u = S # Spy node index
v = N + T # Task node index (shifted)
# Check if both nodes have valid SCC IDs assigned and if they are the same.
# Nodes might not have an SCC ID if they are isolated (not reachable/part of any edge).
# But nodes involved in edges must be visited and assigned an SCC ID unless graph logic fails.
if scc_ids[u] != -1 and scc_ids[u] == scc_ids[v]:
# S and T are in the same SCC -> edge (S, T) is NOT critical.
results[original_idx] = "Yes"
else:
# S and T are in different SCCs -> edge (S, T) IS critical.
results[original_idx] = "No"
# Output results for each query in the original order.
for res in results:
print(res)
solve()
qwewe