Result

| Problem | No.3418 【絶望】30個並列ごちゃ混ぜHit&Blowで遊ぼう! |
| Submission time | 2025-12-16 17:16:08 |
| Language | C++23 (gcc 13.3.0 + boost 1.89.0) |
| Result | AC |
| Execution time | 1,313 ms / 5,000 ms |
| Code length | 33,682 bytes |
| Compile time | 5,533 ms |
| Compile memory | 336,628 KB |
| Runtime memory | 26,084 KB |
| Score | 9,995,820 |
| Average query count | 41.80 |
| Final judge time | 2025-12-25 02:00:35 |
| Total judge time | 106,122 ms |
| Judge server ID (reference info) | judge3 / judge5 |
| File pattern | Result |
|---|---|
| other | AC * 100 |
Source code
#include <bits/stdc++.h>
using namespace std;
/*
yukicoder interactive (Advent Calendar Contest 2025 Final):
30 parallel mixed Hit&Blow (length=5, digits 0..9, all distinct)
Masterpiece solver (aggressive but safe):
Core idea:
Maintain the set U of NOT-YET-DISCOVERED hidden strings.
Each query q returns a multiset of 30 (hit,blow) pairs, sorted.
(5,0) appears for already-discovered strings and for q itself if hidden.
We convert the answer into a 21-category histogram (H,B) for U.
Then we repeatedly ask the element with the highest estimated existence
probability in U.
Estimation & propagation:
1) Zero-category pruning:
If some past query says category r count is 0, any candidate x with
HB(q,x)=r cannot be in U.
2) Peeling:
When we newly discover s (= current q), remove its contribution from
all past histograms. This can create new zeros and more pruning.
3) Maximum-entropy existence probability (IPF / raking):
Find weights w[x] (approx P[x in U]) so that for every past query q_j
and category r, sum_{x in alive, HB(q_j,x)=r} w[x] = cnt[j][r].
Then ask argmax w[x].
4) Strong pairwise propagation (transportation polytope):
For a pair of past queries (A,B), candidates split into 21x21 cells by
(HB(A,x), HB(B,x)). Feasible U must satisfy row/col sums exactly.
We solve ONE max-flow for the pair and build the residual graph over
row/col nodes. From reachability:
- fixed0 cell: flow==0 and col->row NOT reachable => cell must be 0
- fixedFull cell: flow==cap and row->col NOT reachable => cell must be full
fixed0 => prune all candidates in that cell
fixedFull => candidates in that cell are guaranteed to exist => push to forced queue
5) Endgame Monte Carlo (n_rem <= 10):
Build random feasible completions using a strong query pair, reject by
all constraints, and estimate existence probabilities by frequency.
The solver uses time checks and tries to spend most of the 5s budget on
constraint propagation + probability sharpening.
*/
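// Worked example of the reply-to-histogram conversion (numbers illustrative,
// not from a real run): suppose 3 hidden strings are already discovered and
// the current query q is itself a 4th hidden string. The sorted reply then
// contains four (5,0) pairs, so c50 = 4 > found = 3: q is marked found,
// resid[(5,0)] drops to 0, and the remaining 26 pairs form the residual
// histogram over the 26 still-undiscovered strings in U.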
static constexpr int DIG = 10;
static constexpr int LEN = 5;
static constexpr int NALL = 30240;
static constexpr int K = 21; // valid (hit,blow) pairs
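// (21 = sum over h=0..5 of the 6-h legal blow values: 6+5+4+3+2+1.)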
static int HB_ID[LEN+1][LEN+1];
static int ID_50;
static inline int popcnt10(uint16_t x){ return __builtin_popcount((unsigned)x); }
struct Code{
array<uint8_t, LEN> d;
uint16_t mask;
array<char, LEN+1> s;
};
struct Timer{
chrono::steady_clock::time_point st;
Timer(): st(chrono::steady_clock::now()) {}
double ms() const {
return chrono::duration<double, milli>(chrono::steady_clock::now()-st).count();
}
};
// Small Dinic for bipartite flow
struct Dinic {
struct Edge{ int to, rev; int cap; };
int n;
vector<vector<Edge>> g;
vector<int> level, it;
Dinic(int n=0){ init(n); }
void init(int n_){
n=n_;
g.assign(n, {});
level.assign(n, 0);
it.assign(n, 0);
}
int add_edge(int fr,int to,int cap){
Edge a{to, (int)g[to].size(), cap};
Edge b{fr, (int)g[fr].size(), 0};
g[fr].push_back(a);
g[to].push_back(b);
return (int)g[fr].size()-1; // index in g[fr]
}
bool bfs(int s,int t){
fill(level.begin(), level.end(), -1);
queue<int> q;
level[s]=0;
q.push(s);
while(!q.empty()){
int v=q.front(); q.pop();
for(const auto &e: g[v]){
if(e.cap>0 && level[e.to]<0){
level[e.to]=level[v]+1;
q.push(e.to);
}
}
}
return level[t]>=0;
}
int dfs(int v,int t,int f){
if(v==t) return f;
for(int &i=it[v]; i<(int)g[v].size(); i++){
Edge &e=g[v][i];
if(e.cap<=0 || level[e.to]!=level[v]+1) continue;
int ret=dfs(e.to,t,min(f,e.cap));
if(ret>0){
e.cap-=ret;
g[e.to][e.rev].cap+=ret;
return ret;
}
}
return 0;
}
int maxflow(int s,int t){
int flow=0;
while(bfs(s,t)){
fill(it.begin(), it.end(), 0);
while(true){
int f=dfs(s,t,INT_MAX);
if(!f) break;
flow+=f;
}
}
return flow;
}
};
struct Solver {
// all codes
vector<Code> all;
// state
vector<uint8_t> alive, asked, forcedFlag;
vector<int> aliveList;
vector<double> w;
int found = 0; // number of already discovered hidden strings
// history
vector<int> queryCode; // code index for each asked query
vector<array<int,K>> cnt; // residual histogram for remaining unknown U
vector<vector<uint8_t>> cat; // cat[qid][code] = HB(query[qid], code)
vector<uint32_t> zeroMask; // bitmask of categories with cnt==0
deque<int> forcedQ;
Timer timer;
mt19937 rng;
// time budget
static constexpr double TIME_LIMIT_MS = 4950.0; // keep some margin
Solver(): rng(71236721u) {
build_mapping();
build_all_codes();
alive.assign(NALL, 1);
asked.assign(NALL, 0);
forcedFlag.assign(NALL, 0);
w.assign(NALL, 1.0);
aliveList.resize(NALL);
iota(aliveList.begin(), aliveList.end(), 0);
}
static void build_mapping(){
for(int h=0; h<=LEN; h++) for(int b=0; b<=LEN; b++) HB_ID[h][b] = -1;
int kk=0;
for(int h=0; h<=LEN; h++) for(int b=0; b<=LEN-h; b++) HB_ID[h][b] = kk++;
ID_50 = HB_ID[5][0];
}
void build_all_codes(){
all.reserve(NALL);
for(int a=0;a<10;a++) for(int b=0;b<10;b++) if(b!=a)
for(int c=0;c<10;c++) if(c!=a && c!=b)
for(int d=0;d<10;d++) if(d!=a && d!=b && d!=c)
for(int e=0;e<10;e++) if(e!=a && e!=b && e!=c && e!=d){
Code x;
x.d = {(uint8_t)a,(uint8_t)b,(uint8_t)c,(uint8_t)d,(uint8_t)e};
x.mask = (uint16_t)((1u<<a)|(1u<<b)|(1u<<c)|(1u<<d)|(1u<<e));
x.s = {(char)('0'+a),(char)('0'+b),(char)('0'+c),(char)('0'+d),(char)('0'+e), '\0'};
all.push_back(x);
}
}
inline uint8_t hb_id(int ia, int ib) const {
const Code &A = all[ia];
const Code &B = all[ib];
int hits=0;
hits += (A.d[0]==B.d[0]);
hits += (A.d[1]==B.d[1]);
hits += (A.d[2]==B.d[2]);
hits += (A.d[3]==B.d[3]);
hits += (A.d[4]==B.d[4]);
int common = popcnt10((uint16_t)(A.mask & B.mask));
int blow = common - hits;
return (uint8_t)HB_ID[hits][blow];
}
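// Worked example (illustrative): for "01234" vs "01243", positions 0..2 match
// (hits=3), the digit masks are equal (common=5), so blow=5-3=2 and the
// category is HB_ID[3][2].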
inline double time_left() const { return TIME_LIMIT_MS - timer.ms(); }
inline bool time_ok(double margin=80.0) const { return timer.ms() + margin < TIME_LIMIT_MS; }
void clean_alive(){
vector<int> nxt;
nxt.reserve(aliveList.size());
for(int i: aliveList){
if(alive[i]) nxt.push_back(i);
}
aliveList.swap(nxt);
}
// prune candidates whose category for query qid is in maskbits
bool prune_by_mask(int qid, uint32_t maskbits){
if(maskbits==0){
clean_alive();
return false;
}
bool changed=false;
const auto &cq = cat[qid];
vector<int> nxt;
nxt.reserve(aliveList.size());
for(int i: aliveList){
if(!alive[i]) continue;
if(maskbits & (1u<<cq[i])){
alive[i]=0;
w[i]=0.0;
changed=true;
}else nxt.push_back(i);
}
aliveList.swap(nxt);
return changed;
}
void normalize_weights(int n_rem){
if(aliveList.empty()) return;
double sum=0.0;
for(int i: aliveList) sum += w[i];
if(sum<=0.0){
double uni = (double)n_rem / (double)aliveList.size();
for(int i: aliveList) w[i]=uni;
return;
}
double sc = (double)n_rem / sum;
for(int i: aliveList) w[i]*=sc;
}
// IPF / raking for existence probabilities
void ipf(int sweeps, double damp){
if(sweeps<=0 || queryCode.empty() || aliveList.empty()) return;
int n_rem = 30 - found;
array<double,K> sum;
array<double,K> fac;
for(int it=0; it<sweeps; it++){
normalize_weights(n_rem);
for(int q=0; q<(int)queryCode.size(); q++){
sum.fill(0.0);
const auto &cq = cat[q];
for(int i: aliveList) sum[cq[i]] += w[i];
for(int r=0;r<K;r++){
if(sum[r] > 0.0) fac[r] = (double)cnt[q][r] / sum[r];
else fac[r] = (cnt[q][r]==0 ? 1.0 : 0.0);
if(damp!=1.0) fac[r] = pow(fac[r], damp);
}
for(int i: aliveList) w[i] *= fac[cq[i]];
}
}
normalize_weights(n_rem);
}
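// One raking step, worked numerically (illustrative): if the bucket for query
// q and category r holds two alive candidates with w=0.8 and w=0.4 (sum=1.2)
// while cnt[q][r]=1, the factor is 1/1.2, rescaling them to ~0.667 and ~0.333
// so the bucket meets its target exactly. With damp<1 the factor is tempered
// to pow(1/1.2, damp), moving only part of the way per sweep.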
// Discovering a new hidden string s: peel its contribution from all past histograms
void peel_new_found(int sidx){
int lastQ = (int)queryCode.size() - 1;
for(int q=0; q<lastQ; q++){
int cid = cat[q][sidx];
int &v = cnt[q][cid];
v--;
if(v==0){
uint32_t bit = (1u<<cid);
if(!(zeroMask[q] & bit)){
zeroMask[q] |= bit;
prune_by_mask(q, bit);
}
}
}
}
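// Peeling example (illustrative): if an old query once reported two
// undiscovered strings in category (1,2) and one of them is now discovered,
// its cnt drops 2 -> 1; a later drop to 0 would prune every alive candidate
// whose HB against that query is (1,2).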
// Single-query forcing:
// If for some query q and category r, alive_count(q,r) == cnt[q][r], then all in that bucket are forced.
bool single_forcing_scan(){
if(aliveList.empty()) return false;
if(queryCode.empty()) return false;
if(!time_ok(120.0)) return false;
bool added=false;
// We scan all queries; K is small.
vector<vector<int>> buckets(K);
for(int q=0; q<(int)queryCode.size(); q++){
for(int r=0;r<K;r++) buckets[r].clear();
const auto &cq = cat[q];
for(int i: aliveList){
buckets[cq[i]].push_back(i);
}
for(int r=0;r<K;r++){
int need = cnt[q][r];
if(need<=0) continue;
if((int)buckets[r].size() == need){
for(int idx: buckets[r]){
if(alive[idx] && !asked[idx] && !forcedFlag[idx]){
forcedFlag[idx]=1;
forcedQ.push_back(idx);
added=true;
}
}
}
}
}
return added;
}
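// Forcing example (illustrative): if cnt[q][(2,1)]=3 and exactly 3 alive
// candidates score (2,1) against q, all three must be hidden strings, so they
// can be asked unconditionally.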
// Pairwise transportation analysis for queries qa,qb (indices in history)
// Returns true if pruned something or forced something.
bool pair_propagate(int qa, int qb){
if(qa==qb) return false;
if(aliveList.empty()) return false;
if(!time_ok(150.0)) return false;
int n_rem = 30 - found;
if(n_rem<=0) return false;
static int cap[K][K];
static int flow[K][K];
static bool fixed0[K][K];
static bool fixedFull[K][K];
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
cap[a][b]=0;
fixed0[a][b]=false;
fixedFull[a][b]=false;
}
}
const auto &A = cat[qa];
const auto &B = cat[qb];
for(int idx: aliveList){
cap[A[idx]][B[idx]]++;
}
// Build flow network
// Nodes: S, rows(0..K-1), cols(0..K-1), T
int S = 0;
int row0 = 1;
int col0 = 1 + K;
int T = 1 + K + K;
Dinic din(T+1);
int rowSum[K], colSum[K];
for(int a=0;a<K;a++) rowSum[a]=cnt[qa][a];
for(int b=0;b<K;b++) colSum[b]=cnt[qb][b];
// Quick infeasibility check (should not happen)
int sumR=0,sumC=0;
for(int a=0;a<K;a++) sumR += rowSum[a];
for(int b=0;b<K;b++) sumC += colSum[b];
if(sumR!=n_rem || sumC!=n_rem) return false;
for(int a=0;a<K;a++) if(rowSum[a]>0) din.add_edge(S, row0+a, rowSum[a]);
for(int b=0;b<K;b++) if(colSum[b]>0) din.add_edge(col0+b, T, colSum[b]);
static int edgePos[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) edgePos[a][b] = -1;
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(cap[a][b]>0){
int pos = din.add_edge(row0+a, col0+b, cap[a][b]);
edgePos[a][b] = pos;
}
}
}
int f = din.maxflow(S, T);
if(f != n_rem){
// If this happens, our alive set is too small for this pair's marginals.
// Should not happen with safe pruning.
return false;
}
// Extract flow on row->col edges
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(edgePos[a][b] >= 0){
const auto &e = din.g[row0+a][edgePos[a][b]];
int used = cap[a][b] - e.cap;
flow[a][b] = used;
}else flow[a][b]=0;
}
}
// Build residual graph on 2K nodes (rows:0..K-1, cols:K..2K-1)
const int N = 2*K;
static uint64_t reach[2*K];
for(int u=0;u<N;u++) reach[u] = (1ULL<<u);
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(cap[a][b]==0) continue;
if(flow[a][b] < cap[a][b]){
// can increase => row a -> col b
reach[a] |= (1ULL<<(K+b));
}
if(flow[a][b] > 0){
// can decrease => col b -> row a
reach[K+b] |= (1ULL<<a);
}
}
}
// transitive closure (Warshall with bitsets)
for(int k=0;k<N;k++){
uint64_t mk = reach[k];
for(int u=0;u<N;u++){
if(reach[u] & (1ULL<<k)) reach[u] |= mk;
}
}
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(cap[a][b]==0) continue;
if(flow[a][b]==0){
bool can = (reach[K+b] & (1ULL<<a));
if(!can) fixed0[a][b]=true;
}
if(flow[a][b]==cap[a][b]){
bool can = (reach[a] & (1ULL<<(K+b)));
if(!can) fixedFull[a][b]=true;
}
}
}
bool changed=false;
vector<int> nxt;
nxt.reserve(aliveList.size());
for(int idx: aliveList){
int a = A[idx];
int b = B[idx];
if(fixed0[a][b]){
alive[idx]=0;
w[idx]=0.0;
changed=true;
continue;
}
if(fixedFull[a][b]){
if(!asked[idx] && !forcedFlag[idx]){
forcedFlag[idx]=1;
forcedQ.push_back(idx);
changed=true; // treat as progress
}
}
nxt.push_back(idx);
}
aliveList.swap(nxt);
return changed;
}
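// Worked instance of the fixed-cell logic, shrunk to 2x2 (illustrative):
// row sums (2,1), col sums (1,2), every cell capacity 1. The unique feasible
// matrix is [[1,1],[0,1]], so cell (1,0) is fixed0 (prune its candidate) and
// the other three cells are fixedFull (force their candidates). The residual
// graph confirms it: the saturated cells contribute only col->row arcs, so
// col 0 cannot reach row 1 and no rerouting through (1,0) exists.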
// Heuristic strength score for choosing partner queries
long long spikiness(int qid) const {
long long s=0;
for(int r=0;r<K;r++) s += 1LL*cnt[qid][r]*cnt[qid][r];
return s;
}
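// E.g. a histogram concentrated as (30,0,...,0) scores 30^2=900 while an even
// spread (10,10,10,0,...) scores 300, so this ranking prefers concentrated
// ("spiky") replies as propagation partners.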
// Propagate constraints aggressively within time.
void propagate(){
if(queryCode.empty() || aliveList.empty()) return;
int m = (int)queryCode.size();
int n_rem = 30 - found;
// Iterate to exploit cascades; spend more time when we have budget.
int rounds = 4;
double rem = time_left();
if(rem < 1200) rounds = 1;
else if(rem < 2200) rounds = 2;
else if(rem < 3200) rounds = 3;
for(int round=0; round<rounds; round++){
if(!time_ok(240.0)) break;
bool changed = false;
// Single forcing is always safe and often triggers cascades after pruning.
if((int)aliveList.size() <= 26000 || n_rem <= 22){
changed |= single_forcing_scan();
}
if(m >= 2){
int last = m - 1;
// partners sorted by spikiness (stronger constraints first)
vector<int> partners;
partners.reserve(last);
for(int i=0;i<last;i++) partners.push_back(i);
sort(partners.begin(), partners.end(), [&](int a,int b){
return spikiness(a) > spikiness(b);
});
int maxPartners = last;
rem = time_left();
if(rem < 1000) maxPartners = min(maxPartners, 8);
else if(rem < 1800) maxPartners = min(maxPartners, 18);
else if(rem < 2600) maxPartners = min(maxPartners, 30);
else if(rem < 3400) maxPartners = min(maxPartners, 45);
// else: all (we want to spend time here)
partners.resize(maxPartners);
for(int i: partners){
if(!time_ok(200.0)) break;
changed |= pair_propagate(i, last);
}
// Cross pairs among top-strong queries: helps fixed0/fixedFull cascades.
if(time_ok(260.0) && m >= 4){
int topN = min(10, m);
vector<int> strong(topN);
iota(strong.begin(), strong.end(), 0);
sort(strong.begin(), strong.end(), [&](int a,int b){
return spikiness(a) > spikiness(b);
});
strong.resize(topN);
int crossBudget = 0;
rem = time_left();
if(rem > 3000) crossBudget = 45;
else if(rem > 2200) crossBudget = 25;
else if(rem > 1600) crossBudget = 12;
else if(rem > 1100) crossBudget = 6;
for(int x=0; x<(int)strong.size() && crossBudget>0; x++){
for(int y=x+1; y<(int)strong.size() && crossBudget>0; y++){
if(!time_ok(200.0)) { crossBudget = 0; break; }
changed |= pair_propagate(strong[x], strong[y]);
crossBudget--;
}
}
}
}
// Run single forcing again after pair pruning (cheap cascade).
if(changed && time_ok(220.0) && ((int)aliveList.size() <= 22000 || n_rem <= 18)){
changed |= single_forcing_scan();
}
if(!changed) break;
}
}
// Compute entropy of HB distribution for candidate q (weighted by current w)
double weighted_entropy_for_candidate(int qidx){
int n_rem = 30 - found;
if(n_rem<=0) return 0.0;
array<double,K> sum;
sum.fill(0.0);
for(int i: aliveList){
uint8_t cid = hb_id(qidx, i);
sum[cid] += w[i];
}
double H=0.0;
for(int r=0;r<K;r++){
double p = sum[r] / (double)n_rem;
if(p>1e-15) H -= p * log(p);
}
return H;
}
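// Note: when called after ipf(), normalize_weights has made the bucket masses
// sum to n_rem, so the p's sum to 1 and H <= ln(21) ~= 3.04, which bounds the
// lambda*H tie-break term used in pick_best_query.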
// pick best query from alive by w; tie-break among top few by entropy
int pick_best_query(){
// forced first
while(!forcedQ.empty()){
int x = forcedQ.front();
forcedQ.pop_front();
if(alive[x] && !asked[x]) return x;
}
// no constraint yet: choose informative initial query
if(queryCode.empty()){
return best_initial_query();
}
int n_rem = 30 - found;
// Endgame Monte Carlo refinement (stronger posterior estimate)
if(n_rem <= 10 && (int)aliveList.size() <= 14000 && time_left() > 650.0){
int best=-1;
if(endgame_mc(best)) return best;
}
// IPF
double rem = time_left();
int m = (int)queryCode.size();
int aliveN = (int)aliveList.size();
// adaptive sweeps
int sweeps = 0;
double damp = 1.0;
if(rem > 3500){
sweeps = (aliveN>20000 ? 95 : 125);
}else if(rem > 2600){
sweeps = (aliveN>20000 ? 80 : 110);
}else if(rem > 1800){
sweeps = (aliveN>20000 ? 60 : 85);
}else if(rem > 1200){
sweeps = (aliveN>20000 ? 40 : 60);
}else{
sweeps = (aliveN>20000 ? 22 : 36);
}
// stage-dependent damping (more exploration early, more exact late)
if(n_rem >= 22) damp = 0.78;
else if(n_rem >= 12) damp = 0.86;
else damp = 0.93;
// if many constraints, convergence is faster
if(m >= 25) sweeps = max(10, sweeps - 10);
ipf(sweeps, damp);
// pick top-L by weight
const int L = 8;
vector<pair<double,int>> top;
top.reserve(aliveList.size());
for(int i: aliveList){
top.push_back({w[i], i});
}
int topN = min(L, (int)top.size());
partial_sort(top.begin(), top.begin()+topN, top.end(),
[](auto &x, auto &y){ return x.first > y.first; });
top.resize(topN);
int best = top[0].second;
double bestW = top[0].first;
// entropy tie-break among close candidates
double lambda = (n_rem >= 18 ? 0.020 : 0.010);
double bestScore = bestW;
double margin = 0.015; // only consider those near the max
for(auto [wi, idx] : top){
if(wi + 1e-12 < bestW - margin) break;
double H = weighted_entropy_for_candidate(idx);
double score = wi + lambda * H;
if(score > bestScore){
bestScore = score;
best = idx;
}
}
return best;
}
// Choose a good initial query by entropy over ALL codes (uniform)
int find_index_of_string(const string &s){
for(int i=0;i<NALL;i++){
bool ok=true;
for(int k=0;k<LEN;k++) if(all[i].s[k]!=s[k]){ ok=false; break; }
if(ok) return i;
}
return 0;
}
int best_initial_query(){
vector<int> cand;
cand.reserve(260);
cand.push_back(0); // 01234 by construction
cand.push_back(find_index_of_string("56789"));
cand.push_back(find_index_of_string("13579"));
cand.push_back(find_index_of_string("02468"));
// random sample
for(int t=0;t<220;t++) cand.push_back((int)(rng()%NALL));
sort(cand.begin(), cand.end());
cand.erase(unique(cand.begin(), cand.end()), cand.end());
int best = cand[0];
double bestH = -1.0;
array<int,K> hist;
for(int idx : cand){
hist.fill(0);
for(int j=0;j<NALL;j++){
hist[hb_id(idx, j)]++;
}
double H=0.0;
for(int r=0;r<K;r++){
if(hist[r]==0) continue;
double p = (double)hist[r] / (double)NALL;
H -= p * log(p);
}
if(H > bestH){ bestH=H; best=idx; }
}
return best;
}
// Endgame Monte Carlo: try to sample completions consistent with all constraints
bool endgame_mc(int &outBest){
if(queryCode.size() < 2) return false;
int n_rem = 30 - found;
if(n_rem > 10) return false;
if((int)aliveList.size() > 14000) return false;
if(time_left() < 650.0) return false;
if(!time_ok(320.0)) return false;
int m = (int)queryCode.size();
int last = m - 1;
// Candidate pivot pairs
// (last, bestPartner) and (top1, top2) by spikiness.
auto best_partner_of_last = [&](){
int bp = 0;
long long best = -1;
for(int i=0;i<last;i++){
long long s = spikiness(i);
if(s > best){ best=s; bp=i; }
}
return bp;
};
vector<int> ord(m);
iota(ord.begin(), ord.end(), 0);
sort(ord.begin(), ord.end(), [&](int a,int b){
return spikiness(a) > spikiness(b);
});
int candPairs[2][2] = {
{last, best_partner_of_last()},
{ord[0], ord[1]}
};
auto pair_metric = [&](int qa, int qb)->long long{
static int cap[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) cap[a][b]=0;
const auto &A = cat[qa];
const auto &B = cat[qb];
for(int idx: aliveList) cap[A[idx]][B[idx]]++;
long long s=0;
for(int a=0;a<K;a++) for(int b=0;b<K;b++){
int v=cap[a][b];
if(v) s += 1LL*v*v;
}
return s;
};
int qa=candPairs[0][0], qb=candPairs[0][1];
long long bestM = pair_metric(qa,qb);
for(int t=1;t<2;t++){
int a=candPairs[t][0], b=candPairs[t][1];
if(a==b) continue;
long long sc = pair_metric(a,b);
if(sc > bestM){ bestM=sc; qa=a; qb=b; }
}
if(qa==qb) return false;
// Build cell lists for (qa,qb)
static vector<int> cell[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) cell[a][b].clear();
const auto &A = cat[qa];
const auto &B = cat[qb];
for(int idx: aliveList){
cell[A[idx]][B[idx]].push_back(idx);
}
int row[K], col[K];
for(int a=0;a<K;a++) row[a]=cnt[qa][a];
for(int b=0;b<K;b++) col[b]=cnt[qb][b];
// Build a feasible base flow matrix via maxflow
static int cap[K][K], base[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++){
cap[a][b] = (int)cell[a][b].size();
base[a][b] = 0;
}
int S=0, row0=1, col0=1+K, T=1+K+K;
Dinic din(T+1);
for(int a=0;a<K;a++) if(row[a]>0) din.add_edge(S, row0+a, row[a]);
for(int b=0;b<K;b++) if(col[b]>0) din.add_edge(col0+b, T, col[b]);
static int edgePos[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) edgePos[a][b]=-1;
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(cap[a][b]>0){
edgePos[a][b] = din.add_edge(row0+a, col0+b, cap[a][b]);
}
}
}
int f = din.maxflow(S,T);
if(f != n_rem) return false;
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(edgePos[a][b]>=0){
const auto &e = din.g[row0+a][edgePos[a][b]];
base[a][b] = cap[a][b] - e.cap;
}
}
}
// Constraint check order: strong first (reject early)
vector<int> checkOrd(m);
iota(checkOrd.begin(), checkOrd.end(), 0);
sort(checkOrd.begin(), checkOrd.end(), [&](int x,int y){
return spikiness(x) > spikiness(y);
});
auto ok_all = [&](const vector<int>& Sset)->bool{
static int tmp[K];
for(int qi: checkOrd){
for(int r=0;r<K;r++) tmp[r]=0;
const auto &cq = cat[qi];
for(int idx: Sset) tmp[cq[idx]]++;
for(int r=0;r<K;r++) if(tmp[r]!=cnt[qi][r]) return false;
}
return true;
};
// MCMC on count matrix x with random 2x2 cycle moves
static int x[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) x[a][b]=base[a][b];
uniform_int_distribution<int> distRow(0, K-1);
uniform_int_distribution<int> distCol(0, K-1);
auto cycle_moves = [&](int moves){
for(int t=0;t<moves;t++){
int a1=distRow(rng), a2=distRow(rng);
int b1=distCol(rng), b2=distCol(rng);
if(a1==a2 || b1==b2) continue;
if(rng() & 1){
int d = min({cap[a1][b1]-x[a1][b1], cap[a2][b2]-x[a2][b2], x[a1][b2], x[a2][b1]});
if(d<=0) continue;
int delta = 1 + (int)(rng()%d);
x[a1][b1]+=delta; x[a2][b2]+=delta;
x[a1][b2]-=delta; x[a2][b1]-=delta;
}else{
int d = min({x[a1][b1], x[a2][b2], cap[a1][b2]-x[a1][b2], cap[a2][b1]-x[a2][b1]});
if(d<=0) continue;
int delta = 1 + (int)(rng()%d);
x[a1][b1]-=delta; x[a2][b2]-=delta;
x[a1][b2]+=delta; x[a2][b1]+=delta;
}
}
};
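// Each 2x2 move adds +delta to cells (a1,b1),(a2,b2) and -delta to (a1,b2),
// (a2,b1) (or the reverse), so every row and column sum is preserved; the
// min() guards keep 0 <= x <= cap, i.e. the walk never leaves the
// transportation polytope.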
// Sampling budget
double start = timer.ms();
double budget = min(950.0, time_left()-220.0);
if(budget < 250.0) return false;
int targetAcc = (n_rem <= 8 ? 4500 : 3000);
if(time_left() < 1400.0) targetAcc = min(targetAcc, 1600);
vector<int> freq(NALL, 0);
int accepted=0;
vector<int> Sset;
Sset.reserve(n_rem);
while(timer.ms() - start < budget){
// burn / mix a bit between samples
int moves = (accepted < 200 ? 90 : 45);
cycle_moves(moves);
// draw concrete indices for each cell
Sset.clear();
bool fail=false;
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
int take = x[a][b];
if(take==0) continue;
auto &vec = cell[a][b];
int sz = (int)vec.size();
if(sz < take){ fail=true; break; }
// choose 'take' distinct positions (take <= 10 here)
int pickedPos[16];
for(int t=0;t<take;t++){
int pos;
int tries=0;
while(true){
pos = (int)(rng()%sz);
bool ok=true;
for(int u=0;u<t;u++) if(pickedPos[u]==pos){ ok=false; break; }
if(ok){ pickedPos[t]=pos; break; }
if(++tries > 200){ fail=true; break; }
}
if(fail) break;
Sset.push_back(vec[pos]);
}
if(fail) break;
}
if(fail) break;
}
if(fail) continue;
if((int)Sset.size()!=n_rem) continue;
if(!ok_all(Sset)) continue;
accepted++;
for(int idx: Sset) freq[idx]++;
if(accepted >= targetAcc) break;
}
if(accepted < 400) return false;
// overwrite weights from MC frequencies
for(int idx: aliveList){
w[idx] = (double)freq[idx] / (double)accepted;
}
normalize_weights(n_rem);
int best = aliveList[0];
double bw = w[best];
for(int idx: aliveList){
if(w[idx] > bw){ bw=w[idx]; best=idx; }
}
outBest = best;
return true;
}
// Perform one interaction step; returns false if finished.
bool step(){
if(found>=30) return false;
if(aliveList.empty()) return false;
int qidx = pick_best_query();
// Ask
cout.write(all[qidx].s.data(), LEN);
cout << '\n';
cout.flush(); // must flush
// Read 30 pairs (must read all)
array<int,K> hist;
hist.fill(0);
bool bad=false;
pair<int,int> first={-2,-2};
for(int i=0;i<30;i++){
int h,b;
if(!(cin>>h>>b)) return false;
if(i==0) first={h,b};
if(h==-1 && b==-1) bad=true;
if(!bad){
int id = HB_ID[h][b];
if(id>=0) hist[id]++;
}
}
if(bad) return false;
if(first.first==5 && first.second==0) return false; // all solved
int c50 = hist[ID_50];
bool new_found = (c50 > found);
found = c50;
// Residual for remaining unknown U
array<int,K> resid = hist;
resid[ID_50] -= found;
// Add query to history
queryCode.push_back(qidx);
cnt.push_back(resid);
int qid = (int)queryCode.size()-1;
cat.emplace_back(NALL);
auto &cq = cat.back();
for(int i=0;i<NALL;i++) cq[i] = hb_id(qidx, i);
uint32_t z=0;
for(int r=0;r<K;r++) if(cnt[qid][r]==0) z |= (1u<<r);
zeroMask.push_back(z);
// Mark asked & remove from alive
asked[qidx]=1;
alive[qidx]=0;
w[qidx]=0.0;
// Prune by zero categories (includes cleaning alive list)
prune_by_mask(qid, z);
// Peel if newly found (current query is that string)
if(new_found){
peel_new_found(qidx);
}
// Strong propagation
propagate();
return true;
}
void run(){
// interactive: start immediately
while(time_ok(20.0)){
if(!step()) break;
}
}
};
int main(){
ios::sync_with_stdio(false);
cin.tie(nullptr);
Solver solver;
solver.run();
return 0;
}