Result
| Problem | No.3418 【絶望】30個並列ごちゃ混ぜHit&Blowで遊ぼう! |
| Contest | |
| User | |
| Submitted at | 2025-12-17 23:23:01 |
| Language | C++23 (gcc 13.3.0 + boost 1.89.0) |
| Result | AC |
| Execution time | 4,320 ms / 5,000 ms |
| Code length | 63,570 bytes |
| Compile time | 6,475 ms |
| Compile memory | 337,312 KB |
| Runtime memory | 54,896 KB |
| Score | 9,995,838 |
| Average query count | 41.62 |
| Last judged at | 2025-12-25 02:21:58 |
| Total judge time | 399,578 ms |
| Judge server ID (reference) | judge5 / judge2 |
| File pattern | Result |
|---|---|
| other | AC * 100 |
Source code
#include <bits/stdc++.h>
using namespace std;
/*
yukicoder interactive (Advent Calendar Contest 2025 Final):
30 parallel mixed Hit&Blow (length=5, digits 0..9, all distinct)
Masterpiece solver (aggressive but safe):
Core idea:
Maintain the set U of NOT-YET-DISCOVERED hidden strings.
Each query q returns a multiset of 30 (hit,blow) pairs, sorted.
(5,0) appears for already-discovered strings and for q itself if hidden.
We convert the answer into a 21-category histogram (H,B) for U.
Then we repeatedly ask the element with the highest estimated existence
probability in U.
Estimation & propagation:
1) Zero-category pruning:
If some past query says category r count is 0, any candidate x with
HB(q,x)=r cannot be in U.
2) Peeling:
When we newly discover s (= current q), remove its contribution from
all past histograms. This can create new zeros and more pruning.
3) Maximum-entropy existence probability (IPF / raking):
Find weights w[x] (approx P[x in U]) so that for every past query q_j
and category r, sum_{x in alive, HB(q_j,x)=r} w[x] = cnt[j][r].
Then ask argmax w[x].
4) Strong pairwise propagation (transportation polytope):
For a pair of past queries (A,B), candidates split into 21x21 cells by
(HB(A,x), HB(B,x)). Feasible U must satisfy row/col sums exactly.
We solve ONE max-flow for the pair and build the residual graph over
row/col nodes. From reachability:
- fixed0 cell: flow==0 and col->row NOT reachable => cell must be 0
- fixedFull cell: flow==cap and row->col NOT reachable => cell must be full
fixed0 => prune all candidates in that cell
fixedFull => candidates in that cell are guaranteed to exist => push to forced queue
5) Endgame Monte Carlo (n_rem <= 8):
Build random feasible completions using a strong query pair, reject them
against all constraints, and estimate existence probabilities by frequency.
The solver uses time checks and tries to spend most of the 5s budget on
constraint propagation + probability sharpening.
*/
static constexpr int DIG = 10;
static constexpr int LEN = 5;
static constexpr int NALL = 30240;
static constexpr int K = 21; // valid (hit,blow) pairs
static int HB_ID[LEN+1][LEN+1];
static int ID_50;
static inline int popcnt10(uint16_t x){ return __builtin_popcount((unsigned)x); }
struct Code{
array<uint8_t, LEN> d;
uint16_t mask;
array<char, LEN+1> s;
};
struct Timer{
chrono::steady_clock::time_point st;
Timer(): st(chrono::steady_clock::now()) {}
double ms() const {
return chrono::duration<double, milli>(chrono::steady_clock::now()-st).count();
}
};
// Small Dinic for bipartite flow
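// The pairwise analysis only ever builds tiny graphs (source + 21 row nodes + 21 column
// nodes + sink = 44 nodes), so this minimal Dinic is more than fast enough.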
struct Dinic {
struct Edge{ int to, rev; int cap; };
int n;
vector<vector<Edge>> g;
vector<int> level, it;
Dinic(int n=0){ init(n); }
void init(int n_){
n=n_;
g.assign(n, {});
level.assign(n, 0);
it.assign(n, 0);
}
int add_edge(int fr,int to,int cap){
Edge a{to, (int)g[to].size(), cap};
Edge b{fr, (int)g[fr].size(), 0};
g[fr].push_back(a);
g[to].push_back(b);
return (int)g[fr].size()-1; // index in g[fr]
}
bool bfs(int s,int t){
fill(level.begin(), level.end(), -1);
queue<int> q;
level[s]=0;
q.push(s);
while(!q.empty()){
int v=q.front(); q.pop();
for(const auto &e: g[v]){
if(e.cap>0 && level[e.to]<0){
level[e.to]=level[v]+1;
q.push(e.to);
}
}
}
return level[t]>=0;
}
int dfs(int v,int t,int f){
if(v==t) return f;
for(int &i=it[v]; i<(int)g[v].size(); i++){
Edge &e=g[v][i];
if(e.cap<=0 || level[e.to]!=level[v]+1) continue;
int ret=dfs(e.to,t,min(f,e.cap));
if(ret>0){
e.cap-=ret;
g[e.to][e.rev].cap+=ret;
return ret;
}
}
return 0;
}
int maxflow(int s,int t){
int flow=0;
while(bfs(s,t)){
fill(it.begin(), it.end(), 0);
while(true){
int f=dfs(s,t,1e9);
if(!f) break;
flow+=f;
}
}
return flow;
}
};
struct Solver {
// all codes
vector<Code> all;
// state
vector<uint8_t> alive, asked, forcedFlag;
vector<int> aliveList;
vector<double> w;
int found = 0; // number of already discovered hidden strings
// history
vector<int> queryCode; // code index for each asked query
vector<array<int,K>> cnt; // residual histogram for remaining unknown U
vector<vector<uint8_t>> cat; // cat[qid][code] = HB(query[qid], code)
vector<uint32_t> zeroMask; // bitmask of categories with cnt==0
deque<int> forcedQ;
Timer timer;
mt19937 rng;
// time budget
static constexpr double TIME_LIMIT_MS = 4950.0; // keep some margin
Solver(): rng(71236721u) {
build_mapping();
build_all_codes();
alive.assign(NALL, 1);
asked.assign(NALL, 0);
forcedFlag.assign(NALL, 0);
w.assign(NALL, 1.0);
aliveList.resize(NALL);
iota(aliveList.begin(), aliveList.end(), 0);
}
static void build_mapping(){
for(int h=0; h<=LEN; h++) for(int b=0; b<=LEN; b++) HB_ID[h][b] = -1;
int kk=0;
for(int h=0; h<=LEN; h++) for(int b=0; b<=LEN-h; b++) HB_ID[h][b] = kk++;
ID_50 = HB_ID[5][0];
}
void build_all_codes(){
all.reserve(NALL);
for(int a=0;a<10;a++) for(int b=0;b<10;b++) if(b!=a)
for(int c=0;c<10;c++) if(c!=a && c!=b)
for(int d=0;d<10;d++) if(d!=a && d!=b && d!=c)
for(int e=0;e<10;e++) if(e!=a && e!=b && e!=c && e!=d){
Code x;
x.d = {(uint8_t)a,(uint8_t)b,(uint8_t)c,(uint8_t)d,(uint8_t)e};
x.mask = (uint16_t)((1u<<a)|(1u<<b)|(1u<<c)|(1u<<d)|(1u<<e));
x.s = {(char)('0'+a),(char)('0'+b),(char)('0'+c),(char)('0'+d),(char)('0'+e), '\0'};
all.push_back(x);
}
}
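// (hit, blow) category for a pair of codes: hits = positional matches,
// blow = shared digits minus hits.
// Example: "01234" vs "03215" -> hits = 2 (positions 0 and 2),
// shared digits {0,1,2,3} -> blow = 4 - 2 = 2, i.e. category HB_ID[2][2].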
inline uint8_t hb_id(int ia, int ib) const {
const Code &A = all[ia];
const Code &B = all[ib];
int hits=0;
hits += (A.d[0]==B.d[0]);
hits += (A.d[1]==B.d[1]);
hits += (A.d[2]==B.d[2]);
hits += (A.d[3]==B.d[3]);
hits += (A.d[4]==B.d[4]);
int common = popcnt10((uint16_t)(A.mask & B.mask));
int blow = common - hits;
return (uint8_t)HB_ID[hits][blow];
}
inline double time_left() const { return TIME_LIMIT_MS - timer.ms(); }
inline bool time_ok(double margin=80.0) const { return timer.ms() + margin < TIME_LIMIT_MS; }
void clean_alive(){
vector<int> nxt;
nxt.reserve(aliveList.size());
for(int i: aliveList){
if(alive[i]) nxt.push_back(i);
}
aliveList.swap(nxt);
}
// prune candidates whose category for query qid is in maskbits
bool prune_by_mask(int qid, uint32_t maskbits){
if(maskbits==0){
clean_alive();
return false;
}
bool changed=false;
const auto &cq = cat[qid];
vector<int> nxt;
nxt.reserve(aliveList.size());
for(int i: aliveList){
if(!alive[i]) continue;
if(maskbits & (1u<<cq[i])){
alive[i]=0;
w[i]=0.0;
changed=true;
}else nxt.push_back(i);
}
aliveList.swap(nxt);
return changed;
}
void normalize_weights(int n_rem){
if(aliveList.empty()) return;
double sum=0.0;
for(int i: aliveList) sum += w[i];
if(sum<=0.0){
double uni = (double)n_rem / (double)aliveList.size();
for(int i: aliveList) w[i]=uni;
return;
}
double sc = (double)n_rem / sum;
for(int i: aliveList) w[i]*=sc;
}
// IPF / raking for existence probabilities
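// One raking sweep rescales the weights so that, for every past query j and category r,
// the weighted mass matches the observed count:
//   w[x] <- w[x] * cnt[j][r] / sum_{y alive, HB(q_j,y)=r} w[y]
// The factor is damped (fac^damp, damp <= 1) to soften oscillation between conflicting
// constraints, and weights are re-normalized to sum to n_rem between sweeps.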
void ipf(int sweeps, double damp){
if(sweeps<=0 || queryCode.empty() || aliveList.empty()) return;
int n_rem = 30 - found;
array<double,K> sum;
array<double,K> fac;
for(int it=0; it<sweeps; it++){
normalize_weights(n_rem);
for(int q=0; q<(int)queryCode.size(); q++){
sum.fill(0.0);
const auto &cq = cat[q];
for(int i: aliveList) sum[cq[i]] += w[i];
for(int r=0;r<K;r++){
if(sum[r] > 0.0) fac[r] = (double)cnt[q][r] / sum[r];
else fac[r] = (cnt[q][r]==0 ? 1.0 : 0.0);
if(damp!=1.0) fac[r] = pow(fac[r], damp);
}
for(int i: aliveList) w[i] *= fac[cq[i]];
}
}
normalize_weights(n_rem);
}
// Discovering a new hidden string s: peel its contribution from all past histograms
void peel_new_found(int sidx){
int lastQ = (int)queryCode.size() - 1;
for(int q=0; q<lastQ; q++){
int cid = cat[q][sidx];
int &v = cnt[q][cid];
v--;
if(v==0){
uint32_t bit = (1u<<cid);
if(!(zeroMask[q] & bit)){
zeroMask[q] |= bit;
prune_by_mask(q, bit);
}
}
}
}
// Single-query forcing:
// If for some query q and category r, alive_count(q,r) == cnt[q][r], then all in that bucket are forced.
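// Example: if cnt[q][r] == 2 and exactly 2 alive candidates fall into category r of
// query q, both of them must be hidden strings, so they are queued as forced queries.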
bool single_forcing_scan(){
if(aliveList.empty()) return false;
if(queryCode.empty()) return false;
if(!time_ok(120.0)) return false;
bool added=false;
// We scan all queries; K is small.
vector<vector<int>> buckets(K);
for(int q=0; q<(int)queryCode.size(); q++){
for(int r=0;r<K;r++) buckets[r].clear();
const auto &cq = cat[q];
for(int i: aliveList){
buckets[cq[i]].push_back(i);
}
for(int r=0;r<K;r++){
int need = cnt[q][r];
if(need<=0) continue;
if((int)buckets[r].size() == need){
for(int idx: buckets[r]){
if(alive[idx] && !asked[idx] && !forcedFlag[idx]){
forcedFlag[idx]=1;
forcedQ.push_back(idx);
added=true;
}
}
}
}
}
return added;
}
// Pairwise transportation analysis for queries qa,qb (indices in history)
// Returns true if pruned something or forced something.
bool pair_propagate(int qa, int qb){
if(qa==qb) return false;
if(aliveList.empty()) return false;
if(!time_ok(150.0)) return false;
int n_rem = 30 - found;
if(n_rem<=0) return false;
static int cap[K][K];
static int flow[K][K];
static bool fixed0[K][K];
static bool fixedFull[K][K];
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
cap[a][b]=0;
fixed0[a][b]=false;
fixedFull[a][b]=false;
}
}
const auto &A = cat[qa];
const auto &B = cat[qb];
for(int idx: aliveList){
cap[A[idx]][B[idx]]++;
}
// Build flow network
// Nodes: S, rows(0..K-1), cols(0..K-1), T
int S = 0;
int row0 = 1;
int col0 = 1 + K;
int T = 1 + K + K;
Dinic din(T+1);
int rowSum[K], colSum[K];
for(int a=0;a<K;a++) rowSum[a]=cnt[qa][a];
for(int b=0;b<K;b++) colSum[b]=cnt[qb][b];
// Quick infeasibility check (should not happen)
int sumR=0,sumC=0;
for(int a=0;a<K;a++) sumR += rowSum[a];
for(int b=0;b<K;b++) sumC += colSum[b];
if(sumR!=n_rem || sumC!=n_rem) return false;
for(int a=0;a<K;a++) if(rowSum[a]>0) din.add_edge(S, row0+a, rowSum[a]);
for(int b=0;b<K;b++) if(colSum[b]>0) din.add_edge(col0+b, T, colSum[b]);
static int edgePos[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) edgePos[a][b] = -1;
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(cap[a][b]>0){
int pos = din.add_edge(row0+a, col0+b, cap[a][b]);
edgePos[a][b] = pos;
}
}
}
int f = din.maxflow(S, T);
if(f != n_rem){
// If this happens, our alive set is too small for this pair's marginals.
// Should not happen with safe pruning.
return false;
}
// Extract flow on row->col edges
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(edgePos[a][b] >= 0){
const auto &e = din.g[row0+a][edgePos[a][b]];
int used = cap[a][b] - e.cap;
flow[a][b] = used;
}else flow[a][b]=0;
}
}
// Build residual graph on 2K nodes (rows:0..K-1, cols:K..2K-1)
const int N = 2*K;
static uint64_t reach[2*K];
for(int u=0;u<N;u++) reach[u] = (1ULL<<u);
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(cap[a][b]==0) continue;
if(flow[a][b] < cap[a][b]){
// can increase => row a -> col b
reach[a] |= (1ULL<<(K+b));
}
if(flow[a][b] > 0){
// can decrease => col b -> row a
reach[K+b] |= (1ULL<<a);
}
}
}
// transitive closure (Warshall with bitsets)
for(int k=0;k<N;k++){
uint64_t mk = reach[k];
for(int u=0;u<N;u++){
if(reach[u] & (1ULL<<k)) reach[u] |= mk;
}
}
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
if(cap[a][b]==0) continue;
if(flow[a][b]==0){
bool can = (reach[K+b] & (1ULL<<a));
if(!can) fixed0[a][b]=true;
}
if(flow[a][b]==cap[a][b]){
bool can = (reach[a] & (1ULL<<(K+b)));
if(!can) fixedFull[a][b]=true;
}
}
}
bool changed=false;
vector<int> nxt;
nxt.reserve(aliveList.size());
for(int idx: aliveList){
int a = A[idx];
int b = B[idx];
if(fixed0[a][b]){
alive[idx]=0;
w[idx]=0.0;
changed=true;
continue;
}
if(fixedFull[a][b]){
if(!asked[idx] && !forcedFlag[idx]){
forcedFlag[idx]=1;
forcedQ.push_back(idx);
changed=true; // treat as progress
}
}
nxt.push_back(idx);
}
aliveList.swap(nxt);
return changed;
}
// Heuristic strength score for choosing partner queries
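// sum of squared category counts: large when the histogram is concentrated in a few
// categories, i.e. when the marginals are restrictive. Such queries make better pivots
// for the transportation analysis and the MC samplers (more fixing, better acceptance).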
long long spikiness(int qid) const {
long long s=0;
for(int r=0;r<K;r++) s += 1LL*cnt[qid][r]*cnt[qid][r];
return s;
}
// Propagate constraints aggressively within time.
void propagate(){
if(queryCode.size()<1) return;
if(aliveList.empty()) return;
// A couple of rounds to exploit cascades
for(int round=0; round<2; round++){
if(!time_ok(200.0)) break;
bool changed=false;
// Single forcing is more useful when alive is smaller
if((int)aliveList.size() <= 20000 || (30-found) <= 18) {
changed |= single_forcing_scan();
}
// Pair propagation: newest query with many strong partners
int m = (int)queryCode.size();
if(m>=2){
int last = m-1;
vector<int> partners;
partners.reserve(last);
for(int i=0;i<last;i++) partners.push_back(i);
// sort by strength (spiky marginals first)
sort(partners.begin(), partners.end(), [&](int a,int b){
return spikiness(a) > spikiness(b);
});
double rem = time_left();
int maxPartners = last;
if(rem < 1200) maxPartners = min(maxPartners, 6);
else if(rem < 2200) maxPartners = min(maxPartners, 14);
else if(rem < 3200) maxPartners = min(maxPartners, 22);
// else: all
partners.resize(maxPartners);
for(int i: partners){
if(!time_ok(180.0)) break;
changed |= pair_propagate(i, last);
}
// Extra cross pairs among top few strong queries (cheap insurance)
if(time_ok(220.0) && m>=4){
int a = partners[0];
int b = partners.size()>=2 ? partners[1] : 1;
int c = partners.size()>=3 ? partners[2] : 2;
if(a!=b) changed |= pair_propagate(a,b);
if(time_ok(180.0) && a!=c) changed |= pair_propagate(a,c);
if(time_ok(180.0) && b!=c) changed |= pair_propagate(b,c);
}
}
if(!changed) break;
}
}
// Compute entropy of HB distribution for candidate q (weighted by current w)
double weighted_entropy_for_candidate(int qidx){
int n_rem = 30 - found;
if(n_rem<=0) return 0.0;
array<double,K> sum;
sum.fill(0.0);
for(int i: aliveList){
uint8_t cid = hb_id(qidx, i);
sum[cid] += w[i];
}
double H=0.0;
for(int r=0;r<K;r++){
double p = sum[r] / (double)n_rem;
if(p>1e-15) H -= p * log(p);
}
return H;
}
// ---- Exact / near-exact existence probability for the very early stage ----
//
// m==1 : exact inclusion probability.
// Conditioning on a single histogram, all remaining candidates in the same
// (hit,blow) category are symmetric. The posterior is uniform among subsets
// satisfying the histogram counts, so:
// P(x in U) = cnt[r] / M[r] (exact)
// where M[r] is the number of remaining candidates in category r.
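// Numeric example: if cnt[0][r] = 3 and 120 alive candidates map to category r,
// each of them gets P(x in U) = 3/120 = 0.025.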
bool pick_exact_single_query(int &outBest){
if((int)queryCode.size()!=1) return false;
int qid = 0;
int n_rem = 30 - found;
if(n_rem<=0 || aliveList.empty()) return false;
const auto &cq = cat[qid];
array<int,K> bucket; bucket.fill(0);
for(int idx: aliveList) bucket[cq[idx]]++;
for(int idx: aliveList){
int r = cq[idx];
int den = bucket[r];
w[idx] = (den>0 ? (double)cnt[qid][r] / (double)den : 0.0);
}
int best = aliveList[0];
double bestW = w[best];
for(int idx: aliveList){
double v=w[idx];
if(v>bestW){ bestW=v; best=idx; }
}
// tiny tie-break among near-equals (stable; still essentially "max P")
double margin = max(1e-12, 0.02*bestW);
double bestScore = bestW + 0.010*weighted_entropy_for_candidate(best);
for(int idx: aliveList){
double v=w[idx];
if(v + 1e-12 < bestW - margin) continue;
double score = v + 0.010*weighted_entropy_for_candidate(idx);
if(score > bestScore){ bestScore=score; best=idx; }
}
outBest=best;
return true;
}
// m==2 : near-exact inclusion probability via MH on the 21x21 cell-count matrix.
// For the first two queries (qid=0,1), candidates split into 21x21 cells by
// (HB(q0,x), HB(q1,x)). Let cap[a][b] be cell sizes.
// The induced distribution on cell-count matrices X is:
// P(X) ∝ Π_{a,b} C(cap[a][b], X[a][b])
// under row/col margin constraints.
// We run an MH chain on X with 2x2 moves and Rao-Blackwellize:
// P(x in U) = E[X[cell]] / cap[cell].
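// Acceptance ratio of a 2x2 move (x11--, x22--, x12++, x21++) is the ratio of
// Π C(cap, X) after vs. before the move:
//   ratio = [x11/(cap11-x11+1)] * [x22/(cap22-x22+1)]
//         * [(cap12-x12)/(x12+1)] * [(cap21-x21)/(x21+1)]
// (the same derivation is spelled out in early_mc_refine below).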
bool pick_near_exact_two_query(int &outBest){
if((int)queryCode.size()!=2) return false;
if(aliveList.empty()) return false;
int n_rem = 30 - found;
if(n_rem<=0) return false;
if(!time_ok(220.0)) return false;
const int qa=0, qb=1;
const auto &A = cat[qa];
const auto &B = cat[qb];
static int cap[K][K];
static int xcnt[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) cap[a][b]=0;
for(int idx: aliveList) cap[A[idx]][B[idx]]++;
int row[K], col[K];
int sumR=0,sumC=0;
for(int a=0;a<K;a++){ row[a]=cnt[qa][a]; sumR+=row[a]; }
for(int b=0;b<K;b++){ col[b]=cnt[qb][b]; sumC+=col[b]; }
if(sumR!=n_rem || sumC!=n_rem) return false;
// Initial feasible matrix via maxflow
int S=0, row0=1, col0=1+K, T=1+K+K;
Dinic din(T+1);
for(int a=0;a<K;a++) if(row[a]>0) din.add_edge(S,row0+a,row[a]);
for(int b=0;b<K;b++) if(col[b]>0) din.add_edge(col0+b,T,col[b]);
static int edgePos[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) edgePos[a][b] = -1;
for(int a=0;a<K;a++) for(int b=0;b<K;b++) if(cap[a][b]>0){
edgePos[a][b] = din.add_edge(row0+a, col0+b, cap[a][b]);
}
int f = din.maxflow(S,T);
if(f!=n_rem) return false;
for(int a=0;a<K;a++) for(int b=0;b<K;b++) xcnt[a][b]=0;
for(int a=0;a<K;a++) for(int b=0;b<K;b++) if(edgePos[a][b]>=0){
const auto &e = din.g[row0+a][edgePos[a][b]];
xcnt[a][b] = cap[a][b] - e.cap; // used flow
}
vector<int> rows, cols;
rows.reserve(K); cols.reserve(K);
for(int a=0;a<K;a++) if(row[a]>0) rows.push_back(a);
for(int b=0;b<K;b++) if(col[b]>0) cols.push_back(b);
// If degenerate, X is essentially fixed.
if(rows.size()<=1 || cols.size()<=1){
double sumW=0.0;
for(int idx: aliveList){
int a=A[idx], b=B[idx];
int den = cap[a][b];
w[idx] = (den>0 ? (double)xcnt[a][b] / (double)den : 0.0);
sumW += w[idx];
}
if(sumW>0){
double sc=(double)n_rem/sumW;
for(int idx: aliveList) w[idx] *= sc;
}
}else{
auto mh_step = [&](){
int r1 = rows[(int)(rng()%rows.size())];
int r2 = rows[(int)(rng()%rows.size())];
while(r2==r1) r2 = rows[(int)(rng()%rows.size())];
int c1 = cols[(int)(rng()%cols.size())];
int c2 = cols[(int)(rng()%cols.size())];
while(c2==c1) c2 = cols[(int)(rng()%cols.size())];
if(rng()&1){
int &x11=xcnt[r1][c1]; int &x22=xcnt[r2][c2];
int &x12=xcnt[r1][c2]; int &x21=xcnt[r2][c1];
int cap11=cap[r1][c1], cap22=cap[r2][c2], cap12=cap[r1][c2], cap21=cap[r2][c1];
if(x11<=0 || x22<=0) return;
if(x12>=cap12 || x21>=cap21) return;
double ratio=1.0;
ratio *= (double)x11 / (double)(cap11 - x11 + 1);
ratio *= (double)x22 / (double)(cap22 - x22 + 1);
ratio *= (double)(cap12 - x12) / (double)(x12 + 1);
ratio *= (double)(cap21 - x21) / (double)(x21 + 1);
if(ratio >= 1.0 || (double)(rng() / (double)rng.max()) < ratio){
x11--; x22--; x12++; x21++;
}
}else{
int &x12=xcnt[r1][c2]; int &x21=xcnt[r2][c1];
int &x11=xcnt[r1][c1]; int &x22=xcnt[r2][c2];
int cap12=cap[r1][c2], cap21=cap[r2][c1], cap11=cap[r1][c1], cap22=cap[r2][c2];
if(x12<=0 || x21<=0) return;
if(x11>=cap11 || x22>=cap22) return;
double ratio=1.0;
ratio *= (double)x12 / (double)(cap12 - x12 + 1);
ratio *= (double)x21 / (double)(cap21 - x21 + 1);
ratio *= (double)(cap11 - x11) / (double)(x11 + 1);
ratio *= (double)(cap22 - x22) / (double)(x22 + 1);
if(ratio >= 1.0 || (double)(rng() / (double)rng.max()) < ratio){
x12--; x21--; x11++; x22++;
}
}
};
vector<pair<int,int>> cells;
cells.reserve(441);
for(int a=0;a<K;a++) for(int b=0;b<K;b++) if(cap[a][b]>0) cells.push_back({a,b});
static double mu[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) mu[a][b]=0.0;
double start = timer.ms();
double baseBudget = (time_left() > 3600.0 ? 1750.0 : (time_left() > 2600.0 ? 1450.0 : 1150.0));
double budget = min(baseBudget, time_left() - 260.0);
if(budget < 180.0) return false;
int burn = 8000;
int mixPer = 30;
for(int i=0;i<burn;i++) mh_step();
int samples=0;
while(timer.ms() - start < budget){
for(int i=0;i<mixPer;i++) mh_step();
samples++;
for(auto [a,b] : cells) mu[a][b] += (double)xcnt[a][b];
if(samples >= 24000) break;
}
if(samples<=0) return false;
double sumW=0.0;
for(int idx: aliveList){
int a=A[idx], b=B[idx];
int den = cap[a][b];
double mean = mu[a][b] / (double)samples;
w[idx] = (den>0 ? mean / (double)den : 0.0);
sumW += w[idx];
}
if(sumW>0){
double sc=(double)n_rem/sumW;
for(int idx: aliveList) w[idx] *= sc;
}
}
int best = aliveList[0];
double bestW = w[best];
for(int idx: aliveList){
double v=w[idx];
if(v>bestW){ bestW=v; best=idx; }
}
double margin = max(1e-12, 0.02*bestW);
double bestScore = bestW + 0.008*weighted_entropy_for_candidate(best);
for(int idx: aliveList){
double v=w[idx];
if(v + 1e-12 < bestW - margin) continue;
double score = v + 0.008*weighted_entropy_for_candidate(idx);
if(score > bestScore){ bestScore=score; best=idx; }
}
outBest=best;
return true;
}
// m==3..5 : near-exact inclusion probabilities by exact rejection sampling
// over the full constraints (all m queries), using a pivot pair (qa,qb).
//
// Proposal:
// 1) sample a 21x21 cell-count matrix X for (qa,qb) from
// P(X) ∝ Π C(cap[a][b], X[a][b]) with row/col marginals,
// via an MH chain on 2x2 moves.
// 2) sample a subset S of size n_rem uniformly given X (uniform within each cell).
// 3) accept iff S matches the histograms for ALL m queries.
//
// The proposal is uniform over subsets satisfying (qa,qb) marginals; rejection yields
// the exact uniform posterior for the intersection constraints. We Rao-Blackwellize
// by grouping candidates that share the full category-vector across all queries.
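// Rao-Blackwellization detail: candidates with identical category vectors (same HB result
// against every asked query so far) are exchangeable under the uniform posterior, so the
// per-element inclusion probability is estimated as E[#group members in U] / group size.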
bool pick_near_exact_small_m(int &outBest){
int m = (int)queryCode.size();
if(m < 3 || m > 5) return false;
if(found >= 10) return false;
if(aliveList.empty()) return false;
int n_rem = 30 - found;
if(n_rem <= 0) return false;
if(!time_ok(450.0)) return false;
// pivot pair: top 2 by spikiness (strong marginals -> better mixing / acceptance)
vector<int> ord(m);
iota(ord.begin(), ord.end(), 0);
sort(ord.begin(), ord.end(), [&](int x,int y){ return spikiness(x) > spikiness(y); });
int qa = ord[0], qb = ord[1];
if(qa==qb) return false;
const auto &A = cat[qa];
const auto &B = cat[qb];
static vector<int> cell[K][K];
static int cap[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++){ cell[a][b].clear(); cap[a][b]=0; }
for(int idx: aliveList){
int a=A[idx], b=B[idx];
cell[a][b].push_back(idx);
cap[a][b]++;
}
int row[K], col[K];
int sumR=0,sumC=0;
for(int a=0;a<K;a++){ row[a]=cnt[qa][a]; sumR+=row[a]; }
for(int b=0;b<K;b++){ col[b]=cnt[qb][b]; sumC+=col[b]; }
if(sumR!=n_rem || sumC!=n_rem) return false;
// Initial feasible X via maxflow
int S=0, row0=1, col0=1+K, T=1+K+K;
Dinic din(T+1);
for(int a=0;a<K;a++) if(row[a]>0) din.add_edge(S,row0+a,row[a]);
for(int b=0;b<K;b++) if(col[b]>0) din.add_edge(col0+b,T,col[b]);
static int edgePos[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) edgePos[a][b]=-1;
for(int a=0;a<K;a++) for(int b=0;b<K;b++) if(cap[a][b]>0){
edgePos[a][b] = din.add_edge(row0+a, col0+b, cap[a][b]);
}
int f = din.maxflow(S,T);
if(f!=n_rem) return false;
static int xcnt[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) xcnt[a][b]=0;
for(int a=0;a<K;a++) for(int b=0;b<K;b++) if(edgePos[a][b]>=0){
const auto &e = din.g[row0+a][edgePos[a][b]];
xcnt[a][b] = cap[a][b] - e.cap;
}
// Active rows / cols (positive marginals)
vector<int> rows, cols;
rows.reserve(K); cols.reserve(K);
for(int a=0;a<K;a++) if(row[a]>0) rows.push_back(a);
for(int b=0;b<K;b++) if(col[b]>0) cols.push_back(b);
// Precompute column intersections for row pairs to avoid useless proposals
static vector<int> inter[K][K];
for(int i=0;i<K;i++) for(int j=0;j<K;j++) inter[i][j].clear();
for(int i=0;i<(int)rows.size();i++){
int r1 = rows[i];
for(int j=i+1;j<(int)rows.size();j++){
int r2 = rows[j];
auto &v = inter[r1][r2];
v.reserve(cols.size());
for(int c: cols){
if(cap[r1][c]>0 && cap[r2][c]>0) v.push_back(c);
}
inter[r2][r1] = v;
}
}
auto rand01 = [&]()->double{
return (double)(rng() / (double)rng.max());
};
// MH step on X with 2x2 moves (target Π C(cap,x))
auto mh_step = [&](){
if(rows.size()<2 || cols.size()<2) return;
int r1 = rows[(int)(rng()%rows.size())];
int r2 = rows[(int)(rng()%rows.size())];
while(r2==r1) r2 = rows[(int)(rng()%rows.size())];
const auto &cands = inter[r1][r2];
if(cands.size() < 2) return;
int c1 = cands[(int)(rng()%cands.size())];
int c2 = cands[(int)(rng()%cands.size())];
while(c2==c1) c2 = cands[(int)(rng()%cands.size())];
if(rng()&1){
int &x11=xcnt[r1][c1]; int &x22=xcnt[r2][c2];
int &x12=xcnt[r1][c2]; int &x21=xcnt[r2][c1];
int cap11=cap[r1][c1], cap22=cap[r2][c2], cap12=cap[r1][c2], cap21=cap[r2][c1];
if(x11<=0 || x22<=0) return;
if(x12>=cap12 || x21>=cap21) return;
double ratio=1.0;
ratio *= (double)x11 / (double)(cap11 - x11 + 1);
ratio *= (double)x22 / (double)(cap22 - x22 + 1);
ratio *= (double)(cap12 - x12) / (double)(x12 + 1);
ratio *= (double)(cap21 - x21) / (double)(x21 + 1);
if(ratio >= 1.0 || rand01() < ratio){
x11--; x22--; x12++; x21++;
}
}else{
int &x12=xcnt[r1][c2]; int &x21=xcnt[r2][c1];
int &x11=xcnt[r1][c1]; int &x22=xcnt[r2][c2];
int cap12=cap[r1][c2], cap21=cap[r2][c1], cap11=cap[r1][c1], cap22=cap[r2][c2];
if(x12<=0 || x21<=0) return;
if(x11>=cap11 || x22>=cap22) return;
double ratio=1.0;
ratio *= (double)x12 / (double)(cap12 - x12 + 1);
ratio *= (double)x21 / (double)(cap21 - x21 + 1);
ratio *= (double)(cap11 - x11) / (double)(x11 + 1);
ratio *= (double)(cap22 - x22) / (double)(x22 + 1);
if(ratio >= 1.0 || rand01() < ratio){
x12--; x21--; x11++; x22++;
}
}
};
// Sample subset uniformly given X (uniform within each pivot cell)
auto sample_subset = [&](vector<int> &out)->bool{
out.clear();
out.reserve(n_rem);
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
int take = xcnt[a][b];
if(take<=0) continue;
auto &vec = cell[a][b];
int sz = (int)vec.size();
if(sz < take) return false;
// take distinct positions by small rejection (take<=30 total)
int picked[32];
int pc=0;
for(int t=0;t<take;t++){
while(true){
int p = (int)(rng()%sz);
bool dup=false;
for(int k=0;k<pc;k++) if(picked[k]==p){ dup=true; break; }
if(dup) continue;
picked[pc++] = p;
out.push_back(vec[p]);
break;
}
}
}
}
return (int)out.size()==n_rem;
};
// Fast check for all m queries (early reject on overflow)
auto ok_all_fast = [&](const vector<int> &Sset)->bool{
static int tmp[5][K];
for(int q=0;q<m;q++){
for(int r=0;r<K;r++) tmp[q][r]=0;
}
for(int idx: Sset){
for(int q=0;q<m;q++){
int r = cat[q][idx];
int v = ++tmp[q][r];
if(v > cnt[q][r]) return false;
}
}
for(int q=0;q<m;q++){
for(int r=0;r<K;r++){
if(tmp[q][r] != cnt[q][r]) return false;
}
}
return true;
};
// Grouping by full category-vector across all queries (exchangeability)
int powKm = 1;
for(int i=0;i<m;i++) powKm *= K;
static vector<int> gSize;
static vector<int> gSum;
if((int)gSize.size() < powKm){
gSize.assign(powKm, 0);
gSum.assign(powKm, 0);
}else{
fill(gSize.begin(), gSize.begin()+powKm, 0);
fill(gSum.begin(), gSum.begin()+powKm, 0);
}
auto group_id = [&](int idx)->int{
int id=0;
for(int q=0;q<m;q++){
id = id*K + (int)cat[q][idx];
}
return id;
};
for(int idx: aliveList){
int gid = group_id(idx);
gSize[gid]++;
}
// Budget: spend as much as possible here (few calls, huge impact on early misses).
double start = timer.ms();
double baseBudget;
if(m==3) baseBudget = 1750.0;
else if(m==4) baseBudget = 1300.0;
else baseBudget = 950.0;
double budget = min(baseBudget, time_left() - 260.0);
if(budget < 220.0) return false;
int burn = (m==3 ? 9000 : (m==4 ? 7000 : 5200));
int mixPer = (m==3 ? 80 : (m==4 ? 120 : 160));
int targetAcc = (m==3 ? 24000 : (m==4 ? 16000 : 11000));
int minAcc = (m==3 ? 4500 : (m==4 ? 3200 : 2300));
// Additional tightening when we have plenty of time
if(time_left() > 3600.0){
burn += 2500;
targetAcc = (int)(targetAcc * 1.10);
}
for(int i=0;i<burn;i++) mh_step();
vector<int> cur;
int accepted=0;
while(timer.ms() - start < budget){
for(int i=0;i<mixPer;i++) mh_step();
if(!sample_subset(cur)) continue;
if(!ok_all_fast(cur)) continue;
accepted++;
for(int idx: cur){
int gid = group_id(idx);
gSum[gid]++;
}
if(accepted >= targetAcc) break;
}
if(accepted < minAcc) return false;
double sumW=0.0;
for(int idx: aliveList){
int gid = group_id(idx);
int sz = gSize[gid];
if(sz<=0){ w[idx]=0.0; continue; }
double meanCount = (double)gSum[gid] / (double)accepted; // E[count in group]
w[idx] = meanCount / (double)sz; // per-element inclusion probability
sumW += w[idx];
}
if(sumW>0.0){
double sc = (double)n_rem / sumW;
for(int idx: aliveList) w[idx] *= sc;
}else{
// fallback to uniform
double uni = (double)n_rem / (double)aliveList.size();
for(int idx: aliveList) w[idx]=uni;
}
int best = aliveList[0];
double bestW = w[best];
for(int idx: aliveList){
double v=w[idx];
if(v>bestW){ bestW=v; best=idx; }
}
// Keep tie-break extremely mild: priority is still "max P(exists)".
double margin = max(1e-12, 0.02*bestW);
double bestScore = bestW + 0.006*weighted_entropy_for_candidate(best);
for(int idx: aliveList){
double v=w[idx];
if(v + 1e-12 < bestW - margin) continue;
double score = v + 0.006*weighted_entropy_for_candidate(idx);
if(score > bestScore){ bestScore=score; best=idx; }
}
outBest = best;
return true;
}
// pick best query from alive by w; tie-break among top few by entropy
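// Decision cascade: forced queue -> entropy-based opening query -> exact (m==1) and
// near-exact (m==2, m==3..5) inclusion probabilities -> endgame MC (n_rem <= 8) ->
// IPF weights, optionally refined by early-stage MC, with an entropy tie-break at the end.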
int pick_best_query(){
// forced first
while(!forcedQ.empty()){
int x = forcedQ.front();
forcedQ.pop_front();
if(alive[x] && !asked[x]) return x;
}
// no constraint yet: choose informative initial query
if(queryCode.empty()){
return best_initial_query();
}
int n_rem = 30 - found;
// Very early: use exact / near-exact inclusion probabilities (most accurate).
// This directly targets early misses which dominate Q.
if((int)queryCode.size()==1){
int best=-1;
if(pick_exact_single_query(best)) return best;
}
if((int)queryCode.size()==2 && found < 10 && time_left() > 2000.0){
int best=-1;
if(pick_near_exact_two_query(best)) return best;
}
if((int)queryCode.size()>=3 && (int)queryCode.size()<=5 && found < 10 && time_left() > 2200.0){
int best=-1;
if(pick_near_exact_small_m(best)) return best;
}
// Endgame Monte Carlo refinement
if(n_rem <= 8 && (int)aliveList.size() <= 9000 && time_left() > 600.0){
int best=-1;
if(endgame_mc(best)) return best;
}
// IPF
double rem = time_left();
int m = (int)queryCode.size();
int aliveN = (int)aliveList.size();
// adaptive sweeps
int sweeps = 0;
double damp = 1.0;
if(rem > 3500){
// We have plenty of time: spend more sweeps early to sharpen ordering.
sweeps = (aliveN>20000 ? 120 : 160);
}else if(rem > 2200){
sweeps = (aliveN>20000 ? 85 : 120);
}else if(rem > 1200){
sweeps = (aliveN>20000 ? 55 : 85);
}else{
sweeps = (aliveN>20000 ? 25 : 45);
}
// stage-dependent damping (more exploration early, more exact late)
if(n_rem >= 22) damp = 0.78;
else if(n_rem >= 12) damp = 0.86;
else damp = 0.93;
// if many constraints, convergence is faster
if(m >= 25) sweeps = max(10, sweeps - 10);
ipf(sweeps, damp);
// (Optional) early-stage MC refinement:
// Running MC from the very beginning (when posterior is almost uniform)
// often doesn't help: it may pick a query that is marginally more likely
// but significantly less "informative", increasing total Q.
//
// So we only enable MC when IPF has already become *somewhat* peaked
// (meaning there is real signal to refine), while we are still in the early/mid phase.
if(found >= 1 && found < 10 && m >= 3 && m <= 25 && n_rem >= 16 && rem > 2300.0){
double avgW = (double)n_rem / (double)aliveN;
int P = min(620, aliveN);
vector<pair<double,int>> pool;
pool.reserve(aliveN);
for(int i: aliveList) pool.push_back({w[i], i});
nth_element(pool.begin(), pool.begin()+P-1, pool.end(),
[](auto &x, auto &y){ return x.first > y.first; });
pool.resize(P);
sort(pool.begin(), pool.end(), [](auto &x, auto &y){ return x.first > y.first; });
double bestW0 = pool[0].first;
double peak = bestW0 / max(1e-12, avgW);
// only when there is some peakiness to refine (too flat -> little benefit; too sharp -> greedy wins)
if(peak > 1.45 && peak < 22.0 && time_left() > 1500.0){
vector<int> poolIdx;
poolIdx.reserve(P);
for(auto &p : pool) poolIdx.push_back(p.second);
int mcBest=-1;
if(early_mc_refine(poolIdx, mcBest)) return mcBest;
}
}
// pick top-L by weight (fallback)
const int L = 10;
vector<pair<double,int>> top;
top.reserve(L+1);
for(int i: aliveList){
top.push_back({w[i], i});
}
nth_element(top.begin(), top.begin()+min(L,(int)top.size())-1, top.end(),
[](auto &x, auto &y){ return x.first > y.first; });
sort(top.begin(), top.end(), [](auto &x, auto &y){ return x.first > y.first; });
if((int)top.size() > L) top.resize(L);
int best = top[0].second;
double bestW = top[0].first;
// entropy tie-break among close candidates
double lambda = (n_rem >= 18 ? 0.020 : 0.010);
double bestScore = bestW;
double margin = 0.015; // only consider those near the max
for(auto [wi, idx] : top){
if(wi + 1e-12 < bestW - margin) break;
double H = weighted_entropy_for_candidate(idx);
double score = wi + lambda * H;
if(score > bestScore){
bestScore = score;
best = idx;
}
}
return best;
}
// Choose a good initial query by entropy over ALL codes (uniform)
int find_index_of_string(const string &s){
for(int i=0;i<NALL;i++){
bool ok=true;
for(int k=0;k<LEN;k++) if(all[i].s[k]!=s[k]){ ok=false; break; }
if(ok) return i;
}
return 0;
}
int best_initial_query(){
vector<int> cand;
cand.reserve(260);
cand.push_back(0); // 01234 by construction
cand.push_back(find_index_of_string("56789"));
cand.push_back(find_index_of_string("13579"));
cand.push_back(find_index_of_string("02468"));
// random sample
for(int t=0;t<220;t++) cand.push_back((int)(rng()%NALL));
sort(cand.begin(), cand.end());
cand.erase(unique(cand.begin(), cand.end()), cand.end());
int best = cand[0];
double bestH = -1.0;
array<int,K> hist;
for(int idx : cand){
hist.fill(0);
for(int j=0;j<NALL;j++){
hist[hb_id(idx, j)]++;
}
double H=0.0;
for(int r=0;r<K;r++){
if(hist[r]==0) continue;
double p = (double)hist[r] / (double)NALL;
H -= p * log(p);
}
if(H > bestH){ bestH=H; best=idx; }
}
return best;
}
// Endgame Monte Carlo: try to sample completions consistent with all constraints
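// Unlike the MH samplers above, completions here are built directly: each row's demand is
// allocated across columns at random (respecting cell capacities and remaining column
// demands), concrete candidates are picked per cell, and the sample is rejected against
// every histogram. Frequencies over accepted completions estimate P(x in U).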
bool endgame_mc(int &outBest){
if(queryCode.size() < 2) return false;
int n_rem = 30 - found;
if(n_rem > 8) return false;
if(!time_ok(350.0)) return false;
// pick two strong pivot queries
int m = (int)queryCode.size();
int qa = 0, qb = 1;
// choose top two by spikiness
vector<int> ord(m);
iota(ord.begin(), ord.end(), 0);
sort(ord.begin(), ord.end(), [&](int x,int y){ return spikiness(x) > spikiness(y); });
qa = ord[0];
qb = ord[1];
// Build cell vectors
static vector<int> cell[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) cell[a][b].clear();
const auto &A = cat[qa];
const auto &B = cat[qb];
for(int idx: aliveList){
cell[A[idx]][B[idx]].push_back(idx);
}
// shuffle cells once (to avoid bias from index order)
for(int a=0;a<K;a++) for(int b=0;b<K;b++){
if(cell[a][b].size()>=2) shuffle(cell[a][b].begin(), cell[a][b].end(), rng);
}
int row[K], col[K];
for(int a=0;a<K;a++) row[a]=cnt[qa][a];
for(int b=0;b<K;b++) col[b]=cnt[qb][b];
vector<int> rows;
rows.reserve(K);
for(int a=0;a<K;a++) if(row[a]>0) rows.push_back(a);
if(rows.empty()) return false;
// time budget for MC
double start = timer.ms();
double budget = min(650.0, time_left()-200.0);
if(budget < 150.0) return false;
vector<int> freq(NALL, 0);
int accepted = 0;
int attempts = 0;
vector<int> sampleSet;
sampleSet.reserve(n_rem);
// helper to check all constraints
auto ok_all = [&](const vector<int>& S)->bool{
static int tmp[K];
for(int q=0;q<m;q++){
// build hist
for(int r=0;r<K;r++) tmp[r]=0;
const auto &cq = cat[q];
for(int idx: S) tmp[cq[idx]]++;
for(int r=0;r<K;r++) if(tmp[r]!=cnt[q][r]) return false;
}
return true;
};
// generate random feasible matrix x (row/col sums) and pick indices accordingly
while(timer.ms() - start < budget){
attempts++;
// remaining col demands
int colRem[K];
for(int b=0;b<K;b++) colRem[b]=col[b];
// x counts (sparse), store only cells with positive counts
static int xcnt[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) xcnt[a][b]=0;
// randomize row order a bit
vector<int> rowOrder = rows;
shuffle(rowOrder.begin(), rowOrder.end(), rng);
bool fail=false;
for(int ri=0; ri<(int)rowOrder.size(); ri++){
int a = rowOrder[ri];
int need = row[a];
if(need==0) continue;
if(ri == (int)rowOrder.size()-1){
// last row: must satisfy remaining columns exactly
for(int b=0;b<K;b++){
int v = colRem[b];
if(v==0) continue;
if((int)cell[a][b].size() < v){ fail=true; break; }
xcnt[a][b]=v;
}
if(fail) break;
}else{
// allocate need across columns
vector<int> cols;
cols.reserve(K);
for(int b=0;b<K;b++) if(colRem[b]>0 && !cell[a][b].empty()) cols.push_back(b);
shuffle(cols.begin(), cols.end(), rng);
// quick check available
int totalAvail=0;
for(int b: cols) totalAvail += min((int)cell[a][b].size(), colRem[b]);
if(totalAvail < need){ fail=true; break; }
for(int ci=0; ci<(int)cols.size(); ci++){
int b = cols[ci];
int avail = min((int)cell[a][b].size(), colRem[b]);
if(avail<=0) continue;
// future availability
int futureAvail=0;
for(int cj=ci+1; cj<(int)cols.size(); cj++){
int bb = cols[cj];
futureAvail += min((int)cell[a][bb].size(), colRem[bb]);
}
int lb = max(0, need - futureAvail);
int ub = min(avail, need);
if(lb>ub){ fail=true; break; }
int take;
if(lb==ub) take=lb;
else {
// uniform random choice in [lb, ub]
int span = ub-lb+1;
take = lb + (int)(rng()%span);
}
if(take){
xcnt[a][b]=take;
need -= take;
colRem[b] -= take;
}
if(need==0) break;
}
if(fail || need!=0){ fail=true; break; }
}
}
if(fail) continue;
sampleSet.clear();
// pick concrete candidates for each used cell
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
int take = xcnt[a][b];
if(take==0) continue;
auto &vec = cell[a][b];
int sz = (int)vec.size();
if(sz < take){ fail=true; break; }
int startPos = (sz==take ? 0 : (int)(rng()%sz));
for(int t=0;t<take;t++){
sampleSet.push_back(vec[(startPos+t)%sz]);
}
}
if(fail) break;
}
if(fail || (int)sampleSet.size()!=n_rem) continue;
if(!ok_all(sampleSet)) continue;
accepted++;
for(int idx: sampleSet) freq[idx]++;
// stop early if enough samples
if(accepted >= 2000) break;
}
if(accepted < 200) return false;
// override weights by sample frequencies
for(int idx: aliveList) w[idx] = (double)freq[idx] / (double)accepted;
normalize_weights(n_rem);
int best = aliveList[0];
double bw = w[best];
for(int idx: aliveList){
if(w[idx] > bw){ bw=w[idx]; best=idx; }
}
outBest = best;
return true;
}
// Early-stage Monte Carlo refinement (for sharper existence probabilities when posterior is flat).
//
// We pick a strong pivot pair of queries (qa,qb). Under the pivot constraints only,
// the unknown set U of size n_rem induces a 21x21 cell-count matrix X[a][b] with
// row sums = cnt[qa], col sums = cnt[qb], and 0<=X[a][b]<=cap[a][b] (#alive candidates in that cell).
//
// To sample uniformly among subsets satisfying the pivot constraints, note that the induced
// distribution on X is proportional to Π_{a,b} C(cap[a][b], X[a][b]). We run an MH chain
// on X using 2x2 moves with acceptance ratio computed from binomial coefficient ratios.
// Then we sample concrete elements uniformly within each cell, and reject samples that
// do not satisfy all other query histograms. Accepted samples are (approximately) uniform
// over all subsets satisfying all constraints, giving a sharper estimate of inclusion probs.
//
// We only use MC to pick among a pre-filtered pool of candidates (top by IPF), to avoid
// occasional sampling noise choosing a bizarre element.
bool early_mc_refine(const vector<int>& pool, int &outBest){
if(queryCode.size() < 2) return false;
int n_rem = 30 - found;
if(n_rem <= 0) return false;
if(pool.empty()) return false;
if(!time_ok(280.0)) return false;
int m = (int)queryCode.size();
// pick pivot pair: top 2 by spikiness
vector<int> ord(m);
iota(ord.begin(), ord.end(), 0);
sort(ord.begin(), ord.end(), [&](int x,int y){ return spikiness(x) > spikiness(y); });
int qa = ord[0], qb = ord[1];
if(qa==qb) return false;
// Build cell vectors and caps for pivot pair
static vector<int> cell[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) cell[a][b].clear();
const auto &A = cat[qa];
const auto &B = cat[qb];
static int cap[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) cap[a][b]=0;
for(int idx: aliveList){
int a=A[idx], b=B[idx];
cell[a][b].push_back(idx);
cap[a][b]++;
}
int row[K], col[K];
int sumR=0,sumC=0;
for(int a=0;a<K;a++){ row[a]=cnt[qa][a]; sumR+=row[a]; }
for(int b=0;b<K;b++){ col[b]=cnt[qb][b]; sumC+=col[b]; }
if(sumR!=n_rem || sumC!=n_rem) return false;
// Initial feasible matrix via maxflow
int S = 0;
int row0 = 1;
int col0 = 1 + K;
int T = 1 + K + K;
Dinic din(T+1);
for(int a=0;a<K;a++) if(row[a]>0) din.add_edge(S, row0+a, row[a]);
for(int b=0;b<K;b++) if(col[b]>0) din.add_edge(col0+b, T, col[b]);
static int edgePos[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) edgePos[a][b] = -1;
for(int a=0;a<K;a++) for(int b=0;b<K;b++){
if(cap[a][b]>0){
int pos = din.add_edge(row0+a, col0+b, cap[a][b]);
edgePos[a][b] = pos;
}
}
int f = din.maxflow(S,T);
if(f!=n_rem) return false;
static int xcnt[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) xcnt[a][b]=0;
for(int a=0;a<K;a++){
for(int b=0;b<K;b++) if(edgePos[a][b]>=0){
const auto &e = din.g[row0+a][edgePos[a][b]];
int used = cap[a][b] - e.cap;
xcnt[a][b] = used;
}
}
// pool index mapping
vector<int> pos(NALL, -1);
for(int i=0;i<(int)pool.size();i++) pos[pool[i]] = i;
vector<int> freq(pool.size(), 0);
// helper: check all query histograms
auto ok_all = [&](const vector<int>& Sset)->bool{
static int tmp[K];
for(int q=0;q<m;q++){
for(int r=0;r<K;r++) tmp[r]=0;
const auto &cq = cat[q];
for(int idx: Sset) tmp[cq[idx]]++;
for(int r=0;r<K;r++) if(tmp[r]!=cnt[q][r]) return false;
}
return true;
};
// MH step on xcnt using 2x2 moves, target weight Π C(cap, x)
auto mh_step = [&](){
int r1 = (int)(rng()%K);
int r2 = (int)(rng()%K);
while(r2==r1) r2 = (int)(rng()%K);
int c1 = (int)(rng()%K);
int c2 = (int)(rng()%K);
while(c2==c1) c2 = (int)(rng()%K);
if(rng()&1){
int &x11 = xcnt[r1][c1];
int &x22 = xcnt[r2][c2];
int &x12 = xcnt[r1][c2];
int &x21 = xcnt[r2][c1];
int cap11 = cap[r1][c1], cap22 = cap[r2][c2], cap12 = cap[r1][c2], cap21 = cap[r2][c1];
if(x11<=0 || x22<=0) return;
if(x12>=cap12 || x21>=cap21) return;
// ratio = C(cap11,x11-1)/C(cap11,x11) * C(cap22,x22-1)/C(cap22,x22)
// * C(cap12,x12+1)/C(cap12,x12) * C(cap21,x21+1)/C(cap21,x21)
// = (x11/(cap11-x11+1))*(x22/(cap22-x22+1))*((cap12-x12)/(x12+1))*((cap21-x21)/(x21+1))
double ratio = 1.0;
ratio *= (double)x11 / (double)(cap11 - x11 + 1);
ratio *= (double)x22 / (double)(cap22 - x22 + 1);
ratio *= (double)(cap12 - x12) / (double)(x12 + 1);
ratio *= (double)(cap21 - x21) / (double)(x21 + 1);
if(ratio >= 1.0 || (double)(rng() / (double)rng.max()) < ratio){
x11--; x22--; x12++; x21++;
}
}else{
// opposite diagonal
int &x12 = xcnt[r1][c2];
int &x21 = xcnt[r2][c1];
int &x11 = xcnt[r1][c1];
int &x22 = xcnt[r2][c2];
int cap12 = cap[r1][c2], cap21 = cap[r2][c1], cap11 = cap[r1][c1], cap22 = cap[r2][c2];
if(x12<=0 || x21<=0) return;
if(x11>=cap11 || x22>=cap22) return;
double ratio = 1.0;
ratio *= (double)x12 / (double)(cap12 - x12 + 1);
ratio *= (double)x21 / (double)(cap21 - x21 + 1);
ratio *= (double)(cap11 - x11) / (double)(x11 + 1);
ratio *= (double)(cap22 - x22) / (double)(x22 + 1);
if(ratio >= 1.0 || (double)(rng() / (double)rng.max()) < ratio){
x12--; x21--; x11++; x22++;
}
}
};
// sampling subset given xcnt (uniform within each cell)
auto sample_subset = [&](vector<int> &out)->bool{
out.clear();
out.reserve(n_rem);
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
int take = xcnt[a][b];
if(take<=0) continue;
auto &vec = cell[a][b];
int sz = (int)vec.size();
if(sz < take) return false;
// sample 'take' distinct indices via rejection; take is small, total is n_rem<=30.
int pickedPos[32];
int pc=0;
for(int t=0;t<take;t++){
while(true){
int p = (int)(rng()%sz);
bool dup=false;
for(int k=0;k<pc;k++) if(pickedPos[k]==p){ dup=true; break; }
if(dup) continue;
pickedPos[pc++] = p;
out.push_back(vec[p]);
break;
}
}
}
}
return (int)out.size()==n_rem;
};
// Time budget for early MC: spend extra time only early when we have room.
double start = timer.ms();
// Spend more time here: this is where we try to turn slight signal into early hits.
double budget = min(900.0, time_left() - 240.0);
if(budget < 120.0) return false;
int burn = 2800;
int mixPer = 200;
int targetAcc = 5000;
int minAcc = 1200;
if(found < 10){
targetAcc = (m <= 4 ? 11000 : 6500);
minAcc = (m <= 4 ? 2000 : 1500);
burn = (m <= 4 ? 3600 : 3000);
}else if(m <= 4){
// easier to accept; use more samples
targetAcc = 9000;
burn = 3500;
}
for(int i=0;i<burn;i++) mh_step();
vector<int> cur;
int accepted=0;
while(timer.ms() - start < budget){
for(int i=0;i<mixPer;i++) mh_step();
if(!sample_subset(cur)) continue;
if(!ok_all(cur)) continue;
accepted++;
for(int idx: cur){
int p = pos[idx];
if(p>=0) freq[p]++;
}
if(accepted >= targetAcc) break;
}
if(accepted < minAcc) return false;
// Choose by inclusion probability, but preserve the useful "informativeness" tie-break
// among near-ties. This keeps the strong greedy behavior while avoiding queries that
// are only marginally more likely but significantly less informative.
int bestIdx = pool[0];
double bestP = (double)freq[0] / (double)accepted;
for(int i=1;i<(int)pool.size();i++){
double p = (double)freq[i] / (double)accepted;
if(p > bestP){ bestP=p; bestIdx=pool[i]; }
}
double marginP = max(0.004, 0.04 * bestP);
double lambda = (n_rem >= 20 ? 0.018 : 0.012);
double bestScore = bestP + lambda * weighted_entropy_for_candidate(bestIdx);
for(int i=0;i<(int)pool.size();i++){
double p = (double)freq[i] / (double)accepted;
if(p + 1e-12 < bestP - marginP) continue;
int idx = pool[i];
double H = weighted_entropy_for_candidate(idx);
double score = p + lambda * H;
if(score > bestScore){
bestScore = score;
bestIdx = idx;
}
}
outBest = bestIdx;
return true;
}
// Perform one interaction step; returns false if finished.
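// Protocol as implemented here: print the 5-digit guess plus newline, flush, then read
// 30 "h b" pairs. A -1 -1 pair is treated as an error/termination signal, and a leading
// (5,0) pair is taken to mean every hidden string has already been found.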
bool step(){
if(found>=30) return false;
if(aliveList.empty()) return false;
int qidx = pick_best_query();
// Ask
cout.write(all[qidx].s.data(), LEN);
cout << '\n';
cout.flush(); // must flush
// Read 30 pairs (must read all)
array<int,K> hist;
hist.fill(0);
bool bad=false;
pair<int,int> first={-2,-2};
for(int i=0;i<30;i++){
int h,b;
if(!(cin>>h>>b)) return false;
if(i==0) first={h,b};
if(h==-1 && b==-1) bad=true;
if(!bad){
int id = HB_ID[h][b];
if(id>=0) hist[id]++;
}
}
if(bad) return false;
if(first.first==5 && first.second==0) return false; // all solved
int c50 = hist[ID_50];
bool new_found = (c50 > found);
found = c50;
// Residual for remaining unknown U
array<int,K> resid = hist;
resid[ID_50] -= found;
// Add query to history
queryCode.push_back(qidx);
cnt.push_back(resid);
int qid = (int)queryCode.size()-1;
cat.emplace_back(NALL);
auto &cq = cat.back();
for(int i=0;i<NALL;i++) cq[i] = hb_id(qidx, i);
uint32_t z=0;
for(int r=0;r<K;r++) if(cnt[qid][r]==0) z |= (1u<<r);
zeroMask.push_back(z);
// Mark asked & remove from alive
asked[qidx]=1;
alive[qidx]=0;
w[qidx]=0.0;
// Prune by zero categories (includes cleaning alive list)
prune_by_mask(qid, z);
// Peel if newly found (current query is that string)
if(new_found){
peel_new_found(qidx);
}
// Strong propagation
propagate();
return true;
}
void run(){
// interactive: start immediately
while(time_ok(20.0)){
if(!step()) break;
}
}
};
int main(){
ios::sync_with_stdio(false);
cin.tie(nullptr);
Solver solver;
solver.run();
return 0;
}