#include <bits/stdc++.h>
using namespace std;
/*
  yukicoder interactive (Advent Calendar Contest 2025 Final):
  30 parallel mixed Hit&Blow (length 5, digits 0..9, all distinct).

  Masterpiece solver (aggressive but safe). Core idea: maintain the set U of
  NOT-YET-DISCOVERED hidden strings. Each query q returns a multiset of 30
  (hit, blow) pairs, sorted; (5,0) appears for every already-discovered string
  and for q itself if q is hidden. We convert each answer into a 21-category
  (hit, blow) histogram over U, then repeatedly ask the element of U with the
  highest estimated existence probability.

  Estimation & propagation:
  1) Zero-category pruning: if some past query reports count 0 for category r,
     any candidate x with HB(q,x)=r cannot be in U.
  2) Peeling: when we newly discover s (= the current q), remove its
     contribution from all past histograms. This can create new zeros and
     hence more pruning.
  3) Maximum-entropy existence probability (IPF / raking): find weights w[x]
     (approximately P[x in U]) such that for every past query q_j and category
     r: sum_{x alive, HB(q_j,x)=r} w[x] = cnt[j][r]. Then ask argmax w[x].
  4) Strong pairwise propagation (transportation polytope): for a pair of past
     queries (A,B), candidates split into 21x21 cells by (HB(A,x), HB(B,x)).
     A feasible U must satisfy the row/column sums exactly. We solve ONE
     max-flow for the pair and build the residual graph over row/col nodes.
     From reachability:
       - fixed0 cell:    flow==0   and col->row NOT reachable => cell must be empty
       - fixedFull cell: flow==cap and row->col NOT reachable => cell must be full
     fixed0    => prune all candidates in that cell
     fixedFull => candidates in that cell are guaranteed to exist
               => push them to the forced queue
  5) Endgame Monte Carlo (n_rem <= 8): build random feasible completions using
     a strong query pair, reject by all constraints, and estimate existence
     probabilities by frequency.

  The solver uses time checks and tries to spend most of the 5 s budget on
  constraint propagation and probability sharpening.
*/
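/*
  Worked example of the bookkeeping (illustrative numbers): the 21 categories
  are the (hit, blow) pairs with hit + blow <= 5, enumerated hit-major by
  build_mapping() below: (0,0)->0, (0,1)->1, ..., (0,5)->5, (1,0)->6, ...,
  (5,0)->20 (= ID_50). Suppose a reply multiset contains (5,0) x 3,
  (1,2) x 10 and (0,3) x 17. Then found becomes 3, and the residual histogram
  for U keeps 10 at HB_ID[1][2] and 17 at HB_ID[0][3] while its (5,0) slot is
  zeroed, because every (5,0) belongs to a discovered string or to the query
  itself.
*/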
static constexpr int DIG = 10;
static constexpr int LEN = 5;
static constexpr int NALL = 30240; // 10*9*8*7*6 codes with all-distinct digits
static constexpr int K = 21;       // (hit,blow) categories: pairs with hit+blow <= 5
static int HB_ID[LEN+1][LEN+1];
static int ID_50;

static inline int popcnt10(uint16_t x){ return __builtin_popcount((unsigned)x); }

struct Code{
    array<uint8_t,LEN> d; // digits
    uint16_t mask;        // bitmask of used digits
    array<char,LEN> s;    // printable form
};

struct Timer{
    chrono::steady_clock::time_point st;
    Timer(): st(chrono::steady_clock::now()) {}
    double ms() const {
        return chrono::duration<double, milli>(chrono::steady_clock::now()-st).count();
    }
};

// Small Dinic for bipartite flow
struct Dinic {
    struct Edge{ int to, rev; int cap; };
    int n;
    vector<vector<Edge>> g;
    vector<int> level, it;
    Dinic(int n=0){ init(n); }
    void init(int n_){ n=n_; g.assign(n, {}); level.assign(n, 0); it.assign(n, 0); }
    int add_edge(int fr,int to,int cap){
        Edge a{to, (int)g[to].size(), cap};
        Edge b{fr, (int)g[fr].size(), 0};
        g[fr].push_back(a); g[to].push_back(b);
        return (int)g[fr].size()-1; // index in g[fr]
    }
    bool bfs(int s,int t){
        fill(level.begin(), level.end(), -1);
        queue<int> q;
        level[s]=0; q.push(s);
        while(!q.empty()){
            int v=q.front(); q.pop();
            for(const auto &e: g[v]){
                if(e.cap>0 && level[e.to]<0){ level[e.to]=level[v]+1; q.push(e.to); }
            }
        }
        return level[t]>=0;
    }
    int dfs(int v,int t,int f){
        if(v==t) return f;
        for(int &i=it[v]; i<(int)g[v].size(); i++){
            Edge &e=g[v][i];
            if(e.cap<=0 || level[e.to]!=level[v]+1) continue;
            int ret=dfs(e.to,t,min(f,e.cap));
            if(ret>0){ e.cap-=ret; g[e.to][e.rev].cap+=ret; return ret; }
        }
        return 0;
    }
    int maxflow(int s,int t){
        int flow=0;
        while(bfs(s,t)){
            fill(it.begin(), it.end(), 0);
            while(true){ int f=dfs(s,t,(int)1e9); if(!f) break; flow+=f; }
        }
        return flow;
    }
};

struct Solver {
    // all codes
    vector<Code> all;
    // state
    vector<uint8_t> alive, asked, forcedFlag;
    vector<int> aliveList;
    vector<double> w;
    int found = 0; // number of already discovered hidden strings
    // history
    vector<int> queryCode;        // code index for each asked query
    vector<array<int,K>> cnt;     // residual histogram for remaining unknown U
    vector<vector<uint8_t>> cat;  // cat[qid][code] = HB(query[qid], code)
    vector<uint32_t> zeroMask;    // bitmask of categories with cnt==0
    deque<int> forcedQ;
    Timer timer;
    mt19937 rng;
    // time budget
    static constexpr double TIME_LIMIT_MS = 4950.0; // keep some margin

    Solver(): rng(71236721u) {
        build_mapping();
        build_all_codes();
        alive.assign(NALL, 1); asked.assign(NALL, 0); forcedFlag.assign(NALL, 0);
        w.assign(NALL, 1.0);
        aliveList.resize(NALL);
        iota(aliveList.begin(), aliveList.end(), 0);
    }

    static void build_mapping(){
        for(int h=0; h<=LEN; h++) for(int b=0; b<=LEN; b++) HB_ID[h][b] = -1;
        int kk=0;
        for(int h=0; h<=LEN; h++) for(int b=0; b<=LEN-h; b++) HB_ID[h][b] = kk++;
        ID_50 = HB_ID[5][0];
    }

    void build_all_codes(){
        all.reserve(NALL);
        for(int a=0;a<10;a++)
        for(int b=0;b<10;b++) if(b!=a)
        for(int c=0;c<10;c++) if(c!=a && c!=b)
        for(int d=0;d<10;d++) if(d!=a && d!=b && d!=c)
        for(int e=0;e<10;e++) if(e!=a && e!=b && e!=c && e!=d){
            Code x;
            x.d = {(uint8_t)a,(uint8_t)b,(uint8_t)c,(uint8_t)d,(uint8_t)e};
            x.mask = (uint16_t)((1u<<a)|(1u<<b)|(1u<<c)|(1u<<d)|(1u<<e));
            for(int k=0;k<LEN;k++) x.s[k] = (char)('0'+x.d[k]);
            all.push_back(x);
        }
    }

    // HB category between codes a and b: hits by position,
    // blows = shared digits (mask popcount) minus hits
    int hb_id(int a, int b) const {
        const Code &A = all[a], &B = all[b];
        int h=0;
        for(int k=0;k<LEN;k++) if(A.d[k]==B.d[k]) h++;
        int common = popcnt10((uint16_t)(A.mask & B.mask));
        return HB_ID[h][common - h];
    }

    double time_left() const { return TIME_LIMIT_MS - timer.ms(); }
    bool time_ok(double margin) const { return time_left() > margin; }

    void clean_alive(){
        vector<int> nxt; nxt.reserve(aliveList.size());
        for(int i: aliveList){ if(alive[i]) nxt.push_back(i); }
        aliveList.swap(nxt);
    }

    // prune candidates whose category for query qid is in maskbits
    bool prune_by_mask(int qid, uint32_t maskbits){
        if(maskbits==0){ clean_alive(); return false; }
        bool changed=false;
        const auto &cq = cat[qid];
        vector<int> nxt; nxt.reserve(aliveList.size());
        for(int i: aliveList){
            if(!alive[i]) continue;
            if(maskbits & (1u<<cq[i])){ alive[i]=0; w[i]=0.0; changed=true; continue; }
            nxt.push_back(i);
        }
        aliveList.swap(nxt);
        return changed;
    }

    // rescale weights so they sum to the number of undiscovered strings
    void normalize_weights(int n_rem){
        double s=0.0;
        for(int i: aliveList) s += w[i];
        if(s<=0.0) return;
        double f=(double)n_rem/s;
        for(int i: aliveList) w[i]*=f;
    }

    // IPF / raking: fit w to every query histogram (see header, step 3)
    void ipf(int sweeps, double damp){
        int n_rem = 30 - found;
        if(aliveList.empty() || queryCode.empty()) return;
        array<double,K> sum;
        array<double,K> fac;
        for(int it=0; it<sweeps; it++){
            for(int q=0; q<(int)queryCode.size(); q++){
                const auto &cq = cat[q];
                sum.fill(0.0);
                for(int i: aliveList) sum[cq[i]] += w[i];
                for(int r=0;r<K;r++){
                    if(sum[r] > 0.0) fac[r] = (double)cnt[q][r] / sum[r];
                    else fac[r] = (cnt[q][r]==0 ? 1.0 : 0.0);
                    if(damp!=1.0) fac[r] = pow(fac[r], damp);
                }
                for(int i: aliveList) w[i] *= fac[cq[i]];
            }
        }
        normalize_weights(n_rem);
    }
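    /*
      Illustrative sketch (hypothetical demo, not called by the solver;
      compile with -DIPF_DEMO to inspect it): the raking update used by ipf()
      on a 4-candidate toy with two 2-category "queries". Query A wants the
      marginals (2,1) over its categories, query B wants (1,2); the weights
      converge to values satisfying both sums at once.
    */
#ifdef IPF_DEMO
    static void ipf_demo(){
        double w[4] = {1,1,1,1};
        int catA[4] = {0,0,1,1}, catB[4] = {0,1,0,1}; // cell structure
        double cntA[2] = {2,1}, cntB[2] = {1,2};      // toy marginals (totals match)
        for(int sweep=0; sweep<60; sweep++){
            for(int pass=0; pass<2; pass++){
                const int *c = pass ? catB : catA;
                const double *t = pass ? cntB : cntA;
                double sum[2] = {0,0};
                for(int x=0;x<4;x++) sum[c[x]] += w[x];           // current category mass
                for(int x=0;x<4;x++) if(sum[c[x]] > 0) w[x] *= t[c[x]] / sum[c[x]]; // rake
            }
        }
        for(int x=0;x<4;x++) printf("w[%d] = %.4f\n", x, w[x]);
    }
#endif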
    // Discovering a new hidden string s: peel its contribution from all past histograms
    void peel_new_found(int sidx){
        int lastQ = (int)queryCode.size() - 1; // the newest histogram already excludes s
        for(int q=0; q<lastQ; q++){
            int r = cat[q][sidx];
            if(cnt[q][r] > 0){
                cnt[q][r]--;
                if(cnt[q][r]==0) zeroMask[q] |= (1u<<r);
            }
        }
    }

    // Single-query propagation: bucket alive candidates by category.
    // A cnt==0 bucket is pruned; a bucket whose size equals its required
    // count consists entirely of hidden strings and is forced.
    bool single_query_propagate(){
        bool changed=false;
        vector<vector<int>> buckets(K);
        for(int q=0; q<(int)queryCode.size(); q++){
            for(int r=0;r<K;r++) buckets[r].clear();
            const auto &cq = cat[q];
            for(int i: aliveList) if(alive[i]) buckets[cq[i]].push_back(i);
            for(int r=0;r<K;r++){
                if(cnt[q][r]==0){
                    for(int i: buckets[r]){ alive[i]=0; w[i]=0.0; changed=true; }
                }else if((int)buckets[r].size()==cnt[q][r]){
                    for(int i: buckets[r]){
                        if(!asked[i] && !forcedFlag[i]){
                            forcedFlag[i]=1; forcedQ.push_back(i); changed=true;
                        }
                    }
                }
            }
        }
        clean_alive();
        return changed;
    }

    // Strong pairwise propagation over the transportation polytope of (qa,qb)
    // (see header, step 4): one max-flow, then fixed cells from the residual graph.
    bool pair_propagate(int qa, int qb){
        int n_rem = 30 - found;
        int cap[K][K], flow[K][K], rowSum[K], colSum[K];
        for(int a=0;a<K;a++){
            rowSum[a]=cnt[qa][a]; colSum[a]=cnt[qb][a];
            for(int b=0;b<K;b++) cap[a][b]=0;
        }
        const auto &A = cat[qa];
        const auto &B = cat[qb];
        for(int idx: aliveList) cap[A[idx]][B[idx]]++;
        const int row0 = 0, col0 = K, S = 2*K, T = 2*K+1;
        Dinic din(2*K+2);
        for(int a=0;a<K;a++) if(rowSum[a]>0) din.add_edge(S, row0+a, rowSum[a]);
        for(int b=0;b<K;b++) if(colSum[b]>0) din.add_edge(col0+b, T, colSum[b]);
        static int edgePos[K][K];
        for(int a=0;a<K;a++){
            for(int b=0;b<K;b++){
                edgePos[a][b] = -1;
                if(cap[a][b]>0){
                    int pos = din.add_edge(row0+a, col0+b, cap[a][b]);
                    edgePos[a][b] = pos;
                }
            }
        }
        int f = din.maxflow(S, T);
        if(f != n_rem){
            // If this happens, our alive set is too small for this pair's marginals.
            // Should not happen with safe pruning.
            return false;
        }
        // Extract flow on row->col edges
        for(int a=0;a<K;a++){
            for(int b=0;b<K;b++){
                if(edgePos[a][b] >= 0){
                    const auto &e = din.g[row0+a][edgePos[a][b]];
                    int used = cap[a][b] - e.cap;
                    flow[a][b] = used;
                }else flow[a][b]=0;
            }
        }
        // Build residual graph on 2K nodes (rows:0..K-1, cols:K..2K-1)
        const int N = 2*K;
        static uint64_t reach[2*K];
        for(int u=0;u<N;u++) reach[u] = 0;
        for(int a=0;a<K;a++){
            for(int b=0;b<K;b++){
                if(cap[a][b]==0) continue;
                if(flow[a][b] < cap[a][b]){ // can increase => row a -> col b
                    reach[a] |= (1ULL<<(K+b));
                }
                if(flow[a][b] > 0){ // can decrease => col b -> row a
                    reach[K+b] |= (1ULL<<a);
                }
            }
        }
        // transitive closure: iterate bit-or until a fixed point
        bool grew = true;
        while(grew){
            grew = false;
            for(int u=0;u<N;u++){
                uint64_t r = reach[u], m = r;
                while(m){ int v = __builtin_ctzll(m); m &= m-1; r |= reach[v]; }
                if(r != reach[u]){ reach[u]=r; grew=true; }
            }
        }
        // fixed cells: no residual cycle can change the cell's flow
        static bool fixed0[K][K], fixedFull[K][K];
        for(int a=0;a<K;a++) for(int b=0;b<K;b++){
            fixed0[a][b]=false; fixedFull[a][b]=false;
            if(cap[a][b]==0) continue;
            if(flow[a][b]==0 && !((reach[K+b]>>a)&1ULL)) fixed0[a][b]=true;
            if(flow[a][b]==cap[a][b] && !((reach[a]>>(K+b))&1ULL)) fixedFull[a][b]=true;
        }
        bool changed=false;
        vector<int> nxt; nxt.reserve(aliveList.size());
        for(int idx: aliveList){
            int a = A[idx];
            int b = B[idx];
            if(fixed0[a][b]){ alive[idx]=0; w[idx]=0.0; changed=true; continue; }
            if(fixedFull[a][b]){
                if(!asked[idx] && !forcedFlag[idx]){
                    forcedFlag[idx]=1; forcedQ.push_back(idx);
                    changed=true; // treat as progress
                }
            }
            nxt.push_back(idx);
        }
        aliveList.swap(nxt);
        return changed;
    }
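    /*
      Illustrative sketch (hypothetical demo of the fixed-cell test above,
      not called by the solver; compile with -DPAIR_DEMO): a 2-row/2-col toy
      where rows need (1,1) candidates and cols need (2,0), and cells (0,0)
      and (1,0) hold one candidate each. The max-flow saturates both cells
      and the residual graph has no row->col edge at all, so both cells are
      fixedFull: every candidate in them must be a hidden string.
    */
#ifdef PAIR_DEMO
    static void pair_demo(){
        Dinic din(2+2+2); // 2 rows, 2 cols, S, T
        const int row0=0, col0=2, S=4, T=5;
        int rowSum[2]={1,1}, colSum[2]={2,0};
        for(int a=0;a<2;a++) if(rowSum[a]>0) din.add_edge(S, row0+a, rowSum[a]);
        for(int b=0;b<2;b++) if(colSum[b]>0) din.add_edge(col0+b, T, colSum[b]);
        int e00 = din.add_edge(row0+0, col0+0, 1); // cell (0,0), one candidate
        int e10 = din.add_edge(row0+1, col0+0, 1); // cell (1,0), one candidate
        int f = din.maxflow(S, T);                 // expect 2 (= n_rem of the toy)
        printf("flow=%d cell00 residual=%d cell10 residual=%d\n",
               f, din.g[row0+0][e00].cap, din.g[row0+1][e10].cap); // both 0 => saturated
    }
#endif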
    // Heuristic strength score for choosing partner queries
    long long spikiness(int qid) const {
        long long s=0;
        for(int r=0;r<K;r++){ long long c = cnt[qid][r]; s += c*c; }
        return s;
    }

    // Pair the newest query with the strongest partners (time-budgeted)
    bool run_pair_propagation(){
        bool changed=false;
        int m = (int)queryCode.size();
        if(m>=2){
            int last = m-1;
            vector<int> partners; partners.reserve(last);
            for(int i=0;i<last;i++) partners.push_back(i);
            sort(partners.begin(), partners.end(),
                 [&](int a,int b){ return spikiness(a) > spikiness(b); });
            double rem = time_left();
            int maxPartners = last;
            if(rem < 1200) maxPartners = min(maxPartners, 10);
            else if(rem < 2200) maxPartners = min(maxPartners, 18);
            else if(rem < 3200) maxPartners = min(maxPartners, 30);
            // else: all
            partners.resize(maxPartners);
            for(int i: partners){
                if(!time_ok(180.0)) break;
                changed |= pair_propagate(i, last);
            }
            // Extra cross pairs among top few strong queries (cheap insurance)
            if(time_ok(220.0) && m>=4){
                int P = min((int)partners.size(), 6);
                for(int i=0;i<P;i++) for(int j=i+1;j<P;j++){
                    if(!time_ok(220.0)) break;
                    changed |= pair_propagate(partners[i], partners[j]);
                }
            }
        }
        clean_alive();
        return changed;
    }

    // weighted entropy of candidate qidx's category split under current w
    double weighted_entropy_for_candidate(int qidx) const {
        array<double,K> sum;
        sum.fill(0.0);
        for(int i: aliveList){ uint8_t cid = (uint8_t)hb_id(qidx, i); sum[cid] += w[i]; }
        double H=0.0;
        for(int r=0;r<K;r++){ double p = sum[r]; if(p>1e-15) H -= p * log(p); }
        return H;
    }

    // pick best query from alive by w; tie-break among top few by entropy
    int pick_best_query(){
        // forced first
        while(!forcedQ.empty()){
            int x = forcedQ.front(); forcedQ.pop_front();
            if(alive[x] && !asked[x]) return x;
        }
        // no constraint yet: choose an informative initial query
        if(queryCode.empty()) return best_initial_query();
        int n_rem = 30 - found;
        // Endgame Monte Carlo refinement
        if(n_rem <= 8 && (int)aliveList.size() <= 9000 && time_left() > 600.0){
            int best=-1;
            if(endgame_mc(best)) return best;
        }
        // IPF
        double rem = time_left();
        int m = (int)queryCode.size();
        int aliveN = (int)aliveList.size();
        // adaptive sweeps
        int sweeps = 0;
        double damp = 1.0;
        if(rem > 3500)      sweeps = (aliveN>20000 ? 95 : 120);
        else if(rem > 2200) sweeps = (aliveN>20000 ? 80 : 105);
        else if(rem > 1200) sweeps = (aliveN>20000 ? 55 : 80);
        else                sweeps = (aliveN>20000 ? 35 : 55);
        // stage-dependent damping (more exploration early, more exact late)
        if(n_rem >= 22) damp = 0.78;
        else if(n_rem >= 12) damp = 0.86;
        else damp = 0.93;
        // if many constraints, convergence is faster
        if(m >= 25) sweeps = max(10, sweeps - 10);
        ipf(sweeps, damp);
        // pick top-L by weight
        const int L = 12;
        vector<pair<double,int>> top;
        top.reserve(aliveList.size());
        for(int i: aliveList) top.push_back({w[i], i});
        int topN = min(L, (int)top.size());
        nth_element(top.begin(), top.begin()+topN-1, top.end(),
                    [](auto &x, auto &y){ return x.first > y.first; });
        sort(top.begin(), top.begin()+topN,
             [](auto &x, auto &y){ return x.first > y.first; });
        top.resize(topN);
        int best = top[0].second;
        double bestW = top[0].first;
        // entropy tie-break among candidates close to the max
        double lambda = (n_rem >= 18 ? 0.020 : 0.010);
        double bestScore = bestW;
        double margin = 0.015; // only consider those near the max
        for(auto [wi, idx] : top){
            if(wi + 1e-12 < bestW - margin) break;
            double H = weighted_entropy_for_candidate(idx);
            double score = wi + lambda * H;
            if(score > bestScore){ bestScore = score; best = idx; }
        }
        return best;
    }

    int find_index_of_string(const string &s){
        for(int i=0;i<NALL;i++)
            if(memcmp(all[i].s.data(), s.data(), LEN)==0) return i;
        return 0; // every probe below exists; defensive fallback
    }

    // Choose a good initial query by entropy over ALL codes (uniform)
    int best_initial_query(){
        vector<int> cand; cand.reserve(260);
        cand.push_back(0); // "01234" by construction
        cand.push_back(find_index_of_string("56789"));
        cand.push_back(find_index_of_string("13579"));
        cand.push_back(find_index_of_string("02468"));
        // random sample
        for(int t=0;t<220;t++) cand.push_back((int)(rng()%NALL));
        sort(cand.begin(), cand.end());
        cand.erase(unique(cand.begin(), cand.end()), cand.end());
        int best = cand[0];
        double bestH = -1.0;
        array<int,K> hist;
        for(int idx : cand){
            hist.fill(0);
            for(int j=0;j<NALL;j++) hist[hb_id(idx, j)]++;
            double H=0.0;
            for(int r=0;r<K;r++){
                if(!hist[r]) continue;
                double p = (double)hist[r]/NALL;
                H -= p*log(p);
            }
            if(H > bestH){ bestH=H; best=idx; }
        }
        return best;
    }

    // Endgame Monte Carlo: sample completions consistent with all constraints
    bool endgame_mc(int &outBest){
        if(queryCode.size() < 2) return false;
        int n_rem = 30 - found;
        if(n_rem > 8) return false;
        if(!time_ok(350.0)) return false;
        // pick two strong pivot queries: top two by spikiness
        int m = (int)queryCode.size();
        vector<int> ord(m);
        iota(ord.begin(), ord.end(), 0);
        sort(ord.begin(), ord.end(), [&](int x,int y){ return spikiness(x) > spikiness(y); });
        int qa = ord[0], qb = ord[1];
        // Build cell vectors
        static vector<int> cell[K][K];
        for(int a=0;a<K;a++) for(int b=0;b<K;b++) cell[a][b].clear();
        const auto &A = cat[qa];
        const auto &B = cat[qb];
        for(int idx: aliveList) cell[A[idx]][B[idx]].push_back(idx);
        for(int a=0;a<K;a++) for(int b=0;b<K;b++)
            if((int)cell[a][b].size()>=2) shuffle(cell[a][b].begin(), cell[a][b].end(), rng);
        int row[K], col[K];
        for(int a=0;a<K;a++){ row[a]=cnt[qa][a]; col[a]=cnt[qb][a]; }
        vector<int> rows; rows.reserve(K);
        for(int a=0;a<K;a++) if(row[a]>0) rows.push_back(a);
        if(rows.empty()) return false;
        // time budget for MC
        double start = timer.ms();
        double budget = min(650.0, time_left()-200.0);
        if(budget < 150.0) return false;
        vector<int> freq(NALL, 0);
        int accepted = 0;
        int attempts = 0;
        vector<int> sampleSet; sampleSet.reserve(n_rem);
        // helper to check all constraints
        auto ok_all = [&](const vector<int>& S)->bool{
            static int tmp[K];
            for(int q=0;q<(int)queryCode.size(); q++){
                for(int r=0;r<K;r++) tmp[r]=0;
                const auto &cq = cat[q];
                for(int idx: S) tmp[cq[idx]]++;
                for(int r=0;r<K;r++) if(tmp[r]!=cnt[q][r]) return false;
            }
            return true;
        };
        int xcnt[K][K], colRem[K];
        while(timer.ms() - start < budget){
            attempts++;
            for(int a=0;a<K;a++) for(int b=0;b<K;b++) xcnt[a][b]=0;
            for(int b=0;b<K;b++) colRem[b]=col[b];
            vector<int> rowOrder = rows;
            shuffle(rowOrder.begin(), rowOrder.end(), rng);
            bool fail=false;
            for(int ri=0; ri<(int)rowOrder.size(); ri++){
                int a = rowOrder[ri];
                int need = row[a];
                if(need==0) continue;
                if(ri == (int)rowOrder.size()-1){
                    // last row: must satisfy remaining columns exactly
                    for(int b=0;b<K;b++){
                        int nb = colRem[b];
                        if(nb==0) continue;
                        if((int)cell[a][b].size() < nb){ fail=true; break; }
                        xcnt[a][b]=nb; need-=nb; colRem[b]=0;
                    }
                    if(fail || need!=0){ fail=true; break; }
                }else{
                    vector<int> cols; cols.reserve(K);
                    for(int b=0;b<K;b++) if(colRem[b]>0 && !cell[a][b].empty()) cols.push_back(b);
                    shuffle(cols.begin(), cols.end(), rng);
                    // quick check of available capacity
                    int totalAvail=0;
                    for(int b: cols) totalAvail += min((int)cell[a][b].size(), colRem[b]);
                    if(totalAvail < need){ fail=true; break; }
                    for(int ci=0; ci<(int)cols.size(); ci++){
                        int b = cols[ci];
                        int avail = min((int)cell[a][b].size(), colRem[b]);
                        if(avail<=0) continue;
                        // availability in the columns still to come
                        int futureAvail=0;
                        for(int cj=ci+1; cj<(int)cols.size(); cj++){
                            int bb = cols[cj];
                            futureAvail += min((int)cell[a][bb].size(), colRem[bb]);
                        }
                        int lb = max(0, need - futureAvail);
                        int ub = min(avail, need);
                        if(lb>ub){ fail=true; break; }
                        int take;
                        if(lb==ub) take=lb;
                        else { int span = ub-lb+1; take = lb + (int)(rng()%span); } // uniform in [lb,ub]
                        if(take){ xcnt[a][b]=take; need -= take; colRem[b] -= take; }
                        if(need==0) break;
                    }
                    if(fail || need!=0){ fail=true; break; }
                }
            }
            if(fail) continue;
            sampleSet.clear();
            // pick concrete candidates for each used cell (random subset)
            for(int a=0;a<K;a++) for(int b=0;b<K;b++){
                int t = xcnt[a][b];
                if(!t) continue;
                auto &vec = cell[a][b];
                for(int k=0;k<t;k++){ // partial Fisher-Yates
                    int j = k + (int)(rng() % (unsigned)(vec.size()-k));
                    swap(vec[k], vec[j]);
                    sampleSet.push_back(vec[k]);
                }
            }
            if(!ok_all(sampleSet)) continue;
            accepted++;
            for(int idx: sampleSet) freq[idx]++;
            if(accepted >= 2000) break;
        }
        if(accepted < 200) return false;
        // override weights by sample frequencies
        for(int idx: aliveList) w[idx] = (double)freq[idx] / (double)accepted;
        normalize_weights(n_rem);
        int best = aliveList[0];
        double bw = w[best];
        for(int idx: aliveList){ if(w[idx] > bw){ bw=w[idx]; best=idx; } }
        outBest = best;
        return true;
    }
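    /*
      Worked note on the row-by-row sampler above (illustrative numbers):
      with row needs (2,1) and column remainders (1,2), a random split of the
      first row into (1,1) leaves colRem=(0,1); the last row then has no
      freedom and must take exactly the remaining column counts. That is why
      the last row is filled deterministically and the attempt is rejected on
      any shortfall.
    */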
    // Perform one interaction step; returns false if finished.
    bool step(){
        if(found>=30) return false;
        if(aliveList.empty()) return false;
        int qidx = pick_best_query();
        // Ask
        cout.write(all[qidx].s.data(), LEN);
        cout << '\n';
        cout.flush(); // must flush
        // Read 30 pairs (must read all)
        array<int,K> hist; hist.fill(0);
        bool bad=false;
        pair<int,int> first={-2,-2};
        for(int i=0;i<30;i++){
            int h,b;
            if(!(cin>>h>>b)) return false;
            if(i==0) first={h,b};
            if(h<0 || h>LEN || b<0 || b>LEN) bad=true; // includes the (-1,-1) error signal
            if(!bad){
                int id = HB_ID[h][b];
                if(id>=0) hist[id]++;
            }
        }
        if(bad) return false;
        if(first.first==5 && first.second==0) return false; // smallest pair is (5,0): all solved
        int c50 = hist[ID_50];
        bool new_found = (c50 > found);
        found = c50;
        // Residual for remaining unknown U
        array<int,K> resid = hist;
        resid[ID_50] -= found;
        // Add query to history
        queryCode.push_back(qidx);
        cnt.push_back(resid);
        int qid = (int)queryCode.size()-1;
        cat.emplace_back(NALL);
        auto &cq = cat.back();
        for(int i=0;i<NALL;i++) cq[i] = (uint8_t)hb_id(qidx, i);
        uint32_t zm = 0;
        for(int r=0;r<K;r++) if(resid[r]==0) zm |= (1u<<r);
        zeroMask.push_back(zm);
        // the asked code leaves U either way: discovered now, or ruled out
        asked[qidx]=1;
        if(alive[qidx]){ alive[qidx]=0; w[qidx]=0.0; }
        if(new_found) peel_new_found(qidx);
        // propagation cascade
        prune_by_mask(qid, zeroMask[qid]);
        single_query_propagate();
        if(time_ok(300.0)) run_pair_propagation();
        clean_alive();
        return found < 30;
    }
};

int main(){
    ios::sync_with_stdio(false);
    cin.tie(nullptr); // safe: we flush explicitly after every query
    Solver solver;
    while(solver.step()) {}
    return 0;
}
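/*
  Usage sketch (assumed filename and generic flags; adjust to the judge's
  environment):
    g++ -O2 -std=c++17 -o solver main.cpp
  The binary writes one 5-character query per line, flushes, and reads 30
  "hit blow" pairs per reply, repeating until every pair reads (5,0).
*/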