Result

| Item | Value |
|---|---|
| Problem | No.3418 【絶望】30個並列ごちゃ混ぜHit&Blowで遊ぼう! |
| Contest | |
| User | |
| Submitted at | 2025-12-16 03:41:58 |
| Language | C++23 (gcc 13.3.0 + boost 1.89.0) |
| Result | AC |
| Execution time | 626 ms / 5,000 ms |
| Code length | 15,199 bytes |
| Compile time | 4,651 ms |
| Compile memory | 324,608 KB |
| Runtime memory | 26,228 KB |
| Score | 9,995,843 |
| Average query count | 41.57 |
| Last judged at | 2025-12-25 01:58:13 |
| Total judge time | 54,883 ms |
| Judge server ID (reference info) | judge4 / judge3 |
| File pattern | Result |
|---|---|
| other | AC * 100 |
Source code
#include <bits/stdc++.h>
using namespace std;
/*
yukicoder interactive: 30 parallel Hit&Blow
- Keep a candidate set (initially |S| = 30240) of still-possible strings.
- Maintain for each past query q_j the histogram counts for the *remaining* unknown strings.
- Estimate existence probabilities via iterative proportional fitting (IPF / raking)
on the exponential-family model w(x) ∝ exp(sum_j theta_{j,HB(q_j,x)}).
Query the element with maximum estimated existence probability.
Extra time (up to 5s) is spent on:
- More IPF sweeps (near-converged marginals)
- Pairwise feasibility pruning using 2-query flow constraints:
given two queries A,B, a remaining solution must realize both histograms simultaneously.
For each (HB(A,x), HB(B,x)) type that cannot appear in ANY feasible subset,
we can safely discard all candidates of that type.
- Tight-bucket forcing: if for some (query, category) the number of remaining
candidates equals the judge-required count, all of them must exist (we push them to a "forced" queue).
The program auto-throttles computation based on elapsed time.
*/
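// Example of the metric used throughout: for A = "12345" and B = "13254",
// hits = 1 (the leading '1'), shared digits = 5, so blows = 5 - 1 = 4,
// i.e. HB(A, B) = (1, 4).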
static constexpr int DIG = 10;
static constexpr int LEN = 5;
static constexpr int NALL = 30240;
static constexpr int K = 21; // number of valid (hit,blow) pairs
static int HB_ID[LEN+1][LEN+1];
static int ID_50; // HB_ID[5][0]
struct Code {
array<uint8_t, LEN> d;
uint16_t mask; // 10-bit set of the digits used
};
static inline int popcnt10(uint16_t x){
return __builtin_popcount((unsigned)x);
}
static vector<Code> ALL;
static vector<string> ALL_STR;
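// HB category of code b_idx against code a_idx:
// hits  = positions where the digits agree,
// blows = shared digits placed differently = popcount(maskA & maskB) - hits.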
static inline uint8_t hb_id(int a_idx, int b_idx){
const auto &A = ALL[a_idx];
const auto &B = ALL[b_idx];
int hits = 0;
for(int i=0;i<LEN;i++) hits += (A.d[i] == B.d[i]);
int common = popcnt10((uint16_t)(A.mask & B.mask));
int blow = common - hits;
return (uint8_t)HB_ID[hits][blow];
}
// Dinic maxflow for small networks
struct Dinic {
struct Edge{ int to, rev; int cap; };
int n;
vector<vector<Edge>> g;
vector<int> level, it;
Dinic(int n_=0){ init(n_); }
void init(int n_){
n = n_;
g.assign(n, {});
level.assign(n, 0);
it.assign(n, 0);
}
void add_edge(int fr,int to,int cap){
Edge a{to, (int)g[to].size(), cap};
Edge b{fr, (int)g[fr].size(), 0};
g[fr].push_back(a);
g[to].push_back(b);
}
bool bfs(int s,int t){
fill(level.begin(), level.end(), -1);
queue<int> q;
level[s] = 0;
q.push(s);
while(!q.empty()){
int v=q.front(); q.pop();
for(const auto &e: g[v]){
if(e.cap>0 && level[e.to]<0){
level[e.to]=level[v]+1;
q.push(e.to);
}
}
}
return level[t]>=0;
}
int dfs(int v,int t,int f){
if(v==t) return f;
for(int &i=it[v]; i<(int)g[v].size(); i++){
Edge &e = g[v][i];
if(e.cap<=0 || level[e.to]!=level[v]+1) continue;
int ret = dfs(e.to, t, min(f, e.cap));
if(ret>0){
e.cap -= ret;
g[e.to][e.rev].cap += ret;
return ret;
}
}
return 0;
}
int maxflow(int s,int t){
int flow=0;
while(bfs(s,t)){
fill(it.begin(), it.end(), 0);
while(true){
int f = dfs(s, t, INT_MAX);
if(!f) break;
flow += f;
}
}
return flow;
}
};
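// Dinic only ever sees tiny networks here: source + K supply nodes +
// K demand nodes + sink = 2*K + 2 = 44 vertices, so each feasibility
// check in pair_prune below is cheap.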
struct Timer {
chrono::steady_clock::time_point st;
Timer(){ st = chrono::steady_clock::now(); }
double ms() const {
auto now = chrono::steady_clock::now();
return chrono::duration<double, milli>(now - st).count();
}
};
int main(){
ios::sync_with_stdio(false);
cin.tie(nullptr);
// Build HB_ID mapping
for(int h=0; h<=LEN; h++) for(int b=0; b<=LEN; b++) HB_ID[h][b] = -1;
int kk=0;
for(int h=0; h<=LEN; h++) for(int b=0; b<=LEN-h; b++) HB_ID[h][b] = kk++;
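// kk ends at 21 == K: sum over h = 0..5 of (6 - h) valid blow values.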
ID_50 = HB_ID[5][0];
// Generate all 30240 strings
ALL.reserve(NALL);
ALL_STR.reserve(NALL);
for(int a=0;a<10;a++) for(int b=0;b<10;b++) if(b!=a)
for(int c=0;c<10;c++) if(c!=a&&c!=b)
for(int d=0;d<10;d++) if(d!=a&&d!=b&&d!=c)
for(int e=0;e<10;e++) if(e!=a&&e!=b&&e!=c&&e!=d){
Code x;
x.d = {(uint8_t)a,(uint8_t)b,(uint8_t)c,(uint8_t)d,(uint8_t)e};
x.mask = (uint16_t)((1u<<a)|(1u<<b)|(1u<<c)|(1u<<d)|(1u<<e));
ALL.push_back(x);
string s;
s.push_back('0'+a);
s.push_back('0'+b);
s.push_back('0'+c);
s.push_back('0'+d);
s.push_back('0'+e);
ALL_STR.push_back(s);
}
// State
vector<uint8_t> alive(NALL, 1); // still a possible unknown secret
vector<uint8_t> asked(NALL, 0); // already used as a query
vector<double> w(NALL, 1.0); // IPF weight: estimated probability of being a remaining secret
vector<int> alive_list(NALL); // indices i with alive[i] == 1
iota(alive_list.begin(), alive_list.end(), 0);
int found = 0;
// History
vector<int> queries; // queried code index
vector<array<int,K>> cnt; // residual counts for remaining unknowns
vector<vector<uint8_t>> cats; // cats[qid][i] = HB(query[qid], i)
vector<uint32_t> zeroMask; // bitmask of categories with zero remaining
deque<int> forced; // candidates proven to exist
vector<uint8_t> in_forced(NALL, 0);
Timer timer;
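// Rescale weights so they sum to n_rem, i.e. the expected number of
// secrets still hidden among the alive candidates.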
auto normalize = [&](int n_rem){
if(alive_list.empty()) return;
double sum=0.0;
for(int i: alive_list) sum += w[i];
if(sum<=0) {
double uni = (double)n_rem / (double)alive_list.size();
for(int i: alive_list) w[i] = uni;
return;
}
double sc = (double)n_rem / sum;
for(int i: alive_list) w[i] *= sc;
};
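// Iterative proportional fitting (raking): each sweep visits every past
// query and multiplies the weight of each alive candidate by
// cnt[q][r] / (current weighted mass of its category r), pulling the
// weighted marginals toward the judge-reported histograms.  A damping
// exponent < 1 softens each multiplicative update and reduces the
// dependence on sweep order.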
auto ipf = [&](int n_rem, int sweeps, double damp){
if(queries.empty() || alive_list.empty()) return;
for(int it=0; it<sweeps; it++){
normalize(n_rem);
for(int q=0; q<(int)queries.size(); q++){
double sum[K];
for(int r=0;r<K;r++) sum[r]=0.0;
const auto &catq = cats[q];
for(int i: alive_list) sum[catq[i]] += w[i];
double fac[K];
for(int r=0;r<K;r++){
if(sum[r] > 0){
fac[r] = (double)cnt[q][r] / sum[r];
}else{
// No alive candidates in this category.
// If judge says 0: keep factor 1 (no effect). Otherwise infeasible; set 0.
fac[r] = (cnt[q][r]==0 ? 1.0 : 0.0);
}
if(damp!=1.0) fac[r] = pow(fac[r], damp);
}
for(int i: alive_list) w[i] *= fac[catq[i]];
}
}
normalize(n_rem);
};
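// Permanently remove every alive candidate whose HB category against
// query qid lies in maskbits (categories whose residual count is zero).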
auto prune_by_mask = [&](int qid, uint32_t maskbits){
if(maskbits==0) return;
const auto &catq = cats[qid];
vector<int> nxt;
nxt.reserve(alive_list.size());
for(int i: alive_list){
if(!alive[i]) continue;
if(maskbits & (1u<<catq[i])){
alive[i]=0;
w[i]=0.0;
}else nxt.push_back(i);
}
alive_list.swap(nxt);
};
// Tight-bucket forcing: if for some query/category, #alive == required count -> all are forced.
auto extract_forced = [&](){
if(alive_list.empty()) return;
// Only do this when we still have time.
if(timer.ms() > 4800) return;
array<vector<int>, K> buckets;
for(int q=0; q<(int)queries.size(); q++){
for(int r=0;r<K;r++) buckets[r].clear();
const auto &catq = cats[q];
for(int i: alive_list){
buckets[catq[i]].push_back(i);
}
for(int r=0;r<K;r++){
int need = cnt[q][r];
if(need>0 && (int)buckets[r].size()==need){
for(int idx: buckets[r]){
if(alive[idx] && !asked[idx] && !in_forced[idx]){
in_forced[idx]=1;
forced.push_back(idx);
}
}
}
}
}
};
// Pairwise feasibility pruning for 2 queries (qa,qb): discard HB-type (ra,rb) that can never appear.
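// Transportation model: left nodes are the HB categories of qa with
// supplies cnt[qa][*], right nodes are the categories of qb with demands
// cnt[qb][*], and edge (a,b) has capacity = #alive candidates of joint
// type (a,b).  The n_rem unknown secrets must route exactly n_rem units;
// a joint type (ra,rb) is impossible iff no feasible routing can push at
// least one unit through edge (ra,rb).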
auto pair_prune = [&](int qa, int qb, int n_rem, int max_checks){
if(qa==qb || n_rem<=0 || alive_list.empty()) return;
if(timer.ms() > 4700) return;
static int cap[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) cap[a][b]=0;
// Build type capacities
for(int idx: alive_list){
int a = cats[qa][idx];
int b = cats[qb][idx];
cap[a][b]++;
}
// candidate types to check
vector<pair<int,int>> types;
types.reserve(K*K);
for(int a=0;a<K;a++) for(int b=0;b<K;b++){
if(cap[a][b]==0) continue;
if(cnt[qa][a]==0 || cnt[qb][b]==0) continue;
types.push_back({a,b});
}
// Heuristic: smaller cap first (often becomes impossible), but we will stop by time anyway.
sort(types.begin(), types.end(), [&](auto &x, auto &y){
return cap[x.first][x.second] < cap[y.first][y.second];
});
if((int)types.size() > max_checks) types.resize(max_checks);
static uint8_t impossible[K][K];
for(int a=0;a<K;a++) for(int b=0;b<K;b++) impossible[a][b]=0;
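// Feasibility with a lower bound of one unit on edge (ra,rb): consume
// that unit up-front (one supply at ra, one demand at rb, one capacity
// on the edge), then check that the remaining n_rem - 1 units still
// saturate the reduced supplies and demands via maxflow.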
auto feasible_with_lb = [&](int ra,int rb)->bool{
if(cnt[qa][ra] <= 0 || cnt[qb][rb] <= 0) return false;
if(cap[ra][rb] <= 0) return false;
// Fix 1 flow on (ra,rb)
int need = n_rem - 1;
if(need < 0) return true;
// Build flow network
int S = 0;
int L0 = 1;
int R0 = 1 + K;
int T = 1 + K + K;
Dinic mf(T+1);
// Supplies (with one consumed at ra)
for(int a=0;a<K;a++){
int sup = cnt[qa][a] - (a==ra ? 1 : 0);
if(sup>0) mf.add_edge(S, L0+a, sup);
}
// Demands (with one consumed at rb)
for(int b=0;b<K;b++){
int dem = cnt[qb][b] - (b==rb ? 1 : 0);
if(dem>0) mf.add_edge(R0+b, T, dem);
}
// Inner edges (cap reduced by 1 on (ra,rb))
for(int a=0;a<K;a++){
for(int b=0;b<K;b++){
int c = cap[a][b] - ((a==ra && b==rb) ? 1 : 0);
if(c>0) mf.add_edge(L0+a, R0+b, c);
}
}
int f = mf.maxflow(S,T);
return f==need;
};
for(auto [ra,rb] : types){
if(timer.ms() > 4700) break;
if(!feasible_with_lb(ra,rb)){
impossible[ra][rb] = 1;
}
}
// prune candidates in impossible types
vector<int> nxt;
nxt.reserve(alive_list.size());
for(int idx: alive_list){
int ra=cats[qa][idx];
int rb=cats[qb][idx];
if(impossible[ra][rb]){
alive[idx]=0;
w[idx]=0.0;
}else nxt.push_back(idx);
}
alive_list.swap(nxt);
};
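// Pick the next query: a pending forced candidate if available,
// otherwise the alive candidate with the highest IPF-estimated
// existence probability.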
auto choose_query = [&](int n_rem)->int{
// Clean forced queue
while(!forced.empty()){
int x = forced.front();
if(alive[x] && !asked[x]) return x;
forced.pop_front();
}
// Spend time to sharpen estimation when we have enough budget.
double t = timer.ms();
int sweeps;
double damp;
if(t < 1000) { sweeps = 60; damp = 0.85; }
else if(t < 2500) { sweeps = 50; damp = 0.85; }
else if(t < 4000) { sweeps = 30; damp = 0.90; }
else { sweeps = 10; damp = 0.95; }
ipf(n_rem, sweeps, damp);
int best = alive_list[0];
double bw = w[best];
for(int i: alive_list){
if(w[i] > bw){ bw = w[i]; best = i; }
}
return best;
};
// Main interactive loop
while(true){
if(found >= 30) break;
if(alive_list.empty()){
// Should not happen; fallback
break;
}
int n_rem = 30 - found;
int qidx = choose_query(n_rem);
// Ask
cout << ALL_STR[qidx] << "\n";
cout.flush(); // flushing after every query is required by the interactive judge
// Read 30 pairs (must read all) and build histogram
array<int,K> hist{}; // value-initialised: all zero
bool bad = false;
int H0=-2,B0=-2;
for(int i=0;i<30;i++){
int h,b;
if(!(cin >> h >> b)) return 0; // EOF
if(i==0){ H0=h; B0=b; }
if(h==-1 && b==-1) bad = true;
if(h>=0 && h<=5 && b>=0 && b<=5-h){
hist[HB_ID[h][b]]++;
}
}
if(bad){
// Already judged wrong
return 0;
}
if(H0==5 && B0==0){
// Solved
return 0;
}
int c50 = hist[ID_50]; // solved games keep replying (5,0), so this counts every secret found so far
bool new_found = (c50 > found); // strictly more (5,0)s than before => this query hit a new secret
// Store query info
queries.push_back(qidx);
array<int,K> resid = hist;
resid[ID_50] -= c50; // remove already-found ones; remaining unknowns never contribute (5,0)
cnt.push_back(resid);
// Prepare cats for this query
cats.emplace_back(NALL);
int qid = (int)queries.size()-1;
for(int i=0;i<NALL;i++){
cats[qid][i] = hb_id(qidx, i);
}
// Mark asked/removed
asked[qidx]=1;
alive[qidx]=0;
w[qidx]=0.0;
// Update found
if(new_found) found = c50;
// This query's zero categories => prune
uint32_t z=0;
for(int r=0;r<K;r++) if(cnt[qid][r]==0) z |= (1u<<r);
zeroMask.push_back(z);
prune_by_mask(qid, z);
// Peel the newly found code out of every earlier histogram: the queried
// code itself must be the new (5,0), so decrement its category in each
// past query and prune any category that drops to zero.
if(new_found){
int sidx = qidx;
for(int q=0;q<qid;q++){
int cid = cats[q][sidx];
cnt[q][cid]--;
if(cnt[q][cid]==0){
uint32_t bit = (1u<<cid);
if(!(zeroMask[q] & bit)){
zeroMask[q] |= bit;
prune_by_mask(q, bit);
}
}
}
}
// Extra pruning work (time-bounded)
if(timer.ms() < 4300 && (int)queries.size() >= 2){
int last = (int)queries.size()-1;
int nrem2 = 30 - found;
// A few pairs involving the newest query
const int checks = (timer.ms() < 1500 ? 260 : (timer.ms() < 3000 ? 180 : 80));
pair_prune(0, last, nrem2, checks);
if(last>=1) pair_prune(1, last, nrem2, checks);
if(last>=2) pair_prune(last-1, last, nrem2, checks);
}
// Try to extract forced elements
extract_forced();
}
return 0;
}