Result
| Problem | No.3418 【絶望】30個並列ごちゃ混ぜHit&Blowで遊ぼう! |
| Contest | |
| User | |
| Submission time | 2025-12-16 01:53:12 |
| Language | C++23 (gcc 13.3.0 + boost 1.89.0) |
| Result | AC |
| Execution time | 942 ms / 5,000 ms |
| Code length | 11,461 bytes |
| Record | |
| Compile time | 2,867 ms |
| Compile memory | 218,628 KB |
| Runtime memory | 153,268 KB |
| Score | 9,991,214 |
| Average number of queries | 87.86 |
| Last judged at | 2025-12-25 01:57:14 |
| Total judge time | 89,096 ms |
| Judge server ID (reference) | judge5 / judge3 |
| File pattern | Result |
|---|---|
| other | AC * 100 |
Source code
/*
* Problem: Advent Calendar Contest 2025 - 30 Parallel Hit & Blow
* Strategy: Entropy-style greedy query selection with candidate filtering.
*
* Approach:
* 1. Maintain the list of all candidates that are still consistent with every response (probability > 0).
* 2. Choose the next query from those candidates: among a sampled pool, pick the one that best
*    distinguishes the remaining candidates (maximum information gain, i.e. minimum collision count
*    of the predicted Hit/Blow distribution).
* 3. Filter candidates against the aggregated response:
*    if a candidate 'c' would produce a Hit/Blow pair not present among the entries belonging to the
*    not-yet-found hidden strings, 'c' is impossible. A worked example follows this comment block.
* 4. Dynamic time management:
*    a full evaluation is O(N^2) in the candidate count, so queries are scored on a random sample
*    whose size shrinks as the 5,000 ms limit approaches.
*/
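/*
* Illustrative example of the filtering rule in step 3 (numbers invented for illustration, not taken
* from an actual test case): suppose 28 of the 30 strings are already found and the query "01234" is
* asked. The aggregated response then contains 28 entries of (5,0) plus, say, (1,2) and (0,1) for the
* two unknown strings. After the 28 known (5,0) entries are removed, a candidate c with
* calc_hb(c, "01234") == (2,0) matches neither remaining entry and is discarded, while a candidate
* with calc_hb(c, "01234") == (1,2) survives this round.
*/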
#include <iostream>
#include <vector>
#include <string>
#include <algorithm>
#include <map>
#include <set>
#include <chrono>
#include <random>
#include <cmath>
#include <iomanip>
using namespace std;
// === Constants & Globals ===
const int TIME_LIMIT_MS = 4800; // Safe margin for 5000ms
const int SAMPLE_SIZE_LIMIT = 150; // Max sample size for entropy check to save time
chrono::system_clock::time_point start_time;
// === Helper Functions ===
// Check if string has unique characters
bool has_unique_chars(const string& s) {
int mask = 0;
for (char c : s) {
int bit = 1 << (c - '0');
if (mask & bit) return false;
mask |= bit;
}
return true;
}
// Generate all 10P5 = 30240 five-digit strings with pairwise distinct digits
vector<string> generate_all_candidates() {
vector<string> candidates;
candidates.reserve(30240);
// Recursive generation: extend the prefix with one unused digit at a time, tracking used digits in a bitmask.
auto dfs = [&](auto self, string current, int mask) -> void {
if (current.size() == 5) {
candidates.push_back(current);
return;
}
for (int i = 0; i <= 9; ++i) {
if (!(mask & (1 << i))) {
self(self, current + to_string(i), mask | (1 << i));
}
}
};
dfs(dfs, "", 0);
return candidates;
}
// Compute Hit and Blow
pair<int, int> calc_hb(const string& a, const string& b) {
int hit = 0, blow = 0;
// Using bitmask for blow calc speedup
int mask_a = 0;
int mask_b = 0;
for (int i = 0; i < 5; ++i) {
if (a[i] == b[i]) {
hit++;
} else {
mask_a |= (1 << (a[i] - '0'));
mask_b |= (1 << (b[i] - '0'));
}
}
// Count set bits in intersection
int common = mask_a & mask_b;
// __builtin_popcount is GCC specific, standard bitset or loop works
while (common) {
if (common & 1) blow++;
common >>= 1;
}
return {hit, blow};
}
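/*
* Hand-computed sanity check for calc_hb (illustrative, not part of the original code):
* calc_hb("01234", "04521") -> position 0 matches ('0' vs '0'), so hit = 1; of the remaining digits,
* '1', '2' and '4' from the first string also occur in the second string at different positions,
* so blow = 3; the result is {1, 3}.
*/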
// === Solver Class ===
class Solver {
vector<string> candidates;
int found_count = 0;
mt19937 rng;
public:
Solver() : rng(777) {
candidates = generate_all_candidates();
}
void solve() {
start_time = chrono::system_clock::now();
while (found_count < 30) {
// 1. Select best query
string query = select_best_query();
// 2. Output query
cout << query << endl;
// 3. Receive response
// Response is sorted list of 30 pairs
vector<pair<int, int>> response(30);
int exact_matches_in_response = 0;
for (int i = 0; i < 30; ++i) {
cin >> response[i].first >> response[i].second;
if (response[i].first == 5 && response[i].second == 0) {
exact_matches_in_response++;
}
}
// Check system termination signals (all -1)
if (response[0].first == -1) exit(0);
// 4. Update found status
// The response contains (5,0) for all previously found strings + potentially the current one
if (exact_matches_in_response > found_count) {
found_count++;
}
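// Bookkeeping example (hypothetical numbers): if 5 strings were already found and the current query
// equals one of the hidden strings, the response now contains six (5,0) entries, so
// exact_matches_in_response (6) exceeds the old found_count (5) and found_count becomes 6.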
if (found_count == 30) return;
// 5. Filter candidates.
// The response describes the hidden strings {S0..S29}. If k strings have been found so far
// (possibly including one just found by the current query), exactly k entries are (5,0); the other
// entries belong to the still-unknown strings, and only those are used for filtering.
// Logic: build a multiset of HB pairs from the response, then remove 'found_count' copies of (5,0).
map<pair<int, int>, int> response_counts;
for (auto p : response) {
response_counts[p]++;
}
// Remove the (5,0) entries that correspond to already-found strings (including the current query if
// it just scored a new exact match). found_count has already been updated above, so the response
// contains exactly found_count entries of (5,0); the remaining 30 - found_count entries describe the
// strings that are still unknown, and only those constrain the surviving candidates.
int entries_to_ignore = found_count;
if (response_counts.count({5, 0})) {
response_counts[{5, 0}] -= entries_to_ignore;
if (response_counts[{5, 0}] <= 0) response_counts.erase({5, 0});
}
vector<string> next_candidates;
next_candidates.reserve(candidates.size());
// Drop the current query from the candidate set either way: if it scored (5,0) it is already found,
// and otherwise it cannot be any of the remaining hidden strings, so it is never asked again.
for (const string& cand : candidates) {
if (cand == query) continue; // Remove current query
pair<int, int> hb = calc_hb(cand, query);
// If 'cand' is one of the remaining hidden strings, its HB value against the query MUST appear in
// the remaining response_counts. The exact 1-to-1 assignment is unknown, so this existence check is
// a necessary (but not sufficient) condition.
if (response_counts.count(hb)) {
next_candidates.push_back(cand);
}
}
candidates = next_candidates;
}
}
private:
string select_best_query() {
// If at most one candidate remains it must be the answer (the list is assumed never to become empty).
if (candidates.size() <= 1) return candidates[0];
// Check time
auto now = chrono::system_clock::now();
double elapsed = chrono::duration_cast<chrono::milliseconds>(now - start_time).count();
// If time is tight or too many candidates, simplify.
// If candidates are huge, O(N^2) is impossible.
// Strategy: Sample a subset of candidates to be 'potential queries'.
// For each potential query, measure how well it splits the current 'candidates'.
// A full pass over all pairs at ~3000 candidates already costs ~9M calc_hb calls per selection,
// so the query pool below is capped by sampling once the candidate set or the elapsed time grows.
int sample_size = (elapsed > 4000) ? 1 :
(elapsed > 3000) ? 20 :
(candidates.size() > 500) ? 50 : candidates.size();
if (elapsed > 4500) sample_size = 1; // Emergency exit
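// Rough cost estimate behind these thresholds (back-of-the-envelope, not measured): early on there
// are up to 30,240 candidates and the pool holds 50 queries, so one selection costs about
// 50 * 30,240 = 1.5M calc_hb calls; as elapsed time grows the pool shrinks to 20 and finally to a
// single query, capping the per-turn work.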
vector<string> query_pool;
if (candidates.size() <= sample_size) {
query_pool = candidates;
} else {
// Random sampling with replacement; occasional duplicate picks only waste a little scoring work.
for(int i=0; i<sample_size; ++i) {
int idx = uniform_int_distribution<int>(0, candidates.size()-1)(rng);
query_pool.push_back(candidates[idx]);
}
}
string best_q = query_pool[0];
double min_collision_score = 1e18; // We want to minimize collisions (sum of sq sizes) -> Maximize Entropy
// If very fast mode, just return first
if (sample_size == 1) return best_q;
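// Rationale for the sum-of-squares score (the standard argument for single-secret Hit & Blow, used
// here as a heuristic proxy for the 30-secret variant): if query q splits the N current candidates
// into response classes of sizes n_r and the secret is uniform over the candidates, the expected
// number of candidates surviving the next filter is sum_r n_r^2 / N. A smaller sum of squares
// therefore means a smaller expected remaining pool (a flatter, higher-entropy split).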
for (const string& q : query_pool) {
// Calculate distribution of HB values against all candidates
// We use a flat array for speed: H and B each lie in 0..5, so index = H*6 + B (range 0..35).
int counts[36] = {0};
for (const string& c : candidates) {
// q itself would score (5,0), but q is removed from the candidate set after being asked,
// so evaluate q against the other candidates only.
if (c == q) continue;
pair<int, int> hb = calc_hb(c, q);
counts[hb.first * 6 + hb.second]++;
}
// Calculate score: Sum of squares (Collision count)
// Lower is better (flatter distribution)
long long current_score = 0;
for (int k = 0; k < 36; ++k) {
if (counts[k] > 0) {
current_score += (long long)counts[k] * counts[k];
}
}
if (current_score < min_collision_score) {
min_collision_score = current_score;
best_q = q;
}
}
return best_q;
}
};
int main() {
// Optimization for fast IO
cin.tie(NULL);
ios_base::sync_with_stdio(false);
Solver solver;
solver.solve();
return 0;
}