diff --git a/src/search.cpp b/src/search.cpp
index c9832c8787b..b82217157a5 100644
--- a/src/search.cpp
+++ b/src/search.cpp
@@ -94,7 +94,7 @@ int correction_value(const Worker& w, const Position& pos, const Stack* const ss
       m.is_ok() ? (*(ss - 2)->continuationCorrectionHistory)[pos.piece_on(m.to_sq())][m.to_sq()]
                 : 0;
 
-    return 7037 * pcv + 6671 * micv + 7631 * (wnpcv + bnpcv) + 6362 * cntcv;
+    return 6995 * pcv + 6593 * micv + 7753 * (wnpcv + bnpcv) + 6049 * cntcv;
 }
 
 // Add correctionHistory value to raw staticEval and guarantee evaluation
@@ -110,11 +110,11 @@ void update_correction_history(const Position& pos,
     const Move  m  = (ss - 1)->currentMove;
     const Color us = pos.side_to_move();
 
-    static constexpr int nonPawnWeight = 159;
+    static constexpr int nonPawnWeight = 165;
 
     workerThread.pawnCorrectionHistory[pawn_structure_index(pos)][us]
-      << bonus * 104 / 128;
-    workerThread.minorPieceCorrectionHistory[minor_piece_index(pos)][us] << bonus * 145 / 128;
+      << bonus * 109 / 128;
+    workerThread.minorPieceCorrectionHistory[minor_piece_index(pos)][us] << bonus * 141 / 128;
     workerThread.nonPawnCorrectionHistory[WHITE][non_pawn_index(pos)][us]
       << bonus * nonPawnWeight / 128;
     workerThread.nonPawnCorrectionHistory[BLACK][non_pawn_index(pos)][us]
@@ -122,14 +122,14 @@ void update_correction_history(const Position& pos,
 
     if (m.is_ok())
         (*(ss - 2)->continuationCorrectionHistory)[pos.piece_on(m.to_sq())][m.to_sq()]
-          << bonus * 146 / 128;
+          << bonus * 138 / 128;
 }
 
 // History and stats update bonus, based on depth
-int stat_bonus(Depth d) { return std::min(154 * d - 102, 1661); }
+int stat_bonus(Depth d) { return std::min(158 * d - 98, 1622); }
 
 // History and stats update malus, based on depth
-int stat_malus(Depth d) { return std::min(831 * d - 269, 2666); }
+int stat_malus(Depth d) { return std::min(802 * d - 243, 2850); }
 
 // Add a small random component to draw evaluations to avoid 3-fold blindness
 Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
@@ -307,7 +307,7 @@ void Search::Worker::iterative_deepening() {
 
     int searchAgainCounter = 0;
 
-    lowPlyHistory.fill(97);
+    lowPlyHistory.fill(95);
 
     // Iterative deepening loop until requested to stop or the target depth is reached
     while (++rootDepth < MAX_PLY && !threads.stop
@@ -343,13 +343,13 @@ void Search::Worker::iterative_deepening() {
             selDepth = 0;
 
             // Reset aspiration window starting size
-            delta     = 5 + std::abs(rootMoves[pvIdx].meanSquaredScore) / 12991;
+            delta     = 5 + std::abs(rootMoves[pvIdx].meanSquaredScore) / 13000;
             Value avg = rootMoves[pvIdx].averageScore;
             alpha     = std::max(avg - delta, -VALUE_INFINITE);
             beta      = std::min(avg + delta, VALUE_INFINITE);
 
             // Adjust optimism based on root move's averageScore
-            optimism[us]  = 141 * avg / (std::abs(avg) + 83);
+            optimism[us]  = 138 * avg / (std::abs(avg) + 81);
             optimism[~us] = -optimism[us];
 
             // Start with a small aspiration window and, in the case of a fail
@@ -533,11 +533,11 @@ void Search::Worker::iterative_deepening() {
 
 // Reset histories, usually before a new game
 void Search::Worker::clear() {
-    mainHistory.fill(63);
-    lowPlyHistory.fill(108);
-    captureHistory.fill(-631);
-    pawnHistory.fill(-1210);
-    pawnCorrectionHistory.fill(0);
+    mainHistory.fill(65);
+    lowPlyHistory.fill(107);
+    captureHistory.fill(-655);
+    pawnHistory.fill(-1215);
+    pawnCorrectionHistory.fill(4);
     minorPieceCorrectionHistory.fill(0);
     nonPawnCorrectionHistory[WHITE].fill(0);
     nonPawnCorrectionHistory[BLACK].fill(0);
@@ -550,10 +550,10 @@ void Search::Worker::clear() {
         for (StatsType c : {NoCaptures, Captures})
             for (auto& to : continuationHistory[inCheck][c])
                 for (auto& h : to)
-                    h.fill(-479);
+                    h.fill(-493);
 
     for (size_t i = 1; i < reductions.size(); ++i)
-        reductions[i] = int(2143 / 100.0 * std::log(i));
+        reductions[i] = int(2937 / 128.0 * std::log(i));
 
     refreshTable.clear(networks[numaAccessToken]);
 }
@@ -679,12 +679,12 @@ Value Search::Worker::search(
         {
            // Bonus for a quiet ttMove that fails high
            if (!ttCapture)
-                update_quiet_histories(pos, ss, *this, ttData.move, stat_bonus(depth) * 746 / 1024);
+                update_quiet_histories(pos, ss, *this, ttData.move, stat_bonus(depth) * 784 / 1024);
 
            // Extra penalty for early quiet moves of the previous ply
-            if (prevSq != SQ_NONE && (ss - 1)->moveCount <= 2 && !priorCapture)
+            if (prevSq != SQ_NONE && (ss - 1)->moveCount <= 3 && !priorCapture)
                update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
-                                              -stat_malus(depth + 1) * 1042 / 1024);
+                                              -stat_malus(depth + 1) * 1018 / 1024);
         }
 
         // Partial workaround for the graph history interaction problem
@@ -791,11 +791,11 @@ Value Search::Worker::search(
     // Use static evaluation difference to improve quiet move ordering
     if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
     {
-        int bonus = std::clamp(-10 * int((ss - 1)->staticEval + ss->staticEval), -1881, 1413) + 616;
-        thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus * 1151 / 1024;
+        int bonus = std::clamp(-10 * int((ss - 1)->staticEval + ss->staticEval), -1906, 1450) + 638;
+        thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus * 1136 / 1024;
         if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
             thisThread->pawnHistory[pawn_structure_index(pos)][pos.piece_on(prevSq)][prevSq]
-              << bonus * 1107 / 1024;
+              << bonus * 1195 / 1024;
     }
 
     // Set up the improving flag, which is true if current static evaluation is
@@ -804,7 +804,7 @@ Value Search::Worker::search(
     // false otherwise. The improving flag is used in various pruning heuristics.
     improving = ss->staticEval > (ss - 2)->staticEval;
 
-    opponentWorsening = ss->staticEval + (ss - 1)->staticEval > 2;
+    opponentWorsening = ss->staticEval + (ss - 1)->staticEval > 5;
 
     if (priorReduction >= 3 && !opponentWorsening)
         depth++;
@@ -812,27 +812,27 @@ Value Search::Worker::search(
     // Step 7. Razoring
     // If eval is really low, skip search entirely and return the qsearch value.
     // For PvNodes, we must have a guard against mates being returned.
-    if (!PvNode && eval < alpha - 462 - 297 * depth * depth)
+    if (!PvNode && eval < alpha - 446 - 303 * depth * depth)
         return qsearch(pos, ss, alpha, beta);
 
     // Step 8. Futility pruning: child node
     // The depth condition is important for mate finding.
     if (!ss->ttPv && depth < 14
         && eval - futility_margin(depth, cutNode && !ss->ttHit, improving, opponentWorsening)
-               - (ss - 1)->statScore / 310 + 40 - std::abs(correctionValue) / 131072
+               - (ss - 1)->statScore / 326 + 37 - std::abs(correctionValue) / 132821
             >= beta
         && eval >= beta && (!ttData.move || ttCapture) && !is_loss(beta) && !is_win(eval))
        return beta + (eval - beta) / 3;
 
     // Step 9. Null move search with verification search
     if (cutNode && (ss - 1)->currentMove != Move::null() && eval >= beta
-        && ss->staticEval >= beta - 20 * depth + 470 - 60 * improving && !excludedMove
+        && ss->staticEval >= beta - 21 * depth + 455 - 60 * improving && !excludedMove
         && pos.non_pawn_material(us) && ss->ply >= thisThread->nmpMinPly && !is_loss(beta))
     {
         assert(eval - beta >= 0);
 
         // Null move dynamic reduction based on depth and eval
-        Depth R = std::min(int(eval - beta) / 215, 7) + depth / 3 + 5;
+        Depth R = std::min(int(eval - beta) / 237, 6) + depth / 3 + 5;
 
         ss->currentMove         = Move::null();
         ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -870,13 +870,13 @@ Value Search::Worker::search(
     // Step 10. Internal iterative reductions
     // For PV nodes without a ttMove as well as for deep enough cutNodes, we decrease depth.
     // (* Scaler) Especially if they make IIR more aggressive.
-    if (((PvNode || cutNode) && depth >= 7 - 4 * PvNode) && !ttData.move)
-        depth -= 2;
+    if (((PvNode || cutNode) && depth >= 7 - 3 * PvNode) && !ttData.move)
+        depth--;
 
     // Step 11. ProbCut
     // If we have a good enough capture (or queen promotion) and a reduced search
     // returns a value much above beta, we can (almost) safely prune the previous move.
-    probCutBeta = beta + 174 - 56 * improving;
+    probCutBeta = beta + 187 - 55 * improving;
     if (depth >= 3 && !is_decisive(beta)
 
         // If value from transposition table is lower than probCutBeta, don't attempt
@@ -939,7 +939,7 @@ Value Search::Worker::search(
 moves_loop:  // When in check, search starts here
 
     // Step 12. A small Probcut idea
-    probCutBeta = beta + 412;
+    probCutBeta = beta + 413;
     if ((ttData.bound & BOUND_LOWER) && ttData.depth >= depth - 4 && ttData.value >= probCutBeta
         && !is_decisive(beta) && is_valid(ttData.value) && !is_decisive(ttData.value))
         return probCutBeta;
@@ -1005,7 +1005,7 @@ Value Search::Worker::search(
         // Smaller or even negative value is better for short time controls
         // Bigger value is better for long time controls
         if (ss->ttPv)
-            r += 1024;
+            r += 1031;
 
         // Step 14. Pruning at shallow depth.
         // Depth conditions are important for mate finding.
@@ -1027,15 +1027,15 @@ Value Search::Worker::search(
                // Futility pruning for captures
                if (!givesCheck && lmrDepth < 7 && !ss->inCheck)
                {
-                    Value futilityValue = ss->staticEval + 271 + 243 * lmrDepth
-                                        + PieceValue[capturedPiece] + captHist / 7;
+                    Value futilityValue = ss->staticEval + 242 + 238 * lmrDepth
+                                        + PieceValue[capturedPiece] + 95 * captHist / 700;
                    if (futilityValue <= alpha)
                        continue;
                }
 
                // SEE based pruning for captures and checks
-                int seeHist = std::clamp(captHist / 37, -152 * depth, 141 * depth);
-                if (!pos.see_ge(move, -156 * depth - seeHist))
+                int seeHist = std::clamp(captHist / 36, -153 * depth, 134 * depth);
+                if (!pos.see_ge(move, -157 * depth - seeHist))
                    continue;
            }
            else
@@ -1046,14 +1046,14 @@ Value Search::Worker::search(
                  + thisThread->pawnHistory[pawn_structure_index(pos)][movedPiece][move.to_sq()];
 
                // Continuation history based pruning
-                if (history < -3901 * depth)
+                if (history < -4107 * depth)
                    continue;
 
-                history += 2 * thisThread->mainHistory[us][move.from_to()];
+                history += 68 * thisThread->mainHistory[us][move.from_to()] / 32;
 
-                lmrDepth += history / 3459;
+                lmrDepth += history / 3576;
 
-                Value futilityValue = ss->staticEval + (bestMove ? 47 : 137) + 142 * lmrDepth;
+                Value futilityValue = ss->staticEval + (bestMove ? 49 : 135) + 150 * lmrDepth;
 
                // Futility pruning: parent node
                if (!ss->inCheck && lmrDepth < 12 && futilityValue <= alpha)
@@ -1067,7 +1067,7 @@ Value Search::Worker::search(
                lmrDepth = std::max(lmrDepth, 0);
 
                // Prune moves with negative SEE
-                if (!pos.see_ge(move, -25 * lmrDepth * lmrDepth))
+                if (!pos.see_ge(move, -26 * lmrDepth * lmrDepth))
                    continue;
            }
        }
@@ -1087,11 +1087,11 @@ Value Search::Worker::search(
        // and lower extension margins scale well.
 
        if (!rootNode && move == ttData.move && !excludedMove
-            && depth >= 5 - (thisThread->completedDepth > 33) + ss->ttPv
+            && depth >= 5 - (thisThread->completedDepth > 32) + ss->ttPv
            && is_valid(ttData.value) && !is_decisive(ttData.value)
            && (ttData.bound & BOUND_LOWER) && ttData.depth >= depth - 3)
        {
-            Value singularBeta  = ttData.value - (52 + 74 * (ss->ttPv && !PvNode)) * depth / 64;
+            Value singularBeta  = ttData.value - (55 + 81 * (ss->ttPv && !PvNode)) * depth / 58;
            Depth singularDepth = newDepth / 2;
 
            ss->excludedMove = move;
@@ -1101,10 +1101,11 @@ Value Search::Worker::search(
 
            if (value < singularBeta)
            {
-                int corrValAdj   = std::abs(correctionValue) / 262144;
-                int doubleMargin = 249 * PvNode - 194 * !ttCapture - corrValAdj;
+                int corrValAdj1  = std::abs(correctionValue) / 265083;
+                int corrValAdj2  = std::abs(correctionValue) / 253680;
+                int doubleMargin = 267 * PvNode - 181 * !ttCapture - corrValAdj1;
                int tripleMargin =
-                  94 + 287 * PvNode - 249 * !ttCapture + 99 * ss->ttPv - corrValAdj;
+                  96 + 282 * PvNode - 250 * !ttCapture + 103 * ss->ttPv - corrValAdj2;
 
                extension =
                  1 + (value < singularBeta - doubleMargin) + (value < singularBeta - tripleMargin);
@@ -1137,13 +1138,6 @@ Value Search::Worker::search(
            else if (cutNode)
                extension = -2;
        }
-
-        // Extension for capturing the previous moved piece
-        else if (PvNode && move.to_sq() == prevSq
-                 && thisThread->captureHistory[movedPiece][move.to_sq()]
-                                              [type_of(pos.piece_on(move.to_sq()))]
-                      > 4126)
-            extension = 1;
    }
 
    // Step 16. Make the move
@@ -1164,45 +1158,45 @@ Value Search::Worker::search(
 
        // Decrease reduction for PvNodes (*Scaler)
        if (ss->ttPv)
-            r -= 2061 + (ttData.value > alpha) * 965 + (ttData.depth >= depth) * 960;
+            r -= 2230 + (ttData.value > alpha) * 925 + (ttData.depth >= depth) * 971;
 
        if (PvNode)
-            r -= 1018;
+            r -= 1013;
 
        // These reduction adjustments have no proven non-linear scaling
 
-        r += 307 - moveCount * 32;
+        r += 316 - moveCount * 63;
 
-        r -= std::abs(correctionValue) / 34112;
+        r -= std::abs(correctionValue) / 31568;
 
        // Increase reduction for cut nodes
        if (cutNode)
-            r += 2355 - (ttData.depth >= depth && ss->ttPv) * 1141;
+            r += 2608 - (ttData.depth >= depth && ss->ttPv) * 1159;
 
        // Increase reduction if ttMove is a capture but the current move is not a capture
        if (ttCapture && !capture)
-            r += 1087 + (depth < 8) * 990;
+            r += 1123 + (depth < 8) * 982;
 
        // Increase reduction if next ply has a lot of fail high
        if ((ss + 1)->cutoffCnt > 3)
-            r += 940 + allNode * 887;
+            r += 981 + allNode * 833;
 
        // For first picked move (ttMove) reduce reduction
        else if (move == ttData.move)
-            r -= 1960;
+            r -= 1982;
 
        if (capture)
            ss->statScore =
-              7 * int(PieceValue[pos.captured_piece()])
+              688 * int(PieceValue[pos.captured_piece()]) / 100
              + thisThread->captureHistory[movedPiece][move.to_sq()][type_of(pos.captured_piece())]
-              - 4666;
+              - 4653;
        else
            ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
                          + (*contHist[0])[movedPiece][move.to_sq()]
-                          + (*contHist[1])[movedPiece][move.to_sq()] - 3874;
+                          + (*contHist[1])[movedPiece][move.to_sq()] - 3591;
 
        // Decrease/increase reduction for moves with a good/bad history
-        r -= ss->statScore * 1451 / 16384;
+        r -= ss->statScore * 1407 / 16384;
 
        // Step 17. Late moves reduction / extension (LMR)
        if (depth >= 2 && moveCount > 1)
@@ -1228,8 +1222,8 @@ Value Search::Worker::search(
            {
                // Adjust full-depth search based on LMR results - if the result was
                // good enough search deeper, if it was bad enough search shallower.
-                const bool doDeeperSearch    = value > (bestValue + 40 + 2 * newDepth);
-                const bool doShallowerSearch = value < bestValue + 10;
+                const bool doDeeperSearch    = value > (bestValue + 41 + 2 * newDepth);
+                const bool doShallowerSearch = value < bestValue + 9;
 
                newDepth += doDeeperSearch - doShallowerSearch;
 
@@ -1237,7 +1231,7 @@ Value Search::Worker::search(
                    value = -search(pos, ss + 1, -(alpha + 1), -alpha, newDepth, !cutNode);
 
                // Post LMR continuation history updates
-                int bonus = (value >= beta) * 2048;
+                int bonus = (value >= beta) * 2010;
                update_continuation_histories(ss, movedPiece, move.to_sq(), bonus);
            }
        }
@@ -1247,11 +1241,11 @@ Value Search::Worker::search(
        {
            // Increase reduction if ttMove is not present
            if (!ttData.move)
-                r += 2111;
+                r += 2117;
 
            // Note that if expected reduction is high, we reduce search depth here
            value = -search(pos, ss + 1, -(alpha + 1), -alpha,
-                            newDepth - (r > 3444) - (r > 5588 && newDepth > 2), !cutNode);
+                            newDepth - (r > 3554) - (r > 5373 && newDepth > 2), !cutNode);
        }
 
        // For PV nodes only, do a full PV search on the first move or after a fail high,
@@ -1358,7 +1352,7 @@ Value Search::Worker::search(
            else
            {
                // Reduce other moves if we have found at least one score improvement
-                if (depth > 2 && depth < 14 && !is_decisive(value))
+                if (depth > 2 && depth < 15 && !is_decisive(value))
                    depth -= 2;
 
                assert(depth > 0);
@@ -1402,24 +1396,24 @@ Value Search::Worker::search(
    // Bonus for prior countermove that caused the fail low
    else if (!priorCapture && prevSq != SQ_NONE)
    {
-        int bonusScale = (125 * (depth > 5) + 176 * ((ss - 1)->moveCount > 8)
-                          + 135 * (!ss->inCheck && bestValue <= ss->staticEval - 102)
-                          + 122 * (!(ss - 1)->inCheck && bestValue <= -(ss - 1)->staticEval - 82)
-                          + 87 * ((ss - 1)->isTTMove) + std::min(-(ss - 1)->statScore / 106, 318));
+        int bonusScale = (118 * (depth > 5) + 36 * !allNode + 161 * ((ss - 1)->moveCount > 8)
+                          + 133 * (!ss->inCheck && bestValue <= ss->staticEval - 107)
+                          + 120 * (!(ss - 1)->inCheck && bestValue <= -(ss - 1)->staticEval - 84)
+                          + 81 * ((ss - 1)->isTTMove) + std::min(-(ss - 1)->statScore / 108, 320));
 
        bonusScale = std::max(bonusScale, 0);
 
        const int scaledBonus = stat_bonus(depth) * bonusScale;
 
        update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
-                                      scaledBonus * 436 / 32768);
+                                      scaledBonus * 416 / 32768);
 
        thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()]
-          << scaledBonus * 207 / 32768;
+          << scaledBonus * 219 / 32768;
 
        if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
            thisThread->pawnHistory[pawn_structure_index(pos)][pos.piece_on(prevSq)][prevSq]
-              << scaledBonus * 1195 / 32768;
+              << scaledBonus * 1103 / 32768;
    }
 
    else if (priorCapture && prevSq != SQ_NONE)
@@ -1580,7 +1574,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
        if (bestValue > alpha)
            alpha = bestValue;
 
-        futilityBase = ss->staticEval + 301;
+        futilityBase = ss->staticEval + 325;
    }
 
    const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
@@ -1643,11 +1637,11 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
                     + (*contHist[1])[pos.moved_piece(move)][move.to_sq()]
                     + thisThread->pawnHistory[pawn_structure_index(pos)][pos.moved_piece(move)]
                                              [move.to_sq()]
-                     <= 5228)
+                     <= 5389)
                continue;
 
            // Do not search moves with bad enough SEE values
-            if (!pos.see_ge(move, -80))
+            if (!pos.see_ge(move, -75))
                continue;
        }
 
@@ -1714,7 +1708,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta)
 
 Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) const {
    int reductionScale = reductions[d] * reductions[mn];
-    return reductionScale - delta * 768 / rootDelta + !i * reductionScale * 108 / 300 + 1168;
+    return reductionScale - delta * 735 / rootDelta + !i * reductionScale * 191 / 512 + 1132;
 }
 
 // elapsed() returns the time elapsed since the search started. If the
@@ -1810,35 +1804,35 @@ void update_all_stats(const Position& pos,
    Piece     moved_piece = pos.moved_piece(bestMove);
    PieceType captured;
 
-    int bonus = stat_bonus(depth) + 300 * isTTMove;
-    int malus = stat_malus(depth) - 34 * (moveCount - 1);
+    int bonus = stat_bonus(depth) + 298 * isTTMove;
+    int malus = stat_malus(depth) - 32 * (moveCount - 1);
 
    if (!pos.capture_stage(bestMove))
    {
-        update_quiet_histories(pos, ss, workerThread, bestMove, bonus * 1216 / 1024);
+        update_quiet_histories(pos, ss, workerThread, bestMove, bonus * 1202 / 1024);
 
        // Decrease stats for all non-best quiet moves
        for (Move move : quietsSearched)
-            update_quiet_histories(pos, ss, workerThread, move, -malus * 1062 / 1024);
+            update_quiet_histories(pos, ss, workerThread, move, -malus * 1152 / 1024);
    }
    else
    {
        // Increase stats for the best move in case it was a capture move
        captured = type_of(pos.piece_on(bestMove.to_sq()));
-        captureHistory[moved_piece][bestMove.to_sq()][captured] << bonus * 1272 / 1024;
+        captureHistory[moved_piece][bestMove.to_sq()][captured] << bonus * 1236 / 1024;
    }
 
    // Extra penalty for a quiet early move that was not a TT move in
    // previous ply when it gets refuted.
    if (prevSq != SQ_NONE && ((ss - 1)->moveCount == 1 + (ss - 1)->ttHit) && !pos.captured_piece())
-        update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -malus * 966 / 1024);
+        update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq, -malus * 976 / 1024);
 
    // Decrease stats for all non-best capture moves
    for (Move move : capturesSearched)
    {
        moved_piece = pos.moved_piece(move);
        captured    = type_of(pos.piece_on(move.to_sq()));
-        captureHistory[moved_piece][move.to_sq()][captured] << -malus * 1205 / 1024;
+        captureHistory[moved_piece][move.to_sq()][captured] << -malus * 1224 / 1024;
    }
 }
 
@@ -1847,7 +1841,7 @@ void update_all_stats(const Position& pos,
 // at ply -1, -2, -3, -4, and -6 with current move.
 void update_continuation_histories(Stack* ss, Piece pc, Square to, int bonus) {
    static constexpr std::array conthist_bonuses = {
-      {{1, 1025}, {2, 621}, {3, 325}, {4, 512}, {5, 122}, {6, 534}}};
+      {{1, 1029}, {2, 656}, {3, 326}, {4, 536}, {5, 120}, {6, 537}}};
 
    for (const auto [i, weight] : conthist_bonuses)
    {
@@ -1868,12 +1862,12 @@ void update_quiet_histories(
    workerThread.mainHistory[us][move.from_to()] << bonus;  // Untuned to prevent duplicate effort
 
    if (ss->ply < LOW_PLY_HISTORY_SIZE)
-        workerThread.lowPlyHistory[ss->ply][move.from_to()] << bonus * 879 / 1024;
+        workerThread.lowPlyHistory[ss->ply][move.from_to()] << bonus * 844 / 1024;
 
-    update_continuation_histories(ss, pos.moved_piece(move), move.to_sq(), bonus * 888 / 1024);
+    update_continuation_histories(ss, pos.moved_piece(move), move.to_sq(), bonus * 964 / 1024);
 
    int pIndex = pawn_structure_index(pos);
-    workerThread.pawnHistory[pIndex][pos.moved_piece(move)][move.to_sq()] << bonus * 634 / 1024;
+    workerThread.pawnHistory[pIndex][pos.moved_piece(move)][move.to_sq()] << bonus * 615 / 1024;
 }
 
 }
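As a quick illustration of the retuned depth-based formulas above, here is a minimal standalone C++ sketch; it only borrows constants from the + lines (stat_bonus, stat_malus, and the reduction-table fill in Search::Worker::clear()), while the small driver and the fixed array size are illustrative assumptions and not part of the patch.

// Standalone sketch: evaluate the retuned depth-based formulas from this patch.
// Constants mirror the + lines above; names and the driver are illustrative only.
#include <algorithm>
#include <cmath>
#include <cstdio>

// History and stats update bonus, based on depth (new constants)
int stat_bonus(int d) { return std::min(158 * d - 98, 1622); }

// History and stats update malus, based on depth (new constants)
int stat_malus(int d) { return std::min(802 * d - 243, 2850); }

int main() {
    // Base reduction table, filled the same way as in Search::Worker::clear()
    int reductions[16] = {};
    for (int i = 1; i < 16; ++i)
        reductions[i] = int(2937 / 128.0 * std::log(i));

    for (int d = 1; d <= 8; ++d)
        std::printf("depth %d: bonus %4d  malus %4d  base reduction %3d\n",
                    d, stat_bonus(d), stat_malus(d), reductions[d]);
    return 0;
}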