VVLTC search tune
This set of parameters was derived from 3 tuning attempts:

    https://tests.stockfishchess.org/tests/view/65d19ab61d8e83c78bfd8436 (80+0.8 x8, ~40k games)
    Then tuned with one of linrock's early L1-3072 nets:
    https://tests.stockfishchess.org/tests/view/65def7b04b19edc854ebdec8 (VVLTC, ~36k games)
    Starting from the result of this tuning, the parameters were then tuned with the current master net:
    https://tests.stockfishchess.org/tests/view/65f11c420ec64f0526c46fc4 (VVLTC, ~45k games)

Additionally, at the start of the third tuning phase, 2 parameters were manually changed:

    Notably, the triple extension margin was decreased from 78 to 22. This idea was suggested by Vizvezdenec:
    https://tests.stockfishchess.org/tests/view/65f0a2360ec64f0526c46752
    The PvNode extension margin was also adjusted from 50 to 40.

This tune also differs from previous tuning attempts in that the evaluation thresholds for the smallnet and psqt-only paths were tuned as well.
The smallnet threshold was increased by the tuning; this is hypothesized to scale better at VVLTC,
although there is little evidence for it so far.
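
For orientation, here is a minimal sketch of how such thresholds can gate which evaluation path is used. It is an illustration only: the actual selection logic lives in evaluate.cpp (not part of this diff), and the simpleEval estimate and exact comparisons below are assumptions, not the real implementation.

#include <cstdlib>

constexpr int SmallNetThreshold = 1165;  // raised from 1050 by this tune
constexpr int PsqtOnlyThreshold = 2500;  // unchanged

enum class EvalPath { BigNet, SmallNet, PsqtOnly };

// simpleEval stands in for a cheap material-style estimate of the position;
// how it is computed, and these exact comparisons, are assumptions here.
EvalPath choose_eval_path(int simpleEval) {
    if (std::abs(simpleEval) > PsqtOnlyThreshold)
        return EvalPath::PsqtOnly;  // very lopsided: piece values / PSQT only
    if (std::abs(simpleEval) > SmallNetThreshold)
        return EvalPath::SmallNet;  // clearly decided: small network
    return EvalPath::BigNet;        // otherwise: full network
}

In this sketch, raising SmallNetThreshold widens the band of evaluations for which the big network is consulted.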

Passed VVLTC 1st sprt: https://tests.stockfishchess.org/tests/view/65f6761d0ec64f0526c4be88
LLR: 2.94 (-2.94,2.94) <0.00,2.00>
Total: 44688 W: 11421 L: 11140 D: 22127
Ptnml(0-2): 1, 4170, 13722, 4449, 2

Passed VVLTC 2nd sprt: https://tests.stockfishchess.org/tests/view/65fa31a30ec64f0526c4f611
LLR: 2.94 (-2.94,2.94) <0.50,2.50>
Total: 27450 W: 7057 L: 6778 D: 13615
Ptnml(0-2): 4, 2545, 8346, 2828, 2

STC Elo estimate: https://tests.stockfishchess.org/tests/view/65fd3e540ec64f0526c521ae
Elo: -7.84 ± 1.8 (95%) LOS: 0.0%
Total: 40000 W: 9899 L: 10802 D: 19299
Ptnml(0-2): 203, 5221, 10025, 4378, 173
nElo: -14.91 ± 3.4 (95%) PairsRatio: 0.84
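
As a sanity check, the reported PairsRatio and game count can be reproduced from the Ptnml(0-2) counts. The sketch below does only that bookkeeping; nElo and LOS require the full fishtest statistics model and are not recomputed here.

#include <array>
#include <iostream>

int main() {
    // STC Elo estimate run above: Ptnml(0-2): 203, 5221, 10025, 4378, 173,
    // i.e. counts of game pairs that scored 0, 0.5, 1, 1.5 and 2 points.
    std::array<long, 5> ptnml = {203, 5221, 10025, 4378, 173};

    long pairs = 0;
    for (long n : ptnml)
        pairs += n;

    // PairsRatio = (winning pairs) / (losing pairs)
    double pairsRatio =
        double(ptnml[3] + ptnml[4]) / double(ptnml[0] + ptnml[1]);

    std::cout << "games:       " << 2 * pairs << '\n'    // 40000
              << "pairs ratio: " << pairsRatio << '\n';  // ~0.84
}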

closes #5130

Bench: 1876107
XInTheDark authored and vondele committed Mar 22, 2024
1 parent 7e42763 commit d99f895
Showing 2 changed files with 38 additions and 38 deletions.
2 changes: 1 addition & 1 deletion src/evaluate.h
@@ -29,7 +29,7 @@ class Position;

namespace Eval {

-constexpr inline int SmallNetThreshold = 1050, PsqtOnlyThreshold = 2500;
+constexpr inline int SmallNetThreshold = 1165, PsqtOnlyThreshold = 2500;

// The default net name MUST follow the format nn-[SHA256 first 12 digits].nnue
// for the build process (profile-build and fishtest) to work. Do not change the
74 changes: 37 additions & 37 deletions src/search.cpp
@@ -55,9 +55,9 @@ namespace {

// Futility margin
Value futility_margin(Depth d, bool noTtCutNode, bool improving, bool oppWorsening) {
-Value futilityMult = 122 - 46 * noTtCutNode;
-Value improvingDeduction = 57 * improving * futilityMult / 32;
-Value worseningDeduction = (331 + 45 * improving) * oppWorsening * futilityMult / 1024;
+Value futilityMult = 118 - 44 * noTtCutNode;
+Value improvingDeduction = 53 * improving * futilityMult / 32;
+Value worseningDeduction = (309 + 47 * improving) * oppWorsening * futilityMult / 1024;

return futilityMult * d - improvingDeduction - worseningDeduction;
}
@@ -69,15 +69,15 @@ constexpr int futility_move_count(bool improving, Depth depth) {
// Add correctionHistory value to raw staticEval and guarantee evaluation does not hit the tablebase range
Value to_corrected_static_eval(Value v, const Worker& w, const Position& pos) {
auto cv = w.correctionHistory[pos.side_to_move()][pawn_structure_index<Correction>(pos)];
-v += cv * std::abs(cv) / 11450;
+v += cv * std::abs(cv) / 11175;
return std::clamp(v, VALUE_TB_LOSS_IN_MAX_PLY + 1, VALUE_TB_WIN_IN_MAX_PLY - 1);
}

// History and stats update bonus, based on depth
-int stat_bonus(Depth d) { return std::min(249 * d - 327, 1192); }
+int stat_bonus(Depth d) { return std::min(223 * d - 332, 1258); }

// History and stats update malus, based on depth
-int stat_malus(Depth d) { return std::min(516 * d - 299, 1254); }
+int stat_malus(Depth d) { return std::min(536 * d - 299, 1353); }

// Add a small random component to draw evaluations to avoid 3-fold blindness
Value value_draw(size_t nodes) { return VALUE_DRAW - 1 + Value(nodes & 0x2); }
@@ -302,12 +302,12 @@ void Search::Worker::iterative_deepening() {

// Reset aspiration window starting size
Value avg = rootMoves[pvIdx].averageScore;
-delta = 9 + avg * avg / 12800;
+delta = 10 + avg * avg / 12493;
alpha = std::max(avg - delta, -VALUE_INFINITE);
beta = std::min(avg + delta, VALUE_INFINITE);

// Adjust optimism based on root move's averageScore (~4 Elo)
-optimism[us] = 130 * avg / (std::abs(avg) + 90);
+optimism[us] = 132 * avg / (std::abs(avg) + 89);
optimism[~us] = -optimism[us];

// Start with a small aspiration window and, in the case of a fail
@@ -494,7 +494,7 @@ void Search::Worker::clear() {
for (StatsType c : {NoCaptures, Captures})
for (auto& to : continuationHistory[inCheck][c])
for (auto& h : to)
-h->fill(-71);
+h->fill(-67);

for (size_t i = 1; i < reductions.size(); ++i)
reductions[i] = int((19.80 + std::log(size_t(options["Threads"])) / 2) * std::log(i));
@@ -729,7 +729,7 @@ Value Search::Worker::search(
// Use static evaluation difference to improve quiet move ordering (~9 Elo)
if (((ss - 1)->currentMove).is_ok() && !(ss - 1)->inCheck && !priorCapture)
{
-int bonus = std::clamp(-14 * int((ss - 1)->staticEval + ss->staticEval), -1621, 1238);
+int bonus = std::clamp(-13 * int((ss - 1)->staticEval + ss->staticEval), -1578, 1291);
bonus = bonus > 0 ? 2 * bonus : bonus / 2;
thisThread->mainHistory[~us][((ss - 1)->currentMove).from_to()] << bonus;
if (type_of(pos.piece_on(prevSq)) != PAWN && ((ss - 1)->currentMove).type_of() != PROMOTION)
@@ -752,7 +752,7 @@ Value Search::Worker::search(
// If eval is really low check with qsearch if it can exceed alpha, if it can't,
// return a fail low.
// Adjust razor margin according to cutoffCnt. (~1 Elo)
-if (eval < alpha - 462 - (296 - 145 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
+if (eval < alpha - 488 - (289 - 142 * ((ss + 1)->cutoffCnt > 3)) * depth * depth)
{
value = qsearch<NonPV>(pos, ss, alpha - 1, alpha);
if (value < alpha)
@@ -763,21 +763,21 @@ Value Search::Worker::search(
// The depth condition is important for mate finding.
if (!ss->ttPv && depth < 12
&& eval - futility_margin(depth, cutNode && !ss->ttHit, improving, opponentWorsening)
-- (ss - 1)->statScore / 287
+- (ss - 1)->statScore / 267
>= beta
&& eval >= beta && eval < VALUE_TB_WIN_IN_MAX_PLY && (!ttMove || ttCapture))
return beta > VALUE_TB_LOSS_IN_MAX_PLY ? (eval + beta) / 2 : eval;

// Step 9. Null move search with verification search (~35 Elo)
-if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 16211
+if (!PvNode && (ss - 1)->currentMove != Move::null() && (ss - 1)->statScore < 16878
&& eval >= beta && eval >= ss->staticEval && ss->staticEval >= beta - 20 * depth + 314
&& !excludedMove && pos.non_pawn_material(us) && ss->ply >= thisThread->nmpMinPly
&& beta > VALUE_TB_LOSS_IN_MAX_PLY)
{
assert(eval - beta >= 0);

// Null move dynamic reduction based on depth and eval
-Depth R = std::min(int(eval - beta) / 151, 6) + depth / 3 + 4;
+Depth R = std::min(int(eval - beta) / 144, 6) + depth / 3 + 4;

ss->currentMove = Move::null();
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
@@ -825,7 +825,7 @@ Value Search::Worker::search(
// Step 11. ProbCut (~10 Elo)
// If we have a good enough capture (or queen promotion) and a reduced search returns a value
// much above beta, we can (almost) safely prune the previous move.
-probCutBeta = beta + 168 - 64 * improving;
+probCutBeta = beta + 170 - 64 * improving;
if (
!PvNode && depth > 3
&& std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY
@@ -881,7 +881,7 @@ Value Search::Worker::search(
moves_loop: // When in check, search starts here

// Step 12. A small Probcut idea, when we are in check (~4 Elo)
-probCutBeta = beta + 410;
+probCutBeta = beta + 409;
if (ss->inCheck && !PvNode && ttCapture && (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 4 && ttValue >= probCutBeta
&& std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && std::abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
@@ -964,15 +964,15 @@ Value Search::Worker::search(
{
Piece capturedPiece = pos.piece_on(move.to_sq());
int futilityEval =
-ss->staticEval + 298 + 288 * lmrDepth + PieceValue[capturedPiece]
+ss->staticEval + 297 + 284 * lmrDepth + PieceValue[capturedPiece]
+ thisThread->captureHistory[movedPiece][move.to_sq()][type_of(capturedPiece)]
/ 7;
if (futilityEval < alpha)
continue;
}

// SEE based pruning for captures and checks (~11 Elo)
-if (!pos.see_ge(move, -202 * depth))
+if (!pos.see_ge(move, -203 * depth))
continue;
}
else
@@ -984,24 +984,24 @@
+ thisThread->pawnHistory[pawn_structure_index(pos)][movedPiece][move.to_sq()];

// Continuation history based pruning (~2 Elo)
-if (lmrDepth < 6 && history < -4125 * depth)
+if (lmrDepth < 6 && history < -4040 * depth)
continue;

history += 2 * thisThread->mainHistory[us][move.from_to()];

-lmrDepth += history / 5686;
+lmrDepth += history / 5637;

// Futility pruning: parent node (~13 Elo)
if (!ss->inCheck && lmrDepth < 15
-&& ss->staticEval + (bestValue < ss->staticEval - 55 ? 153 : 58)
-+ 118 * lmrDepth
+&& ss->staticEval + (bestValue < ss->staticEval - 59 ? 141 : 58)
++ 125 * lmrDepth
<= alpha)
continue;

lmrDepth = std::max(lmrDepth, 0);

// Prune moves with negative SEE (~4 Elo)
-if (!pos.see_ge(move, -26 * lmrDepth * lmrDepth))
+if (!pos.see_ge(move, -27 * lmrDepth * lmrDepth))
continue;
}
}
@@ -1021,11 +1021,11 @@
// so changing them requires tests at these types of time controls.
// Recursive singular search is avoided.
if (!rootNode && move == ttMove && !excludedMove
-&& depth >= 4 - (thisThread->completedDepth > 29) + ss->ttPv
+&& depth >= 4 - (thisThread->completedDepth > 30) + ss->ttPv
&& std::abs(ttValue) < VALUE_TB_WIN_IN_MAX_PLY && (tte->bound() & BOUND_LOWER)
&& tte->depth() >= depth - 3)
{
-Value singularBeta = ttValue - (58 + 55 * (ss->ttPv && !PvNode)) * depth / 64;
+Value singularBeta = ttValue - (58 + 58 * (ss->ttPv && !PvNode)) * depth / 64;
Depth singularDepth = newDepth / 2;

ss->excludedMove = move;
@@ -1040,11 +1040,11 @@
// We make sure to limit the extensions in some way to avoid a search explosion
if (!PvNode && ss->multipleExtensions <= 16)
{
-extension = 2 + (value < singularBeta - 78 && !ttCapture);
+extension = 2 + (value < singularBeta - 22 && !ttCapture);
depth += depth < 14;
}
if (PvNode && !ttCapture && ss->multipleExtensions <= 5
-&& value < singularBeta - 50)
+&& value < singularBeta - 37)
extension = 2;
}

@@ -1079,7 +1079,7 @@ Value Search::Worker::search(
else if (PvNode && move == ttMove && move.to_sq() == prevSq
&& thisThread->captureHistory[movedPiece][move.to_sq()]
[type_of(pos.piece_on(move.to_sq()))]
-> 4315)
+> 4026)
extension = 1;
}

@@ -1129,10 +1129,10 @@ Value Search::Worker::search(
ss->statScore = 2 * thisThread->mainHistory[us][move.from_to()]
+ (*contHist[0])[movedPiece][move.to_sq()]
+ (*contHist[1])[movedPiece][move.to_sq()]
-+ (*contHist[3])[movedPiece][move.to_sq()] - 4587;
++ (*contHist[3])[movedPiece][move.to_sq()] - 4723;

// Decrease/increase reduction for moves with a good/bad history (~8 Elo)
-r -= ss->statScore / 14956;
+r -= ss->statScore / 13659;

// Step 17. Late moves reduction / extension (LMR, ~117 Elo)
if (depth >= 2 && moveCount > 1 + rootNode)
@@ -1151,7 +1151,7 @@
{
// Adjust full-depth search based on LMR results - if the result
// was good enough search deeper, if it was bad enough search shallower.
-const bool doDeeperSearch = value > (bestValue + 48 + 2 * newDepth); // (~1 Elo)
+const bool doDeeperSearch = value > (bestValue + 47 + 2 * newDepth); // (~1 Elo)
const bool doShallowerSearch = value < bestValue + newDepth; // (~2 Elo)

newDepth += doDeeperSearch - doShallowerSearch;
@@ -1269,7 +1269,7 @@ Value Search::Worker::search(
else
{
// Reduce other moves if we have found at least one score improvement (~2 Elo)
-if (depth > 2 && depth < 12 && beta < 13665 && value > -12276)
+if (depth > 2 && depth < 12 && beta < 14206 && value > -12077)
depth -= 2;

assert(depth > 0);
@@ -1312,7 +1312,7 @@ Value Search::Worker::search(
// Bonus for prior countermove that caused the fail low
else if (!priorCapture && prevSq != SQ_NONE)
{
-int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -14446)
+int bonus = (depth > 5) + (PvNode || cutNode) + ((ss - 1)->statScore < -14963)
+ ((ss - 1)->moveCount > 11)
+ (!ss->inCheck && bestValue <= ss->staticEval - 150);
update_continuation_histories(ss - 1, pos.piece_on(prevSq), prevSq,
@@ -1472,7 +1472,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
if (bestValue > alpha)
alpha = bestValue;

-futilityBase = ss->staticEval + 221;
+futilityBase = ss->staticEval + 226;
}

const PieceToHistory* contHist[] = {(ss - 1)->continuationHistory,
@@ -1552,7 +1552,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,
continue;

// Do not search moves with bad enough SEE values (~5 Elo)
-if (!pos.see_ge(move, -79))
+if (!pos.see_ge(move, -78))
continue;
}

@@ -1620,7 +1620,7 @@ Value Search::Worker::qsearch(Position& pos, Stack* ss, Value alpha, Value beta,

Depth Search::Worker::reduction(bool i, Depth d, int mn, int delta) {
int reductionScale = reductions[d] * reductions[mn];
-return (reductionScale + 1091 - delta * 759 / rootDelta) / 1024 + (!i && reductionScale > 950);
+return (reductionScale + 1107 - delta * 725 / rootDelta) / 1024 + (!i && reductionScale > 956);
}

namespace {
@@ -1709,7 +1709,7 @@ void update_all_stats(const Position& pos,

if (!pos.capture_stage(bestMove))
{
-int bestMoveBonus = bestValue > beta + 167 ? quietMoveBonus // larger bonus
+int bestMoveBonus = bestValue > beta + 173 ? quietMoveBonus // larger bonus
: stat_bonus(depth); // smaller bonus

// Increase stats for the best move in case it was a quiet move