Skip to content

Commit

Permalink
Retuned values after eval quantize patch.
Browse files Browse the repository at this point in the history
The last search tune patch was tested before the implementation of #2733 which
presumably changed the search characteristics noticeably. Another tuning run was
done, see https://tests.stockfishchess.org/tests/view/5ee5b434ca6c451633a9a08c
and the updated values passed these tests:

STC:
LLR: 2.93 (-2.94,2.94) {-0.50,1.50}
Total: 34352 W: 6600 L: 6360 D: 21392
Ptnml(0-2): 581, 3947, 7914, 4119, 615
https://tests.stockfishchess.org/tests/view/5ee62f05ca6c451633a9a15f

LTC 60+0.6 th 1 :
LLR: 2.97 (-2.94,2.94) {0.25,1.75}
Total: 11176 W: 1499 L: 1304 D: 8373
Ptnml(0-2): 69, 933, 3403, 1100, 83
https://tests.stockfishchess.org/tests/view/5ee6205bca6c451633a9a147

SMP LTC 20+0.2 th 8 :
LLR: 2.93 (-2.94,2.94) {0.25,1.75}
Total: 54032 W: 6126 L: 5826 D: 42080
Ptnml(0-2): 278, 4454, 17280, 4698, 306
https://tests.stockfishchess.org/tests/view/5ee62f25ca6c451633a9a162

Closes #2742

Bench 4957812
  • Loading branch information
xoto10 authored and vondele committed Jun 15, 2020
1 parent 42b7dbc commit 995ee4b
Showing 1 changed file with 34 additions and 34 deletions.
68 changes: 34 additions & 34 deletions src/search.cpp
Expand Up @@ -65,17 +65,17 @@ namespace {
constexpr uint64_t TtHitAverageResolution = 1024;

// Razor and futility margins (tuned constants; values from the
// post-#2733 retuning run, see commit message / fishtest links)
constexpr int RazorMargin = 527;

// Futility margin grows linearly with depth; the `improving` flag
// subtracts one depth step, tightening the margin when the side to
// move appears to be improving.
Value futility_margin(Depth d, bool improving) {
  return Value(227 * (d - improving));
}

// Reductions lookup table, initialized at startup
int Reductions[MAX_MOVES]; // [depth or moveNumber]

// Base late-move reduction, computed from the precomputed Reductions[]
// table indexed by depth and move number. Rounds the scaled product to
// plies; non-improving nodes (!i) with a large product get one extra
// ply of reduction.
Depth reduction(bool i, Depth d, int mn) {
  int r = Reductions[d] * Reductions[mn];
  return (r + 570) / 1024 + (!i && r > 1018);
}

constexpr int futility_move_count(bool improving, Depth depth) {
Expand All @@ -84,7 +84,7 @@ namespace {

// History and stats update bonus, based on depth.
// Quadratic in depth, but capped to a small constant beyond depth 15
// so very deep searches do not swamp the history tables.
int stat_bonus(Depth d) {
  return d > 15 ? 27 : 17 * d * d + 133 * d - 134;
}

// Add a small random component to draw evaluations to avoid 3fold-blindness
Expand Down Expand Up @@ -194,7 +194,7 @@ namespace {
/// Search::init() is called at startup to fill the Reductions[] lookup
/// table. The logarithmic formula scales reductions with the thread
/// count (more threads -> slightly larger reductions).
void Search::init() {

  for (int i = 1; i < MAX_MOVES; ++i)
      Reductions[i] = int((24.8 + std::log(Threads.size())) * std::log(i));
}


Expand Down Expand Up @@ -403,12 +403,12 @@ void Thread::search() {
if (rootDepth >= 4)
{
Value prev = rootMoves[pvIdx].previousScore;
delta = Value(21);
delta = Value(19);
alpha = std::max(prev - delta,-VALUE_INFINITE);
beta = std::min(prev + delta, VALUE_INFINITE);

// Adjust contempt based on root move's previousScore (dynamic contempt)
int dct = ct + (104 - ct / 2) * prev / (abs(prev) + 143);
int dct = ct + (110 - ct / 2) * prev / (abs(prev) + 140);

contempt = (us == WHITE ? make_score(dct, dct / 2)
: -make_score(dct, dct / 2));
Expand Down Expand Up @@ -506,13 +506,13 @@ void Thread::search() {
&& !Threads.stop
&& !mainThread->stopOnPonderhit)
{
double fallingEval = (293 + 6 * (mainThread->bestPreviousScore - bestValue)
+ 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 742.0;
double fallingEval = (296 + 6 * (mainThread->bestPreviousScore - bestValue)
+ 6 * (mainThread->iterValue[iterIdx] - bestValue)) / 725.0;
fallingEval = Utility::clamp(fallingEval, 0.5, 1.5);

// If the bestMove is stable over several iterations, reduce time accordingly
timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.93 : 0.96;
double reduction = (1.36 + mainThread->previousTimeReduction) / (2.21 * timeReduction);
timeReduction = lastBestMoveDepth + 10 < completedDepth ? 1.92 : 0.95;
double reduction = (1.47 + mainThread->previousTimeReduction) / (2.22 * timeReduction);

// Use part of the gained time from a previous stable move for the current move
for (Thread* th : Threads)
Expand All @@ -537,7 +537,7 @@ void Thread::search() {
}
else if ( Threads.increaseDepth
&& !mainThread->ponder
&& Time.elapsed() > totalTime * 0.57)
&& Time.elapsed() > totalTime * 0.56)
Threads.increaseDepth = false;
else
Threads.increaseDepth = true;
Expand Down Expand Up @@ -819,18 +819,18 @@ namespace {
// Step 9. Null move search with verification search (~40 Elo)
if ( !PvNode
&& (ss-1)->currentMove != MOVE_NULL
&& (ss-1)->statScore < 24714
&& (ss-1)->statScore < 23824
&& eval >= beta
&& eval >= ss->staticEval
&& ss->staticEval >= beta - 29 * depth - 31 * improving + 119 * ttPv + 299
&& ss->staticEval >= beta - 33 * depth - 33 * improving + 112 * ttPv + 311
&& !excludedMove
&& pos.non_pawn_material(us)
&& (ss->ply >= thisThread->nmpMinPly || us != thisThread->nmpColor))
{
assert(eval - beta >= 0);

// Null move dynamic reduction based on depth and value
Depth R = (793 + 70 * depth) / 252 + std::min(int(eval - beta) / 192, 3);
Depth R = (737 + 77 * depth) / 246 + std::min(int(eval - beta) / 192, 3);

ss->currentMove = MOVE_NULL;
ss->continuationHistory = &thisThread->continuationHistory[0][0][NO_PIECE][0];
Expand Down Expand Up @@ -870,10 +870,10 @@ namespace {
// If we have a good enough capture and a reduced search returns a value
// much above beta, we can (almost) safely prune the previous move.
if ( !PvNode
&& depth > 5
&& depth > 4
&& abs(beta) < VALUE_TB_WIN_IN_MAX_PLY)
{
Value raisedBeta = beta + 182 - 48 * improving;
Value raisedBeta = beta + 176 - 49 * improving;
assert(raisedBeta < VALUE_INFINITE);
MovePicker mp(pos, ttMove, raisedBeta - ss->staticEval, &captureHistory);
int probCutCount = 0;
Expand Down Expand Up @@ -904,7 +904,7 @@ namespace {

// If the qsearch held, perform the regular search
if (value >= raisedBeta)
value = -search<NonPV>(pos, ss+1, -raisedBeta, -raisedBeta+1, depth - 5, !cutNode);
value = -search<NonPV>(pos, ss+1, -raisedBeta, -raisedBeta+1, depth - 4, !cutNode);

pos.undo_move(move);

Expand Down Expand Up @@ -1003,15 +1003,15 @@ namespace {
// Futility pruning: parent node (~5 Elo)
if ( lmrDepth < 6
&& !ss->inCheck
&& ss->staticEval + 252 + 176 * lmrDepth <= alpha
&& ss->staticEval + 284 + 188 * lmrDepth <= alpha
&& (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
+ (*contHist[5])[movedPiece][to_sq(move)] / 2 < 30251)
+ (*contHist[5])[movedPiece][to_sq(move)] / 2 < 28388)
continue;

// Prune moves with negative SEE (~20 Elo)
if (!pos.see_ge(move, Value(-(31 - std::min(lmrDepth, 18)) * lmrDepth * lmrDepth)))
if (!pos.see_ge(move, Value(-(29 - std::min(lmrDepth, 17)) * lmrDepth * lmrDepth)))
continue;
}
else
Expand All @@ -1027,11 +1027,11 @@ namespace {
&& lmrDepth < 6
&& !(PvNode && abs(bestValue) < 2)
&& !ss->inCheck
&& ss->staticEval + 264 + 397 * lmrDepth + PieceValue[MG][type_of(pos.piece_on(to_sq(move)))] <= alpha)
&& ss->staticEval + 267 + 391 * lmrDepth + PieceValue[MG][type_of(pos.piece_on(to_sq(move)))] <= alpha)
continue;

// See based pruning
if (!pos.see_ge(move, Value(-192) * depth)) // (~25 Elo)
if (!pos.see_ge(move, Value(-202) * depth)) // (~25 Elo)
continue;
}
}
Expand Down Expand Up @@ -1144,12 +1144,12 @@ namespace {
|| moveCountPruning
|| ss->staticEval + PieceValue[EG][pos.captured_piece()] <= alpha
|| cutNode
|| thisThread->ttHitAverage < 399 * TtHitAverageResolution * TtHitAverageWindow / 1024))
|| thisThread->ttHitAverage < 415 * TtHitAverageResolution * TtHitAverageWindow / 1024))
{
Depth r = reduction(improving, depth, moveCount);

// Decrease reduction if the ttHit running average is large
if (thisThread->ttHitAverage > 492 * TtHitAverageResolution * TtHitAverageWindow / 1024)
if (thisThread->ttHitAverage > 473 * TtHitAverageResolution * TtHitAverageWindow / 1024)
r--;

// Reduction if other threads are searching this position.
Expand All @@ -1164,7 +1164,7 @@ namespace {
r++;

// Decrease reduction if opponent's move count is high (~5 Elo)
if ((ss-1)->moveCount > 14)
if ((ss-1)->moveCount > 13)
r--;

// Decrease reduction if ttMove has been singularly extended (~3 Elo)
Expand Down Expand Up @@ -1192,17 +1192,17 @@ namespace {
+ (*contHist[0])[movedPiece][to_sq(move)]
+ (*contHist[1])[movedPiece][to_sq(move)]
+ (*contHist[3])[movedPiece][to_sq(move)]
- 4926;
- 4826;

// Decrease/increase reduction by comparing opponent's stat score (~10 Elo)
if (ss->statScore >= -99 && (ss-1)->statScore < -116)
if (ss->statScore >= -100 && (ss-1)->statScore < -112)
r--;

else if ((ss-1)->statScore >= -117 && ss->statScore < -150)
else if ((ss-1)->statScore >= -125 && ss->statScore < -138)
r++;

// Decrease/increase reduction for moves with a good/bad history (~30 Elo)
r -= ss->statScore / 15896;
r -= ss->statScore / 14615;
}
else
{
Expand All @@ -1212,7 +1212,7 @@ namespace {

// Unless giving check, this capture is likely bad
if ( !givesCheck
&& ss->staticEval + PieceValue[EG][pos.captured_piece()] + 200 * depth <= alpha)
&& ss->staticEval + PieceValue[EG][pos.captured_piece()] + 211 * depth <= alpha)
r++;
}

Expand Down Expand Up @@ -1474,7 +1474,7 @@ namespace {
if (PvNode && bestValue > alpha)
alpha = bestValue;

futilityBase = bestValue + 138;
futilityBase = bestValue + 141;
}

const PieceToHistory* contHist[] = { (ss-1)->continuationHistory, (ss-2)->continuationHistory,
Expand Down Expand Up @@ -1724,8 +1724,8 @@ namespace {
thisThread->counterMoves[pos.piece_on(prevSq)][prevSq] = move;
}

if (depth > 12 && ss->ply < MAX_LPH)
thisThread->lowPlyHistory[ss->ply][from_to(move)] << stat_bonus(depth - 7);
if (depth > 11 && ss->ply < MAX_LPH)
thisThread->lowPlyHistory[ss->ply][from_to(move)] << stat_bonus(depth - 6);
}

// When playing with strength handicap, choose best move among a set of RootMoves
Expand Down

0 comments on commit 995ee4b

Please sign in to comment.