Increase max threads to 128
Thanks to std::bitset we can easily increase
the limit of active threads above 64.

Thanks to Lucas Braesch for pointing out the
correct solution of using std::bitset.

No functional change.
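
Below is a minimal, self-contained sketch (not part of the commit) of the std::bitset operations this patch relies on; the thread indices and the standalone main() are made up purely for illustration:

    #include <bitset>
    #include <iostream>

    const int MAX_THREADS = 128;  // the new limit introduced by this commit

    int main() {
        std::bitset<MAX_THREADS> slavesMask;   // replaces the old volatile uint64_t mask

        slavesMask.set(0);      // master registers itself (index 0 is illustrative)
        slavesMask.set(100);    // indices above 63 are now representable
        std::cout << slavesMask.count() << " bits set\n";          // prints 2

        slavesMask.reset(100);  // a slave finishes and clears its bit
        if (slavesMask.test(0))                                    // per-bit query
            std::cout << "master bit still set\n";

        slavesMask.reset(0);
        std::cout << std::boolalpha << slavesMask.none() << "\n";  // true: all slaves done
        return 0;
    }
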
mcostalba committed Mar 18, 2014
1 parent fa3f6dc commit aab5863
Showing 3 changed files with 15 additions and 17 deletions.
21 changes: 9 additions & 12 deletions src/search.cpp
@@ -1470,7 +1470,7 @@ void Thread::idle_loop() {
mutex.lock();

// If we are master and all slaves have finished then exit idle_loop
- if (this_sp && !this_sp->slavesMask)
+ if (this_sp && this_sp->slavesMask.none())
{
mutex.unlock();
break;
@@ -1529,14 +1529,14 @@ void Thread::idle_loop() {

searching = false;
activePosition = NULL;
- sp->slavesMask &= ~(1ULL << idx);
+ sp->slavesMask.reset(idx);
sp->nodes += pos.nodes_searched();

// Wake up the master thread so to allow it to return from the idle
// loop in case we are the last slave of the split point.
if ( Threads.sleepWhileIdle
&& this != sp->masterThread
-     && !sp->slavesMask)
+     && sp->slavesMask.none())
{
assert(!sp->masterThread->searching);
sp->masterThread->notify_one();
@@ -1551,10 +1551,10 @@ void Thread::idle_loop() {

// If this thread is the master of a split point and all slaves have finished
// their work at this split point, return from the idle loop.
- if (this_sp && !this_sp->slavesMask)
+ if (this_sp && this_sp->slavesMask.none())
{
this_sp->mutex.lock();
- bool finished = !this_sp->slavesMask; // Retest under lock protection
+ bool finished = this_sp->slavesMask.none(); // Retest under lock protection
this_sp->mutex.unlock();
if (finished)
return;
@@ -1597,13 +1597,10 @@ void check_time() {
sp.mutex.lock();

nodes += sp.nodes;
- Bitboard sm = sp.slavesMask;
- while (sm)
- {
-     Position* pos = Threads[pop_lsb(&sm)]->activePosition;
-     if (pos)
-         nodes += pos->nodes_searched();
- }

+ for (size_t idx = 0; idx < Threads.size(); ++idx)
+     if (sp.slavesMask.test(idx) && Threads[idx]->activePosition)
+         nodes += Threads[idx]->activePosition->nodes_searched();

sp.mutex.unlock();
}
6 changes: 3 additions & 3 deletions src/thread.cpp
@@ -123,7 +123,7 @@ bool Thread::available_to(const Thread* master) const {

// No split points means that the thread is available as a slave for any
// other thread otherwise apply the "helpful master" concept if possible.
- return !size || (splitPoints[size - 1].slavesMask & (1ULL << master->idx));
+ return !size || splitPoints[size - 1].slavesMask.test(master->idx);
}


@@ -271,7 +271,7 @@ void Thread::split(Position& pos, const Stack* ss, Value alpha, Value beta, Valu

sp.masterThread = this;
sp.parentSplitPoint = activeSplitPoint;
- sp.slavesMask = 1ULL << idx;
+ sp.slavesMask = 0, sp.slavesMask.set(idx);
sp.depth = depth;
sp.bestValue = *bestValue;
sp.bestMove = *bestMove;
@@ -299,7 +299,7 @@ void Thread::split(Position& pos, const Stack* ss, Value alpha, Value beta, Valu
if (!Fake)
for (Thread* slave; (slave = Threads.available_slave(this)) != NULL; )
{
- sp.slavesMask |= 1ULL << slave->idx;
+ sp.slavesMask.set(slave->idx);
slave->activeSplitPoint = &sp;
slave->searching = true; // Slave leaves idle_loop()
slave->notify_one(); // Could be sleeping
5 changes: 3 additions & 2 deletions src/thread.h
@@ -20,6 +20,7 @@
#ifndef THREAD_H_INCLUDED
#define THREAD_H_INCLUDED

+ #include <bitset>
#include <vector>

#include "material.h"
@@ -28,7 +29,7 @@
#include "position.h"
#include "search.h"

- const int MAX_THREADS = 64; // Because SplitPoint::slavesMask is a uint64_t
+ const int MAX_THREADS = 128;
const int MAX_SPLITPOINTS_PER_THREAD = 8;

struct Mutex {
@@ -75,7 +76,7 @@ struct SplitPoint {

// Shared data
Mutex mutex;
- volatile uint64_t slavesMask;
+ std::bitset<MAX_THREADS> slavesMask;
volatile uint64_t nodes;
volatile Value alpha;
volatile Value bestValue;
