Further renaming in thread.cpp
No functional change.
mcostalba committed Jan 20, 2013
1 parent 588670e commit 62b32a4
Showing 3 changed files with 68 additions and 67 deletions.
61 changes: 33 additions & 28 deletions src/search.cpp
@@ -1008,7 +1008,7 @@ namespace {
// Step 19. Check for splitting the search
if ( !SpNode
&& depth >= Threads.minimumSplitDepth
&& Threads.available_slave_exists(thisThread))
&& Threads.slave_available(thisThread))
{
assert(bestValue < beta);

@@ -1554,40 +1554,40 @@ void RootMove::insert_pv_in_tt(Position& pos) {

void Thread::idle_loop() {

// Pointer 'sp_master', if non-NULL, points to the active SplitPoint
// object for which the thread is the master.
const SplitPoint* sp_master = splitPointsCnt ? curSplitPoint : NULL;
// Pointer 'this_sp' is not null only if we are called from split(), and not
// at the thread creation. So it means we are the split point's master.
const SplitPoint* this_sp = splitPointsSize ? activeSplitPoint : NULL;

assert(!sp_master || (sp_master->master == this && searching));
assert(!this_sp || (this_sp->master == this && searching));

// If this thread is the master of a split point and all slaves have
// finished their work at this split point, return from the idle loop.
while (!sp_master || sp_master->slavesMask)
// If this thread is the master of a split point and all slaves have finished
// their work at this split point, return from the idle loop.
while (!this_sp || this_sp->slavesMask)
{
// If we are not searching, wait for a condition to be signaled
// instead of wasting CPU time polling for work.
// If we are not searching, wait for a condition to be signaled instead of
// wasting CPU time polling for work.
while ((!searching && Threads.sleepWhileIdle) || exit)
{
if (exit)
{
assert(!sp_master);
assert(!this_sp);
return;
}

// Grab the lock to avoid races with Thread::wake_up()
// Grab the lock to avoid races with Thread::notify_one()
mutex.lock();

// If we are master and all slaves have finished don't go to sleep
if (sp_master && !sp_master->slavesMask)
// If we are master and all slaves have finished then exit idle_loop
if (this_sp && !this_sp->slavesMask)
{
mutex.unlock();
break;
}

// Do sleep after retesting sleep conditions under lock protection, in
// particular we need to avoid a deadlock in case a master thread has,
// in the meanwhile, allocated us and sent the wake_up() call before we
// had the chance to grab the lock.
// in the meanwhile, allocated us and sent the notify_one() call before
// we had the chance to grab the lock.
if (!searching && !exit)
sleepCondition.wait(mutex);

@@ -1602,7 +1602,7 @@ void Thread::idle_loop() {
Threads.mutex.lock();

assert(searching);
SplitPoint* sp = curSplitPoint;
SplitPoint* sp = activeSplitPoint;

Threads.mutex.unlock();

@@ -1614,28 +1614,33 @@ void Thread::idle_loop() {

sp->mutex.lock();

assert(sp->activePositions[idx] == NULL);
assert(sp->slavesPositions[idx] == NULL);

sp->activePositions[idx] = &pos;
sp->slavesPositions[idx] = &pos;

if (sp->nodeType == Root)
switch (sp->nodeType) {
case Root:
search<SplitPointRoot>(pos, ss+1, sp->alpha, sp->beta, sp->depth);
else if (sp->nodeType == PV)
break;
case PV:
search<SplitPointPV>(pos, ss+1, sp->alpha, sp->beta, sp->depth);
else if (sp->nodeType == NonPV)
break;
case NonPV:
search<SplitPointNonPV>(pos, ss+1, sp->alpha, sp->beta, sp->depth);
else
break;
default:
assert(false);
}

assert(searching);

searching = false;
sp->activePositions[idx] = NULL;
sp->slavesPositions[idx] = NULL;
sp->slavesMask &= ~(1ULL << idx);
sp->nodes += pos.nodes_searched();

// Wake up master thread so to allow it to return from the idle loop in
// case we are the last slave of the split point.
// Wake up master thread so to allow it to return from the idle loop
// in case we are the last slave of the split point.
if ( Threads.sleepWhileIdle
&& this != sp->master
&& !sp->slavesMask)
@@ -1681,7 +1686,7 @@ void check_time() {
// Loop across all split points and sum accumulated SplitPoint nodes plus
// all the currently active slaves positions.
for (size_t i = 0; i < Threads.size(); i++)
for (int j = 0; j < Threads[i].splitPointsCnt; j++)
for (int j = 0; j < Threads[i].splitPointsSize; j++)
{
SplitPoint& sp = Threads[i].splitPoints[j];

@@ -1691,7 +1696,7 @@
Bitboard sm = sp.slavesMask;
while (sm)
{
Position* pos = sp.activePositions[pop_lsb(&sm)];
Position* pos = sp.slavesPositions[pop_lsb(&sm)];
nodes += pos ? pos->nodes_searched() : 0;
}

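The check_time() hunk above credits each slave's node count by peeling one set bit at a time off a copy of slavesMask with pop_lsb(). Below is a minimal standalone sketch of that set-bit iteration pattern; it is not part of the commit, and the pop_lsb() here is a simplified stand-in built on the GCC/Clang __builtin_ctzll builtin rather than the engine's own helper.

#include <cstdint>
#include <iostream>

typedef uint64_t Bitboard;

// Simplified stand-in for pop_lsb(): return the index of the least
// significant set bit of *b and clear that bit. Assumes *b != 0.
static int pop_lsb(Bitboard* b) {
  int idx = __builtin_ctzll(*b);   // GCC/Clang builtin assumed
  *b &= *b - 1;
  return idx;
}

int main() {
  // Pretend node counts for slaves sitting at indices 0, 3 and 5
  int64_t nodesSearched[64] = {};
  nodesSearched[0] = 1000;
  nodesSearched[3] = 2500;
  nodesSearched[5] = 700;

  Bitboard slavesMask = (1ULL << 0) | (1ULL << 3) | (1ULL << 5);

  int64_t nodes = 0;
  Bitboard sm = slavesMask;        // iterate over a copy, as check_time() does
  while (sm)
      nodes += nodesSearched[pop_lsb(&sm)];

  std::cout << "accumulated nodes: " << nodes << '\n';   // prints 4200
  return 0;
}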
54 changes: 27 additions & 27 deletions src/thread.cpp
@@ -45,8 +45,8 @@ namespace { extern "C" {
Thread::Thread() : splitPoints() {

searching = exit = false;
maxPly = splitPointsCnt = 0;
curSplitPoint = NULL;
maxPly = splitPointsSize = 0;
activeSplitPoint = NULL;
idx = Threads.size();

if (!thread_create(handle, start_routine, this))
@@ -146,7 +146,7 @@ void Thread::wait_for(volatile const bool& b) {

bool Thread::cutoff_occurred() const {

for (SplitPoint* sp = curSplitPoint; sp; sp = sp->parent)
for (SplitPoint* sp = activeSplitPoint; sp; sp = sp->parent)
if (sp->cutoff)
return true;

@@ -157,9 +157,9 @@ bool Thread::cutoff_occurred() const {
// Thread::is_available_to() checks whether the thread is available to help the
// thread 'master' at a split point. An obvious requirement is that thread must
// be idle. With more than two threads, this is not sufficient: If the thread is
// the master of some active split point, it is only available as a slave to the
// slaves which are busy searching the split point at the top of slaves split
// point stack (the "helpful master concept" in YBWC terminology).
// the master of some split point, it is only available as a slave to the slaves
// which are busy searching the split point at the top of slaves split point
// stack (the "helpful master concept" in YBWC terminology).

bool Thread::is_available_to(Thread* master) const {

@@ -168,11 +168,11 @@ bool Thread::is_available_to(Thread* master) const {

// Make a local copy to be sure doesn't become zero under our feet while
// testing next condition and so leading to an out of bound access.
int spCnt = splitPointsCnt;
int size = splitPointsSize;

// No active split points means that the thread is available as a slave for any
// No split points means that the thread is available as a slave for any
// other thread otherwise apply the "helpful master" concept if possible.
return !spCnt || (splitPoints[spCnt - 1].slavesMask & (1ULL << master->idx));
return !size || (splitPoints[size - 1].slavesMask & (1ULL << master->idx));
}


@@ -225,10 +225,10 @@ void ThreadPool::read_uci_options() {
}


// available_slave_exists() tries to find an idle thread which is available as
// a slave for the thread 'master'.
// slave_available() tries to find an idle thread which is available as a slave
// for the thread 'master'.

bool ThreadPool::available_slave_exists(Thread* master) const {
bool ThreadPool::slave_available(Thread* master) const {

for (size_t i = 0; i < threads.size(); i++)
if (threads[i]->is_available_to(master))
@@ -261,15 +261,14 @@ Value ThreadPool::split(Position& pos, Stack* ss, Value alpha, Value beta,

Thread* master = pos.this_thread();

if (master->splitPointsCnt >= MAX_SPLITPOINTS_PER_THREAD)
if (master->splitPointsSize >= MAX_SPLITPOINTS_PER_THREAD)
return bestValue;

// Pick the next available split point from the split point stack
SplitPoint& sp = master->splitPoints[master->splitPointsCnt];
SplitPoint& sp = master->splitPoints[master->splitPointsSize];

sp.parent = master->curSplitPoint;
sp.master = master;
sp.cutoff = false;
sp.parent = master->activeSplitPoint;
sp.slavesMask = 1ULL << master->idx;
sp.depth = depth;
sp.bestMove = *bestMove;
@@ -282,15 +281,16 @@
sp.moveCount = moveCount;
sp.pos = &pos;
sp.nodes = 0;
sp.cutoff = false;
sp.ss = ss;

assert(master->searching);

master->curSplitPoint = &sp;
master->activeSplitPoint = &sp;
int slavesCnt = 0;

assert(master->searching);

// Try to allocate available threads and ask them to start searching setting
// is_searching flag. This must be done under lock protection to avoid concurrent
// 'searching' flag. This must be done under lock protection to avoid concurrent
// allocation of the same slave by another master.
mutex.lock();
sp.mutex.lock();
@@ -299,21 +299,21 @@ Value ThreadPool::split(Position& pos, Stack* ss, Value alpha, Value beta,
if (threads[i]->is_available_to(master))
{
sp.slavesMask |= 1ULL << i;
threads[i]->curSplitPoint = &sp;
threads[i]->activeSplitPoint = &sp;
threads[i]->searching = true; // Slave leaves idle_loop()
threads[i]->notify_one(); // Could be sleeping

if (++slavesCnt + 1 >= maxThreadsPerSplitPoint) // Master is always included
if (++slavesCnt + 1 >= maxThreadsPerSplitPoint) // Include master
break;
}

master->splitPointsCnt++;
master->splitPointsSize++;

sp.mutex.unlock();
mutex.unlock();

// Everything is set up. The master thread enters the idle loop, from which
// it will instantly launch a search, because its is_searching flag is set.
// it will instantly launch a search, because its 'searching' flag is set.
// The thread will return from the idle loop when all slaves have finished
// their work at this split point.
if (slavesCnt || Fake)
@@ -326,14 +326,14 @@ Value ThreadPool::split(Position& pos, Stack* ss, Value alpha, Value beta,
}

// We have returned from the idle loop, which means that all threads are
// finished. Note that setting is_searching and decreasing splitPointsCnt is
// finished. Note that setting 'searching' and decreasing splitPointsSize is
// done under lock protection to avoid a race with Thread::is_available_to().
mutex.lock();
sp.mutex.lock();

master->searching = true;
master->splitPointsCnt--;
master->curSplitPoint = sp.parent;
master->splitPointsSize--;
master->activeSplitPoint = sp.parent;
pos.set_nodes_searched(pos.nodes_searched() + sp.nodes);
*bestMove = sp.bestMove;

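Thread::is_available_to() above encodes the "helpful master" rule: an idle thread with no split points may help any master, while a thread that is itself master of one or more split points may only help slaves still recorded in the slavesMask of its topmost split point. The following toy illustration of that check is not part of the commit; ToyThread and ToySplitPoint are hypothetical stand-ins and all locking is omitted.

#include <cstdint>
#include <cstdio>

const int MAX_SPLITPOINTS_PER_THREAD = 4;

struct ToySplitPoint { uint64_t slavesMask; };

struct ToyThread {
  int idx;
  bool searching;
  ToySplitPoint splitPoints[MAX_SPLITPOINTS_PER_THREAD];
  int splitPointsSize;

  bool is_available_to(const ToyThread* master) const {
    if (searching)
        return false;

    int size = splitPointsSize;  // local copy, mirroring the real code
    return !size || (splitPoints[size - 1].slavesMask & (1ULL << master->idx));
  }
};

int main() {
  ToyThread helper;
  helper.idx = 1;
  helper.searching = false;
  helper.splitPointsSize = 1;
  helper.splitPoints[0].slavesMask = (1ULL << 1) | (1ULL << 2);  // slaves 1 and 2

  ToyThread masterA; masterA.idx = 2;  // busy in helper's topmost split point
  ToyThread masterB; masterB.idx = 3;  // unrelated thread

  std::printf("available to thread 2: %d\n", helper.is_available_to(&masterA));  // 1
  std::printf("available to thread 3: %d\n", helper.is_available_to(&masterB));  // 0
  return 0;
}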
20 changes: 8 additions & 12 deletions src/thread.h
@@ -63,10 +63,10 @@ struct SplitPoint {
// Const data after split point has been setup
const Position* pos;
const Search::Stack* ss;
Thread* master;
Depth depth;
Value beta;
int nodeType;
Thread* master;
Move threatMove;

// Const pointers to shared data
@@ -75,7 +75,7 @@

// Shared data
Mutex mutex;
Position* activePositions[MAX_THREADS];
Position* slavesPositions[MAX_THREADS];
volatile uint64_t slavesMask;
volatile int64_t nodes;
volatile Value alpha;
Expand Down Expand Up @@ -111,14 +111,14 @@ struct Thread {
Mutex mutex;
ConditionVariable sleepCondition;
NativeHandle handle;
SplitPoint* volatile curSplitPoint;
volatile int splitPointsCnt;
SplitPoint* volatile activeSplitPoint;
volatile int splitPointsSize;
volatile bool searching;
volatile bool exit;
};


/// MainThread and TimerThread are sublassed from Thread to charaterize the two
/// MainThread and TimerThread are sublassed from Thread to characterize the two
/// special threads: the main one and the recurring timer.

struct MainThread : public Thread {
@@ -150,7 +150,7 @@ class ThreadPool {
TimerThread* timer_thread() { return timer; }

void read_uci_options();
bool available_slave_exists(Thread* master) const;
bool slave_available(Thread* master) const;
void wait_for_think_finished();
void start_thinking(const Position&, const Search::LimitsType&,
const std::vector<Move>&, Search::StateStackPtr&);
@@ -161,16 +161,12 @@

bool sleepWhileIdle;
Depth minimumSplitDepth;
Mutex mutex;
ConditionVariable sleepCondition;

private:
friend struct Thread;
friend struct MainThread;
friend void check_time();

std::vector<Thread*> threads;
TimerThread* timer;
Mutex mutex;
ConditionVariable sleepCondition;
int maxThreadsPerSplitPoint;
};

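Taken together, the renamed Thread members declared above describe a small per-thread stack: splitPoints[] holds the entries, splitPointsSize is the current depth, activeSplitPoint points at the top, and each split point keeps a parent link that cutoff_occurred() walks back toward the root. The sketch below shows that push/pop bookkeeping in isolation; it is not part of the commit, ToyThread and ToySplitPoint are hypothetical stand-ins, and all synchronization is omitted.

#include <cassert>
#include <cstddef>

const int MAX_SPLITPOINTS_PER_THREAD = 4;

struct ToySplitPoint {
  ToySplitPoint* parent;
  bool cutoff;
};

struct ToyThread {
  ToySplitPoint splitPoints[MAX_SPLITPOINTS_PER_THREAD];
  ToySplitPoint* activeSplitPoint;
  int splitPointsSize;

  ToyThread() : activeSplitPoint(NULL), splitPointsSize(0) {}

  // split(): push a new split point on top of the stack
  ToySplitPoint& push() {
    assert(splitPointsSize < MAX_SPLITPOINTS_PER_THREAD);
    ToySplitPoint& sp = splitPoints[splitPointsSize];
    sp.parent = activeSplitPoint;
    sp.cutoff = false;
    activeSplitPoint = &sp;
    splitPointsSize++;
    return sp;
  }

  // end of split(): pop it, restoring the previous active split point
  void pop() {
    splitPointsSize--;
    activeSplitPoint = activeSplitPoint->parent;
  }

  // cutoff_occurred(): has any enclosing split point been cut off?
  bool cutoff_occurred() const {
    for (ToySplitPoint* sp = activeSplitPoint; sp; sp = sp->parent)
        if (sp->cutoff)
            return true;
    return false;
  }
};

int main() {
  ToyThread t;
  ToySplitPoint& outer = t.push();
  t.push();                 // nested split point
  outer.cutoff = true;      // an enclosing split point reported a cutoff
  assert(t.cutoff_occurred());
  t.pop();
  t.pop();
  assert(t.splitPointsSize == 0 && t.activeSplitPoint == NULL);
  return 0;
}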
