Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
[BUGFIX] Engine CompleteWriteDependency: fix stale to_delete_ value.
Browse files Browse the repository at this point in the history
  • Loading branch information
tqchen committed Sep 20, 2015
1 parent fd201b3 commit bd36bbc
Show file tree
Hide file tree
Showing 3 changed files with 23 additions and 18 deletions.
17 changes: 10 additions & 7 deletions src/engine/threaded_engine.cc
Expand Up @@ -83,12 +83,20 @@ void ThreadedVar::CompleteReadDependency(Dispatcher dispatcher) {

template <typename Dispatcher>
bool ThreadedVar::CompleteWriteDependency(Dispatcher dispatcher) {
// this is lock scope
VersionedVarBlock *old_pending_write, *end_of_read_chain;
bool trigger_write = false;
bool trigger_write = false, to_delete = false;
{
// this is lock scope
std::lock_guard<std::mutex> lock{m_};
assert(ready_to_read_ == false);
// really delete
if (to_delete_) {
VersionedVarBlock *head = pending_write_->next;
VersionedVarBlock::Delete(pending_write_);
assert(head->next == nullptr);
VersionedVarBlock::Delete(head);
return true;
}
// detach pending write
old_pending_write = pending_write_;
// search for chains to trigger
Expand Down Expand Up @@ -119,11 +127,6 @@ bool ThreadedVar::CompleteWriteDependency(Dispatcher dispatcher) {
// So it is safe to modify these
VersionedVarBlock *cur_head = old_pending_write->next;
VersionedVarBlock::Delete(old_pending_write);
if (to_delete_) {
assert(cur_head->next == nullptr);
VersionedVarBlock::Delete(cur_head);
return true;
}
// dispatch all the events
while (cur_head != end_of_read_chain) {
if (--cur_head->trigger->wait == 0) {
Expand Down
22 changes: 11 additions & 11 deletions src/io/iter_prefetcher.h
Expand Up @@ -115,22 +115,22 @@ class PrefetcherIter : public IIterator<DataBatch> {
}
virtual bool Next(void) {
if (ready_batches_.size() == param_.prefetch_buffer) {
TBlobBatch* old_batch = ready_batches_.front();
for (size_t i = 0; i < old_batch->data.size(); i++) {
NDArray old_ndarray = ready_ndarrays_.front();
old_ndarray.WaitToWrite();
ready_ndarrays_.pop();
}
iter_.Recycle(&old_batch);
ready_batches_.pop();
TBlobBatch* old_batch = ready_batches_.front();
for (size_t i = 0; i < old_batch->data.size(); i++) {
NDArray old_ndarray = ready_ndarrays_.front();
old_ndarray.WaitToWrite();
ready_ndarrays_.pop();
}
iter_.Recycle(&old_batch);
ready_batches_.pop();
}
TBlobBatch* next_batch = NULL;
if (!iter_.Next(&next_batch)) return false;
out_.data.clear();
// copy the batch
for (size_t i = 0; i < next_batch->data.size(); i++) {
out_.data.push_back(NDArray(next_batch->data[i], mshadow::cpu::kDevMask));
ready_ndarrays_.push(out_.data[i]);
for (size_t i = 0; i < next_batch->data.size(); ++i) {
out_.data.push_back(NDArray(next_batch->data[i], 0));
ready_ndarrays_.push(out_.data[i]);
}
// push the narrays and batch into the queue
ready_batches_.push(next_batch);
Expand Down
2 changes: 2 additions & 0 deletions tests/python/train/test_mlp.py
Expand Up @@ -46,7 +46,9 @@ def test_mlp():
model.fit(X=train_dataiter,
eval_data=val_dataiter,
iter_end_callback=mx.model.do_checkpoint(prefix))
logging.info('Finish fit...')
prob = model.predict(val_dataiter)
logging.info('Finish predict...')
val_dataiter.reset()
y = np.concatenate([label.asnumpy() for _, label in val_dataiter]).astype('int')
py = np.argmax(prob, axis=1)
Expand Down

0 comments on commit bd36bbc

Please sign in to comment.