Fix more float comparison precision issues #4566

Merged
merged 1 commit on Apr 14, 2017
Jump to file or symbol
Failed to load files and symbols.
+9 −9
Split
@@ -695,7 +695,7 @@ TYPED_TEST(ConvolutionLayerTest, TestNDAgainst2D) {
}
ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count());
for (int i = 0; i < backward_result_2d.count(); ++i) {
- EXPECT_EQ(backward_result_2d.cpu_diff()[i],
+ EXPECT_FLOAT_EQ(backward_result_2d.cpu_diff()[i],
backward_result_nd.cpu_diff()[i]);
}
ASSERT_EQ(backward_weight_result_nd.count(),
@@ -538,9 +538,9 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
const vector<Blob<Dtype>*>& params = solver_->net()->learnable_params();
for (int i = 0; i < params.size(); ++i) {
for (int j = 0; j < params[i]->count(); ++j) {
- EXPECT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j])
+ EXPECT_FLOAT_EQ(param_copies[i]->cpu_data()[j], params[i]->cpu_data()[j])
<< "param " << i << " data differed at dim " << j;
- EXPECT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j])
+ EXPECT_FLOAT_EQ(param_copies[i]->cpu_diff()[j], params[i]->cpu_diff()[j])
<< "param " << i << " diff differed at dim " << j;
}
}
@@ -549,9 +549,9 @@ class GradientBasedSolverTest : public MultiDeviceTest<TypeParam> {
const vector<shared_ptr<Blob<Dtype> > >& history = solver_->history();
for (int i = 0; i < history.size(); ++i) {
for (int j = 0; j < history[i]->count(); ++j) {
- EXPECT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j])
+ EXPECT_FLOAT_EQ(history_copies[i]->cpu_data()[j], history[i]->cpu_data()[j])
<< "history blob " << i << " data differed at dim " << j;
- EXPECT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j])
+ EXPECT_FLOAT_EQ(history_copies[i]->cpu_diff()[j], history[i]->cpu_diff()[j])
<< "history blob " << i << " diff differed at dim " << j;
}
}
@@ -791,16 +791,16 @@ TYPED_TEST(NeuronLayerTest, TestPReLUInPlace) {
ip2.Backward(blob_middle_vec_2, propagate_down, blob_bottom_vec_2);
// Check numbers
for (int s = 0; s < blob_bottom_2->count(); ++s) {
- EXPECT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]);
+ EXPECT_FLOAT_EQ(this->blob_bottom_->cpu_diff()[s], blob_bottom_2->cpu_diff()[s]);
}
for (int s = 0; s < ip.blobs()[0]->count(); ++s) {
- EXPECT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]);
+ EXPECT_FLOAT_EQ(ip.blobs()[0]->cpu_diff()[s], ip2.blobs()[0]->cpu_diff()[s]);
}
for (int s = 0; s < ip.blobs()[1]->count(); ++s) {
- EXPECT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]);
+ EXPECT_FLOAT_EQ(ip.blobs()[1]->cpu_diff()[s], ip2.blobs()[1]->cpu_diff()[s]);
}
for (int s = 0; s < prelu.blobs()[0]->count(); ++s) {
- EXPECT_EQ(prelu.blobs()[0]->cpu_diff()[s],
+ EXPECT_FLOAT_EQ(prelu.blobs()[0]->cpu_diff()[s],
prelu2.blobs()[0]->cpu_diff()[s]);
}
}