
Cleanup PR 4127 #4153

Merged · 6 commits · Feb 6, 2018
Changes from 3 commits
6 changes: 3 additions & 3 deletions scripts/check_format.sh
@@ -41,7 +41,7 @@ function check_shogun_style
fi
done

RESULT_OUTPUT="$(git clang-format-3.8 --commit $BASE_COMMIT --diff --binary `which clang-format-3.8` $LIST)"

Member commented:

Need to get the Docker image fixed for this on Travis.

dhelekal (Contributor, Author) commented on Feb 5, 2018:
Yeah, sorry, I didn't want to commit that change. It's just to make the style checker run locally; working on reverting this.

RESULT_OUTPUT="$(git clang-format --commit $BASE_COMMIT --diff --binary `which clang-format` $LIST)"

if [ "$RESULT_OUTPUT" == "no modified files to format" ] \
|| [ "$RESULT_OUTPUT" == "clang-format-3.8 did not modify any files" ] \
@@ -54,10 +54,10 @@ function check_shogun_style
echo "clang-format failed."
echo "To reproduce it locally please run: "
echo -e "\t1) git checkout ${1:-}"
echo -e "\t2) git clang-format-3.8 --commit $BASE_COMMIT --diff --binary $(which clang-format-3.8)"
echo -e "\t2) git clang-format --commit $BASE_COMMIT --diff --binary $(which clang-format)"
echo "To fix the errors automatically please run: "
echo -e "\t1) git checkout ${1:-}"
echo -e "\t2) git clang-format-3.8 --commit $BASE_COMMIT --binary $(which clang-format-3.8)"
echo -e "\t2) git clang-format --commit $BASE_COMMIT --binary $(which clang-format)"
echo "-----"
echo "Style errors found:"
echo "$RESULT_OUTPUT"
39 changes: 14 additions & 25 deletions src/shogun/kernel/CombinedKernel.cpp
@@ -66,27 +66,20 @@ bool CCombinedKernel::init_with_extracted_subsets(
SGVector<index_t> rhs_subset)
{

auto l_combined = dynamic_cast<CCombinedFeatures*>(l);
auto r_combined = dynamic_cast<CCombinedFeatures*>(r);

if (!l_combined || !r_combined)
SG_ERROR("Cast failed - unsupported features passed")

Member commented:

IMO this will never happen (unless l or r is null), since if the typecast fails it'll throw a bad_cast exception.


CKernel::init(l, r);
REQUIRE(
l->get_feature_class() == C_COMBINED,
"%s::init(): LHS features are"
" of class %s but need to be combined features!\n",
get_name(), l->get_name());
REQUIRE(
r->get_feature_class() == C_COMBINED,
"%s::init(): RHS features are"
" of class %s but need to be combined features!\n",
get_name(), r->get_name());
ASSERT(l->get_feature_type() == F_UNKNOWN)
ASSERT(r->get_feature_type() == F_UNKNOWN)

CFeatures* lf = NULL;
CFeatures* rf = NULL;
CKernel* k = NULL;

auto l_combined = dynamic_cast<CCombinedFeatures*>(l);
auto r_combined = dynamic_cast<CCombinedFeatures*>(r);

bool result = true;
index_t f_idx = 0;

@@ -138,6 +131,8 @@ bool CCombinedKernel::init_with_extracted_subsets(
"No kernel matrix was assigned to this Custom kernel\n")

auto k_custom = dynamic_cast<CCustomKernel*>(k);
if (!k_custom)

dhelekal (Contributor, Author) commented:

As we're casting a pointer type, it should be NULL on fail, AFAIK? From the website: "If the cast fails and new_type is a pointer type, it returns a null pointer of that type."


Member replied:

/me should really RTFM before saying RTFM

SG_ERROR("Dynamic cast to custom kernel failed")

// clear all previous subsets
k_custom->remove_all_row_subsets();
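
For reference, a minimal standalone C++ sketch of the behavior settled in the thread above (hypothetical types, not part of this PR): dynamic_cast to a pointer type signals failure by returning a null pointer, while dynamic_cast to a reference type throws std::bad_cast.

#include <iostream>
#include <typeinfo>

struct CBase { virtual ~CBase() = default; };
struct CDerived : CBase {};
struct COther : CBase {};

int main()
{
    CBase* base = new COther();

    // Pointer form: a failed cast returns nullptr, no exception.
    CDerived* as_derived = dynamic_cast<CDerived*>(base);
    if (!as_derived)
        std::cout << "pointer cast failed, returned nullptr\n";

    // Reference form: a failed cast throws std::bad_cast instead.
    try
    {
        CDerived& ref = dynamic_cast<CDerived&>(*base);
        (void)ref;
    }
    catch (const std::bad_cast&)
    {
        std::cout << "reference cast threw std::bad_cast\n";
    }

    delete base;
    return 0;
}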
@@ -193,21 +188,18 @@ bool CCombinedKernel::init(CFeatures* l, CFeatures* r)
{
init_subkernel_weights();
}
/*
* The two subsets, we will be passing those to init_with_extracted_subsets
*/

if (!l)
SG_ERROR("LHS features are NULL");
if (!r)
SG_ERROR("RHS features are NULL");

SGVector<index_t> lhs_subset;
SGVector<index_t> rhs_subset;

/*
* We will be passing these features to init_with_extracted_subsets
*/
CCombinedFeatures* combined_l;
CCombinedFeatures* combined_r;

/*
* Extract the subsets so that we can pass them on
*/
auto l_subset_stack = l->get_subset_stack();
auto r_subset_stack = r->get_subset_stack();

@@ -258,9 +250,6 @@ bool CCombinedKernel::init(CFeatures* l, CFeatures* r)
}
else
{
/*
* Otherwise, we just pass l & r straight on
*/
combined_l = (CCombinedFeatures*)l;
combined_r = (CCombinedFeatures*)r;
}
92 changes: 45 additions & 47 deletions tests/unit/kernel/CombinedKernel_unittest.cc
@@ -45,10 +45,12 @@ TEST(CombinedKernelTest,test_array_operations)
TEST(CombinedKernelTest, test_subset_mixed)
{

CMeanShiftDataGenerator* gen = new CMeanShiftDataGenerator(0, 2);
CFeatures* feats = gen->get_streamed_features(10);
int n_runs = 10;

CCombinedFeatures* cf = new CCombinedFeatures();
auto gen = new CMeanShiftDataGenerator(0, 2);
CFeatures* feats = gen->get_streamed_features(n_runs);

CCombinedFeatures* feats_combined = new CCombinedFeatures();

CCombinedKernel* combined = new CCombinedKernel();

@@ -64,59 +66,59 @@ TEST(CombinedKernelTest, test_subset_mixed)

combined->append_kernel(custom_1);
combined->append_kernel(gaus_1);
cf->append_feature_obj(feats);
feats_combined->append_feature_obj(feats);

combined->append_kernel(custom_2);
combined->append_kernel(gaus_2);
cf->append_feature_obj(feats);
feats_combined->append_feature_obj(feats);

SGVector<index_t> inds(10);
inds.range_fill();

for (index_t i = 0; i < 10; ++i)
for (index_t i = 0; i < n_runs; ++i)
{
CMath::permute(inds);

cf->add_subset(inds);
combined->init(cf, cf);
feats_combined->add_subset(inds);
combined->init(feats_combined, feats_combined);

CKernel* k_g = combined->get_kernel(1);
CKernel* k_0 = combined->get_kernel(0);
CKernel* k_3 = combined->get_kernel(2);
CKernel* ground_truth_kernel = combined->get_kernel(1);
CKernel* custom_kernel_1 = combined->get_kernel(0);
CKernel* custom_kernel_2 = combined->get_kernel(2);

SGMatrix<float64_t> gauss_matrix = k_g->get_kernel_matrix();
SGMatrix<float64_t> custom_matrix_1 = k_0->get_kernel_matrix();
SGMatrix<float64_t> custom_matrix_2 = k_3->get_kernel_matrix();
SGMatrix<float64_t> gauss_matrix =
ground_truth_kernel->get_kernel_matrix();
SGMatrix<float64_t> custom_matrix_1 =
custom_kernel_1->get_kernel_matrix();
SGMatrix<float64_t> custom_matrix_2 =
custom_kernel_2->get_kernel_matrix();

for (index_t j = 0; j < 10; ++j)
for (index_t j = 0; j < n_runs; ++j)
{
for (index_t k = 0; k < 10; ++k)
for (index_t k = 0; k < n_runs; ++k)
{
EXPECT_LE(
CMath::abs(gauss_matrix(k, j) - custom_matrix_1(k, j)),
1e-6);
EXPECT_LE(
CMath::abs(gauss_matrix(k, j) - custom_matrix_2(k, j)),
1e-6);
EXPECT_NEAR(gauss_matrix(j, k), custom_matrix_1(j, k), 1e-6);
EXPECT_NEAR(gauss_matrix(j, k), custom_matrix_2(j, k), 1e-6);
}
}

cf->remove_subset();
SG_UNREF(k_g);
SG_UNREF(k_0);
SG_UNREF(k_3);
feats_combined->remove_subset();
SG_UNREF(ground_truth_kernel);
SG_UNREF(custom_kernel_1);
SG_UNREF(custom_kernel_2);
}

SG_UNREF(gen);
SG_UNREF(gaus_ck);
SG_UNREF(combined);
}
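
An aside on the assertion change in this test (a sketch, not part of the PR): googletest's EXPECT_NEAR(val1, val2, abs_error) asserts |val1 - val2| <= abs_error, which is exactly the bound the removed EXPECT_LE/CMath::abs lines expressed. A minimal standalone illustration with a hypothetical test name:

#include <gtest/gtest.h>
#include <cmath>

TEST(AssertionStyle, near_matches_manual_abs_bound)
{
    double expected = 1.0;
    double actual = 1.0 + 4e-7; // within the 1e-6 tolerance

    // Old pattern: manual absolute-difference bound.
    EXPECT_LE(std::abs(actual - expected), 1e-6);

    // New pattern: same bound, with clearer failure output.
    EXPECT_NEAR(actual, expected, 1e-6);
}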

TEST(CombinedKernelTest, test_subset_combined_only)
{

CMeanShiftDataGenerator* gen = new CMeanShiftDataGenerator(0, 2);
CFeatures* feats = gen->get_streamed_features(10);
int n_runs = 10;

auto gen = new CMeanShiftDataGenerator(0, 2);
CFeatures* feats = gen->get_streamed_features(n_runs);

CCombinedKernel* combined = new CCombinedKernel();

@@ -125,48 +127,44 @@ TEST(CombinedKernelTest, test_subset_combined_only)

CCustomKernel* custom_1 = new CCustomKernel(gaus_ck);
CCustomKernel* custom_2 = new CCustomKernel(gaus_ck);
;

combined->append_kernel(custom_1);
combined->append_kernel(custom_2);

SGVector<index_t> inds(10);
SGVector<index_t> inds(n_runs);
inds.range_fill();

for (index_t i = 0; i < 10; ++i)
for (index_t i = 0; i < n_runs; ++i)
{
CMath::permute(inds);

feats->add_subset(inds);
combined->init(feats, feats);
gaus_ck->init(feats, feats);

CKernel* k_0 = combined->get_kernel(0);
CKernel* k_1 = combined->get_kernel(1);
CKernel* custom_kernel_1 = combined->get_kernel(0);
CKernel* custom_kernel_2 = combined->get_kernel(1);

SGMatrix<float64_t> gauss_matrix = gaus_ck->get_kernel_matrix();
SGMatrix<float64_t> custom_matrix_1 = k_0->get_kernel_matrix();
SGMatrix<float64_t> custom_matrix_2 = k_1->get_kernel_matrix();
SGMatrix<float64_t> custom_matrix_1 =
custom_kernel_1->get_kernel_matrix();
SGMatrix<float64_t> custom_matrix_2 =
custom_kernel_2->get_kernel_matrix();

for (index_t j = 0; j < 10; ++j)
for (index_t j = 0; j < n_runs; ++j)
{
for (index_t k = 0; k < 10; ++k)
for (index_t k = 0; k < n_runs; ++k)
{
EXPECT_LE(
CMath::abs(gauss_matrix(k, j) - custom_matrix_1(k, j)),
1e-6);
EXPECT_LE(
CMath::abs(gauss_matrix(k, j) - custom_matrix_2(k, j)),
1e-6);
EXPECT_NEAR(gauss_matrix(j, k), custom_matrix_1(j, k), 1e-6);
EXPECT_NEAR(gauss_matrix(j, k), custom_matrix_2(j, k), 1e-6);
}
}

feats->remove_subset();
SG_UNREF(k_0);
SG_UNREF(k_1);
SG_UNREF(custom_kernel_1);
SG_UNREF(custom_kernel_2);
}

SG_UNREF(gen);
SG_UNREF(gaus_ck);
SG_UNREF(combined);
}