Catch jmespath type errors (#2668)
* Add test rules and cluster data for issue #2665

* Update expected violations for new test cluster data

* Add expected violation for the new test rule that should generate one

That is, "missing nodePool, should generate violation" from the file
ke_scanner_test_data.yaml.

* Add mock + assertions for correct logging

* Check for JMESPathErrors when searching in KE scanner

The KE API removes some properties (e.g., nodePools) instead of passing
back a null value or an empty list. This causes unhandled exceptions if
the user writes a key extraction expression that assumes the field is
always defined.

Fixes: #2665
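
A minimal standalone sketch of the failure mode (not part of the commit; it assumes only the jmespath package, and the cluster dict is illustrative):

import jmespath
import jmespath.exceptions

# A cluster with no node pools: per the commit message, the GKE API
# omits the nodePools key entirely rather than returning null or [].
cluster = {'name': 'fake-cluster-no-node-pools'}

# A rule key that assumes nodePools is always defined.
expr = jmespath.compile('length(nodePools) > `0`')

try:
    expr.search(cluster)
except jmespath.exceptions.JMESPathError as e:
    # nodePools resolves to null, and length(null) raises
    # JMESPathTypeError, a subclass of JMESPathError, which is
    # the exception the scanner now catches.
    print('caught:', e)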
rvandegrift authored and blueandgold committed Mar 29, 2019
1 parent 6b1bcef commit efef493
Showing 3 changed files with 161 additions and 14 deletions.
14 changes: 13 additions & 1 deletion google/cloud/forseti/scanner/audit/ke_rules_engine.py
@@ -314,7 +314,19 @@ def find_violations(self, ke_cluster):
"""
violations = []

actual = self.rule_jmespath.search(ke_cluster.as_dict)
try:
actual = self.rule_jmespath.search(ke_cluster.as_dict)
except jmespath.exceptions.JMESPathError as e:
LOGGER.warning(
'JMESPath error processing KE cluster %s: %s',
ke_cluster.id,
e
)

# The resource data violated some assumption the user made
# about it. So we cannot know if any violations occurred.
return violations

LOGGER.debug('actual jmespath result: %s', actual)

if self.rule_mode == 'whitelist':
31 changes: 31 additions & 0 deletions tests/scanner/scanners/data/ke_scanner_test_data.yaml
@@ -111,3 +111,34 @@ rules:
    mode: whitelist
    values:
      - True

  # If a cluster has no node pools, the nodePools field is not
  # returned by the GKE API. Thus the scanner doesn't have enough
  # information to evaluate this test rule correctly. Even so, the
  # scanner shouldn't raise an exception and fail. This rule just
  # ensures that the fix for #2665 prevents the crash. See also the
  # next rule.

  - name: missing nodePool, should not generate violation
    resource:
      - type: organization
        resource_ids:
          - '*'
    key: 'length(nodePools) > `0`'
    mode: whitelist
    values:
      - true

  # This rule uses OR to handle clusters with no node pools. Not only
  # should this rule not produce an exception, it should actually
  # detect the test cluster data with no node pools. See also the
  # previous rule.

  - name: missing nodePool, should generate violation
    resource:
      - type: organization
        resource_ids:
          - '*'
    key: 'length(nodePools || `[]`) > `0`'
    mode: whitelist
    values:
      - true
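
A standalone sketch (not part of the commit) of how the two key expressions above differ on a cluster dict that lacks nodePools:

import jmespath

no_pools = {'name': 'fake-cluster-no-node-pools'}

# Without the fallback, length(null) raises JMESPathTypeError:
#   jmespath.search('length(nodePools) > `0`', no_pools)
# With the OR fallback, the missing field falls back to an empty list:
print(jmespath.search('length(nodePools || `[]`) > `0`', no_pools))
# -> False, which is not in the whitelist [true], so the scanner
#    reports a violation for this cluster.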
130 changes: 117 additions & 13 deletions tests/scanner/scanners/ke_scanner_test.py
@@ -62,8 +62,27 @@

FAKE_CLUSTER_ID = size_t_hash('fake-cluster.com')

NO_NODE_POOLS = '''
{
"name": "fake-cluster-no-node-pools",
"addonsConfig": {
"httpLoadBalancing": {},
"kubernetesDashboard": {
"disabled": true
},
"istioConfig": {
"auth": "AUTH_MUTUAL_TLS"
}
},
"selfLink": "fake-cluster-no-node-pools.com"
}
'''

NO_NODE_POOLS_ID = size_t_hash('fake-cluster-no-node-pools.com')

FAKE_CLUSTERS = {
FAKE_CLUSTER_ID: FAKE_CLUSTER,
NO_NODE_POOLS_ID: NO_NODE_POOLS,
}


@@ -92,20 +111,21 @@ def setUpClass(cls):
        project = data_access.add_resource(session, 'project/fake-project',
                                           organization)

-        ke_cluster = data_access.add_resource(
-            session,
-            'kubernetes_cluster/{}'.format(FAKE_CLUSTER_ID),
-            project,
-        )
+        for cluster_id, cluster in FAKE_CLUSTERS.items():
+            ke_cluster = data_access.add_resource(
+                session,
+                'kubernetes_cluster/{}'.format(cluster_id),
+                project,
+            )

-        ke_cluster.data = FAKE_CLUSTER
+            ke_cluster.data = cluster

-        sc = data_access.add_resource(
-            session,
-            'kubernetes_service_config/{}'.format(FAKE_CLUSTER_ID),
-            ke_cluster,
-        )
-        sc.data = SERVER_CONFIG
+            sc = data_access.add_resource(
+                session,
+                'kubernetes_service_config/{}'.format(cluster_id),
+                ke_cluster,
+            )
+            sc.data = SERVER_CONFIG

        session.commit()

@@ -116,12 +136,88 @@ def setUp(self):
'', unittest_utils.get_datafile_path(
__file__, 'ke_scanner_test_data.yaml'))

    @mock.patch(
        'google.cloud.forseti.scanner.audit.ke_rules_engine.LOGGER',
        autospec=True,
    )
    @mock.patch.object(
        ke_scanner.KeScanner,
        '_output_results_to_db', autospec=True)
-    def test_run_scanner(self, mock_output_results):
+    def test_run_scanner(self, mock_output_results, mock_logger):
        self.scanner.run()
expected_violations = [
{'rule_name': 'explicit whitelist, pass',
'resource_name': 'fake-cluster-no-node-pools',
'resource_data': '{"addonsConfig": {"httpLoadBalancing": {}, "istioConfig": {"auth": "AUTH_MUTUAL_TLS"}, "kubernetesDashboard": {"disabled": true}}, "name": "fake-cluster-no-node-pools", "selfLink": "fake-cluster-no-node-pools.com"}',
'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID),
'resource_id': NO_NODE_POOLS_ID,
'rule_index': 0,
'violation_type': 'KE_VIOLATION',
'violation_data': {'violation_reason': "name has value fake-cluster-no-node-pools, which is not in the whitelist (['fake-cluster'])", 'cluster_name': 'fake-cluster-no-node-pools', 'project_id': 'fake-project', 'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID)},
'resource_type': 'kubernetes_cluster'},

{'rule_name': 'explicit whitelist, fail',
'resource_name': u'fake-cluster-no-node-pools',
'resource_data': '{"addonsConfig": {"httpLoadBalancing": {}, "istioConfig": {"auth": "AUTH_MUTUAL_TLS"}, "kubernetesDashboard": {"disabled": true}}, "name": "fake-cluster-no-node-pools", "selfLink": "fake-cluster-no-node-pools.com"}',
'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID),
'resource_id': NO_NODE_POOLS_ID,
'rule_index': 1,
'violation_type': 'KE_VIOLATION',
'violation_data': {'violation_reason': u"name has value fake-cluster-no-node-pools, which is not in the whitelist (['real-cluster'])", 'cluster_name': u'fake-cluster-no-node-pools', 'project_id': 'fake-project', 'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID)},
'resource_type': 'kubernetes_cluster'},

{'rule_name': 'multiple values, pass',
'resource_name': u'fake-cluster-no-node-pools',
'resource_data': '{"addonsConfig": {"httpLoadBalancing": {}, "istioConfig": {"auth": "AUTH_MUTUAL_TLS"}, "kubernetesDashboard": {"disabled": true}}, "name": "fake-cluster-no-node-pools", "selfLink": "fake-cluster-no-node-pools.com"}',
'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID),
'resource_id': NO_NODE_POOLS_ID,
'rule_index': 4,
'violation_type': 'KE_VIOLATION',
'violation_data': {'violation_reason': u"name has value fake-cluster-no-node-pools, which is not in the whitelist (['real-cluster', 'fake-cluster'])", 'cluster_name': u'fake-cluster-no-node-pools', 'project_id': 'fake-project', 'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID)},
'resource_type': 'kubernetes_cluster'},

{'rule_name': 'multiple values, fail',
'resource_name': u'fake-cluster-no-node-pools',
'resource_data': '{"addonsConfig": {"httpLoadBalancing": {}, "istioConfig": {"auth": "AUTH_MUTUAL_TLS"}, "kubernetesDashboard": {"disabled": true}}, "name": "fake-cluster-no-node-pools", "selfLink": "fake-cluster-no-node-pools.com"}',
'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID),
'resource_id': NO_NODE_POOLS_ID,
'rule_index': 5,
'violation_type': 'KE_VIOLATION',
'violation_data': {'violation_reason': u"name has value fake-cluster-no-node-pools, which is not in the whitelist (['real-cluster', 'imaginary-cluster'])", 'cluster_name': u'fake-cluster-no-node-pools', 'project_id': 'fake-project', 'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID)},
'resource_type': 'kubernetes_cluster'},

{'rule_name': 'use projection to look for a list, pass',
'resource_name': u'fake-cluster-no-node-pools',
'resource_data': '{"addonsConfig": {"httpLoadBalancing": {}, "istioConfig": {"auth": "AUTH_MUTUAL_TLS"}, "kubernetesDashboard": {"disabled": true}}, "name": "fake-cluster-no-node-pools", "selfLink": "fake-cluster-no-node-pools.com"}',
'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID),
'resource_id': NO_NODE_POOLS_ID,
'rule_index': 7,
'violation_type': 'KE_VIOLATION',
'violation_data': {'violation_reason': "nodePools[*].config.imageType has value None, which is not in the whitelist ([['COS', 'COS']])", 'cluster_name': u'fake-cluster-no-node-pools', 'project_id': 'fake-project', 'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID)},
'resource_type': 'kubernetes_cluster'},

{'rule_name': 'use projection, look for a list, fail',
'resource_name': u'fake-cluster-no-node-pools',
'resource_data': '{"addonsConfig": {"httpLoadBalancing": {}, "istioConfig": {"auth": "AUTH_MUTUAL_TLS"}, "kubernetesDashboard": {"disabled": true}}, "name": "fake-cluster-no-node-pools", "selfLink": "fake-cluster-no-node-pools.com"}',
'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID),
'resource_id': NO_NODE_POOLS_ID,
'rule_index': 8,
'violation_type': 'KE_VIOLATION',
'violation_data': {'violation_reason': "nodePools[*].config.imageType has value None, which is not in the whitelist ([['COS'], ['Ubuntu', 'COS']])", 'cluster_name': u'fake-cluster-no-node-pools', 'project_id': 'fake-project', 'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID)},
'resource_type': 'kubernetes_cluster'},

{'rule_name': 'missing nodePool, should generate violation',
'resource_name': 'fake-cluster-no-node-pools',
'resource_data': '{"addonsConfig": {"httpLoadBalancing": {}, "istioConfig": {"auth": "AUTH_MUTUAL_TLS"}, "kubernetesDashboard": {"disabled": true}}, "name": "fake-cluster-no-node-pools", "selfLink": "fake-cluster-no-node-pools.com"}',
'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID),
'resource_id': NO_NODE_POOLS_ID,
'rule_index': 12,
'violation_type': 'KE_VIOLATION',
'violation_data': {'violation_reason': 'length(nodePools || `[]`) > `0` has value False, which is not in the whitelist ([True])',
'cluster_name': 'fake-cluster-no-node-pools',
'project_id': 'fake-project',
'full_name': 'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(NO_NODE_POOLS_ID)},
'resource_type': 'kubernetes_cluster'},

{'rule_name': 'explicit whitelist, fail',
'resource_name': u'fake-cluster',
'full_name': u'organization/12345/project/fake-project/kubernetes_cluster/{}/'.format(FAKE_CLUSTER_ID),
@@ -153,6 +249,14 @@ def test_run_scanner(self, mock_output_results):
mock_output_results.assert_called_once_with(mock.ANY,
expected_violations)

        # Check that the "missing nodePool, should not generate
        # violation" rule did in fact log a warning.
        self.assertTrue(mock_logger.warning.called)
        self.assertIn(
            'JMESPath error processing KE cluster %s:',
            mock_logger.warning.call_args[0][0],
        )
        self.assertIn(NO_NODE_POOLS_ID,
                      mock_logger.warning.call_args[0][1])


if __name__ == '__main__':
    unittest.main()
