Skip to content

Commit

Permalink
Merge pull request #98 from caseydavenport/cd-kubeconfig
Browse files Browse the repository at this point in the history
kubeconfig, podCidr support
  • Loading branch information
caseydavenport committed May 24, 2016
2 parents 46f510c + d5da9da commit f2e2cf7
Show file tree
Hide file tree
Showing 7 changed files with 338 additions and 79 deletions.
9 changes: 5 additions & 4 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ dist/calico: $(SRCFILES)
docker run --rm \
-v `pwd`:/code \
calico/build:$(BUILD_VERSION) \
pyinstaller calico.py -ayF
/bin/sh -c "pip install pykube && pyinstaller calico.py -ayF"

# Makes the IPAM plugin.
dist/calico-ipam: $(SRCFILES)
Expand All @@ -46,13 +46,13 @@ deploy-rkt: dist/calicoctl
ut: update-version
docker run --rm -v `pwd`:/code \
calico/test \
nosetests tests/unit -c nose.cfg
sh -c "pip install pykube && nosetests tests/unit -c nose.cfg"

# Run the fv tests.
fv: update-version
docker run --rm -v `pwd`:/code \
calico/test \
nosetests tests/fv -c nose.cfg
sh -c "pip install pykube && nosetests tests/fv -c nose.cfg"

# Makes tests on Circle CI.
test-circle: update-version dist/calico dist/calico-ipam
Expand All @@ -63,7 +63,8 @@ test-circle: update-version dist/calico dist/calico-ipam
-v $(CIRCLE_TEST_REPORTS):/circle_output \
-e COVERALLS_REPO_TOKEN=$(COVERALLS_REPO_TOKEN) \
calico/test sh -c \
'nosetests tests -c nose.cfg \
'pip install pykube && \
nosetests tests -c nose.cfg \
--with-xunit --xunit-file=/circle_output/output.xml; RC=$$?;\
[[ ! -z "$$COVERALLS_REPO_TOKEN" ]] && coveralls || true; exit $$RC'

Expand Down
70 changes: 67 additions & 3 deletions calico.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,13 +29,17 @@
ETCD_ENDPOINTS_ENV)
from pycalico.datastore_errors import MultipleEndpointsMatch

from pykube.config import KubeConfig
from pykube.http import HTTPClient
from pykube.objects import Node

from calico_cni import __version__, __commit__, __branch__
from calico_cni.util import (configure_logging, parse_cni_args, print_cni_error,
handle_datastore_error, CniError)

from calico_cni.container_engines import get_container_engine
from calico_cni.constants import *
from calico_cni.policy_drivers import ApplyProfileError, get_policy_driver
from calico_cni.policy_drivers import PolicyException, get_policy_driver
from ipam import IpamPlugin

# Logging configuration.
Expand Down Expand Up @@ -143,6 +147,9 @@ def __init__(self, network_config, env):
else:
self.workload_id = self.container_id
self.orchestrator_id = "cni"
kubernetes_config = network_config.get("kubernetes", {})
self.kubeconfig_path = kubernetes_config.get("kubeconfig")
self.k8s_node_name = kubernetes_config.get("node_name", socket.gethostname())
"""
Configure orchestrator specific settings.
workload_id: In Kubernetes, this is the pod's namespace and name.
Expand Down Expand Up @@ -259,7 +266,7 @@ def _add_new_endpoint(self):
# Provision / apply profile on the created endpoint.
try:
self.policy_driver.apply_profile(endpoint)
except ApplyProfileError as e:
except PolicyException as e:
_log.error("Failed to apply profile to endpoint %s",
endpoint.name)
self._remove_veth(endpoint)
Expand Down Expand Up @@ -297,7 +304,7 @@ def _add_existing_endpoint(self, endpoint):
# Apply a new profile to this endpoint.
try:
self.policy_driver.apply_profile(endpoint)
except ApplyProfileError as e:
except PolicyException as e:
# Hit an exception applying the profile. We haven't configured
# anything, so we don't need to clean anything up. Just exit.
_log.error("Failed to apply profile to endpoint %s",
Expand Down Expand Up @@ -441,14 +448,71 @@ def _call_ipam_plugin(self, env):
"msg": e.msg,
"details": e.details})
code = e.code
elif self.ipam_type == "host-local":
# We've been told to use the "host-local" IPAM plugin.
# Check if we need to use the Kubernetes podCidr for this node, and
# if so replace the subnet field with the correct value.
if self.network_config["ipam"].get("subnet") == "usePodCidr":
if not self.running_under_k8s:
print_cni_error(ERR_CODE_GENERIC, "Invalid network config",
"Must be running under Kubernetes to use 'subnet: usePodCidr'")
sys.exit(ERR_CODE_GENERIC)
_log.info("Using Kubernetes podCIDR for node: %s", self.k8s_node_name)
pod_cidr = self._get_kubernetes_pod_cidr()
self.network_config["ipam"]["subnet"] = str(pod_cidr)

# Call the IPAM plugin.
_log.debug("Calling host-local IPAM plugin")
code, response = self._call_binary_ipam_plugin(env)
else:
# Using some other IPAM plugin - call it.
_log.debug("Using binary plugin")
code, response = self._call_binary_ipam_plugin(env)

# Return the IPAM return code and output.
_log.debug("IPAM response (rc=%s): %s", code, response)
return code, response

def _get_kubernetes_pod_cidr(self):
    """
    Attempt to get the Kubernetes pod CIDR for this node.

    Queries the Kubernetes API (authenticated via the configured
    kubeconfig file) for this node's spec.podCIDR value.

    :return: The pod CIDR string for this node (e.g. "10.244.1.0/24").
        Exits the process with ERR_CODE_GENERIC if no kubeconfig was
        provided, the API query fails, or the node has no podCIDR set.
    """
    _log.info("Getting node.spec.podCidr from API, kubeconfig: %s",
              self.kubeconfig_path)
    if not self.kubeconfig_path:
        # For now, kubeconfig is the only supported auth method.
        print_cni_error(ERR_CODE_GENERIC, "Missing kubeconfig",
                "usePodCidr requires specification of kubeconfig file")
        sys.exit(ERR_CODE_GENERIC)

    # Query the API for this node. Default node name to the hostname.
    try:
        api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path))
        node = None
        for n in Node.objects(api):
            _log.debug("Checking node: %s", n.obj["metadata"]["name"])
            if n.obj["metadata"]["name"] == self.k8s_node_name:
                node = n
                break
        if not node:
            # Format the node name into the message - the original code
            # passed it as a second positional argument to KeyError, so
            # it was never interpolated into the error text.
            raise KeyError("Unable to find node in API: %s" %
                           self.k8s_node_name)
        _log.debug("Found node %s: %s: ", node.obj["metadata"]["name"],
                   node.obj["spec"])
    except Exception:
        # Log the full traceback before exiting; the CNI error output
        # alone hides the root cause of the failure.
        _log.exception("Failed to query Kubernetes API for node %s",
                       self.k8s_node_name)
        print_cni_error(ERR_CODE_GENERIC, "Error querying Kubernetes API",
                        "Failed to get podCidr from Kubernetes API")
        sys.exit(ERR_CODE_GENERIC)
    else:
        pod_cidr = node.obj["spec"].get("podCIDR")
        if not pod_cidr:
            print_cni_error(ERR_CODE_GENERIC, "Missing podCidr",
                            "No podCidr for node %s" % self.k8s_node_name)
            sys.exit(ERR_CODE_GENERIC)
    _log.debug("Using podCidr: %s", pod_cidr)
    return pod_cidr

def _call_binary_ipam_plugin(self, env):
"""Calls through to the specified IPAM plugin binary.
Expand Down
4 changes: 2 additions & 2 deletions calico_cni/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
import socket

# Regex to parse CNI_ARGS. Looks for key value pairs separated by an equals
# sign and followed either the end of the string, or a colon (indicating
# sign and followed by either the end of the string, or a semicolon
# (indicating that there is another CNI_ARG key/value pair).
CNI_ARGS_RE = re.compile("([a-zA-Z0-9/\.\-\_ ]+)=([a-zA-Z0-9/\.\-\_ ]+)(?:;|$)")

Expand Down Expand Up @@ -54,7 +54,7 @@
ASSIGN_IPV4_KEY = "assign_ipv4"
ASSIGN_IPV6_KEY = "assign_ipv6"

# Constants for getting policy specific information
# Constants for getting policy specific information
# from the policy dictionary in the network config file.
API_ROOT_KEY = "k8s_api_root"
AUTH_TOKEN_KEY = "k8s_auth_token"
Expand Down
61 changes: 45 additions & 16 deletions calico_cni/policy_drivers.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,11 @@
from pycalico.datastore_errors import MultipleEndpointsMatch
from pycalico.util import validate_characters

from pykube.config import KubeConfig
from pykube.http import HTTPClient
from pykube.objects import Pod
from pykube.query import Query

from calico_cni.constants import *
import calico_cni.policy_parser

Expand Down Expand Up @@ -78,7 +83,7 @@ def apply_profile(self, endpoint):

# Check if the profile has already been applied.
if self.profile_name in endpoint.profile_ids:
_log.warning("Endpoint already in profile %s",
_log.warning("Endpoint already in profile %s",
self.profile_name)
return

Expand Down Expand Up @@ -131,7 +136,7 @@ class KubernetesNoPolicyDriver(DefaultPolicyDriver):
"""
Implements default network policy for a Kubernetes container manager.
The different between this an the DefaultPolicyDriver is that this
The difference between this and the DefaultPolicyDriver is that this
engine creates profiles which allow all incoming traffic.
"""
def generate_rules(self):
Expand All @@ -156,14 +161,15 @@ class KubernetesAnnotationDriver(DefaultPolicyDriver):
"""

def __init__(self, pod_name, namespace, auth_token, api_root,
client_certificate, client_key, certificate_authority):
client_certificate, client_key, certificate_authority, kubeconfig):
self.pod_name = pod_name
self.namespace = namespace
self.namespace = namespace
self.policy_parser = calico_cni.policy_parser.PolicyParser(namespace)
self.auth_token = auth_token
self.client_certificate = client_certificate
self.client_key = client_key
self.certificate_authority = certificate_authority or False
self.kubeconfig_path = kubeconfig
self.api_root = api_root
self.profile_name = "%s_%s" % (namespace, pod_name)
self._annotation_key = "projectcalico.org/policy"
Expand All @@ -180,11 +186,11 @@ def remove_profile(self):
_log.info("Deleting Calico profile: %s", self.profile_name)
self._client.remove_profile(self.profile_name)
except KeyError:
_log.warning("Profile %s does not exist, cannot delete",
_log.warning("Profile %s does not exist, cannot delete",
self.profile_name)

def generate_rules(self):
"""Generates rules based on Kubernetes annotations.
"""Generates rules based on Kubernetes annotations.
"""
# Get the pod from the API.
if self.namespace != "kube-system":
Expand Down Expand Up @@ -213,16 +219,16 @@ def generate_rules(self):
except ValueError:
# Invalid rule specified.
_log.error("Invalid policy defined: %s", rule)
raise ApplyProfileError("Invalid policy defined",
details=rule)
raise ApplyProfileError("Invalid policy defined",
details=rule)
else:
# Rule was valid - append.
inbound_rules.append(parsed_rule)
else:
# Isolate on namespace boundaries by default.
_log.info("No policy annotations - apply namespace isolation")
inbound_rules = [Rule(action="allow", src_tag=self.ns_tag)]

return Rules(id=self.profile_name,
inbound_rules=inbound_rules,
outbound_rules=outbound_rules)
Expand All @@ -246,8 +252,22 @@ def generate_tags(self):
def _get_api_pod(self):
"""Get the pod resource from the API.
:return: JSON object containing the pod spec
:return: Dictionary representation of Pod from k8s API.
"""
# If kubeconfig was specified, use the pykube library.
if self.kubeconfig_path:
_log.info("Using kubeconfig at %s", self.kubeconfig_path)
try:
api = HTTPClient(KubeConfig.from_file(self.kubeconfig_path))
pod = Query(api, Pod, self.namespace).get_by_name(self.pod_name)
_log.debug("Found pod: %s: ", pod.obj)
except Exception as e:
raise PolicyException("Error querying Kubernetes API",
details=str(e.message))
else:
return pod.obj

# Otherwise, use direct HTTP query to get pod.
with requests.Session() as session:
if self.auth_token:
_log.debug('Updating header with Token %s', self.auth_token)
Expand Down Expand Up @@ -277,7 +297,7 @@ def _get_api_pod(self):
verify=self.certificate_authority)
except BaseException, e:
_log.exception("Exception hitting Kubernetes API")
raise ApplyProfileError("Error querying Kubernetes API",
raise ApplyProfileError("Error querying Kubernetes API",
details=str(e.message))
else:
# Check the HTTP response code for errors.
Expand Down Expand Up @@ -356,24 +376,31 @@ def remove_profile(self):
_log.debug("No profile to remove for pod %s", self.pod_name)


class ApplyProfileError(Exception):
class PolicyException(Exception):
    """
    Generic base class for policy errors.

    :param msg: Human-readable message describing the error.
    :param details: Optional extra context for the error, surfaced in
        the CNI error output.
    """
    def __init__(self, msg=None, details=None):
        Exception.__init__(self, msg)
        self.details = details

class ApplyProfileError(PolicyException):
    """Raised when a profile cannot be applied to an endpoint, e.g.
    because the endpoint does not exist."""


def get_policy_driver(cni_plugin):
"""Returns a policy driver based on CNI configuration arguments.
:return: a policy driver
:return: a policy driver
"""
# Extract policy config and network name.
policy_config = cni_plugin.network_config.get(POLICY_KEY, {})
network_name = cni_plugin.network_config["name"]
policy_type = policy_config.get("type")
k8s_config = cni_plugin.network_config.get("kubernetes", {})
supported_policy_types = [None,
POLICY_MODE_KUBERNETES,
POLICY_MODE_KUBERNETES_ANNOTATIONS]
Expand All @@ -397,6 +424,7 @@ def get_policy_driver(cni_plugin):
client_key = policy_config.get(K8S_CLIENT_KEY_VAR)
certificate_authority = policy_config.get(
K8S_CERTIFICATE_AUTHORITY_VAR)
kubeconfig_path = k8s_config.get("kubeconfig")

if (client_key and not os.path.isfile(client_key)) or \
(client_certificate and not os.path.isfile(client_certificate)) or \
Expand All @@ -418,7 +446,8 @@ def get_policy_driver(cni_plugin):
api_root,
client_certificate,
client_key,
certificate_authority]
certificate_authority,
kubeconfig_path]
else:
_log.debug("Using Kubernetes Driver - no policy")
driver_cls = KubernetesNoPolicyDriver
Expand All @@ -430,7 +459,7 @@ def get_policy_driver(cni_plugin):

# Create an instance of the driver class.
try:
_log.debug("Creating instance of %s with args %s",
_log.debug("Creating instance of %s with args %s",
driver_cls, driver_args)
driver = driver_cls(*driver_args)
except ValueError as e:
Expand Down
4 changes: 4 additions & 0 deletions configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,8 @@ When using Calico IPAM, the following flags determine what IP addresses should b

A specific IP address can be chosen by using [`CNI_ARGS`](https://github.com/appc/cni/blob/master/SPEC.md#parameters) and setting `IP` to the desired value.

When using the CNI `host-local` IPAM plugin, a special value `usePodCidr` is allowed for the subnet field. This tells the plugin to determine the subnet to use from the Kubernetes API based on the Node.podCIDR field. This is currently only supported when using `kubeconfig` for accessing the API.

## Kubernetes specific

When using the Calico CNI plugin with Kubernetes, an additional config block can be specified to control how network policy is configured. The required config block is `policy`. See the [Calico Kubernetes documentation](https://github.com/projectcalico/calico-containers/tree/master/docs/cni/kubernetes) for more information.
Expand All @@ -63,6 +65,8 @@ The CNI plugin may need to authenticate with the Kubernetes API server. The foll
* `k8s_client_key`
* `k8s_certificate_authority`
* Verifying the API certificate against a CA only works if connecting to the API server using a hostname.
* `kubeconfig`
* Path to a Kubernetes `kubeconfig` file.


[![Analytics](https://calico-ga-beacon.appspot.com/UA-52125893-3/calico-cni/configuration.md?pixel)](https://github.com/igrigorik/ga-beacon)
Loading

0 comments on commit f2e2cf7

Please sign in to comment.