diff --git a/CHANGELOG-nightly.md b/CHANGELOG-nightly.md index d78f0e794c..1bca6d2d0c 100644 --- a/CHANGELOG-nightly.md +++ b/CHANGELOG-nightly.md @@ -2,6 +2,13 @@ Note: Breaking changes between versions are indicated by "💥". +- [Feature] Better support of Caddy as a load balancer in Kubernetes: + - Make it possible to start/stop a selection of resources with ``tutor k8s start/stop [names...]``. + - Make it easy to deploy an independent LoadBalancer by converting the caddy service to a ClusterIP when ``ENABLE_WEB_PROXY=false``. + - Add a ``app.kubernetes.io/component: loadbalancer`` label to the LoadBalancer service. + - Add ``app.kubernetes.io/name`` labels to all services. + - Preserve the LoadBalancer service in ``tutor k8s stop`` commands. + - Wait for the caddy deployment to be ready before running initialisation jobs. - [Security] On Kubernetes, convert all NodePort services to ClusterIP to guarantee network isolation from outside the cluster. - 💥[Improvement] Drop Python 3.5 compatibility. - [Bugfix] Fix docker-compose project name in development on nightly branch. diff --git a/docs/k8s.rst b/docs/k8s.rst index b44a66f0e8..0316b0b924 100644 --- a/docs/k8s.rst +++ b/docs/k8s.rst @@ -27,10 +27,22 @@ The Kubernetes cluster should have at least 4Gb of RAM on each node. When runnin .. image:: img/virtualbox-minikube-system.png :alt: Virtualbox memory settings for Minikube -Ingress controller and SSL/TLS certificates -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Load Balancer and SSL/TLS certificates +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -As of Tutor v11, it is no longer required to setup an Ingress controller to access your platform. Instead Caddy exposes a LoadBalancer service and SSL/TLS certificates are transparently generated at runtime. +By default, Tutor deploys a `LoadBalancer `__ service that exposes the Caddy deployment to the outside world. 
As in the local installation, this service is responsible for transparently generating SSL/TLS certificates at runtime. You will need to point your DNS records to this LoadBalancer object before the platform can work correctly. Thus, you should first start the Caddy load balancer, with:: + + tutor k8s start caddy + +Get the external IP of this service:: + + kubectl --namespace openedx get services/caddy + +Use this external IP to configure your DNS records. Once the DNS records are configured, you should verify that the Caddy container has properly generated the SSL/TLS certificates by checking the container logs:: + + tutor k8s logs -f caddy + +If, for some reason, you would like to deploy your own load balancer, you should set ``ENABLE_WEB_PROXY=false`` just like in the :ref:`local installation `. Then, point your load balancer at the "caddy" service, which will be a `ClusterIP <https://kubernetes.io/docs/concepts/services-networking/service/#type-clusterip>`__. S3-like object storage with `MinIO `_ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/tutor/commands/k8s.py b/tutor/commands/k8s.py index 18443bfdc1..9613529fbf 100644 --- a/tutor/commands/k8s.py +++ b/tutor/commands/k8s.py @@ -198,9 +198,16 @@ def quickstart(context: click.Context, non_interactive: bool) -> None: ) -@click.command(help="Run all configured Open edX services") +@click.command( + short_help="Run all configured Open edX resources", + help=( + "Run all configured Open edX resources. You may limit this command to " + "some resources by passing name arguments." 
+ ), +) +@click.argument("names", metavar="name", nargs=-1) @click.pass_obj -def start(context: Context) -> None: +def start(context: Context, names: List[str]) -> None: config = tutor_config.load(context.root) # Create namespace, if necessary # Note that this step should not be run for some users, in particular those @@ -218,34 +225,68 @@ def start(context: Context) -> None: "--selector", "app.kubernetes.io/component=namespace", ) - # Create volumes - utils.kubectl( - "apply", - "--kustomize", - tutor_env.pathjoin(context.root), - "--wait", - "--selector", - "app.kubernetes.io/component=volume", - ) - # Create everything else except jobs - utils.kubectl( - "apply", - "--kustomize", - tutor_env.pathjoin(context.root), - "--selector", - "app.kubernetes.io/component notin (job,volume,namespace)", - ) + + names = names or ["all"] + for name in names: + if name == "all": + # Create volumes + utils.kubectl( + "apply", + "--kustomize", + tutor_env.pathjoin(context.root), + "--wait", + "--selector", + "app.kubernetes.io/component=volume", + ) + # Create everything else except jobs + utils.kubectl( + "apply", + "--kustomize", + tutor_env.pathjoin(context.root), + "--selector", + "app.kubernetes.io/component notin (job,volume,namespace)", + ) + else: + utils.kubectl( + "apply", + "--kustomize", + tutor_env.pathjoin(context.root), + "--selector", + "app.kubernetes.io/name={}".format(name), + ) -@click.command(help="Stop a running platform") +@click.command( + short_help="Stop a running platform", + help=( + "Stop a running platform by deleting all resources, except for volumes. " + "You may limit this command to some resources by passing name arguments." 
+ ), +) +@click.argument("names", metavar="name", nargs=-1) @click.pass_obj -def stop(context: Context) -> None: +def stop(context: Context, names: List[str]) -> None: config = tutor_config.load(context.root) - utils.kubectl( - "delete", - *resource_selector(config), - "deployments,services,configmaps,jobs", - ) + names = names or ["all"] + resource_types = "deployments,services,configmaps,jobs" + not_lb_selector = "app.kubernetes.io/component!=loadbalancer" + for name in names: + if name == "all": + utils.kubectl( + "delete", + *resource_selector(config, not_lb_selector), + resource_types, + ) + else: + utils.kubectl( + "delete", + *resource_selector( + config, + not_lb_selector, + "app.kubernetes.io/name={}".format(name), + ), + resource_types, + ) @click.command(help="Reboot an existing platform") @@ -290,9 +331,9 @@ def delete(context: Context, yes: bool) -> None: def init(context: Context, limit: Optional[str]) -> None: config = tutor_config.load(context.root) runner = K8sJobRunner(context.root, config) - for service in ["mysql", "elasticsearch", "mongodb"]: - if tutor_config.is_service_activated(config, service): - wait_for_pod_ready(config, service) + for name in ["caddy", "elasticsearch", "mysql", "mongodb"]: + if tutor_config.is_service_activated(config, name): + wait_for_pod_ready(config, name) jobs.initialise(runner, limit_to=limit) diff --git a/tutor/templates/k8s/services.yml b/tutor/templates/k8s/services.yml index 2e00b9b062..180032bad4 100644 --- a/tutor/templates/k8s/services.yml +++ b/tutor/templates/k8s/services.yml @@ -4,13 +4,33 @@ apiVersion: v1 kind: Service metadata: name: caddy + labels: + app.kubernetes.io/name: caddy + app.kubernetes.io/component: loadbalancer spec: type: LoadBalancer ports: - port: 80 name: http + {%- if ENABLE_HTTPS %} - port: 443 name: https + {%- endif %} + selector: + app.kubernetes.io/name: caddy +{% else %} +--- +apiVersion: v1 +kind: Service +metadata: + name: caddy + labels: + app.kubernetes.io/name: caddy +spec: 
+ type: ClusterIP + ports: + - port: {{ CADDY_HTTP_PORT }} + name: http selector: app.kubernetes.io/name: caddy {% endif %} @@ -20,6 +40,8 @@ apiVersion: v1 kind: Service metadata: name: cms + labels: + app.kubernetes.io/name: cms spec: type: ClusterIP ports: @@ -34,6 +56,8 @@ apiVersion: v1 kind: Service metadata: name: lms + labels: + app.kubernetes.io/name: lms spec: type: ClusterIP ports: @@ -48,6 +72,8 @@ apiVersion: v1 kind: Service metadata: name: elasticsearch + labels: + app.kubernetes.io/name: elasticsearch spec: type: ClusterIP ports: @@ -62,6 +88,8 @@ apiVersion: v1 kind: Service metadata: name: mongodb + labels: + app.kubernetes.io/name: mongodb spec: type: ClusterIP ports: @@ -76,6 +104,8 @@ apiVersion: v1 kind: Service metadata: name: mysql + labels: + app.kubernetes.io/name: mysql spec: type: ClusterIP ports: @@ -90,6 +120,8 @@ apiVersion: v1 kind: Service metadata: name: redis + labels: + app.kubernetes.io/name: redis spec: type: ClusterIP ports: @@ -104,6 +136,8 @@ apiVersion: v1 kind: Service metadata: name: smtp + labels: + app.kubernetes.io/name: smtp spec: type: ClusterIP ports: