No results matching "
var gitbook = gitbook || [];
gitbook.push(function() {
- gitbook.page.hasChanged({"page":{"title":"What is a Contoller","level":"3.3.1","depth":2,"next":{"title":"Controller Example","level":"3.3.2","depth":2,"path":"basics/simple_controller.md","ref":"basics/simple_controller.md","articles":[]},"previous":{"title":"Controllers","level":"3.3","depth":1,"ref":"","articles":[{"title":"What is a Contoller","level":"3.3.1","depth":2,"path":"basics/what_is_a_controller.md","ref":"basics/what_is_a_controller.md","articles":[]},{"title":"Controller Example","level":"3.3.2","depth":2,"path":"basics/simple_controller.md","ref":"basics/simple_controller.md","articles":[]}]},"dir":"ltr"},"config":{"plugins":["theme-api","panel","sequence-diagrams","ga"],"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"pluginsConfig":{"panel":{},"search":{},"sequence-diagrams":{"theme":"simple"},"lunr":{"maxIndexSize":1000000,"ignoreSpecialCharacters":false},"fontsettings":{"theme":"white","family":"sans","size":2},"highlight":{},"theme-api":{"languages":[],"split":true,"theme":"light"},"ga":{"configuration":"auto","token":"UA-119864590-1"},"sharing":{"facebook":true,"twitter":true,"google":false,"weibo":false,"instapaper":false,"vk":false,"all":["facebook","google","twitter","weibo","instapaper"]},"theme-default":{"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"showLevel":false}},"theme":"default","author":"Phillip Wittrock","pdf":{"pageNumbers":true,"fontSize":12,"fontFamily":"Arial","paperSize":"a4","chapterMark":"pagebreak","pageBreaksBefore":"/","margin":{"right":62,"left":62,"top":56,"bottom":56}},"structure":{"langs":"LANGS.md","readme":"README.md","glossary":"GLOSSARY.md","summary":"SUMMARY.md"},"variables":{},"title":"The Kubebuilder Book","gitbook":">= 
3.0.0"},"file":{"path":"basics/what_is_a_controller.md","mtime":"2018-07-25T00:55:02.994Z","type":"markdown"},"gitbook":{"version":"3.2.3","time":"2018-07-25T16:34:48.577Z"},"basePath":"..","book":{"language":""}});
+ gitbook.page.hasChanged({"page":{"title":"What is a Controller","level":"3.3.1","depth":2,"next":{"title":"Controller Example","level":"3.3.2","depth":2,"path":"basics/simple_controller.md","ref":"basics/simple_controller.md","articles":[]},"previous":{"title":"Controllers","level":"3.3","depth":1,"ref":"","articles":[{"title":"What is a Controller","level":"3.3.1","depth":2,"path":"basics/what_is_a_controller.md","ref":"basics/what_is_a_controller.md","articles":[]},{"title":"Controller Example","level":"3.3.2","depth":2,"path":"basics/simple_controller.md","ref":"basics/simple_controller.md","articles":[]}]},"dir":"ltr"},"config":{"plugins":["theme-api","panel","sequence-diagrams","ga"],"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"pluginsConfig":{"panel":{},"search":{},"sequence-diagrams":{"theme":"simple"},"lunr":{"maxIndexSize":1000000,"ignoreSpecialCharacters":false},"fontsettings":{"theme":"white","family":"sans","size":2},"highlight":{},"theme-api":{"languages":[],"split":true,"theme":"light"},"ga":{"configuration":"auto","token":"UA-119864590-1"},"sharing":{"facebook":true,"twitter":true,"google":false,"weibo":false,"instapaper":false,"vk":false,"all":["facebook","google","twitter","weibo","instapaper"]},"theme-default":{"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"showLevel":false}},"theme":"default","author":"Phillip Wittrock","pdf":{"pageNumbers":true,"fontSize":12,"fontFamily":"Arial","paperSize":"a4","chapterMark":"pagebreak","pageBreaksBefore":"/","margin":{"right":62,"left":62,"top":56,"bottom":56}},"structure":{"langs":"LANGS.md","readme":"README.md","glossary":"GLOSSARY.md","summary":"SUMMARY.md"},"variables":{},"title":"The Kubebuilder Book","gitbook":">= 
3.0.0"},"file":{"path":"basics/what_is_a_controller.md","mtime":"2018-07-25T00:55:02.994Z","type":"markdown"},"gitbook":{"version":"3.2.3","time":"2018-09-18T16:32:20.891Z"},"basePath":"..","book":{"language":""}});
});
diff --git a/docs/book/public/basics/what_is_a_resource.html b/docs/book/public/basics/what_is_a_resource.html
index 01348c57642..7e65b4a315c 100644
--- a/docs/book/public/basics/what_is_a_resource.html
+++ b/docs/book/public/basics/what_is_a_resource.html
@@ -287,7 +287,7 @@
- What is a Contoller
+ What is a Controller
@@ -429,6 +429,19 @@
+
Kubebuilder uses dep to manage dependencies.
+Different dependency management tasks can be done using the dep ensure
+command.
+
Adding new dependencies
+
Kubernetes Dependencies
Kubebuilder-generated projects depends on a number of Kubernetes
+dependencies internally. Kubebuilder (using the controller-runtime
+library) makes sure that the parts of these dependencies that are exposed
+in the Kubebuilder API remain stable.
+
It's recommended not to make use of most of these libraries directly, since
+they change frequently in incompatible ways. The k8s.io/api repository is
+the exception to this, and it's recommended that you rely on the version that
+kubebuilder requires, instead of listing it as a direct dependency in
+Gopkg.toml.
+
However, if you do add direct dependencies on any of these libraries yourself,
+be aware that you may encounter dependency conflicts. See the problem with
+kubernetes libraries below for more
+information.
+
+
Dep manages dependency constraints using the Gopkg.toml file. You can add
+new dependencies by adding new [[constraint]] stanzas to that file.
+Alternatively, if you're not using kubebuilder
+update, you can use the dep ensure -add
+command to add new dependencies to your Gopkg.toml.
+
# edit Gopkg.toml OR perform the following:
+dep ensure -add github.com/pkg/errors
+
+
+
+
Updating existing dependencies
+
Update dependencies for your project to the latest minor and patch versions.
+
dep ensure -update sigs.k8s.io/controller-runtime sigs.k8s.io/controller-tools
+
+
+
+
Repopulating your vendor directory
+
Dependency source code is stored in the vendor directory. If it ever gets
+deleted, you can repopulate it using the exact dependency versions stored in
+Gopkg.lock.
+
dep ensure
+
+
+
+
How Kubebuilder's Dependencies Work
+
Under the Hood
The information in this section details how Kubebuilder's dependency graph
+works. It's not necessary for day-to-day use of Kubebuilder, but can be useful
+if you want to understand how a particular version of Kubebuilder relates to
+a particular version of Kubernetes.
+
+
TL;DR
+
As of Kubebuilder 1.0.2:
+
+
Projects generated with Kubebuilder list a semantic version of
+controller-runtime and controller-tools as their only direct
+dependencies. All other Kubernetes-related libraries are transitive
+dependencies.
+
+
controller-runtime and controller-tools each list a specific, identical
+set of dependencies on Kubernetes libraries and related libraries.
+
+
Once you've updated your dependencies with kubebuilder update vendor,
+you'll be able to run dep ensure and dep ensure --update sigs.k8s.io/controller-runtime sigs.k8s.io/controller-tools to safely
+update all your dependencies in the future.
+
+
You can depend on controller-runtime to follow semantic versioning
+guarantees -- we won't break your code without
+introducing a new major version, for both the interfaces in
+controller-runtime, and the bits of the kubernetes libraries that
+controller-runtime actually exposes.
+
+
+
The Problem with Kubernetes libraries
+
The kubernetes project exports a collection of libraries (which we'll call
+the k8s-deps from now on) that expose common functionality used when
+building applications that consume Kubernetes APIs (e.g. clients,
+informers, etc). Due to the way Kubernetes is versioned
+(non-semantically), all of these dependencies must closely match --
+differing versions can cause strange compilation or runtime errors.
+
Beyond this, these libraries have their own set of dependencies which are
+not always the latest versions, or are occasionally in-between versions.
+
Collecting the correct set of dependencies for any given Kubernetes
+project can thus be tricky.
+
Using Prebaked Manifests (Kubebuilder pre-1.0.2)
+
Before version 1.0.2, Kubebuilder shipped a pre-baked manifest of the
+correct dependencies. When scaffolding out a new project using
+kubebuilder init (a kb-project), it would copy over a Gopkg.toml
+file containing the exact dependency versions required for the project
+(which could then be used by dep dependency management tool to actually
+fetch the dependencies).
+
In addition to the Kubernetes dependencies required, this also specified
+that all kb-projects use the master branch of the controller-runtime
+library, which provides the abstractions that Kubebuilder is built upon.
+Because controller-runtime wraps and consumes Kubernetes, it also needs
+specific versions of the k8s-deps, and those versions must match the ones
+listed in the kb-project's Gopkg.toml, otherwise we'd have conflicting
+dependencies.
+
The Problem with Prebaked Manifests
+
Using the master branch as the target version of controller-runtime made
+it impossible to make breaking changes to controller-runtime. However,
+even when using a specific version of controller-runtime, it's still
+difficult to make changes.
+
Since kb-projects must use an identical set of dependencies to
+controller-runtime, any update to the controller-runtime dependencies
+(say, to pull in a new feature) would have caused immediate dependency
+version conflicts. Effectively, any update to the dependencies had to be
+treated as a major version revision, and there would have been no way to
+tell the difference between "this release includes breaking API changes"
+and "this release simply switches to a newer version of the k8s-deps".
+
Transitive Dependencies (Kubebuilder 1.0.2+)
+
As noted above, any dependency version in kb-projects must match
+dependency versions listed in controller-runtime, exactly. Furthermore, it
+turns out, by design, the set of k8s-deps used in controller-runtime is
+a superset of the set of dependencies actually imported by kb-projects.
+
Therefore, in kb-projects generated with Kubebuilder 1.0.2+, no
+dependencies are listed besides controller-runtime (and controller-tools).
+All of the k8s-deps become transitive dependencies, whose versions are
+determined when dep (the dependency management tool) looks at the
+versions required by controller-runtime.
+
controller-runtime is semantically versioned, so any changes to either the
+interfaces in controller-runtime, or the pieces of the k8s-deps that are
+exposed as part of those interfaces, means a new major version of
+controller-runtime will be released. Any other changes (new features, bug
+fixes, updates to k8s-deps which don't break interfaces) yield minor or
+patch versions (as per semver), which can easily and
+safely be updated to by kb-projects.
+
controller-tools Dependencies
+
controller-tools is the library used to generate CRD and RBAC manifests
+for kb-projects. With Kubebuilder 1.0.2+, it does not directly depend on
+controller-runtime, but shares the same set of dependencies. It therefore
+must be updated in lockstep with controller-runtime. This is mostly
+a concern of the controller-tools/controller-runtime maintainers, and will
+not affect users. Like controller-runtime, controller-tools uses semver.
This chapter walks through a simple webhook implementation.
+
It uses the controller-runtime libraries to implement
+a Webhook Server and Manager.
+
Same as controllers, a Webhook Server is a
+Runnable which needs to be registered to a manager.
+An arbitrary number of Runnables can be registered to a manager,
+so a webhook server can run with other controllers in the same manager.
+They will share the same dependencies provided by the manager. For example, shared cache, client, scheme, etc.
+
Setup
+
Way to Deploy your Webhook Server
+
There are various ways to deploy the webhook server in terms of
+
+
Where the serving certificates live.
+
In what environment the webhook server runs, in a pod or directly on a VM, etc.
+
If in a pod, on what type of node, worker nodes or master node.
+
+
The recommended way to deploy the webhook server is
+
+
Run the webhook server as a regular pod on worker nodes through a workload API, e.g. Deployment or StatefulSet.
+
Put the certificate in a k8s secret in the same namespace as the webhook server
+
Mount the secret as a volume in the pod
+
Create a k8s service to front the webhook server.
+
+
Creating a Handler
+
The business logic for a Webhook exists in a Handler.
+A Handler implements the admission.Handler interface, which contains a single Handle method.
+
If a Handler implements inject.Client and inject.Decoder interfaces,
+the manager will automatically inject the client and the decoder into the Handler.
+
Note: The client.Client provided by the manager reads from a cache which is lazily initialized.
+To eagerly initialize the cache, perform a read operation with the client before starting the server.
+
podAnnotator is a Handler, which implements the admission.Handler, inject.Client and inject.Decoder interfaces.
+
Details about how to implement an admission webhook podAnnotator is covered in a later section.
+
type podAnnotator struct {
+ client client.Client
+ decoder types.Decoder
+}
+
+// podAnnotator implements admission.Handler.
+var _ admission.Handler = &podAnnotator{}
+
+func (a *podAnnotator) Handle(ctx context.Context, req types.Request) types.Response {
+ ...
+}
+
+// podAnnotator implements inject.Client.
+var _ inject.Client = &podAnnotator{}
+
+// InjectClient injects the client into the podAnnotator
+func (a *podAnnotator) InjectClient(c client.Client) error {
+ a.client = c
+ return nil
+}
+
+// podAnnotator implements inject.Decoder.
+var _ inject.Decoder = &podAnnotator{}
+
+// InjectDecoder injects the decoder into the podAnnotator
+func (a *podAnnotator) InjectDecoder(d types.Decoder) error {
+ a.decoder = d
+ return nil
+}
+
+
+
+
+
Configuring a Webhook and Registering the Handler
+
A Webhook configures what type of requests the Handler should accept from the apiserver. Options include:
+
+
The type of the Operations (CRUD)
+
The type of the Targets (Deployment, Pod, etc)
+
The type of the Handler (Mutating, Validating)
+
+
When the Server starts, it will register all Webhook Configurations with the apiserver to start accepting and
+routing requests to the Handlers.
+
controller-runtime provides a useful package for
+building a webhook.
+You can incrementally set the configuration of a webhook and then invoke Build to complete building a webhook.
+
If you want to specify the name and(or) path for your webhook instead of using the default, you can invoke
+Name("yourname") and Path("/yourpath") respectively.
A Server registers Webhook Configuration with the apiserver and creates an HTTP server to route requests to the handlers.
+
The server is behind a Kubernetes Service and provides a certificate to the apiserver when serving requests.
+
The Server depends on a Kubernetes Secret containing this certificate to be mounted under CertDir.
+
If the Secret is empty, during bootstrapping the Server will generate a certificate and write it into the Secret.
+
A new webhook server can be created by invoking webhook.NewServer.
+The Server will be registered to the provided manager.
+You can specify Port, CertDir and various BootstrapOptions.
+For the full list of Server options, please see GoDoc.
You can register webhook(s) in the webhook server by invoking svr.Register(wh).
+
Implementing Webhook Handler
+
Implementing the Handler Business Logic
+
decoder types.Decoder is a decoder that knows how the decode all core type and your CRD types.
+
client client.Client is a client that knows how to talk to the API server.
+
The guideline of returning HTTP status code is that:
+
+
If the server decides to admit the request, it should return 200 and set
+Allowed
+to true.
+
If the server rejects the request due to an admission policy reason, it should return 200, set
+Allowed
+to false and provide an informational message as reason.
+
If the request is not well formatted, the server should reject it with 400 (Bad Request) and an error message.
+
If the server encounters an unexpected error during processing, it should reject the request with 500 (Internal Error).
+
+
controller-runtime provides various helper methods for constructing Response.
+
+
ErrorResponse for rejecting a request due to an error.
+
PatchResponse for mutating webhook to admit a request with patches.
+
ValidationResponse for admitting or rejecting a request with a reason message.
+
+
type podAnnotator struct {
+ client client.Client
+ decoder types.Decoder
+}
+
+// podAnnotator implements admission.Handler.
+var _ admission.Handler = &podAnnotator{}
+
+// podAnnotator adds an annotation to every incoming pods.
+func (a *podAnnotator) Handle(ctx context.Context, req types.Request) types.Response {
+ pod := &corev1.Pod{}
+
+ err := a.decoder.Decode(req, pod)
+ if err != nil {
+ return admission.ErrorResponse(http.StatusBadRequest, err)
+ }
+ copy := pod.DeepCopy()
+
+ err = a.mutatePodsFn(ctx, copy)
+ if err != nil {
+ return admission.ErrorResponse(http.StatusInternalServerError, err)
+ }
+ // admission.PatchResponse generates a Response containing patches.
+ return admission.PatchResponse(pod, copy)
+}
+
+// mutatePodsFn add an annotation to the given pod
+func (a *podAnnotator) mutatePodsFn(ctx context.Context, pod *corev1.Pod) error {
+ if pod.Annotations == nil {
+ pod.Annotations = map[string]string{}
+ }
+ pod.Annotations["example-mutating-admission-webhook"] = "foo"
+ return nil
+}
+
Before following the instructions below, make sure to update Kubebuilder
+to 1.0.2+, update your dependency file to the latest version by using
+kubebuilder update vendor (see
+below). See the dependencies
+guide for more information on why this is necessary.
+
+
You can update dependencies to minor and patch versions using
+dep, as you would any other dependency in
+your project. See the dependencies
+guide for more
+information.
+
Updating to New Major Versions
Update your project's dependencies to the latest version of the libraries used by kubebuilder. This
will modify Gopkg.toml by rewriting the [[override]] elements beneath the
# DO NOT MODIFY BELOW THIS LINE. line. Rules added by the user above this line will be retained.
@@ -603,7 +675,14 @@
Update Existing Project's De
-
+
By Hand
+
You can also update your project by hand. Simply edit Gopkg.toml to
+point to a new version of the dependencies listed under the # DO NOT
+MODIFY BELOW THIS LINE. line, making sure that
+sigs.k8s.io/controller-tools and sigs.k8s.io/controller-runtime always
+have the same version listed. You should then remove the marker line to
+indicate that you've updated dependencies by hand, and don't want them
+overridden.
Webhooks are HTTP callbacks, providing a way for notifications to be delivered to an external web server.
+A web application implementing webhooks will send an HTTP request (typically POST) to another application when a certain event happens.
+In the kubernetes world, there are 3 kinds of webhooks:
+admission webhook,
+authorization webhook and CRD conversion webhook.
+
In controller-runtime libraries,
+currently we only support admission webhooks.
+CRD conversion webhooks will be supported after it is released in kubernetes 1.12.
+
Admission Webhook
+
Admission webhooks are HTTP callbacks that receive admission requests, process them and return admission responses.
+There are two types of admission webhooks: mutating admission webhook and validating admission webhook.
+With mutating admission webhooks, you may change the request object before it is stored (e.g. for implementing defaulting of fields)
+With validating admission webhooks, you may not change the request, but you can reject it (e.g. for implementing validation of the request).
+
Why Admission Webhooks are Important
+
Admission webhooks are the mechanism to enable kubernetes extensibility through CRD.
+
+
Mutating admission webhook is the only way to do defaulting for CRDs.
+
Validating admission webhook allows for more complex validation than pure schema-based validation.
+e.g. cross-field validation or cross-object validation.
+
+
It can also be used to add custom logic in the core kubernetes API.
+
Mutating Admission Webhook
+
A mutating admission webhook receives an admission request which contains an object.
+The webhook can either decline the request directly or return JSON patches for modifying the original object.
+
+
If admitting the request, the webhook is responsible for generating JSON patches and send them back in the
+admission response.
+
If declining the request, a reason message should be returned in the admission response.
+
+
Validating Admission Webhook
+
A validating admission webhook receives an admission request which contains an object.
+The webhook can either admit or decline the request.
+A reason message should be returned in the admission response if declining the request.
+
Authentication
+
The apiserver by default doesn't authenticate itself to the webhooks.
+That means the webhooks don't authenticate the identities of the clients.
+
But if you want to authenticate the clients, you need to configure the apiserver to use basic auth, bearer token,
+or a cert to authenticate itself to the webhooks. You can find detailed steps
+here.
+
Configure Admission Webhooks Dynamically
+
Admission webhooks can be configured dynamically via the admissionregistration.k8s.io/v1beta1 API.
+So your cluster must be 1.9 or later and have the API enabled.
+
You can do CRUD operations on WebhookConfiguration objects as on other k8s objects.
+
apiVersion: admissionregistration.k8s.io/v1beta1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: <name of itself>
+webhooks:
+- name: <webhook name, e.g. validate-deployment.example.com>
+ rules:
+ - apiGroups:
+ - apps
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - deployments
+ clientConfig:
+ service:
+ namespace: <namespace of the service>
+ name: <name of the service>
+ caBundle: <pem encoded ca cert that signs the server cert used by the webhook>
+
version=1.0.1 # latest stable version
arch=amd64
# download the release
-curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v$version/kubebuilder_$version_darwin_$arch.tar.gz
+curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${version}/kubebuilder_${version}_darwin_${arch}.tar.gz
# extract the archive
-tar -zxvf kubebuilder_$version_darwin_$arch.tar.gz
-sudo mv kubebuilder_$version_darwin_$arch /usr/local/kubebuilder
+tar -zxvf kubebuilder_${version}_darwin_${arch}.tar.gz
+sudo mv kubebuilder_${version}_darwin_${arch} /usr/local/kubebuilder
# update your PATH to include /usr/local/kubebuilder/binexport PATH=$PATH:/usr/local/kubebuilder/bin
-
version=1.0.0 # latest stable version
+
version=1.0.1 # latest stable version
arch=amd64
# download the release
-curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v$version/kubebuilder_$version_linux_$arch.tar.gz
+curl -L -O https://github.com/kubernetes-sigs/kubebuilder/releases/download/v${version}/kubebuilder_${version}_linux_${arch}.tar.gz
# extract the archive
-tar -zxvf kubebuilder_$version_linux_$arch.tar.gz
-sudo mv kubebuilder_$version_linux_$arch /usr/local/kubebuilder
+tar -zxvf kubebuilder_${version}_linux_${arch}.tar.gz
+sudo mv kubebuilder_${version}_linux_${arch} /usr/local/kubebuilder
# update your PATH to include /usr/local/kubebuilder/binexport PATH=$PATH:/usr/local/kubebuilder/bin
@@ -632,11 +690,11 @@
Installation and Setup
arch=amd64
# download the release
-curl -L -O https://storage.googleapis.com/kubebuilder-release/kubebuilder_master_darwin_$arch.tar.gz
+curl -L -O https://storage.googleapis.com/kubebuilder-release/kubebuilder_master_darwin_${arch}.tar.gz
# extract the archive
-tar -zxvf kubebuilder_master_darwin_$arch.tar.gz
-sudo mv kubebuilder_master_darwin_$arch /usr/local/kubebuilder
+tar -zxvf kubebuilder_master_darwin_${arch}.tar.gz
+sudo mv kubebuilder_master_darwin_${arch} /usr/local/kubebuilder
# update your PATH to include /usr/local/kubebuilder/binexport PATH=$PATH:/usr/local/kubebuilder/bin
@@ -644,11 +702,11 @@
Installation and Setup
arch=amd64
# download the release
-curl -L -O https://storage.googleapis.com/kubebuilder-release/kubebuilder_master_linux_$arch.tar.gz
+curl -L -O https://storage.googleapis.com/kubebuilder-release/kubebuilder_master_linux_${arch}.tar.gz
# extract the archive
-tar -zxvf kubebuilder_master_linux_$arch.tar.gz
-sudo mv kubebuilder_master_linux_$arch /usr/local/kubebuilder
+tar -zxvf kubebuilder_master_linux_${arch}.tar.gz
+sudo mv kubebuilder_master_linux_${arch} /usr/local/kubebuilder
# update your PATH to include /usr/local/kubebuilder/binexport PATH=$PATH:/usr/local/kubebuilder/bin
@@ -687,7 +745,7 @@
No results matching "
var gitbook = gitbook || [];
gitbook.push(function() {
- gitbook.page.hasChanged({"page":{"title":"Installation and Setup","level":"2.3","depth":1,"next":{"title":"Hello World","level":"2.4","depth":1,"path":"getting_started/hello_world.md","ref":"getting_started/hello_world.md","articles":[]},"previous":{"title":"What is Kubebuilder","level":"2.2","depth":1,"path":"getting_started/what_is_kubebuilder.md","ref":"getting_started/what_is_kubebuilder.md","articles":[]},"dir":"ltr"},"config":{"plugins":["theme-api","panel","sequence-diagrams","ga"],"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"pluginsConfig":{"panel":{},"search":{},"sequence-diagrams":{"theme":"simple"},"lunr":{"maxIndexSize":1000000,"ignoreSpecialCharacters":false},"fontsettings":{"theme":"white","family":"sans","size":2},"highlight":{},"theme-api":{"languages":[],"split":true,"theme":"light"},"ga":{"configuration":"auto","token":"UA-119864590-1"},"sharing":{"facebook":true,"twitter":true,"google":false,"weibo":false,"instapaper":false,"vk":false,"all":["facebook","google","twitter","weibo","instapaper"]},"theme-default":{"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"showLevel":false}},"theme":"default","author":"Phillip Wittrock","pdf":{"pageNumbers":true,"fontSize":12,"fontFamily":"Arial","paperSize":"a4","chapterMark":"pagebreak","pageBreaksBefore":"/","margin":{"right":62,"left":62,"top":56,"bottom":56}},"structure":{"langs":"LANGS.md","readme":"README.md","glossary":"GLOSSARY.md","summary":"SUMMARY.md"},"variables":{},"title":"The Kubebuilder Book","gitbook":">= 3.0.0"},"file":{"path":"getting_started/installation_and_setup.md","mtime":"2018-07-25T00:55:03.061Z","type":"markdown"},"gitbook":{"version":"3.2.3","time":"2018-07-25T16:34:48.577Z"},"basePath":"..","book":{"language":""}});
+ gitbook.page.hasChanged({"page":{"title":"Installation and Setup","level":"2.3","depth":1,"next":{"title":"Hello World","level":"2.4","depth":1,"path":"getting_started/hello_world.md","ref":"getting_started/hello_world.md","articles":[]},"previous":{"title":"What is Kubebuilder","level":"2.2","depth":1,"path":"getting_started/what_is_kubebuilder.md","ref":"getting_started/what_is_kubebuilder.md","articles":[]},"dir":"ltr"},"config":{"plugins":["theme-api","panel","sequence-diagrams","ga"],"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"pluginsConfig":{"panel":{},"search":{},"sequence-diagrams":{"theme":"simple"},"lunr":{"maxIndexSize":1000000,"ignoreSpecialCharacters":false},"fontsettings":{"theme":"white","family":"sans","size":2},"highlight":{},"theme-api":{"languages":[],"split":true,"theme":"light"},"ga":{"configuration":"auto","token":"UA-119864590-1"},"sharing":{"facebook":true,"twitter":true,"google":false,"weibo":false,"instapaper":false,"vk":false,"all":["facebook","google","twitter","weibo","instapaper"]},"theme-default":{"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"showLevel":false}},"theme":"default","author":"Phillip Wittrock","pdf":{"pageNumbers":true,"fontSize":12,"fontFamily":"Arial","paperSize":"a4","chapterMark":"pagebreak","pageBreaksBefore":"/","margin":{"right":62,"left":62,"top":56,"bottom":56}},"structure":{"langs":"LANGS.md","readme":"README.md","glossary":"GLOSSARY.md","summary":"SUMMARY.md"},"variables":{},"title":"The Kubebuilder Book","gitbook":">= 3.0.0"},"file":{"path":"getting_started/installation_and_setup.md","mtime":"2018-09-14T23:33:05.167Z","type":"markdown"},"gitbook":{"version":"3.2.3","time":"2018-09-18T16:32:20.891Z"},"basePath":"..","book":{"language":""}});
});
diff --git a/docs/book/public/getting_started/what_is_kubebuilder.html b/docs/book/public/getting_started/what_is_kubebuilder.html
index 3b82e673c4d..440ff70c691 100644
--- a/docs/book/public/getting_started/what_is_kubebuilder.html
+++ b/docs/book/public/getting_started/what_is_kubebuilder.html
@@ -289,7 +289,7 @@
- What is a Contoller
+ What is a Controller
@@ -431,6 +431,19 @@
+
+
+
diff --git a/docs/book/public/getting_started/why_kubernetes.html b/docs/book/public/getting_started/why_kubernetes.html
index 96a6f15a7c7..7887a8bfbb8 100644
--- a/docs/book/public/getting_started/why_kubernetes.html
+++ b/docs/book/public/getting_started/why_kubernetes.html
@@ -289,7 +289,7 @@
- What is a Contoller
+ What is a Controller
@@ -431,6 +431,19 @@
+
+
+
diff --git a/docs/book/public/go_docs.html b/docs/book/public/go_docs.html
index 18b90a49639..df461db5589 100644
--- a/docs/book/public/go_docs.html
+++ b/docs/book/public/go_docs.html
@@ -287,7 +287,7 @@
- What is a Contoller
+ What is a Controller
@@ -429,6 +429,19 @@
+
+
+
diff --git a/docs/book/public/index.html b/docs/book/public/index.html
index 8f1a57eeef4..e5cb5a75bff 100644
--- a/docs/book/public/index.html
+++ b/docs/book/public/index.html
@@ -287,7 +287,7 @@
- What is a Contoller
+ What is a Controller
@@ -429,6 +429,19 @@
+
+
+
Kubebuilder projects contain 3 important packages.
+
cmd/...
+
The cmd package contains the manager main program. Manager is responsible for initializing
+shared dependencies and starting / stopping Controllers. Users typically
+will not need to edit this package and can rely on the scaffolding.
+
The cmd package is scaffolded automatically by kubebuilder init.
+
pkg/apis/...
+
The pkg/apis/... packages contains the API resource definitions.
+Users edit the *_types.go files under this directory to implement their API definitions.
+
Each resource lives in a pkg/apis/<api-group-name>/<api-version-name>/<api-kind-name>_types.go
+file.
+
The pkg/apis package is scaffolded automatically by kubebuilder create api when creating a Resource.
+
pkg/controller/...
+
The pkg/controller/... packages contain the Controller implementations.
+Users edit the *_controller.go files under this directory to implement their Controllers.
+
The pkg/controller package is scaffolded automatically by kubebuilder create api when creating a Controller.
+
Additional directories and files
+
In addition to the packages above, a Kubebuilder project has several other directories and files.
+
Makefile
+
A Makefile is created with targets to build, test, run and deploy the controller artifacts
+for development as well as production workflows
+
Dockerfile
+
A Dockerfile is scaffolded to build a container image for your Manager.
+
config/...
+
Kubebuilder creates yaml config for installing the CRDs and related objects under config/.
+
+
config/crds
+
config/rbac
+
config/manager
+
config/samples
+
+
docs/...
+
API reference documentation, user defined API samples and API conceptual documentation go here.
+
Providing boilerplate headers
To prepend boilerplate comments at the top of generated and bootstrapped files,
+add the boilerplate to a hack/boilerplate.go.txt file before creating a project.
+
+
Create a new project
+
Create a new kubebuilder project. This will automatically initialize the vendored go libraries
+that will be required to build your project.
Run your manager locally against a Kubernetes cluster
+
Users may run the controller-manager binary locally against a Kubernetes cluster. This will
+install the APIs into the cluster and begin watching and reconciling the resources.
+
# Create a minikube cluster
+$ minikube start
+
+# Install the CRDs into the cluster
+$ make install
+
+# Build and run the manager
+$ make run
+
+
+
+
Create an instance
+
Create a new instance of your Resource. Observe the manager logs printed to the console after creating the object.
+
$ kubectl apply -f sample/<resource>.yaml
+
+
+
+
Deploying your manager in a Kubernetes cluster
+
Users can run the controller-manager in a Kubernetes cluster.
+
# Create a docker image
+$ make docker-build IMG=<img-name>
+
+# Push the docker image to a configured container registry
+$ make docker-push IMG=<img-name>
+
+# Deploy the controller manager manifests to the cluster.
+$ make deploy
+
This chapter walks through a simple Controller implementation.
+
This example is for the Controller for the ContainerSet API shown in the Resource Example.
+It uses the controller-runtime libraries
+to implement the Controller and Manager.
+
Unlike the Hello World example, here we use the underlying Controller libraries directly instead
+of the higher-level application pattern libraries. This gives greater control over
+how the Controller is configured.
+
+
$ kubebuilder create api --group workloads --version v1beta1 --kind ContainerSet
// +kubebuilder:rbac creates RBAC rules in the config/rbac/rbac_role.yaml file when make is run.
+This will ensure the Kubernetes ServiceAccount running the controller can read / write to the Deployment API.
+
+
ContainerSetController has 2 variables:
+
+
client.Client is a client for reading / writing Kubernetes APIs.
+
scheme *runtime.Scheme is a runtime.Scheme used by the library to set OwnerReferences.
+
+
Adding a Controller to the Manager
+
Add creates a new Controller that will be started by the Manager. When adding a Controller it is important to setup
+Watch functions to trigger Reconciles.
+
Watch is a function that takes an event source.Source and a handler.EventHandler. The Source provides events
+for some type, and the EventHandler responds to events by enqueuing reconcile.Requests for objects.
+Watch optionally takes a list of Predicates that may be used to filter events.
+
Sources
+
+
To watch for create / update / delete events for an object use a source.KindSource e.g.
+source.KindSource{Type: &v1.Pod}
+
+
Handlers
+
+
To enqueue a Reconcile for the object in the event use a handler.EnqueueRequestForObject
+
To enqueue a Reconcile for the owner object that created the object in the event use a handler.EnqueueRequestForOwner
+with the type of the owner e.g. &handler.EnqueueRequestForOwner{OwnerType: &appsv1.Deployment{}, IsController: true}
+
To enqueue Reconcile requests for an arbitrary collection of objects in response to the event, use a
+handler.EnqueueRequestsFromMapFunc.
+
+
Example:
+
+
Create a new ContainerSetController struct that will.
+
Invoke Reconcile with the Name and Namespace of a ContainerSet for ContainerSet create / update / delete events
+
Invoke Reconcile with the Name and Namespace of a ContainerSet for Deployment create / update / delete events
+
+
+
+
Reference
+
+
See the controller libraries godocs for reference
+documentation on the controller libraries.
type ContainerSetController struct {
+ client.Client
+ scheme *runtime.Scheme
+}
+
+func Add(mgr manager.Manager) error {
+ // Create a new Controller
+ c, err := controller.New("containerset-controller", mgr,
+ controller.Options{Reconciler: &ContainerSetController{
+ Client: mgr.GetClient(),
+ scheme: mgr.GetScheme(),
+ }})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to ContainerSet
+ err = c.Watch(
+ &source.Kind{Type:&workloadsv1beta1.ContainerSet{}},
+ &handler.EnqueueRequestForObject{})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to Deployments created by a ContainerSet and trigger a Reconcile for the owner
+ err = c.Watch(
+ &source.Kind{Type: &appsv1.Deployment{}},
+ &handler.EnqueueRequestForOwner{
+ IsController: true,
+ OwnerType: &workloadsv1beta1.ContainerSet{},
+ })
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+
+
+
Adding Annotations For Watches And CRUD Operations
It is important to add // +kubebuilder:rbac annotations when adding Watches or CRUD operations
+so that when the Controller is deployed it will have the correct permissions.
+
make must be run anytime annotations are changed to regenerate code and configs.
+
+
Implementing Controller Reconcile
+
Level vs Edge
The Reconcile function does not differentiate between create, update or deletion events.
+Instead it simply reads the state of the cluster at the time it is called.
+
+
Reconcile uses a client.Client to read and write objects. The Client is able to
+read or write any type of runtime.Object (e.g. Kubernetes object), so users don't need
+to generate separate clients for each collection of APIs.
+
The business logic of the Controller is implemented in the Reconcile function. This function takes the Namespace
+ and Name of a ContainerSet, allowing multiple Events to be batched together into a single Reconcile call.
+
The function shown here creates or updates a Deployment using the replicas and image specified in
+ContainerSet.Spec. Note that it sets an OwnerReference for the Deployment to enable garbage collection
+on the Deployment once the ContainerSet is deleted.
+
+
Read the ContainerSet using the NamespacedName
+
If there is an error or it has been deleted, return
+
Create the new desired DeploymentSpec from the ContainerSetSpec
+
Read the Deployment and compare the Deployment.Spec to the ContainerSet.Spec
+
If the observed Deployment.Spec does not match the desired spec
+
Deployment was not found: create a new Deployment
+
Deployment was found and changes are needed: update the Deployment
The main program lives under the cmd/ package created by kubebuilder init.
+It does not need to be changed by the user for most cases.
+
The main program starts the Controllers that have been registered with the Manager.
+Scaffolded Controllers are automatically registered with the Manager by scaffolding
+an init function to the controller package. Scaffolded Resources are
+automatically registered with the Manager Scheme by scaffolding an init
+function to the apis package.
+
+
Get a kubeconfig to talk to an apiserver
+
Add APIs to the Manager's Scheme
+
Add Controllers to the Manager
+
Start the Manager
+
+
func main() {
+ // Get a config to talk to the apiserver
+ cfg, err := config.GetConfig()
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create a new Cmd to provide shared dependencies and start components
+ mgr, err := manager.New(cfg, manager.Options{})
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Setup Scheme for all resources
+ if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
+ log.Fatal(err)
+ }
+
+ // Setup all Controllers
+ if err := controller.AddToManager(mgr); err != nil {
+ log.Fatal(err)
+ }
+
+ // Start the Cmd
+ log.Fatal(mgr.Start(signals.SetupSignalHandler()))
+}
+
This chapter walks through the definition of a new Resource called ContainerSet. ContainerSet
+contains the image and replicas fields, and ensures a Deployment with matching image and replicas
+is running in the cluster.
+
Create the scaffolding for a new resource using the kubebuilder cli:
+
+
$ kubebuilder create api --group workloads --version v1beta1 --kind ContainerSet
+
+
This creates several files, including the Resource schema definition in:
+
+
pkg/apis/workloads/v1beta1/containerset_types.go
+
+
Type Definition
+
ContainerSet has 4 fields:
+
+
Spec contains the desired cluster state specified by the object. While much of the Spec is
+defined by users, unspecified parts may be filled in with defaults or by Controllers such as autoscalers.
+
Status contains only observed cluster state and is only written by controllers.
+Status is not the source of truth for any information, but instead aggregates and publishes observed state.
+
TypeMeta contains metadata about the API itself - such as Group, Version, Kind.
+
ObjectMeta contains metadata about the specific object instance - such as the name, namespace,
+labels and annotations. ObjectMeta contains data common to most objects.
// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ContainerSet creates a new Deployment running multiple replicas of a single container with the given
+// image.
+// +k8s:openapi-gen=true
+// +resource:path=containersets
+type ContainerSet struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // spec contains the desired behavior of the ContainerSet
+ Spec ContainerSetSpec `json:"spec,omitempty"`
+
+ // status contains the last observed state of the ContainerSet
+ Status ContainerSetStatus `json:"status,omitempty"`
+}
+
+
+
+
Comment annotation directives
The definition contains several comment annotations of the form // +something. These are
+used to configure code generators to run against this code. The code generators will
+generate boilerplate functions and types to complete the Resource definition.
+To learn more on configuring code generation see the Code Generation chapter.
+
Note: The // +kubebuilder:validation:Pattern=.+:.+ annotation declares Pattern validation
+requiring that the Image field match the regular expression .+:.+
+
+
ContainerSetSpec
+
The ContainerSetSpec contains the container image and replica count, which should be read by
+the controller and used to create and manage a new Deployment. The Spec field contains desired
+state defined by the user or, if unspecified, field defaults or Controllers set values.
+An example of an unspecified field that could be owned by a Controller would be the replicas
+field, which may be set by autoscalers.
+
// ContainerSetSpec defines the desired state of ContainerSet
+type ContainerSetSpec struct {
+ // replicas is the number of replicas to maintain
+ Replicas int32 `json:"replicas,omitempty"`
+
+ // image is the container image to run. Image must have a tag.
+ // +kubebuilder:validation:Pattern=.+:.+
+ Image string `json:"image,omitempty"`
+}
+
+
+
+
ContainerSetStatus
+
The ContainerSetStatus contains the number of healthy replicas, and should be set by the controller
+each time the ContainerSet is reconciled.
+
This field is propagated from the DeploymentStatus, and so the controller must watch for Deployment
+events to update the field.
+
// ContainerSetStatus defines the observed state of ContainerSet
+type ContainerSetStatus struct {
+ HealthyReplicas int32 `json:"healthyReplicas,omitempty"`
+}
+
+
+
+
Running Code Generators
While users don't directly modify generated code, the code must be regenerated after resources are
+modified by adding or removing fields. This is automatically done when running make.
+
Code generation may be configured for resources using annotations of the form // +something.
+See the pkg/gen reference documentation.
+
+
Scaffolded Boilerplate
+
Kubebuilder scaffolds boilerplate code to register resources with the runtime.Scheme used to
+map go structs to GroupVersionKinds.
+
+
SchemeGroupVersion is the GroupVersion for the APIs in this package
+
SchemeBuilder should have every API in the package type added to it
+
+
var (
+ // SchemeGroupVersion is group version used to register these objects
+ SchemeGroupVersion = schema.GroupVersion{Group: "workloads.k8s.io", Version: "v1beta1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+)
+
+
func init() {
+ // Register the types with the SchemeBuilder
+ SchemeBuilder.Register(&v1.ContainerSet{}, &v1.ContainerSetList{})
+}
+
Controllers implement APIs defined by Resources. Unlike Controllers in the ModelViewController
+pattern, Kubernetes Controllers are run asynchronously after the Resources (Models) have
+been written to storage. This model is highly flexible and allows new Controllers to be
+added for Models through extension instead of modification.
+
A Kubernetes Controller is a routine running in a Kubernetes cluster that watches for create /
+update / delete events on Resources, and triggers a Reconcile function in response. Reconcile
+is a function that may be called at any time with the Namespace and Name of an object (Resource
+instance), and it will make the cluster state match the state declared in the object Spec.
+Upon completion, Reconcile updates the object Status with the new actual state.
+
It is common for Controllers to watch for changes to the Resource type that they Reconcile
+and Resource types of objects they create. e.g. a ReplicaSet Controller watches for
+changes to ReplicaSets and Pods. The Controller will trigger a Reconcile for a ReplicaSet
+in response to either an event for that ReplicaSet or in response to an event for a
+Pod created by that ReplicaSet.
+
In some cases Reconcile may only update the Status without updating any cluster state.
+
Illustrative example:
+
+
A ReplicaSet object is created with 10 replicas specified in the Spec
+
ReplicaSetController Reconcile reads the Spec and lists the Pods owned by the ReplicaSet
+
No Pods are found, ReplicaSetController creates 10 Pods and updates the Status with 0/10 Pods running
+
ReplicaSetController Reconcile is triggered as the Pods start running, and updates Status in the
+ReplicaSet object.
+
+
Kubernetes APIs and Controllers have level based implementations to facilitate self-
+healing and periodic reconciliation. This means no state is provided to the Reconcile
+when it is called.
+
What is a Level Based API
+
The term level-based comes from interrupt hardware, where interrupts may be either level-based or edge-based.
+
Kubernetes defines a level-based API as implemented by reading the observed (actual) state of the system,
+comparing it to what is declared in the object Spec, and making changes to the system state so
+it matches the state of the Spec at the time Reconcile is called.
+
This has a number of notable properties:
+
+
Reconcile skips intermediate or obsolete values declared in the Spec and
+works directly toward the current Spec.
+
Reconcile may batch multiple events together before processing them instead
+of handling each individually
+
+
Consider the following examples of level based API implementations.
+
Example 1: Batching Events
+
A user creates a ReplicaSet with 1000 replicas. The ReplicaSet creates 1000 Pods and maintains a
+Status field with the number of healthy Pods. In a level based system, the Controller batches
+the Pod updates together (the Reconcile only gets the ReplicaSet Namespace and Name) before triggering
+the Reconcile. In an edge based system, the Controller responds to each individual Pod event, potentially
+performing 1000 sequential updates to the Status instead of 1.
+
Example 2: Skipping Obsolete States
+
A user creates a rollout for a Deployment containing a new container image. Shortly after
+starting the rollout, the user realizes the containers are crash looping because they need
+to increase memory thresholds when running the new image.
+The user updates the Deployment with the new memory limit to start a new rollout. In a
+level based system, the Controller will immediately stop rolling out the old values and start
+the rollout for the new values. In an edge based system the Controller may complete the first
+rollout before starting the next.
+
Watching Events
+
The Controller Reconcile is triggered by cluster events.
+
Watching Resources
+
Controllers must watch for events for the Resource they Reconcile. The ReplicaSetController
+watches for changes to ReplicaSets and triggers a Reconcile in response.
+
ReplicaSet Creation
+
The following diagram shows a creation event triggering a reconcile.
+
+
Watching Created Resources
+
Controllers should watch for events on the Resources they create. The ReplicaSetController watches
+for Pod events. If a Pod is deleted, the ReplicaSetController will see the Pod event and
+Reconcile the ReplicaSet that created the Pod so it can create a new one.
+
ReplicaSet Creation And Self-Healing
+
The following diagram shows a series of events after creating a new ReplicaSet and then a Pod getting deleted.
+
+
Watching Related Resource Events
+
Controllers may watch for events on Resources that are related, but they did not create. The
+DaemonSetController watches for changes to Nodes. If a new Node is created, the Controller
+will create a new Pod scheduled on that Node. In this case, all DaemonSet objects are reconciled
+each time a Node is created.
+
Create Objects During Reconciliation
+
Many Controllers create new Kubernetes objects as part of a reconcile. These objects
+are owned by the object responsible for their creation.
+This relationship is recorded both in an OwnersReference in the ObjectMeta of the created
+objects and through labels (on the created object) + selectors (on the creating object).
+
The labels + selectors allow the creating controller to find all of the objects it has created,
+by listing them using their label. The OwnersReference maps the created object to its
+owner when there is an event for the created object.
+
Writing Status Back to Objects
+
Controllers are run asynchronously, meaning that the user operation will return a success to
+the user before the Controller is run. If there are issues when the Controller is run,
+such as the container image being invalid, the user will not be notified.
+
Instead the Controller must write back the Status of the object at each Reconcile and
+users must check the object Status.
+
Status
The controller will keep Status up-to-date both in response to user initiated events, but also
+in response to non-user initiated events, such as Node failures.
+
+
Walkthrough: a Deployment Rollout across Deployments, ReplicaSets, Pods
+
Following is a walkthrough of a Deployment Rolling update.
+
Kubectl commands
+
Using kubectl, it is possible to call the same watch API used by controllers to trigger
+reconciles. The following example watches Deployments, ReplicaSets and Pods; creates a Deployment;
+and updates the Deployment with a new container image (triggering a rolling update).
+
# watch deployments in terminal 1
+kubectl get -w deployments
+
+# watch replicasets in terminal 2
+kubectl get -w replicasets
+
+# watch pods in terminal 3
+kubectl get -w pods
+
+# create deployment
+kubectl run nginx --image nginx:1.12 --replicas 3
+
+# rollout new image
+kubectl set image deployments nginx *=nginx:1.13
+
+
Flow Diagram
+
+
Controllers vs Operators
+
Controllers that implement an API for a specific application, such as Etcd, Spark or Cassandra are
+often referred to as Operators.
A Kubernetes Resource is a declarative API with a well defined Schema structure
+and endpoints. Because the structure of the Schema and Endpoints are predictable
+and structured, most Kubernetes tools work with any Kubernetes API even if they
+are not part of the core (e.g. extensions through CRDs).
+
What is a Declarative API
+
A declarative API expresses a fixed state that the cluster must continually
+work towards. Declarative APIs define the what, but not the how.
+Example: $ replicas 3
+
An imperative API expresses an operation that may change state, but does not
+define an absolute state that must be maintained. Imperative APIs express the
+how, but not what. Example: $ add-pods 2.
+
In the declarative case, if a replica is lost the cluster has a clear directive
+to create another one, whereas in the latter case this is not necessarily true.
Constraints on the how may be defined within declarative APIs, such as performing a rolling update
+versus deleting and recreating all Pods immediately.
+
+
Resource Schema
+
Group, Version, Kind
+
Every Kubernetes resource has a Group, Version and Kind that uniquely identifies it.
+
+
The resource Kind is the name of the API - such as Deployment or Service.
+
The resource Version defines the stability of the API and backward compatibility guarantees -
+such as v1beta1 or v1.
+
The resource Group is similar to package in a language. It disambiguates different APIs
+that may happen to have identically named Kinds. Groups often contain a domain name, such as k8s.io.
+
+
Deployment yaml config Group Version Kind
+
apiVersion: apps/v1
+kind: Deployment
+
+
+
+
Versions
Resources with different Versions but the same Group and Kind differ in the following ways:
+
+
Unspecified fields may have different defaults
+
The same logical fields may have different names or representations
+
+
However resources with different versions frequently share the same features and controller.
+
Alpha APIs may break backwards compatibility by changing field names, defaults or behavior. They
+also may not be supported in the future.
+
Beta APIs maintain backwards compatibility on field names, defaults and behavior. They may be
+missing features required for GA. However once the API goes GA, the features should be available
+in the Beta version.
+
GA APIs have been available and running in production for sufficient time to have developed
+a stable set of field names and defaults, as well as a complete feature set.
+
+
Spec, Status, Metadata
+
Most Kubernetes Resource Schemas contain 3 components: Spec, Status and Metadata
+
Spec: the Resource Spec defines the desired state of the cluster as specified by the user.
+
Status: the Resource Status publishes the state of the cluster as observed by the controller.
+
Metadata: the Resource Metadata contains information common to most resources about the object
+including the object name, annotations, labels and more.
+
Note: this config has been abbreviated for the purposes of display
+
Deployment yaml config with Spec Status and Metadata
The resource Status should not contain the source of truth for any information, and should be
+possible for Controllers to recreate by looking at the cluster state. Other values assigned by
+Controllers, such as the Service spec.clusterIp, should be set on the Spec not the Status.
+
+
Resource Endpoints
+
Kubernetes Resources have well defined endpoints as described below.
+
Create, Update, Patch, Delete
+
The create, update, patch and delete endpoints may be used to modify objects. The update endpoint
+replaces the object with what is provided, whereas the patch endpoint selectively updates
+fields.
+
Get, List, Watch
+
The get, list and watch endpoints may be used to get a specific resource by name, list all
+resources matching a label, or continually watch for updates.
When reading objects, the same objects should be returned regardless of which version of the API endpoint is
+read from (though the structure may differ between versions).
+
When writing objects, the default values applied to fields may change between API versions, but
+the written object should be visible when read from any version.
+
+
Warning on Updates
The update API should only be used to read-then-write an object, and never used to
+update an object directly from declarative config. This is because the object state
+may be partially managed by Controllers running in the cluster and this state would
+be lost when the update replaces the current object with the declarative config.
+
Illustrative example: updating a Service from declarative config rather than a read-then-write
+would clear the Service spec.clusterIp field set by the controller.
+
+
Watch Timeouts
If used directly, a watch API call will timeout and need to be re-established. The kubebuilder
+libraries hide the details behind watches from users and automatically re-establish connections.
+
+
Subresources
+
While most operations can be represented declaratively, some may not, such as
+logs, attach or exec. These operations may be implemented as subresources.
+
Subresources are functions attached to resources, but that have their
+own Schema and Endpoints. By having different resources each implement
+the same subresource API, resources can implement shared interfaces.
+
For example Deployment, ReplicaSet and StatefulSet each implement the
+scale subresource API, making it easy to build tools which scale any of them
+as well as scale any other resources that implement the scale subresource.
+
Deployment Scale Subresource Endpoints under /apis/apps/v1
Labels in ObjectMeta data are key-value pairs that may be queried to find matching objects.
+Labels are used to connect objects together in a Kubernetes cluster. For instance
+Services use labels to determine which Pods to direct traffic to, and Deployments use labels
+(along with OwnersReferences) to identify Pods they created.
+
Annotations allow arbitrary data to be written to resources that may not fit within the
+Schema of the resource, but may be needed by end users or tools.
+
Extending Built In Types
Annotations may be used to define new extension fields on resources without modifying the
+Schema of the object. This allows users to define their own private schema extensions for
+existing core Kubernetes resources.
+
+
Namespaces
+
While most resources are Namespaced, that is the objects are scoped to a Namespace, some resources
+are non-namespaced and scoped to the cluster. Examples of non-namespaced resources include
+Nodes, Namespaces and ClusterRole.
The Manager is an executable that wraps one or more Controllers. It may
+either be built and run locally against a remote cluster, or run as a container
+in the cluster.
+
When run as a container, it should be installed into its own Namespace with a
+ServiceAccount and RBAC permissions on the appropriate resources. The configs
+to do this are automatically generated for the user by running make.
+
Note that the Manager is run as a StatefulSet and not a Deployment. This
+is to ensure that only 1 instance of the Manager is run at a time (a Deployment
+may sometimes run multiple instances even with replicas set to 1).
+
Building and Running Locally
+
Build and run locally against the cluster defined in ~/.kube/config. Note
+this requires a running Kubernetes cluster to be accessible with the
+~/.kube/config.
+
make run
+
+
In another terminal, create an instance of your resource.
Controllers may watch Resources and trigger Reconcile calls with the key of the
+object from the watch event.
+
This example configures a controller to watch for Pod events, and call Reconcile with
+the Pod key.
+
If Pod default/foo is created, updated or deleted, then Reconcile will be called with
+namespace: default, name: foo
+
// Annotation for generating RBAC role to Watch Pods
+// +kubebuilder:rbac:groups="",resources=pods,verbs=get;watch;list
+
+
// Watch for Pod events, and enqueue a reconcile.Request to trigger a Reconcile
+err := c.Watch(
+ &source.Kind{Type: &v1.Pod{}},
+ &handler.EnqueueRequestForObject{})
+if err != nil {
+ return err
+}
+
+
+
+
+
Watching Created Resources
+
Controllers may watch Resources of types they create and trigger Reconcile calls with the key of
+the Owner of the object.
+
This example configures a Controller to watch for Pod events, and call Reconcile with
+the Owner ReplicaSet key. This is done by looking up the object referred to by the Owner reference
+from the watch event object.
+
+
Define a function to lookup the Owner from the key
+
Call WatchControllerOf with the Owned object and the function to lookup the owner
+
+
If Pod default/foo-pod was created by ReplicaSet default/foo-rs, and the Pod is
+(re)created, updated or deleted, then Reconcile will be called with namespace: default, name: foo-rs
+
Note: This requires adding the following annotations to your Controller struct to ensure the
+correct RBAC rules are in place and informers have been started.
+
// Annotation to generate RBAC roles to watch and update Pods
+// +kubebuilder:rbac:groups="",resources=pods,verbs=get;watch;list;create;update;delete
+
+
// Watch for Pod events, and enqueue a reconcile.Request for the ReplicaSet in the OwnerReferences
+err := c.Watch(
+ &source.Kind{Type: &corev1.Pod{}},
+ &handler.EnqueueRequestForOwner{
+ IsController: true,
+ OwnerType: &appsv1.ReplicaSet{}})
+if err != nil {
+ return err
+}
+
+
+
+
Watching Arbitrary Resources
+
Controllers may watch arbitrary Resources and map them to a key of the Resource managed by the
+controller. Controllers may even map an event to multiple keys, triggering Reconciles for
+each key.
+
Example: To respond to cluster scaling events (e.g. the deletion or addition of Nodes),
+a Controller would watch Nodes and map the watch events to keys of objects managed by
+the controller.
+
This simple example configures a Controller to watch for Pod events, and then reconciles objects with
+names derived from the Pod's name.
+
If Pod default/foo is created, updated or deleted, then Reconcile will be called for
+namespace: default, name: foo-parent-1 and for namespace: default, name: foo-parent-2.
+
Note: This requires adding the following annotations to your Controller struct to ensure the
+correct RBAC rules are in place and informers have been started.
// Define a mapping from the object in the event to one or more
+// objects to Reconcile
+mapFn := handler.ToRequestsFunc(
+ func(a handler.MapObject) []reconcile.Request {
+ return []reconcile.Request{
+ {NamespacedName: types.NamespacedName{
+ Name: a.Meta.GetName() + "-1",
+ Namespace: a.Meta.GetNamespace(),
+ }},
+ {NamespacedName: types.NamespacedName{
+ Name: a.Meta.GetName() + "-2",
+ Namespace: a.Meta.GetNamespace(),
+ }},
+ }
+ })
+// Watch Deployments and trigger Reconciles for objects
+// mapped from the Deployment in the event
+err := c.Watch(
+ &source.Kind{Type: &appsv1.Deployment{}},
+ &handler.EnqueueRequestsFromMapFunc{
+ ToRequests: mapFn,
+ })
+if err != nil {
+ return err
+}
+
+
+
+
+
Watching Channels
+
Controllers may trigger Reconcile for events written to Channels. This is useful if the Controller
+needs to trigger a Reconcile in response to something other than a create / update / delete event
+to a Kubernetes object. Note: in most situations this case is better handled by updating a Kubernetes
+object with the external state that would trigger the Reconcile.
It is often useful to publish Event objects from the controller Reconcile function. Events
+allow users to see what is going on with a particular object, and allow automated processes
+to see and respond to them.
+
Getting Events
Recent Events for an object may be viewed by running kubectl describe
+
+
Events are published from a Controller using an EventRecorder,
+which can be created for a Controller by calling GetRecorder(name string) on a Manager.
eventtype is the type of this event, and is either Normal or Warning.
+
reason is the reason this event is generated. It should be short and unique with
+UpperCamelCase format. The value could appear in switch statements by automation.
Deploy the controller-manager in a Kubernetes cluster
+
Deploying the controller to a Kubernetes cluster involves following steps:
+
+
Building the docker image
+
Pushing the docker image to the container registry
+
Customizing the deployment manifests
+
Applying the manifests to deploy in the cluster
+
+
Kubebuilder generated Makefile supports all the above steps.
+
Prerequisites
Kubebuilder generated Makefile uses Kustomize for customizing the manifests
+before deploying to the kubernetes cluster. Follow the instructions to install Kustomize and
+ensure that it is available in the PATH. Note that Kubebuilder requires Kustomize version 1.0.4 or higher for deploy to work.
Kubebuilder will generate API reference documentation for your APIs with kubebuilder docs. The
+reference documentation will be built under docs/reference/build/index.html and can be opened
+directly in a web browser.
+
+
Use --docs-copyright to set the copyright footer
+
Use --title to set the title
+
+
Non-Kubebuilder Projects
Kubebuilder can also be used to generate API reference documentation for non-kubebuilder projects, as long as the
+resources are annotated with // +kubebuilder:resource:path=<resource-name> the same as they are in kubebuilder
+projects.
+
Important: The // +kubebuilder:resource annotation must appear directly above the go struct
+defining the resource. No blank lines may appear between the annotation and the go struct.
+
+
Creating Examples
+
Users can provide resource examples by running
+kubebuilder create example --kind <kind> --group <group> --version <version>. This will create an example
+file under docs/reference/examples/<kind>/<kind>.yaml for the user to edit. The contents of this file will appear
+next to the API reference documentation after rerunning kubebuilder docs.
+
+
note: description that will appear directly above the example
+
sample: example yaml that will be displayed
+
+
+
$ kubebuilder create example --kind Frigate --version v1beta1 --group ships
Users can modify documentation of the overview and API groups by editing the files under
+docs/reference/static_includes.
+
+
Edit _overview.md to provide documentation for the full set of APIs.
+
Edit _<group>.md to provide documentation for a specific API group.
+
+
Adding Notes and Warnings for APIs
+
It is possible to add notes and warnings to APIs in the reference documentation by annotating
+the go struct with // +kubebuilder:doc:note= or // +kubebuilder:doc:warning=. These will
+show up in blue and orange boxes.
+
// Frigate API documentation goes here.
+// +kubebuilder:doc:note=this is a note
+// +kubebuilder:doc:warning=this is a warning
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:path=frigates
+type Frigate struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec field documentation goes here.
+ Spec FrigateSpec `json:"spec,omitempty"`
+
+ // Status field documentation goes here.
+ Status FrigateStatus `json:"status,omitempty"`
+}
+
+
+
+
Customizing the API documentation
+
The generated documentation is controlled by the docs/reference/config.yaml file generated by kubebuilder. This
+file may be manually changed by users to customize the appearance of the documentation, however this is
+discouraged as the user will need to manually manage the config going forward.
+
Modifying config.yaml
When manually modifying config.yaml, users must run kubebuilder docs with --generate-config=false to
+prevent the file from being rewritten.
+
+
Table of Contents
+
docs/reference/config.yaml is automatically generated to create a section for each API group including
+the APIs in the group, and to show the most mature versions of each API. Both the API sections and
+displayed API versions may be manually controlled if needed.
Kubebuilder will create scaffolding tests for controllers and resources. When run, these tests will start
+a local control plane as part of the integration test. Developers may talk to the local control plane
+using the provided config.
+
Resource Tests
+
The resource tests are created under pkg/apis/<group>/<version>/<kind>_types_test.go. When a resource
+is created with kubebuilder create api, a test file will be created to store and read back the object.
+
Update the test to include validation you add to your resource.
The controller tests are created under pkg/controller/<kind>/controller_test.go. When a resource
+is created with kubebuilder create api, a test file will be created to start the controller
+and reconcile objects. The default test will create a new object and verify that the controller
+Reconcile function is called.
+
Update the test to verify the business logic of your controller.
To override the test binaries used to start the control plane, set the TEST_ASSET_ environment variables.
+This can be useful for performing testing against multiple Kubernetes cluster versions.
+
If these environment variables are unset, kubebuilder will default to the binaries packaged with kubebuilder.
Install the latest version of kubebuilder from releases page.
+
Update Existing Project's Dependencies
+
Update your project's dependencies to the latest version of the libraries used by kubebuilder. This
+will modify Gopkg.toml by rewriting the [[override]] elements beneath the
+# DO NOT MODIFY BELOW THIS LINE. line. Rules added by the user above this line will be retained.
+
Gopkg.toml's without the # DO NOT MODIFY BELOW THIS LINE. will be ignored.
A new project may be scaffolded for a user by running kubebuilder init and then scaffolding a
+new API with kubebuilder create api. More on this topic in
+Project Creation and Structure
+
This chapter shows a simple Controller implementation using the
+controller-runtime builder
+libraries to do most of the Controller configuration.
+
While Kubernetes APIs typically have 3 components (Resource, Controller, Manager), this
+example uses an existing Resource (ReplicaSet) and the builder package to hide many of the
+setup details.
+
For a more detailed look at creating Resources and Controllers that may be more complex,
+see the Resource, Controller and
+Manager examples.
+
ReplicaSet Controller Setup
+
The example main program configures a new ReplicaSetController to watch for
+create/update/delete events for ReplicaSets and Pods.
+
+
On ReplicaSet create/update/delete events - Reconcile the ReplicaSet
+
On Pod create/update/delete events - Reconcile the ReplicaSet that created the Pod
+
Reconcile by calling ReplicaSetController.Reconcile with the Namespace and Name of
+ReplicaSet
+
+
func main() {
+ a, err := builder.SimpleController()
+ // ReplicaSet is the Application type that
+ // is Reconciled Respond to ReplicaSet events.
+ ForType(&appsv1.ReplicaSet{}).
+ // ReplicaSet creates Pods. Trigger
+ // ReplicaSet Reconciles for Pod events.
+ Owns(&corev1.Pod{}).
+ // Call ReplicaSetController with the
+ // Namespace / Name of the ReplicaSet
+ Build(&ReplicaSetController{})
+ if err != nil {
+ log.Fatal(err)
+ }
+ log.Fatal(mrg.Start(signals.SetupSignalHandler()))
+}
+
+// ReplicaSetController is a simple Controller example implementation.
+type ReplicaSetController struct {
+ client.Client
+}
+
+
+
+
ReplicaSet Implementation
+
ReplicaSetController implements reconcile.Reconciler. It takes the Namespace and Name for
+a ReplicaSet object and makes the state of the cluster match what is specified in the ReplicaSet
+at the time Reconcile is called. This typically means using a client.Client to read
+the state of multiple objects, and perform create / update / delete as needed.
+
+
Implement InjectClient to get a client.Client from the application.Builder
+
Read the ReplicaSet object using the provided Namespace and Name
+
List the Pods matching the ReplicaSet selector
+
Set a Label on the ReplicaSet with the matching Pod count
+
+
Because the Controller watches for Pod events, the count will be updated any time
+a Pod is created or deleted.
+
// InjectClient is called by the application.Builder
+// to provide a client.Client
+func (a *ReplicaSetController) InjectClient(
+ c client.Client) error {
+ a.Client = c
+ returnnil
+}
+
+// Reconcile reads the Pods for a ReplicaSet and writes
+// the count back as an annotation
+func (a *ReplicaSetController) Reconcile(
+ req reconcile.Request) (reconcile.Result, error) {
+ // Read the ReplicaSet
+ rs := &appsv1.ReplicaSet{}
+ err := a.Get(context.TODO(), req.NamespacedName, rs)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ // List the Pods matching the PodTemplate Labels
+ pods := &corev1.PodList{}
+ err = a.List(context.TODO(),
+ client.InNamespace(req.Namespace).
+ MatchingLabels(rs.Spec.Template.Labels),
+ pods)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ // Update the ReplicaSet
+ rs.Labels["selector-pod-count"] =
+ fmt.Sprintf("%v", len(pods.Items))
+ err = a.Update(context.TODO(), rs)
+ if err != nil {
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
Kubebuilder is an SDK for rapidly building and publishing Kubernetes APIs in Go.
+It builds on top of the canonical techniques used to build the core Kubernetes APIs
+to provide simple abstractions that reduce boilerplate and toil.
+
Similar to web development frameworks such as Ruby on Rails and SpringBoot,
+Kubebuilder increases velocity and reduces the complexity managed by
+developers.
+
Included in Kubebuilder:
+
+
Initializing projects with a base structure including
+
Go package dependencies at canonical versions.
+
main program entry point
+
Makefile for formatting, generating, testing and building go
+
Dockerfile for building container images
+
+
+
Scaffolding APIs with
+
Resource (Model) definition
+
Controller implementation
+
Integration tests for Resource and Controller
+
CRD definition
+
+
+
Simple abstractions for implementing APIs
+
Controllers
+
Resource Schema Validation
+
Validating Webhooks
+
+
+
Artifacts for publishing APIs for installation into clusters
+
Namespace
+
CRDs
+
RBAC Roles and RoleBindings
+
Controller StatefulSet + Service
+
+
+
API reference documentation with examples
+
+
Kubebuilder is developed on top of the controller-runtime and controller-tools libraries.