diff --git a/.editorconfig b/.editorconfig index 42ff3294fd16a..e49c89c4e8b78 100644 --- a/.editorconfig +++ b/.editorconfig @@ -5,7 +5,7 @@ charset = utf-8 max_line_length = 80 trim_trailing_whitespace = true -[*.{html,js,json,sass,md,mmark,toml,yaml}] +[*.{css,html,js,json,sass,md,mmark,toml,yaml}] indent_style = space indent_size = 2 diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 291db36bf2345..8a8da8978e5d9 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -41,7 +41,6 @@ aliases: sig-docs-en-owners: # Admins for English content - bradtopol - daminisatya - - gochist - jaredbhatti - jimangel - kbarnard10 @@ -52,13 +51,13 @@ aliases: - sftim - steveperry-53 - tengqm + - vineethreddy02 - xiangpengzhao - zacharysarah - zparnold sig-docs-en-reviews: # PR reviews for English content - bradtopol - daminisatya - - gochist - jaredbhatti - jimangel - kbarnard10 @@ -66,6 +65,7 @@ aliases: - makoscafee - onlydole - rajakavitha1 + - rajeshdeshpande02 - sftim - steveperry-53 - tengqm @@ -128,12 +128,10 @@ aliases: - fabriziopandini - mattiaperi - micheleberardi - - rlenferink sig-docs-it-reviews: # PR reviews for Italian content - fabriziopandini - mattiaperi - micheleberardi - - rlenferink sig-docs-ja-owners: # Admins for Japanese content - cstoku - inductor @@ -192,10 +190,12 @@ aliases: - femrtnz - jcjesus - devlware + - jhonmike sig-docs-pt-reviews: # PR reviews for Portugese content - femrtnz - jcjesus - devlware + - jhonmike sig-docs-vi-owners: # Admins for Vietnamese content - huynguyennovem - ngtuna diff --git a/README-id.md b/README-id.md index b178b220bc38e..1d6b830f2e8d8 100644 --- a/README-id.md +++ b/README-id.md @@ -9,7 +9,7 @@ Selamat datang! Repositori ini merupakan wadah bagi semua komponen yang dibutuhk Pertama, kamu dapat menekan tombol **Fork** yang berada pada bagian atas layar, untuk menyalin repositori pada akun Github-mu. Salinan ini disebut sebagai **fork**. Kamu dapat menambahkan konten pada **fork** yang kamu miliki, setelah kamu merasa cukup untuk menambahkan konten yang kamu miliki dan ingin memberikan konten tersebut pada kami, kamu dapat melihat **fork** yang telah kamu buat dan membuat **pull request** untuk memberi tahu kami bahwa kamu ingin menambahkan konten yang telah kamu buat. -Setelah kamu membuat sebuah **pull request**, seorang **reviewer** akan memberikan masukan terhadap konten yang kamu sediakan serta beberapa hal yang dapat kamu lakukan apabila perbaikan diperlukan terhadap konten yang telah kamu sediakan. Sebagai seorang yang membuat **pull request**, **sudah menjadi kewajiban kamu untuk melakukan modifikasi terhadap konten yang kamu berikan sesuai dengan masukan yang diberikan oleh seorang reviewer Kubernetes**. Perlu kamu ketahui bahwa kamu dapat saja memiliki lebih dari satu orang **reviewer Kubernetes** atau dalam kasus kamu bisa saja mendapatkan **reviewer Kubernetes** yang berbeda dengan **reviewer Kubernetes** awal yang ditugaskan untuk memberikan masukan terhadap konten yang kamu sediakan. Selain itu, seorang **reviewer Kubernetes** bisa saja meminta masukan teknis dari [reviewer teknis Kubernetes](https://github.com/kubernetes/website/wiki/Tech-reviewers) jika diperlukan. +Setelah kamu membuat sebuah **pull request**, seorang **reviewer** akan memberikan masukan terhadap konten yang kamu sediakan serta beberapa hal yang dapat kamu lakukan apabila perbaikan diperlukan terhadap konten yang telah kamu sediakan. 
Sebagai seorang yang membuat **pull request**, **sudah menjadi kewajiban kamu untuk melakukan modifikasi terhadap konten yang kamu berikan sesuai dengan masukan yang diberikan oleh seorang reviewer Kubernetes**. Perlu kamu ketahui bahwa kamu dapat saja memiliki lebih dari satu orang **reviewer Kubernetes** atau dalam kasus kamu bisa saja mendapatkan **reviewer Kubernetes** yang berbeda dengan **reviewer Kubernetes** awal yang ditugaskan untuk memberikan masukan terhadap konten yang kamu sediakan. Selain itu, seorang **reviewer Kubernetes** bisa saja meminta masukan teknis dari [reviewer teknis Kubernetes](https://github.com/kubernetes/website/wiki/Tech-reviewers) jika diperlukan. Untuk informasi lebih lanjut mengenai tata cara melakukan kontribusi, kamu dapat melihat tautan di bawah ini: @@ -21,11 +21,11 @@ Untuk informasi lebih lanjut mengenai tata cara melakukan kontribusi, kamu dapat ## Menjalankan Dokumentasi Kubernetes pada Mesin Lokal Kamu -Petunjuk yang disarankan untuk menjalankan Dokumentasi Kubernetes pada mesin lokal kamus adalah dengan menggunakan [Docker](https://docker.com) **image** yang memiliki **package** [Hugo](https://gohugo.io), **Hugo** sendiri merupakan generator website statis. +Petunjuk yang disarankan untuk menjalankan Dokumentasi Kubernetes pada mesin lokal kamus adalah dengan menggunakan [Docker](https://docker.com) **image** yang memiliki **package** [Hugo](https://gohugo.io), **Hugo** sendiri merupakan generator website statis. > Jika kamu menggunakan Windows, kamu mungkin membutuhkan beberapa langkah tambahan untuk melakukan instalasi perangkat lunak yang dibutuhkan. Instalasi ini dapat dilakukan dengan menggunakan [Chocolatey](https://chocolatey.org). `choco install make` -> Jika kamu ingin menjalankan **website** tanpa menggunakan **Docker**, kamu dapat melihat tautan berikut [Petunjuk untuk menjalankan website pada mesin lokal dengan menggunakan Hugo](#petunjuk-untuk-menjalankan-website-pada-mesin-lokal-denga-menggunakan-hugo) di bagian bawah. +> Jika kamu ingin menjalankan **website** tanpa menggunakan **Docker**, kamu dapat melihat tautan berikut [Petunjuk untuk menjalankan website pada mesin lokal dengan menggunakan Hugo](#petunjuk-untuk-menjalankan-website-pada-mesin-lokal-dengan-menggunakan-hugo) di bagian bawah. Jika kamu sudah memiliki **Docker** [yang sudah dapat digunakan](https://www.docker.com/get-started), kamu dapat melakukan **build** `kubernetes-hugo` **Docker image** secara lokal: @@ -44,7 +44,7 @@ Buka **browser** kamu ke http://localhost:1313 untuk melihat laman dokumentasi. ## Petunjuk untuk menjalankan website pada mesin lokal dengan menggunakan Hugo -Kamu dapat melihat [dokumentasi resmi Hugo](https://gohugo.io/getting-started/installing/) untuk mengetahui langkah yang diperlukan untuk melakukan instalasi **Hugo**. Pastikan kamu melakukan instalasi versi **Hugo** sesuai dengan versi yang tersedia pada **environment variable** `HUGO_VERSION` pada **file**[`netlify.toml`](netlify.toml#L9). +Kamu dapat melihat [dokumentasi resmi Hugo](https://gohugo.io/getting-started/installing/) untuk mengetahui langkah yang diperlukan untuk melakukan instalasi **Hugo**. Pastikan kamu melakukan instalasi versi **Hugo** sesuai dengan versi yang tersedia pada **environment variable** `HUGO_VERSION` pada **file**[`netlify.toml`](netlify.toml#L9). 
Untuk menjalankan laman pada mesin lokal setelah instalasi **Hugo**, kamu dapat menjalankan perintah berikut: diff --git a/README-it.md b/README-it.md index e94f770ca95db..5530a673e5693 100644 --- a/README-it.md +++ b/README-it.md @@ -21,11 +21,11 @@ Per maggiori informazioni su come contribuire alla documentazione Kubernetes, ve ## Eseguire il sito Web localmente usando Docker -Il modo consigliato per eseguire localmente il sito Web Kubernetes prevede l'utilizzo di un'immagine [Docker] (https://docker.com) inclusa nel sito e configurata con tutti i software necessari, a partire dal generatore di siti web statici [Hugo] (https://gohugo.io). +Il modo consigliato per eseguire localmente il sito Web Kubernetes prevede l'utilizzo di un'immagine [Docker](https://docker.com) inclusa nel sito e configurata con tutti i software necessari, a partire dal generatore di siti web statici [Hugo](https://gohugo.io). -> Se stai utilizzando Windows, avrai bisogno di alcuni strumenti aggiuntivi che puoi installare con [Chocolatey] (https://chocolatey.org). `choco install make` +> Se stai utilizzando Windows, avrai bisogno di alcuni strumenti aggiuntivi che puoi installare con [Chocolatey](https://chocolatey.org). `choco install make` -> Se preferisci eseguire il sito Web localmente senza Docker, vedi [Eseguire il sito Web localmente utilizzando Hugo](# running-the-site-local-using-hugo) di seguito. +> Se preferisci eseguire il sito Web localmente senza Docker, vedi [Eseguire il sito Web localmente utilizzando Hugo](#eseguire-il-sito-web-localmente-utilizzando-hugo) di seguito. Se hai Docker [attivo e funzionante](https://www.docker.com/get-started), crea l'immagine Docker `kubernetes-hugo` localmente: diff --git a/README-pl.md b/README-pl.md index c05631df91fe8..65cd63df64dd4 100644 --- a/README-pl.md +++ b/README-pl.md @@ -41,7 +41,7 @@ Zalecaną metodą uruchomienia serwisu internetowego Kubernetesa lokalnie jest u choco install make ``` -> Jeśli wolisz uruchomić serwis lokalnie bez Dockera, przeczytaj [jak uruchomić serwis lokalnie przy pomocy Hugo](#jak-uruchomić-serwis-lokalnie-przy-pomocy-hugo) poniżej. +> Jeśli wolisz uruchomić serwis lokalnie bez Dockera, przeczytaj [jak uruchomić serwis lokalnie przy pomocy Hugo](#jak-uruchomić-lokalną-kopię-strony-przy-pomocy-hugo) poniżej. Jeśli [zainstalowałeś i uruchomiłeś](https://www.docker.com/get-started) już Dockera, zbuduj obraz `kubernetes-hugo` lokalnie: diff --git a/README-vi.md b/README-vi.md index 454d687d02352..b06b6df368b66 100644 --- a/README-vi.md +++ b/README-vi.md @@ -26,7 +26,7 @@ Cách được đề xuất để chạy trang web Kubernetes cục bộ là dù > Nếu bạn làm việc trên môi trường Windows, bạn sẽ cần thêm môt vài công cụ mà bạn có thể cài đặt với [Chocolatey](https://chocolatey.org). `choco install make` -> Nếu bạn không muốn dùng Docker để chạy trang web cục bộ, hãy xem [Chạy website cục bộ dùng Hugo](#Chạy website cục bộ dùng Hugo) dưới đây. +> Nếu bạn không muốn dùng Docker để chạy trang web cục bộ, hãy xem [Chạy website cục bộ dùng Hugo](#chạy-website-cục-bộ-dùng-hugo) dưới đây. Nếu bạn có Docker đang [up và running](https://www.docker.com/get-started), build `kubernetes-hugo` Docker image cục bộ: diff --git a/README-zh.md b/README-zh.md index 286db04db5cdc..8a7898774a055 100644 --- a/README-zh.md +++ b/README-zh.md @@ -122,7 +122,7 @@ Open up your browser to http://localhost:1313 to view the website. 
As you make c -## 使用 Hugo 在本地运行网站 +## 使用 Hugo 在本地运行网站 {#running-the-site-locally-using-hugo} \ No newline at end of file diff --git a/content/en/blog/_posts/2020-04-01-server-side-apply-beta2.md b/content/en/blog/_posts/2020-04-01-server-side-apply-beta2.md new file mode 100644 index 0000000000000..3aa81683e55db --- /dev/null +++ b/content/en/blog/_posts/2020-04-01-server-side-apply-beta2.md @@ -0,0 +1,51 @@ +--- +layout: blog +title: Kubernetes 1.18 Feature Server-side Apply Beta 2 +date: 2020-04-01 +slug: Kubernetes-1.18-Feature-Server-side-Apply-Beta-2 +--- + +**Authors:** Antoine Pelisse (Google) + +## What is Server-side Apply? +Server-side Apply is an important effort to migrate “kubectl apply” to the apiserver. It was started in 2018 by the Apply working group. + +The use of kubectl to declaratively apply resources has exposed the following challenges: + +- One needs to use the kubectl go code, or they have to shell out to kubectl. + +- Strategic merge-patch, the patch format used by kubectl, grew organically and was challenging to fix while maintaining compatibility with various api-server versions. + +- Some features are hard to implement directly on the client, for example, unions. + + +Server-side Apply is a new merging algorithm, as well as tracking of field ownership, running on the Kubernetes api-server. Server-side Apply enables new features like conflict detection, so the system knows when two actors are trying to edit the same field. + +## How does it work, what’s managedFields? +Server-side Apply works by keeping track of which actor of the system has changed each field of an object. It does so by diffing all updates to objects, and recording all the fields that have changed as well the time of the operation. All this information is stored in the managedFields in the metadata of objects. Since objects can have many fields, this field can be quite large. + +When someone applies, we can then use the information stored within managedFields to report relevant conflicts and help the merge algorithm to do the right thing. + +## Wasn’t it already Beta before 1.18? +Yes, Server-side Apply has been Beta since 1.16, but it didn’t track the owner for fields associated with objects that had not been applied. This means that most objects didn’t have the managedFields metadata stored, and conflicts for these objects cannot be resolved. With Kubernetes 1.18, all new objects will have the managedFields attached to them and provide accurate information on conflicts. + +## How do I use it? +The most common way to use this is through kubectl: `kubectl apply --server-side`. This is likely to show conflicts with other actors, including client-side apply. When that happens, conflicts can be forced by using the `--force-conflicts` flag, which will grab the ownership for the fields that have changed. + +## Current limitations +We have two important limitations right now, especially with sub-resources. The first is that if you apply with a status, the status is going to be ignored. We are still going to try and acquire the fields, which may lead to invalid conflicts. The other is that we do not update the managedFields on some sub-resources, including scale, so you may not see information about a horizontal pod autoscaler changing the number of replicas. + +## What’s next? +We are working hard to improve the experience of using server-side apply with kubectl, and we are trying to make it the default. As part of that, we want to improve the migration from client-side to server-side. 
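+
+If you want to experiment with this today, here is a minimal sketch of that flow (the manifest file name is only an example):
+
+```shell
+# Apply a manifest with server-side apply; conflicts with other field
+# managers are reported instead of being silently overwritten.
+kubectl apply --server-side -f my-app.yaml
+
+# If you decide you do want ownership of the conflicting fields, force
+# the apply as described in "How do I use it?" above.
+kubectl apply --server-side --force-conflicts -f my-app.yaml
+```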
+ +## Can I help? +Of course! The working-group apply is available on slack #wg-apply, through the [mailing list](https://groups.google.com/forum/#!forum/kubernetes-wg-apply) and we also meet every other Tuesday at 9.30 PT on Zoom. We have lots of exciting features to build and can use all sorts of help. + +We would also like to use the opportunity to thank the hard work of all the contributors involved in making this new beta possible: + +* Daniel Smith +* Jenny Buckley +* Joe Betz +* Julian Modesto +* Kevin Wiesmüller +* Maria Ntalla diff --git a/content/en/blog/_posts/2020-04-02-Improvements-to-the-Ingress-API-in-Kubernetes-1.18.md b/content/en/blog/_posts/2020-04-02-Improvements-to-the-Ingress-API-in-Kubernetes-1.18.md new file mode 100644 index 0000000000000..713c9821ce0a6 --- /dev/null +++ b/content/en/blog/_posts/2020-04-02-Improvements-to-the-Ingress-API-in-Kubernetes-1.18.md @@ -0,0 +1,87 @@ +--- +layout: blog +title: Improvements to the Ingress API in Kubernetes 1.18 +date: 2020-04-02 +slug: Improvements-to-the-Ingress-API-in-Kubernetes-1.18 +--- + +**Authors:** Rob Scott (Google), Christopher M Luciano (IBM) + +The Ingress API in Kubernetes has enabled a large number of controllers to provide simple and powerful ways to manage inbound network traffic to Kubernetes workloads. In Kubernetes 1.18, we've made 3 significant additions to this API: + +* A new `pathType` field that can specify how Ingress paths should be matched. +* A new `IngressClass` resource that can specify how Ingresses should be implemented by controllers. +* Support for wildcards in hostnames. + +## Better Path Matching With Path Types +The new concept of a path type allows you to specify how a path should be matched. There are three supported types: + +* __ImplementationSpecific (default):__ With this path type, matching is up to the controller implementing the `IngressClass`. Implementations can treat this as a separate `pathType` or treat it identically to the `Prefix` or `Exact` path types. +* __Exact:__ Matches the URL path exactly and with case sensitivity. +* __Prefix:__ Matches based on a URL path prefix split by `/`. Matching is case sensitive and done on a path element by element basis. + +## Extended Configuration With Ingress Classes +The Ingress resource was designed with simplicity in mind, providing a simple set of fields that would be applicable in all use cases. Over time, as use cases evolved, implementations began to rely on a long list of custom annotations for further configuration. The new `IngressClass` resource provides a way to replace some of those annotations. + +Each `IngressClass` specifies which controller should implement Ingresses of the class and can reference a custom resource with additional parameters. +``` +apiVersion: networking.k8s.io/v1beta1 +kind: IngressClass +metadata: + name: external-lb +spec: + controller: example.com/ingress-controller + parameters: + apiGroup: k8s.example.com/v1alpha + kind: IngressParameters + name: external-lb +``` + +### Specifying the Class of an Ingress +A new `ingressClassName` field has been added to the Ingress spec that is used to reference the `IngressClass` that should be used to implement this Ingress. + +### Deprecating the Ingress Class Annotation +Before the `IngressClass` resource was added in Kubernetes 1.18, a similar concept of Ingress class was often specified with a `kubernetes.io/ingress.class` annotation on the Ingress. 
Although this annotation was never formally defined, it was widely supported by Ingress controllers, and should now be considered formally deprecated.
+
+### Setting a Default IngressClass
+It’s possible to mark a specific `IngressClass` as default in a cluster. Setting the
+`ingressclass.kubernetes.io/is-default-class` annotation to true on an
+IngressClass resource will ensure that new Ingresses without an `ingressClassName` specified will be assigned this default `IngressClass`.
+
+## Support for Hostname Wildcards
+Many Ingress providers have supported wildcard hostname matching like `*.foo.com` matching `app1.foo.com`, but until now the spec assumed an exact FQDN match of the host. Hosts can now be precise matches (for example “`foo.bar.com`”) or a wildcard (for example “`*.foo.com`”). Precise matches require that the http host header matches the Host setting. Wildcard matches require the http host header is equal to the suffix of the wildcard rule.
+
+| Host | Host header | Match? |
+| ------------- |-------------| -----|
+| `*.foo.com` | `bar.foo.com` | Matches based on shared suffix |
+| `*.foo.com` | `baz.bar.foo.com` | No match, wildcard only covers a single DNS label |
+| `*.foo.com` | `foo.com` | No match, wildcard only covers a single DNS label |
+
+### Putting it All Together
+These new Ingress features allow for much more configurability. Here’s an example of an Ingress that makes use of pathType, `ingressClassName`, and a hostname wildcard:
+
+```
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+  name: example-ingress
+spec:
+  ingressClassName: external-lb
+  rules:
+  - host: "*.example.com"
+    http:
+      paths:
+      - path: /example
+        pathType: Prefix
+        backend:
+          serviceName: example-service
+          servicePort: 80
+```
+
+### Ingress Controller Support
+Since these features are new in Kubernetes 1.18, each Ingress controller implementation will need some time to develop support for these new features. Check the documentation for your preferred Ingress controllers to see when they will support this new functionality.
+
+## The Future of Ingress
+The Ingress API is on pace to graduate from beta to a stable API in Kubernetes 1.19. It will continue to provide a simple way to manage inbound network traffic for Kubernetes workloads. This API has intentionally been kept simple and lightweight, but there has been a desire for greater configurability for more advanced use cases.
+
+Work is currently underway on a new highly configurable set of APIs that will provide an alternative to Ingress in the future. These APIs are being referred to as the new “Service APIs”. They are not intended to replace any existing APIs, but instead provide a more configurable alternative for complex use cases. For more information, check out the [Service APIs repo on GitHub](http://github.com/kubernetes-sigs/service-apis).
\ No newline at end of file
diff --git a/content/en/blog/_posts/Kong-Ingress-Controller-and-Service-Mesh.md b/content/en/blog/_posts/Kong-Ingress-Controller-and-Service-Mesh.md new file mode 100644 index 0000000000000..2023f2e1dad5b --- /dev/null +++ b/content/en/blog/_posts/Kong-Ingress-Controller-and-Service-Mesh.md @@ -0,0 +1,199 @@
+---
+layout: blog
+title: 'Kong Ingress Controller and Service Mesh: Setting up Ingress to Istio on Kubernetes'
+date: 2020-03-18
+slug: kong-ingress-controller-and-istio-service-mesh
+---
+
+**Author:** Kevin Chen, Kong
+
+Kubernetes has become the de facto way to orchestrate containers and the services within services. 
But how do we give services outside our cluster access to what is within? Kubernetes comes with the Ingress API object that manages external access to services within a cluster. + +Ingress is a group of rules that will proxy inbound connections to endpoints defined by a backend. However, Kubernetes does not know what to do with Ingress resources without an Ingress controller, which is where an open source controller can come into play. In this post, we are going to use one option for this: the Kong Ingress Controller. The Kong Ingress Controller was open-sourced a year ago and recently reached one million downloads. In the recent 0.7 release, service mesh support was also added. Other features of this release include: + +* **Built-In Kubernetes Admission Controller**, which validates Custom Resource Definitions (CRD) as they are created or updated and rejects any invalid configurations. +* **In-memory Mode** - Each pod’s controller actively configures the Kong container in its pod, which limits the blast radius of failure of a single container of Kong or controller container to that pod only. +* **Native gRPC Routing** - gRPC traffic can now be routed via Kong Ingress Controller natively with support for method-based routing. + +![K4K-gRPC](/images/blog/Kong-Ingress-Controller-and-Service-Mesh/KIC-gRPC.png) + +If you would like a deeper dive into Kong Ingress Controller 0.7, please check out the [GitHub repository](https://github.com/Kong/kubernetes-ingress-controller). + +But let’s get back to the service mesh support since that will be the main focal point of this blog post. Service mesh allows organizations to address microservices challenges related to security, reliability, and observability by abstracting inter-service communication into a mesh layer. But what if our mesh layer sits within Kubernetes and we still need to expose certain services beyond our cluster? Then you need an Ingress controller such as the Kong Ingress Controller. In this blog post, we’ll cover how to deploy Kong Ingress Controller as your Ingress layer to an Istio mesh. Let’s dive right in: + +![Kong Kubernetes Ingress Controller](/images/blog/Kong-Ingress-Controller-and-Service-Mesh/k4k8s.png) + +### Part 0: Set up Istio on Kubernetes + +This blog will assume you have Istio set up on Kubernetes. If you need to catch up to this point, please check out the [Istio documentation](https://istio.io/docs/setup/). It will walk you through setting up Istio on Kubernetes. + +### 1. Install the Bookinfo Application + +First, we need to label the namespaces that will host our application and Kong proxy. To label our default namespace where the bookinfo app sits, run this command: + +``` +$ kubectl label namespace default istio-injection=enabled +namespace/default labeled +``` + +Then create a new namespace that will be hosting our Kong gateway and the Ingress controller: + +``` +$ kubectl create namespace kong +namespace/kong created +``` + +Because Kong will be sitting outside the default namespace, be sure you also label the Kong namespace with istio-injection enabled as well: + +``` +$ kubectl label namespace kong istio-injection=enabled +namespace/kong labeled +``` + +Having both namespaces labeled `istio-injection=enabled` is necessary. Or else the default configuration will not inject a sidecar container into the pods of your namespaces. 
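+
+If you want to double-check the labels before moving on, the following optional command lists each namespace together with the value of its istio-injection label:
+
+```
+$ kubectl get namespaces -L istio-injection
+```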
+ +Now deploy your BookInfo application with the following command: + +``` +$ kubectl apply -f http://bit.ly/bookinfoapp +service/details created +serviceaccount/bookinfo-details created +deployment.apps/details-v1 created +service/ratings created +serviceaccount/bookinfo-ratings created +deployment.apps/ratings-v1 created +service/reviews created +serviceaccount/bookinfo-reviews created +deployment.apps/reviews-v1 created +deployment.apps/reviews-v2 created +deployment.apps/reviews-v3 created +service/productpage created +serviceaccount/bookinfo-productpage created +deployment.apps/productpage-v1 created +``` + +Let’s double-check our Services and Pods to make sure that we have it all set up correctly: + +``` +$ kubectl get services +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +details ClusterIP 10.97.125.254 9080/TCP 29s +kubernetes ClusterIP 10.96.0.1 443/TCP 29h +productpage ClusterIP 10.97.62.68 9080/TCP 28s +ratings ClusterIP 10.96.15.180 9080/TCP 28s +reviews ClusterIP 10.104.207.136 9080/TCP 28s +``` + +You should see four new services: details, productpage, ratings, and reviews. None of them have an external IP so we will use the [Kong gateway](https://github.com/Kong/kong) to expose the necessary services. And to check pods, run the following command: + +``` +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +details-v1-c5b5f496d-9wm29 2/2 Running 0 101s +productpage-v1-7d6cfb7dfd-5mc96 2/2 Running 0 100s +ratings-v1-f745cf57b-hmkwf 2/2 Running 0 101s +reviews-v1-85c474d9b8-kqcpt 2/2 Running 0 101s +reviews-v2-ccffdd984-9jnsj 2/2 Running 0 101s +reviews-v3-98dc67b68-nzw97 2/2 Running 0 101s +``` + +This command outputs useful data, so let’s take a second to understand it. If you examine the READY column, each pod has two containers running: the service and an Envoy sidecar injected alongside it. Another thing to highlight is that there are three review pods but only 1 review service. The Envoy sidecar will load balance the traffic to three different review pods that contain different versions, giving us the ability to A/B test our changes. With that said, you should now be able to access your product page! + +``` +$ kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*" +Simple Bookstore App +``` + +### 2. Kong Kubernetes Ingress Controller Without Database + +To expose your services to the world, we will deploy Kong as the north-south traffic gateway. [Kong 1.1](https://github.com/Kong/kong/releases/tag/1.1.2) released with declarative configuration and DB-less mode. Declarative configuration allows you to specify the desired system state through a YAML or JSON file instead of a sequence of API calls. Using declarative config provides several key benefits to reduce complexity, increase automation and enhance system performance. And with the Kong Ingress Controller, any Ingress rules you apply to the cluster will automatically be configured on the Kong proxy. 
Let’s set up the Kong Ingress Controller and the actual Kong proxy first like this: + +``` +$ kubectl apply -f https://bit.ly/k4k8s +namespace/kong configured +customresourcedefinition.apiextensions.k8s.io/kongconsumers.configuration.konghq.com created +customresourcedefinition.apiextensions.k8s.io/kongcredentials.configuration.konghq.com created +customresourcedefinition.apiextensions.k8s.io/kongingresses.configuration.konghq.com created +customresourcedefinition.apiextensions.k8s.io/kongplugins.configuration.konghq.com created +serviceaccount/kong-serviceaccount created +clusterrole.rbac.authorization.k8s.io/kong-ingress-clusterrole created +clusterrolebinding.rbac.authorization.k8s.io/kong-ingress-clusterrole-nisa-binding created +configmap/kong-server-blocks created +service/kong-proxy created +service/kong-validation-webhook created +deployment.apps/ingress-kong created +``` + +To check if the Kong pod is up and running, run: + +``` +$ kubectl get pods -n kong +NAME READY STATUS RESTARTS AGE +pod/ingress-kong-8b44c9856-9s42v 3/3 Running 0 2m26s +``` + +There will be three containers within this pod. The first container is the Kong Gateway that will be the Ingress point to your cluster. The second container is the Ingress controller. It uses Ingress resources and updates the proxy to follow rules defined in the resource. And lastly, the third container is the Envoy proxy injected by Istio. Kong will route traffic through the Envoy sidecar proxy to the appropriate service. To send requests into the cluster via our newly deployed Kong Gateway, setup an environment variable with the a URL based on the IP address at which Kong is accessible. + +``` +$ export PROXY_URL="$(minikube service -n kong kong-proxy --url | head -1)" +$ echo $PROXY_URL +http://192.168.99.100:32728 +``` + +Next, we need to change some configuration so that the side-car Envoy process can route the request correctly based on the host/authority header of the request. Run the following to stop the route from preserving host: + +``` +$ echo " +apiVersion: configuration.konghq.com/v1 +kind: KongIngress +metadata: + name: do-not-preserve-host +route: + preserve_host: false +" | kubectl apply -f - +kongingress.configuration.konghq.com/do-not-preserve-host created +``` + +And annotate the existing productpage service to set service-upstream as true: + +``` +$ kubectl annotate svc productpage Ingress.kubernetes.io/service-upstream="true" +service/productpage annotated +``` + +Now that we have everything set up, we can look at how to use the Ingress resource to help route external traffic to the services within your Istio mesh. We’ll create an Ingress rule that routes all traffic with the path of `/` to our productpage service: + +``` +$ echo " +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: productpage + annotations: + configuration.konghq.com: do-not-preserve-host +spec: + rules: + - http: + paths: + - path: / + backend: + serviceName: productpage + servicePort: 9080 +" | kubectl apply -f - +ingress.extensions/productpage created +``` + +And just like that, the Kong Ingress Controller is able to understand the rules you defined in the Ingress resource and routes it to the productpage service! To view the product page service’s GUI, go to `$PROXY_URL/productpage` in your browser. Or to test it in your command line, try: + +``` +$ curl $PROXY_URL/productpage +``` + +That is all I have for this walk-through. 
If you enjoyed the technologies used in this post, please check out their repositories since they are all open source and would love to have more contributors! Here are their links for your convenience: + +* Kong: [[GitHub](https://github.com/Kong/kubernetes-ingress-controller)] [[Twitter](https://twitter.com/thekonginc)] +* Kubernetes: [[GitHub](https://github.com/kubernetes/kubernetes)] [[Twitter](https://twitter.com/kubernetesio)] +* Istio: [[GitHub](https://github.com/istio/istio)] [[Twitter](https://twitter.com/IstioMesh)] +* Envoy: [[GitHub](https://github.com/envoyproxy/envoy)] [[Twitter](https://twitter.com/EnvoyProxy)] + +Thank you for following along! diff --git a/content/en/community/_index.html b/content/en/community/_index.html index 4a686b04093d0..0f6fb9bc04db8 100644 --- a/content/en/community/_index.html +++ b/content/en/community/_index.html @@ -6,8 +6,8 @@
- Kubernetes Conference Gallery
- Kubernetes Conference Gallery
+ Kubernetes Conference Gallery
+ Kubernetes Conference Gallery
diff --git a/content/en/docs/concepts/architecture/controller.md b/content/en/docs/concepts/architecture/controller.md index e5bee1d0a52a7..0021fc2a74f07 100644 --- a/content/en/docs/concepts/architecture/controller.md +++ b/content/en/docs/concepts/architecture/controller.md @@ -113,17 +113,15 @@ useful changes, it doesn't matter if the overall state is or is not stable. As a tenet of its design, Kubernetes uses lots of controllers that each manage a particular aspect of cluster state. Most commonly, a particular control loop (controller) uses one kind of resource as its desired state, and has a different -kind of resource that it manages to make that desired state happen. +kind of resource that it manages to make that desired state happen. For example, +a controller for Jobs tracks Job objects (to discover new work) and Pod objects +(to run the Jobs, and then to see when the work is finished). In this case +something else creates the Jobs, whereas the Job controller creates Pods. It's useful to have simple controllers rather than one, monolithic set of control loops that are interlinked. Controllers can fail, so Kubernetes is designed to allow for that. -For example: a controller for Jobs tracks Job objects (to discover -new work) and Pod object (to run the Jobs, and then to see when the work is -finished). In this case something else creates the Jobs, whereas the Job -controller creates Pods. - {{< note >}} There can be several controllers that create or update the same kind of object. Behind the scenes, Kubernetes controllers make sure that they only pay attention diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index cf7bdac64b5c7..6e62881451643 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -72,7 +72,7 @@ The node condition is represented as a JSON object. For example, the following r ] ``` -If the Status of the Ready condition remains `Unknown` or `False` for longer than the `pod-eviction-timeout`, an argument is passed to the [kube-controller-manager](/docs/admin/kube-controller-manager/) and all the Pods on the node are scheduled for deletion by the Node Controller. The default eviction timeout duration is **five minutes**. In some cases when the node is unreachable, the apiserver is unable to communicate with the kubelet on the node. The decision to delete the pods cannot be communicated to the kubelet until communication with the apiserver is re-established. In the meantime, the pods that are scheduled for deletion may continue to run on the partitioned node. +If the Status of the Ready condition remains `Unknown` or `False` for longer than the `pod-eviction-timeout` (an argument passed to the [kube-controller-manager](/docs/admin/kube-controller-manager/)), all the Pods on the node are scheduled for deletion by the Node Controller. The default eviction timeout duration is **five minutes**. In some cases when the node is unreachable, the apiserver is unable to communicate with the kubelet on the node. The decision to delete the pods cannot be communicated to the kubelet until communication with the apiserver is re-established. In the meantime, the pods that are scheduled for deletion may continue to run on the partitioned node. In versions of Kubernetes prior to 1.5, the node controller would [force delete](/docs/concepts/workloads/pods/pod/#force-deletion-of-pods) these unreachable pods from the apiserver. 
However, in 1.5 and higher, the node controller does not force delete pods until it is @@ -83,8 +83,8 @@ Kubernetes causes all the Pod objects running on the node to be deleted from the The node lifecycle controller automatically creates [taints](/docs/concepts/configuration/taint-and-toleration/) that represent conditions. -When the scheduler is assigning a Pod to a Node, the scheduler takes the Node's taints -into account, except for any taints that the Pod tolerates. +The scheduler takes the Node's taints into consideration when assigning a Pod to a Node. +Pods can also have tolerations which let them tolerate a Node's taints. ### Capacity and Allocatable {#capacity} @@ -131,6 +131,8 @@ Kubernetes creates a node object internally (the representation), and validates the node by health checking based on the `metadata.name` field. If the node is valid -- that is, if all necessary services are running -- it is eligible to run a pod. Otherwise, it is ignored for any cluster activity until it becomes valid. +The name of a Node object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). {{< note >}} Kubernetes keeps the object for the invalid node and keeps checking to see whether it becomes valid. @@ -182,7 +184,7 @@ a Lease object. timeout for unreachable nodes). - The kubelet creates and then updates its Lease object every 10 seconds (the default update interval). Lease updates occur independently from the - `NodeStatus` updates. + `NodeStatus` updates. If the Lease update fails, the kubelet retries with exponential backoff starting at 200 milliseconds and capped at 7 seconds. #### Reliability diff --git a/content/en/docs/concepts/cluster-administration/addons.md b/content/en/docs/concepts/cluster-administration/addons.md index 75bf1b6a223c1..c7f3dd89096f9 100644 --- a/content/en/docs/concepts/cluster-administration/addons.md +++ b/content/en/docs/concepts/cluster-administration/addons.md @@ -46,7 +46,7 @@ Add-ons in each section are sorted alphabetically - the ordering does not imply ## Infrastructure -* [KubeVirt](https://kubevirt.io/user-guide/docs/latest/administration/intro.html#cluster-side-add-on-deployment) is an add-on to run virtual machines on Kubernetes. Usually run on bare-metal clusters. +* [KubeVirt](https://kubevirt.io/user-guide/#/installation/installation) is an add-on to run virtual machines on Kubernetes. Usually run on bare-metal clusters. ## Legacy Add-ons diff --git a/content/en/docs/concepts/cluster-administration/certificates.md b/content/en/docs/concepts/cluster-administration/certificates.md index 6ef3f813c527e..052e7b9aa5b66 100644 --- a/content/en/docs/concepts/cluster-administration/certificates.md +++ b/content/en/docs/concepts/cluster-administration/certificates.md @@ -130,11 +130,11 @@ Finally, add the same parameters into the API server start parameters. Note that you may need to adapt the sample commands based on the hardware architecture and cfssl version you are using. 
- curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o cfssl + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl_1.4.1_linux_amd64 -o cfssl chmod +x cfssl - curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o cfssljson + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssljson_1.4.1_linux_amd64 -o cfssljson chmod +x cfssljson - curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o cfssl-certinfo + curl -L https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl-certinfo_1.4.1_linux_amd64 -o cfssl-certinfo chmod +x cfssl-certinfo 1. Create a directory to hold the artifacts and initialize cfssl: diff --git a/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md index a82533530496b..5ba0bb30d856a 100644 --- a/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -20,7 +20,6 @@ See the guides in [Setup](/docs/setup/) for examples of how to plan, set up, and Before choosing a guide, here are some considerations: - Do you just want to try out Kubernetes on your computer, or do you want to build a high-availability, multi-node cluster? Choose distros best suited for your needs. - - **If you are designing for high-availability**, learn about configuring [clusters in multiple zones](/docs/concepts/cluster-administration/federation/). - Will you be using **a hosted Kubernetes cluster**, such as [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/), or **hosting your own cluster**? - Will your cluster be **on-premises**, or **in the cloud (IaaS)**? Kubernetes does not directly support hybrid clusters. Instead, you can set up multiple clusters. - **If you are configuring Kubernetes on-premises**, consider which [networking model](/docs/concepts/cluster-administration/networking/) fits best. @@ -44,7 +43,7 @@ Note: Not all distros are actively maintained. Choose distros which have been te * [Certificates](/docs/concepts/cluster-administration/certificates/) describes the steps to generate certificates using different tool chains. -* [Kubernetes Container Environment](/docs/concepts/containers/container-environment-variables/) describes the environment for Kubelet managed containers on a Kubernetes node. +* [Kubernetes Container Environment](/docs/concepts/containers/container-environment/) describes the environment for Kubelet managed containers on a Kubernetes node. * [Controlling Access to the Kubernetes API](/docs/reference/access-authn-authz/controlling-access/) describes how to set up permissions for users and service accounts. diff --git a/content/en/docs/concepts/cluster-administration/federation.md b/content/en/docs/concepts/cluster-administration/federation.md deleted file mode 100644 index e26aef3fa8510..0000000000000 --- a/content/en/docs/concepts/cluster-administration/federation.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: Federation -content_template: templates/concept -weight: 80 ---- - -{{% capture overview %}} - -{{< deprecationfilewarning >}} -{{< include "federation-deprecation-warning-note.md" >}} -{{< /deprecationfilewarning >}} - -This page explains why and how to manage multiple Kubernetes clusters using -federation. -{{% /capture %}} - -{{% capture body %}} -## Why federation - -Federation makes it easy to manage multiple clusters. 
It does so by providing 2 -major building blocks: - - * Sync resources across clusters: Federation provides the ability to keep - resources in multiple clusters in sync. For example, you can ensure that the same deployment exists in multiple clusters. - * Cross cluster discovery: Federation provides the ability to auto-configure DNS servers and load balancers with backends from all clusters. For example, you can ensure that a global VIP or DNS record can be used to access backends from multiple clusters. - -Some other use cases that federation enables are: - -* High Availability: By spreading load across clusters and auto configuring DNS - servers and load balancers, federation minimises the impact of cluster - failure. -* Avoiding provider lock-in: By making it easier to migrate applications across - clusters, federation prevents cluster provider lock-in. - - -Federation is not helpful unless you have multiple clusters. Some of the reasons -why you might want multiple clusters are: - -* Low latency: Having clusters in multiple regions minimises latency by serving - users from the cluster that is closest to them. -* Fault isolation: It might be better to have multiple small clusters rather - than a single large cluster for fault isolation (for example: multiple - clusters in different availability zones of a cloud provider). -* Scalability: There are scalability limits to a single kubernetes cluster (this - should not be the case for most users. For more details: - [Kubernetes Scaling and Performance Goals](https://git.k8s.io/community/sig-scalability/goals.md)). -* [Hybrid cloud](#hybrid-cloud-capabilities): You can have multiple clusters on different cloud providers or - on-premises data centers. - -### Caveats - -While there are a lot of attractive use cases for federation, there are also -some caveats: - -* Increased network bandwidth and cost: The federation control plane watches all - clusters to ensure that the current state is as expected. This can lead to - significant network cost if the clusters are running in different regions on - a cloud provider or on different cloud providers. -* Reduced cross cluster isolation: A bug in the federation control plane can - impact all clusters. This is mitigated by keeping the logic in federation - control plane to a minimum. It mostly delegates to the control plane in - kubernetes clusters whenever it can. The design and implementation also errs - on the side of safety and avoiding multi-cluster outage. -* Maturity: The federation project is relatively new and is not very mature. - Not all resources are available and many are still alpha. [Issue - 88](https://github.com/kubernetes/federation/issues/88) enumerates - known issues with the system that the team is busy solving. - -### Hybrid cloud capabilities - -Federations of Kubernetes Clusters can include clusters running in -different cloud providers (e.g. Google Cloud, AWS), and on-premises -(e.g. on OpenStack). [Kubefed](/docs/tasks/federation/set-up-cluster-federation-kubefed/) is the recommended way to deploy federated clusters. - -Thereafter, your [API resources](#api-resources) can span different clusters -and cloud providers. - -## Setting up federation - -To be able to federate multiple clusters, you first need to set up a federation -control plane. -Follow the [setup guide](/docs/tutorials/federation/set-up-cluster-federation-kubefed/) to set up the -federation control plane. 
- -## API resources - -Once you have the control plane set up, you can start creating federation API -resources. -The following guides explain some of the resources in detail: - -* [Cluster](/docs/tasks/federation/administer-federation/cluster/) -* [ConfigMap](/docs/tasks/federation/administer-federation/configmap/) -* [DaemonSets](/docs/tasks/federation/administer-federation/daemonset/) -* [Deployment](/docs/tasks/federation/administer-federation/deployment/) -* [Events](/docs/tasks/federation/administer-federation/events/) -* [Hpa](/docs/tasks/federation/administer-federation/hpa/) -* [Ingress](/docs/tasks/federation/administer-federation/ingress/) -* [Jobs](/docs/tasks/federation/administer-federation/job/) -* [Namespaces](/docs/tasks/federation/administer-federation/namespaces/) -* [ReplicaSets](/docs/tasks/federation/administer-federation/replicaset/) -* [Secrets](/docs/tasks/federation/administer-federation/secret/) -* [Services](/docs/concepts/cluster-administration/federation-service-discovery/) - - -The [API reference docs](/docs/reference/federation/) list all the -resources supported by federation apiserver. - -## Cascading deletion - -Kubernetes version 1.6 includes support for cascading deletion of federated -resources. With cascading deletion, when you delete a resource from the -federation control plane, you also delete the corresponding resources in all underlying clusters. - -Cascading deletion is not enabled by default when using the REST API. To enable -it, set the option `DeleteOptions.orphanDependents=false` when you delete a -resource from the federation control plane using the REST API. Using `kubectl -delete` -enables cascading deletion by default. You can disable it by running `kubectl -delete --cascade=false` - -Note: Kubernetes version 1.5 included cascading deletion support for a subset of -federation resources. - -## Scope of a single cluster - -On IaaS providers such as Google Compute Engine or Amazon Web Services, a VM exists in a -[zone](https://cloud.google.com/compute/docs/zones) or [availability -zone](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html). -We suggest that all the VMs in a Kubernetes cluster should be in the same availability zone, because: - - - compared to having a single global Kubernetes cluster, there are fewer single-points of failure. - - compared to a cluster that spans availability zones, it is easier to reason about the availability properties of a - single-zone cluster. - - when the Kubernetes developers are designing the system (e.g. making assumptions about latency, bandwidth, or - correlated failures) they are assuming all the machines are in a single data center, or otherwise closely connected. - -It is recommended to run fewer clusters with more VMs per availability zone; but it is possible to run multiple clusters per availability zones. - -Reasons to prefer fewer clusters per availability zone are: - - - improved bin packing of Pods in some cases with more nodes in one cluster (less resource fragmentation). - - reduced operational overhead (though the advantage is diminished as ops tooling and processes mature). - - reduced costs for per-cluster fixed resource costs, for example apiserver VMs (but small as a percentage - of overall cluster cost for medium to large clusters). - -Reasons to have multiple clusters include: - - - strict security policies requiring isolation of one class of work from another (but, see Partitioning Clusters - below). 
- - test clusters to canary new Kubernetes releases or other cluster software. - -## Selecting the right number of clusters - -The selection of the number of Kubernetes clusters may be a relatively static choice, only revisited occasionally. -By contrast, the number of nodes in a cluster and the number of pods in a service may change frequently according to -load and growth. - -To pick the number of clusters, first, decide which regions you need to be in to have adequate latency to all your end users, for services that will run -on Kubernetes (if you use a Content Distribution Network, the latency requirements for the CDN-hosted content need not -be considered). Legal issues might influence this as well. For example, a company with a global customer base might decide to have clusters in US, EU, AP, and SA regions. -Call the number of regions to be in `R`. - -Second, decide how many clusters should be able to be unavailable at the same time, while still being available. Call -the number that can be unavailable `U`. If you are not sure, then 1 is a fine choice. - -If it is allowable for load-balancing to direct traffic to any region in the event of a cluster failure, then -you need at least the larger of `R` or `U + 1` clusters. If it is not (e.g. you want to ensure low latency for all -users in the event of a cluster failure), then you need to have `R * (U + 1)` clusters -(`U + 1` in each of `R` regions). In any case, try to put each cluster in a different zone. - -Finally, if any of your clusters would need more than the maximum recommended number of nodes for a Kubernetes cluster, then -you may need even more clusters. Kubernetes v1.3 supports clusters up to 1000 nodes in size. Kubernetes v1.8 supports -clusters up to 5000 nodes. See [Building Large Clusters](/docs/setup/best-practices/cluster-large/) for more guidance. - -{{% /capture %}} - -{{% capture whatsnext %}} -* Learn more about the [Federation - proposal](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/multicluster/federation.md). -* See this [setup guide](/docs/tutorials/federation/set-up-cluster-federation-kubefed/) for cluster federation. -* See this [Kubecon2016 talk on federation](https://www.youtube.com/watch?v=pq9lbkmxpS8) -* See this [Kubecon2017 Europe update on federation](https://www.youtube.com/watch?v=kwOvOLnFYck) -* See this [Kubecon2018 Europe update on sig-multicluster](https://www.youtube.com/watch?v=vGZo5DaThQU) -* See this [Kubecon2018 Europe Federation-v2 prototype presentation](https://youtu.be/q27rbaX5Jis?t=7m20s) -* See this [Federation-v2 Userguide](https://github.com/kubernetes-sigs/federation-v2/blob/master/docs/userguide.md) -{{% /capture %}} diff --git a/content/en/docs/concepts/cluster-administration/flow-control.md b/content/en/docs/concepts/cluster-administration/flow-control.md new file mode 100644 index 0000000000000..c41998f6e9300 --- /dev/null +++ b/content/en/docs/concepts/cluster-administration/flow-control.md @@ -0,0 +1,377 @@ +--- +title: API Priority and Fairness +content_template: templates/concept +min-kubernetes-server-version: v1.18 +--- + +{{% capture overview %}} + +{{< feature-state state="alpha" for_k8s_version="v1.18" >}} + +Controlling the behavior of the Kubernetes API server in an overload situation +is a key task for cluster administrators. The {{< glossary_tooltip +term_id="kube-apiserver" text="kube-apiserver" >}} has some controls available +(i.e. 
the `--max-requests-inflight` and `--max-mutating-requests-inflight` +command-line flags) to limit the amount of outstanding work that will be +accepted, preventing a flood of inbound requests from overloading and +potentially crashing the API server, but these flags are not enough to ensure +that the most important requests get through in a period of high traffic. + +The API Priority and Fairness feature (APF) is an alternative that improves upon +aforementioned max-inflight limitations. APF classifies +and isolates requests in a more fine-grained way. It also introduces +a limited amount of queuing, so that no requests are rejected in cases +of very brief bursts. Requests are dispatched from queues using a +fair queuing technique so that, for example, a poorly-behaved {{< +glossary_tooltip text="controller" term_id="controller" >}}) need not +starve others (even at the same priority level). + +{{< caution >}} +Requests classified as "long-running" — primarily watches — are not +subject to the API Priority and Fairness filter. This is also true for +the `--max-requests-inflight` flag without the API Priority and +Fairness feature enabled. +{{< /caution >}} + +{{% /capture %}} + +{{% capture body %}} + +## Enabling API Priority and Fairness + +The API Priority and Fairness feature is controlled by a feature gate +and is not enabled by default. See +[Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/) +for a general explanation of feature gates and how to enable and disable them. The +name of the feature gate for APF is "APIPriorityAndFairness". This +feature also involves an {{< glossary_tooltip term_id="api-group" +text="API Group" >}} that must be enabled. You can do these +things by adding the following command-line flags to your +`kube-apiserver` invocation: + +```shell +kube-apiserver \ +--feature-gates=APIPriorityAndFairness=true \ +--runtime-config=flowcontrol.apiserver.k8s.io/v1alpha1=true \ + # …and other flags as usual +``` + +The command-line flag `--enable-priority-and-fairness=false` will disable the +API Priority and Fairness feature, even if other flags have enabled it. + +## Concepts +There are several distinct features involved in the API Priority and Fairness +feature. Incoming requests are classified by attributes of the request using +_FlowSchemas_, and assigned to priority levels. Priority levels add a degree of +isolation by maintaining separate concurrency limits, so that requests assigned +to different priority levels cannot starve each other. Within a priority level, +a fair-queuing algorithm prevents requests from different _flows_ from starving +each other, and allows for requests to be queued to prevent bursty traffic from +causing failed requests when the average load is acceptably low. + +### Priority Levels +Without APF enabled, overall concurrency in +the API server is limited by the `kube-apiserver` flags +`--max-requests-inflight` and `--max-mutating-requests-inflight`. With APF +enabled, the concurrency limits defined by these flags are summed and then the sum is divided up +among a configurable set of _priority levels_. Each incoming request is assigned +to a single priority level, and each priority level will only dispatch as many +concurrent requests as its configuration allows. + +The default configuration, for example, includes separate priority levels for +leader-election requests, requests from built-in controllers, and requests from +Pods. 
This means that an ill-behaved Pod that floods the API server with +requests cannot prevent leader election or actions by the built-in controllers +from succeeding. + +### Queuing +Even within a priority level there may be a large number of distinct sources of +traffic. In an overload situation, it is valuable to prevent one stream of +requests from starving others (in particular, in the relatively common case of a +single buggy client flooding the kube-apiserver with requests, that buggy client +would ideally not have much measurable impact on other clients at all). This is +handled by use of a fair-queuing algorithm to process requests that are assigned +the same priority level. Each request is assigned to a _flow_, identified by the +name of the matching FlowSchema plus a _flow distinguisher_ — which +is either the requesting user, the target resource's namespace, or nothing — and the +system attempts to give approximately equal weight to requests in different +flows of the same priority level. + +After classifying a request into a flow, the API Priority and Fairness +feature then may assign the request to a queue. This assignment uses +a technique known as {{< glossary_tooltip term_id="shuffle-sharding" +text="shuffle sharding" >}}, which makes relatively efficient use of +queues to insulate low-intensity flows from high-intensity flows. + +The details of the queuing algorithm are tunable for each priority level, and +allow administrators to trade off memory use, fairness (the property that +independent flows will all make progress when total traffic exceeds capacity), +tolerance for bursty traffic, and the added latency induced by queuing. + +### Exempt requests +Some requests are considered sufficiently important that they are not subject to +any of the limitations imposed by this feature. These exemptions prevent an +improperly-configured flow control configuration from totally disabling an API +server. + +## Defaults +The Priority and Fairness feature ships with a suggested configuration that +should suffice for experimentation; if your cluster is likely to +experience heavy load then you should consider what configuration will work best. The suggested configuration groups requests into five priority +classes: + +* The `system` priority level is for requests from the `system:nodes` group, + i.e. Kubelets, which must be able to contact the API server in order for + workloads to be able to schedule on them. + +* The `leader-election` priority level is for leader election requests from + built-in controllers (in particular, requests for `endpoints`, `configmaps`, + or `leases` coming from the `system:kube-controller-manager` or + `system:kube-scheduler` users and service accounts in the `kube-system` + namespace). These are important to isolate from other traffic because failures + in leader election cause their controllers to fail and restart, which in turn + causes more expensive traffic as the new controllers sync their informers. + +* The `workload-high` priority level is for other requests from built-in + controllers. + +* The `workload-low` priority level is for requests from any other service + account, which will typically include all requests from controllers runing in + Pods. + +* The `global-default` priority level handles all other traffic, e.g. + interactive `kubectl` commands run by nonprivileged users. 
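+
+On a cluster that has the feature gate and the flow-control API group enabled, you can inspect these suggested objects directly; a quick sketch:
+
+```shell
+# List the FlowSchemas and PriorityLevelConfigurations that the
+# kube-apiserver maintains by default.
+kubectl get flowschemas
+kubectl get prioritylevelconfigurations
+```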
+ +Additionally, there are two PriorityLevelConfigurations and two FlowSchemas that +are built in and may not be overwritten: + +* The special `exempt` priority level is used for requests that are not subject + to flow control at all: they will always be dispatched immediately. The + special `exempt` FlowSchema classifies all requests from the `system:masters` + group into this priority level. You may define other FlowSchemas that direct + other requests to this priority level, if appropriate. + +* The special `catch-all` priority level is used in combination with the special + `catch-all` FlowSchema to make sure that every request gets some kind of + classification. Typically you should not rely on this catch-all configuration, + and should create your own catch-all FlowSchema and PriorityLevelConfiguration + (or use the `global-default` configuration that is installed by default) as + appropriate. To help catch configuration errors that miss classifying some + requests, the mandatory `catch-all` priority level only allows one concurrency + share and does not queue requests, making it relatively likely that traffic + that only matches the `catch-all` FlowSchema will be rejected with an HTTP 429 + error. + +## Resources +The flow control API involves two kinds of resources. +[PriorityLevelConfigurations](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prioritylevelconfiguration-v1alpha1-flowcontrol) +define the available isolation classes, the share of the available concurrency +budget that each can handle, and allow for fine-tuning queuing behavior. +[FlowSchemas](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#flowschema-v1alpha1-flowcontrol) +are used to classify individual inbound requests, matching each to a single +PriorityLevelConfiguration. + +### PriorityLevelConfiguration +A PriorityLevelConfiguration represents a single isolation class. Each +PriorityLevelConfiguration has an independent limit on the number of outstanding +requests, and limitations on the number of queued requests. + +Concurrency limits for PriorityLevelConfigurations are not specified in absolute +number of requests, but rather in "concurrency shares." The total concurrency +limit for the API Server is distributed among the existing +PriorityLevelConfigurations in proportion with these shares. This allows a +cluster administrator to scale up or down the total amount of traffic to a +server by restarting `kube-apiserver` with a different value for +`--max-requests-inflight` (or `--max-mutating-requests-inflight`), and all +PriorityLevelConfigurations will see their maximum allowed concurrency go up (or +down) by the same fraction. +{{< caution >}} +With the Priority and Fairness feature enabled, the total concurrency limit for +the server is set to the sum of `--max-requests-inflight` and +`--max-mutating-requests-inflight`. There is no longer any distinction made +between mutating and non-mutating requests; if you want to treat them +separately for a given resource, make separate FlowSchemas that match the +mutating and non-mutating verbs respectively. +{{< /caution >}} + +When the volume of inbound requests assigned to a single +PriorityLevelConfiguration is more than its permitted concurrency level, the +`type` field of its specification determines what will happen to extra requests. +A type of `Reject` means that excess traffic will immediately be rejected with +an HTTP 429 (Too Many Requests) error. 
A type of `Queue` means that requests +above the threshold will be queued, with the shuffle sharding and fair queuing techniques used +to balance progress between request flows. + +The queuing configuration allows tuning the fair queuing algorithm for a +priority level. Details of the algorithm can be read in the [enhancement +proposal](#what-s-next), but in short: + +* Increasing `queues` reduces the rate of collisions between different flows, at + the cost of increased memory usage. A value of 1 here effectively disables the + fair-queuing logic, but still allows requests to be queued. + +* Increasing `queueLengthLimit` allows larger bursts of traffic to be + sustained without dropping any requests, at the cost of increased + latency and memory usage. + +* Changing `handSize` allows you to adjust the probability of collisions between + different flows and the overall concurrency available to a single flow in an + overload situation. + {{< note >}} + A larger `handSize` makes it less likely for two individual flows to collide + (and therefore for one to be able to starve the other), but more likely that + a small number of flows can dominate the apiserver. A larger `handSize` also + potentially increases the amount of latency that a single high-traffic flow + can cause. The maximum number of queued requests possible from a + single flow is `handSize * queueLengthLimit`. + {{< /note >}} + + +Following is a table showing an interesting collection of shuffle +sharding configurations, showing for each the probability that a +given mouse (low-intensity flow) is squished by the elephants (high-intensity flows) for +an illustrative collection of numbers of elephants. See +https://play.golang.org/p/Gi0PLgVHiUg , which computes this table. + +{{< table caption="Example Shuffle Sharding Configurations" >}} +|HandSize| Queues| 1 elephant| 4 elephants| 16 elephants| +|--------|-----------|------------|----------------|--------------------| +| 12| 32| 4.428838398950118e-09| 0.11431348830099144| 0.9935089607656024| +| 10| 32| 1.550093439632541e-08| 0.0626479840223545| 0.9753101519027554| +| 10| 64| 6.601827268370426e-12| 0.00045571320990370776| 0.49999929150089345| +| 9| 64| 3.6310049976037345e-11| 0.00045501212304112273| 0.4282314876454858| +| 8| 64| 2.25929199850899e-10| 0.0004886697053040446| 0.35935114681123076| +| 8| 128| 6.994461389026097e-13| 3.4055790161620863e-06| 0.02746173137155063| +| 7| 128| 1.0579122850901972e-11| 6.960839379258192e-06| 0.02406157386340147| +| 7| 256| 7.597695465552631e-14| 6.728547142019406e-08| 0.0006709661542533682| +| 6| 256| 2.7134626662687968e-12| 2.9516464018476436e-07| 0.0008895654642000348| +| 6| 512| 4.116062922897309e-14| 4.982983350480894e-09| 2.26025764343413e-05| +| 6| 1024| 6.337324016514285e-16| 8.09060164312957e-11| 4.517408062903668e-07| + +### FlowSchema + +A FlowSchema matches some inbound requests and assigns them to a +priority level. Every inbound request is tested against every +FlowSchema in turn, starting with those with numerically lowest --- +which we take to be the logically highest --- `matchingPrecedence` and +working onward. The first match wins. + +{{< caution >}} +Only the first matching FlowSchema for a given request matters. If multiple +FlowSchemas match a single inbound request, it will be assigned based on the one +with the highest `matchingPrecedence`. 
If multiple FlowSchemas with equal +`matchingPrecedence` match the same request, the one with lexicographically +smaller `name` will win, but it's better not to rely on this, and instead to +ensure that no two FlowSchemas have the same `matchingPrecedence`. +{{< /caution >}} + +A FlowSchema matches a given request if at least one of its `rules` +matches. A rule matches if at least one of its `subjects` *and* at least +one of its `resourceRules` or `nonResourceRules` (depending on whether the +incoming request is for a resource or non-resource URL) matches the request. + +For the `name` field in subjects, and the `verbs`, `apiGroups`, `resources`, +`namespaces`, and `nonResourceURLs` fields of resource and non-resource rules, +the wildcard `*` may be specified to match all values for the given field, +effectively removing it from consideration. + +A FlowSchema's `distinguisherMethod.type` determines how requests matching that +schema will be separated into flows. It may be +either `ByUser`, in which case one requesting user will not be able to starve +other users of capacity, or `ByNamespace`, in which case requests for resources +in one namespace will not be able to starve requests for resources in other +namespaces of capacity, or it may be blank (or `distinguisherMethod` may be +omitted entirely), in which case all requests matched by this FlowSchema will be +considered part of a single flow. The correct choice for a given FlowSchema +depends on the resource and your particular environment. + +## Diagnostics +Every HTTP response from an API server with the priority and fairness feature +enabled has two extra headers: `X-Kubernetes-PF-FlowSchema-UID` and +`X-Kubernetes-PF-PriorityLevel-UID`, noting the flow schema that matched the request +and the priority level to which it was assigned, respectively. The API objects' +names are not included in these headers in case the requesting user does not +have permission to view them, so when debugging you can use a command like + +```shell +kubectl get flowschemas -o custom-columns="uid:{metadata.uid},name:{metadata.name}" +kubectl get prioritylevelconfigurations -o custom-columns="uid:{metadata.uid},name:{metadata.name}" +``` + +to get a mapping of UIDs to names for both FlowSchemas and +PriorityLevelConfigurations. + +## Observability +When you enable the API Priority and Fairness feature, the kube-apiserver +exports additional metrics. Monitoring these can help you determine whether your +configuration is inappropriately throttling important traffic, or find +poorly-behaved workloads that may be harming system health. + +* `apiserver_flowcontrol_rejected_requests_total` counts requests that + were rejected, grouped by the name of the assigned priority level, + the name of the assigned FlowSchema, and the reason for rejection. + The reason will be one of the following: + * `queue-full`, indicating that too many requests were already + queued, + * `concurrency-limit`, indicating that the + PriorityLevelConfiguration is configured to reject rather than + queue excess requests, or + * `time-out`, indicating that the request was still in the queue + when its queuing time limit expired. + +* `apiserver_flowcontrol_dispatched_requests_total` counts requests + that began executing, grouped by the name of the assigned priority + level and the name of the assigned FlowSchema. + +* `apiserver_flowcontrol_current_inqueue_requests` gives the + instantaneous total number of queued (not executing) requests, + grouped by priority level and FlowSchema. 
+ +* `apiserver_flowcontrol_current_executing_requests` gives the instantaneous + total number of executing requests, grouped by priority level and FlowSchema. + +* `apiserver_flowcontrol_request_queue_length_after_enqueue` gives a + histogram of queue lengths for the queues, grouped by priority level + and FlowSchema, as sampled by the enqueued requests. Each request + that gets queued contributes one sample to its histogram, reporting + the length of the queue just after the request was added. Note that + this produces different statistics than an unbiased survey would. + {{< note >}} + An outlier value in a histogram here means it is likely that a single flow + (i.e., requests by one user or for one namespace, depending on + configuration) is flooding the API server, and being throttled. By contrast, + if one priority level's histogram shows that all queues for that priority + level are longer than those for other priority levels, it may be appropriate + to increase that PriorityLevelConfiguration's concurrency shares. + {{< /note >}} + +* `apiserver_flowcontrol_request_concurrency_limit` gives the computed + concurrency limit (based on the API server's total concurrency limit and PriorityLevelConfigurations' + concurrency shares) for each PriorityLevelConfiguration. + +* `apiserver_flowcontrol_request_wait_duration_seconds` gives a histogram of how + long requests spent queued, grouped by the FlowSchema that matched the + request, the PriorityLevel to which it was assigned, and whether or not the + request successfully executed. + {{< note >}} + Since each FlowSchema always assigns requests to a single + PriorityLevelConfiguration, you can add the histograms for all the + FlowSchemas for one priority level to get the effective histogram for + requests assigned to that priority level. + {{< /note >}} + +* `apiserver_flowcontrol_request_execution_seconds` gives a histogram of how + long requests took to actually execute, grouped by the FlowSchema that matched the + request and the PriorityLevel to which it was assigned. + + +{{% /capture %}} + +{{% capture whatsnext %}} + +For background information on design details for API priority and fairness, see +the [enhancement proposal](https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190228-priority-and-fairness.md). +You can make suggestions and feature requests via [SIG API +Machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery). + +{{% /capture %}} diff --git a/content/en/docs/concepts/cluster-administration/manage-deployment.md b/content/en/docs/concepts/cluster-administration/manage-deployment.md index 39e9695062a09..eedafce1a3e3e 100644 --- a/content/en/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/en/docs/concepts/cluster-administration/manage-deployment.md @@ -424,16 +424,16 @@ At some point, you'll eventually need to update your deployed application, typic We'll guide you through how to create and update applications with Deployments. -Let's say you were running version 1.7.9 of nginx: +Let's say you were running version 1.14.2 of nginx: ```shell -kubectl run my-nginx --image=nginx:1.7.9 --replicas=3 +kubectl run my-nginx --image=nginx:1.14.2 --replicas=3 ``` ```shell deployment.apps/my-nginx created ``` -To update to version 1.9.1, simply change `.spec.template.spec.containers[0].image` from `nginx:1.7.9` to `nginx:1.9.1`, with the kubectl commands we learned above. 
+To update to version 1.16.1, simply change `.spec.template.spec.containers[0].image` from `nginx:1.14.2` to `nginx:1.16.1`, with the kubectl commands we learned above. ```shell kubectl edit deployment/my-nginx diff --git a/content/en/docs/concepts/configuration/assign-pod-node.md b/content/en/docs/concepts/configuration/assign-pod-node.md index 19f33c47e839e..2323cd76f4f93 100644 --- a/content/en/docs/concepts/configuration/assign-pod-node.md +++ b/content/en/docs/concepts/configuration/assign-pod-node.md @@ -111,9 +111,10 @@ For example, `example.com.node-restriction.kubernetes.io/fips=true` or `example. `nodeSelector` provides a very simple way to constrain pods to nodes with particular labels. The affinity/anti-affinity feature, greatly expands the types of constraints you can express. The key enhancements are -1. the language is more expressive (not just "AND or exact match") +1. The affinity/anti-affinity language is more expressive. The language offers more matching rules + besides exact matches created with a logical AND operation; 2. you can indicate that the rule is "soft"/"preference" rather than a hard requirement, so if the scheduler - can't satisfy it, the pod will still be scheduled + can't satisfy it, the pod will still be scheduled; 3. you can constrain against labels on other pods running on the node (or other topological domain), rather than against labels on the node itself, which allows rules about which pods can and cannot be co-located @@ -159,9 +160,9 @@ You can use `NotIn` and `DoesNotExist` to achieve node anti-affinity behavior, o If you specify both `nodeSelector` and `nodeAffinity`, *both* must be satisfied for the pod to be scheduled onto a candidate node. -If you specify multiple `nodeSelectorTerms` associated with `nodeAffinity` types, then the pod can be scheduled onto a node **if one of** the `nodeSelectorTerms` is satisfied. +If you specify multiple `nodeSelectorTerms` associated with `nodeAffinity` types, then the pod can be scheduled onto a node **only if all** `nodeSelectorTerms` can be satisfied. -If you specify multiple `matchExpressions` associated with `nodeSelectorTerms`, then the pod can be scheduled onto a node **only if all** `matchExpressions` can be satisfied. +If you specify multiple `matchExpressions` associated with `nodeSelectorTerms`, then the pod can be scheduled onto a node **if one of** the `matchExpressions` is satisfied. If you remove or change the label of the node where the pod is scheduled, the pod won't be removed. In other words, the affinity selection works only at the time of scheduling the pod. @@ -228,7 +229,7 @@ for performance and security reasons, there are some constraints on topologyKey: 1. For affinity and for `requiredDuringSchedulingIgnoredDuringExecution` pod anti-affinity, empty `topologyKey` is not allowed. 2. For `requiredDuringSchedulingIgnoredDuringExecution` pod anti-affinity, the admission controller `LimitPodHardAntiAffinityTopology` was introduced to limit `topologyKey` to `kubernetes.io/hostname`. If you want to make it available for custom topologies, you may modify the admission controller, or simply disable it. -3. For `preferredDuringSchedulingIgnoredDuringExecution` pod anti-affinity, empty `topologyKey` is interpreted as "all topologies" ("all topologies" here is now limited to the combination of `kubernetes.io/hostname`, `failure-domain.beta.kubernetes.io/zone` and `failure-domain.beta.kubernetes.io/region`). +3. 
For `preferredDuringSchedulingIgnoredDuringExecution` pod anti-affinity, empty `topologyKey` is not allowed. 4. Except for the above cases, the `topologyKey` can be any legal label-key. In addition to `labelSelector` and `topologyKey`, you can optionally specify a list `namespaces` @@ -318,7 +319,7 @@ spec: topologyKey: "kubernetes.io/hostname" containers: - name: web-app - image: nginx:1.12-alpine + image: nginx:1.16-alpine ``` If we create the above two deployments, our three node cluster should look like below. diff --git a/content/en/docs/concepts/configuration/manage-compute-resources-container.md b/content/en/docs/concepts/configuration/manage-compute-resources-container.md index 43e71b15f73d1..597d1e796080c 100644 --- a/content/en/docs/concepts/configuration/manage-compute-resources-container.md +++ b/content/en/docs/concepts/configuration/manage-compute-resources-container.md @@ -68,13 +68,7 @@ resource requests/limits of that type for each Container in the Pod. ## Meaning of CPU Limits and requests for CPU resources are measured in *cpu* units. -One cpu, in Kubernetes, is equivalent to: - -- 1 AWS vCPU -- 1 GCP Core -- 1 Azure vCore -- 1 IBM vCPU -- 1 *Hyperthread* on a bare-metal Intel processor with Hyperthreading +One cpu, in Kubernetes, is equivalent to **1 vCPU/Core** for cloud providers and **1 hyperthread** on bare-metal Intel processors. Fractional requests are allowed. A Container with `spec.containers[].resources.requests.cpu` of `0.5` is guaranteed half as much @@ -191,9 +185,10 @@ resource limits, see the The resource usage of a Pod is reported as part of the Pod status. -If [optional monitoring](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/cluster-monitoring/README.md) -is configured for your cluster, then Pod resource usage can be retrieved from -the monitoring system. +If optional [tools for monitoring](/docs/tasks/debug-application-cluster/resource-usage-monitoring/) +are available in your cluster, then Pod resource usage can be retrieved either +from the [Metrics API](/docs/tasks/debug-application-cluster/resource-metrics-pipeline/#the-metrics-api) +directly or from your monitoring tools. ## Troubleshooting @@ -391,7 +386,7 @@ spec: ### How Pods with ephemeral-storage requests are scheduled When you create a Pod, the Kubernetes scheduler selects a node for the Pod to -run on. Each node has a maximum amount of local ephemeral storage it can provide for Pods. For more information, see ["Node Allocatable"](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable). +run on. Each node has a maximum amount of local ephemeral storage it can provide for Pods. For more information, see ["Node Allocatable"](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable). The scheduler ensures that the sum of the resource requests of the scheduled Containers is less than the capacity of the node. diff --git a/content/en/docs/concepts/configuration/pod-overhead.md b/content/en/docs/concepts/configuration/pod-overhead.md index 8309fce51e88c..0e796df9ffc49 100644 --- a/content/en/docs/concepts/configuration/pod-overhead.md +++ b/content/en/docs/concepts/configuration/pod-overhead.md @@ -10,12 +10,12 @@ weight: 20 {{% capture overview %}} -{{< feature-state for_k8s_version="v1.16" state="alpha" >}} +{{< feature-state for_k8s_version="v1.18" state="beta" >}} When you run a Pod on a Node, the Pod itself takes an amount of system resources. 
These resources are additional to the resources needed to run the container(s) inside the Pod. -_Pod Overhead_ is a feature for accounting for the resources consumed by the pod infrastructure +_Pod Overhead_ is a feature for accounting for the resources consumed by the Pod infrastructure on top of the container requests & limits. @@ -24,33 +24,169 @@ on top of the container requests & limits. {{% capture body %}} -## Pod Overhead - -In Kubernetes, the pod's overhead is set at +In Kubernetes, the Pod's overhead is set at [admission](/docs/reference/access-authn-authz/extensible-admission-controllers/#what-are-admission-webhooks) -time according to the overhead associated with the pod's +time according to the overhead associated with the Pod's [RuntimeClass](/docs/concepts/containers/runtime-class/). When Pod Overhead is enabled, the overhead is considered in addition to the sum of container -resource requests when scheduling a pod. Similarly, Kubelet will include the pod overhead when sizing -the pod cgroup, and when carrying out pod eviction ranking. +resource requests when scheduling a Pod. Similarly, Kubelet will include the Pod overhead when sizing +the Pod cgroup, and when carrying out Pod eviction ranking. -### Set Up +## Enabling Pod Overhead {#set-up} You need to make sure that the `PodOverhead` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled (it is off by default) -across your cluster. This means: - -- in {{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}} -- in {{< glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}} -- in the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} on each Node -- in any custom API servers that use feature gates - -{{< note >}} -Users who can write to RuntimeClass resources are able to have cluster-wide impact on -workload performance. You can limit access to this ability using Kubernetes access controls. -See [Authorization Overview](/docs/reference/access-authn-authz/authorization/) for more details. -{{< /note >}} +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled (it is on by default as of 1.18) +across your cluster, and a `RuntimeClass` is utilized which defines the `overhead` field. + +## Usage example + +To use the PodOverhead feature, you need a RuntimeClass that defines the `overhead` field. As +an example, you could use the following RuntimeClass definition with a virtualizing container runtime +that uses around 120MiB per Pod for the virtual machine and the guest OS: + +```yaml +--- +kind: RuntimeClass +apiVersion: node.k8s.io/v1beta1 +metadata: +  name: kata-fc +handler: kata-fc +overhead: +  podFixed: +    memory: "120Mi" +    cpu: "250m" +``` + +Workloads that specify the `kata-fc` RuntimeClass handler will take the memory and +CPU overheads into account for resource quota calculations, node scheduling, as well as Pod cgroup sizing. 
+ +Consider running the given example workload, test-pod: + +```yaml +apiVersion: v1 +kind: Pod +metadata: +  name: test-pod +spec: +  runtimeClassName: kata-fc +  containers: +  - name: busybox-ctr +    image: busybox +    stdin: true +    tty: true +    resources: +      limits: +        cpu: 500m +        memory: 100Mi +  - name: nginx-ctr +    image: nginx +    resources: +      limits: +        cpu: 1500m +        memory: 100Mi +``` + +At admission time the RuntimeClass [admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) +updates the workload's PodSpec to include the `overhead` as described in the RuntimeClass. If the PodSpec already has this field defined, +the Pod will be rejected. In the given example, since only the RuntimeClass name is specified, the admission controller mutates the Pod +to include an `overhead`. + +After the RuntimeClass admission controller, you can check the updated PodSpec: + +```bash +kubectl get pod test-pod -o jsonpath='{.spec.overhead}' +``` + +The output is: +``` +map[cpu:250m memory:120Mi] +``` + +If a ResourceQuota is defined, the sum of container requests as well as the +`overhead` field are counted. + +When the kube-scheduler is deciding which node should run a new Pod, the scheduler considers that Pod's +`overhead` as well as the sum of container requests for that Pod. For this example, the scheduler adds the +requests and the overhead, then looks for a node that has 2.25 CPU and 320 MiB of memory available. + +Once a Pod is scheduled to a node, the kubelet on that node creates a new {{< glossary_tooltip text="cgroup" term_id="cgroup" >}} +for the Pod. It is within this pod cgroup that the underlying container runtime will create containers. + +If the resource has a limit defined for each container (Guaranteed QoS or Burstable QoS with limits defined), +the kubelet will set an upper limit for the pod cgroup associated with that resource (cpu.cfs_quota_us for CPU +and memory.limit_in_bytes for memory). This upper limit is based on the sum of the container limits plus the `overhead` +defined in the PodSpec. + +For CPU, if the Pod is Guaranteed or Burstable QoS, the kubelet will set `cpu.shares` based on the sum of container +requests plus the `overhead` defined in the PodSpec. + +Looking at our example, verify the container requests for the workload: +```bash +kubectl get pod test-pod -o jsonpath='{.spec.containers[*].resources.limits}' +``` + +The total container requests are 2000m CPU and 200MiB of memory: +``` +map[cpu: 500m memory:100Mi] map[cpu:1500m memory:100Mi] +``` + +Check this against what is observed by the node: +```bash +kubectl describe node | grep test-pod -B2 +``` + +The output shows 2250m CPU and 320MiB of memory are requested, which includes PodOverhead: +``` +  Namespace    Name        CPU Requests  CPU Limits   Memory Requests  Memory Limits  AGE +  ---------    ----        ------------  ----------   ---------------  -------------  --- +  default      test-pod    2250m (56%)   2250m (56%)  320Mi (1%)       320Mi (1%)     36m +``` + +## Verify Pod cgroup limits + +Check the Pod's memory cgroups on the node where the workload is running. In the following example, [`crictl`](https://github.com/kubernetes-sigs/cri-tools/blob/master/docs/crictl.md) +is used on the node, which provides a CLI for CRI-compatible container runtimes. This is an +advanced example to show PodOverhead behavior, and it is not expected that users should need to check +cgroups directly on the node. 
+ +First, on the particular node, determine the Pod identifier: + +```bash +# Run this on the node where the Pod is scheduled +POD_ID="$(sudo crictl pods --name test-pod -q)" +``` + +From this, you can determine the cgroup path for the Pod: +```bash +# Run this on the node where the Pod is scheduled +sudo crictl inspectp -o=json $POD_ID | grep cgroupsPath +``` + +The resulting cgroup path includes the Pod's `pause` container. The Pod level cgroup is one directory above. +``` + "cgroupsPath": "/kubepods/podd7f4b509-cf94-4951-9417-d1087c92a5b2/7ccf55aee35dd16aca4189c952d83487297f3cd760f1bbf09620e206e7d0c27a" +``` + +In this specific case, the pod cgroup path is `kubepods/podd7f4b509-cf94-4951-9417-d1087c92a5b2`. Verify the Pod level cgroup setting for memory: +```bash +# Run this on the node where the Pod is scheduled. +# Also, change the name of the cgroup to match the cgroup allocated for your pod. + cat /sys/fs/cgroup/memory/kubepods/podd7f4b509-cf94-4951-9417-d1087c92a5b2/memory.limit_in_bytes +``` + +This is 320 MiB, as expected: +``` +335544320 +``` + +### Observability + +A `kube_pod_overhead` metric is available in [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) +to help identify when PodOverhead is being utilized and to help observe stability of workloads +running with a defined Overhead. This functionality is not available in the 1.9 release of +kube-state-metrics, but is expected in a following release. Users will need to build kube-state-metrics +from source in the meantime. {{% /capture %}} diff --git a/content/en/docs/concepts/configuration/pod-priority-preemption.md b/content/en/docs/concepts/configuration/pod-priority-preemption.md index 5c2edb1a0a2b0..1d24c2f094e67 100644 --- a/content/en/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/en/docs/concepts/configuration/pod-priority-preemption.md @@ -16,42 +16,25 @@ importance of a Pod relative to other Pods. If a Pod cannot be scheduled, the scheduler tries to preempt (evict) lower priority Pods to make scheduling of the pending Pod possible. -In Kubernetes 1.9 and later, Priority also affects scheduling order of Pods and -out-of-resource eviction ordering on the Node. - -Pod priority and preemption graduated to beta in Kubernetes 1.11 and to GA in -Kubernetes 1.14. They have been enabled by default since 1.11. - -In Kubernetes versions where Pod priority and preemption is still an alpha-level -feature, you need to explicitly enable it. To use these features in the older -versions of Kubernetes, follow the instructions in the documentation for your -Kubernetes version, by going to the documentation archive version for your -Kubernetes version. - -Kubernetes Version | Priority and Preemption State | Enabled by default ------------------- | :---------------------------: | :----------------: -1.8 | alpha | no -1.9 | alpha | no -1.10 | alpha | no -1.11 | beta | yes -1.14 | stable | yes - -{{< warning >}}In a cluster where not all users are trusted, a -malicious user could create pods at the highest possible priorities, causing -other pods to be evicted/not get scheduled. To resolve this issue, -[ResourceQuota](/docs/concepts/policy/resource-quotas/) is -augmented to support Pod priority. An admin can create ResourceQuota for users -at specific priority levels, preventing them from creating pods at high -priorities. This feature is in beta since Kubernetes 1.12. 
-{{< /warning >}} - {{% /capture %}} {{% capture body %}} + +{{< warning >}} +In a cluster where not all users are trusted, a malicious user could create Pods +at the highest possible priorities, causing other Pods to be evicted/not get +scheduled. +An administrator can use ResourceQuota to prevent users from creating pods at +high priorities. + +See [limit Priority Class consumption by default](/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) +for details. +{{< /warning >}} + ## How to use priority and preemption -To use priority and preemption in Kubernetes 1.11 and later, follow these steps: +To use priority and preemption: 1. Add one or more [PriorityClasses](#priorityclass). @@ -77,21 +60,20 @@ Pods. ## How to disable preemption -{{< note >}} -In Kubernetes 1.12+, critical pods rely on scheduler preemption to be scheduled -when a cluster is under resource pressure. For this reason, it is not -recommended to disable preemption. -{{< /note >}} +{{< caution >}} +Critical pods rely on scheduler preemption to be scheduled when a cluster +is under resource pressure. For this reason, it is not recommended to +disable preemption. +{{< /caution >}} {{< note >}} -In Kubernetes 1.15 and later, -if the feature `NonPreemptingPriority` is enabled, +In Kubernetes 1.15 and later, if the feature `NonPreemptingPriority` is enabled, PriorityClasses have the option to set `preemptionPolicy: Never`. This will prevent pods of that PriorityClass from preempting other pods. {{< /note >}} -In Kubernetes 1.11 and later, preemption is controlled by a kube-scheduler flag -`disablePreemption`, which is set to `false` by default. +Preemption is controlled by a kube-scheduler flag `disablePreemption`, which is +set to `false` by default. If you want to disable preemption despite the above note, you can set `disablePreemption` to `true`. @@ -117,6 +99,9 @@ priority class name to the integer value of the priority. The name is specified in the `name` field of the PriorityClass object's metadata. The value is specified in the required `value` field. The higher the value, the higher the priority. +The name of a PriorityClass object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names), +and it cannot be prefixed with `system-`. A PriorityClass object can have any 32-bit integer value smaller than or equal to 1 billion. Larger numbers are reserved for critical system Pods that should @@ -237,12 +222,12 @@ spec: ### Effect of Pod priority on scheduling order -In Kubernetes 1.9 and later, when Pod priority is enabled, scheduler orders -pending Pods by their priority and a pending Pod is placed ahead of other -pending Pods with lower priority in the scheduling queue. As a result, the -higher priority Pod may be scheduled sooner than Pods with lower priority if its -scheduling requirements are met. If such Pod cannot be scheduled, scheduler will -continue and tries to schedule other lower priority Pods. +When Pod priority is enabled, the scheduler orders pending Pods by +their priority and a pending Pod is placed ahead of other pending Pods +with lower priority in the scheduling queue. As a result, the higher +priority Pod may be scheduled sooner than Pods with lower priority if +its scheduling requirements are met. If such Pod cannot be scheduled, +scheduler will continue and tries to schedule other lower priority Pods. ## Preemption @@ -288,12 +273,12 @@ point that scheduler preempts victims and the time that Pod P is scheduled. 
In order to minimize this gap, one can set graceful termination period of lower priority Pods to zero or a small number. -#### PodDisruptionBudget is supported, but not guaranteed! +#### PodDisruptionBudget is supported, but not guaranteed A [Pod Disruption Budget (PDB)](/docs/concepts/workloads/pods/disruptions/) allows application owners to limit the number of Pods of a replicated application -that are down simultaneously from voluntary disruptions. Kubernetes 1.9 supports -PDB when preempting Pods, but respecting PDB is best effort. The Scheduler tries +that are down simultaneously from voluntary disruptions. Kubernetes supports +PDB when preempting Pods, but respecting PDB is best effort. The scheduler tries to find victims whose PDB are not violated by preemption, but if no such victims are found, preemption will still happen, and lower priority Pods will be removed despite their PDBs being violated. @@ -344,28 +329,23 @@ gone, and Pod P could possibly be scheduled on Node N. We may consider adding cross Node preemption in future versions if there is enough demand and if we find an algorithm with reasonable performance. -## Debugging Pod Priority and Preemption - -Pod Priority and Preemption is a major feature that could potentially disrupt -Pod scheduling if it has bugs. +## Troubleshooting -### Potential problems caused by Priority and Preemption +Pod priority and pre-emption can have unwanted side effects. Here are some +examples of potential problems and ways to deal with them. -The followings are some of the potential problems that could be caused by bugs -in the implementation of the feature. This list is not exhaustive. - -#### Pods are preempted unnecessarily +### Pods are preempted unnecessarily Preemption removes existing Pods from a cluster under resource pressure to make -room for higher priority pending Pods. If a user gives high priorities to -certain Pods by mistake, these unintentional high priority Pods may cause -preemption in the cluster. As mentioned above, Pod priority is specified by -setting the `priorityClassName` field of `podSpec`. The integer value of +room for higher priority pending Pods. If you give high priorities to +certain Pods by mistake, these unintentionally high priority Pods may cause +preemption in your cluster. Pod priority is specified by setting the +`priorityClassName` field in the Pod's specification. The integer value for priority is then resolved and populated to the `priority` field of `podSpec`. -To resolve the problem, `priorityClassName` of the Pods must be changed to use -lower priority classes or should be left empty. Empty `priorityClassName` is -resolved to zero by default. +To address the problem, you can change the `priorityClassName` for those Pods +to use lower priority classes, or leave that field empty. An empty +`priorityClassName` is resolved to zero by default. When a Pod is preempted, there will be events recorded for the preempted Pod. Preemption should happen only when a cluster does not have enough resources for @@ -374,29 +354,31 @@ Pod (preemptor) is higher than the victim Pods. Preemption must not happen when there is no pending Pod, or when the pending Pods have equal or lower priority than the victims. If preemption happens in such scenarios, please file an issue. 
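+
+As a minimal sketch of the remedy described above (the Pod and the `low-priority`
+class name are assumptions for this example; the class must refer to a
+PriorityClass that actually exists in your cluster), a Pod opts into a less
+important class by setting `priorityClassName` in its specification:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: quiet-workload              # hypothetical Pod, for illustration only
+spec:
+  priorityClassName: low-priority   # assumed name of an existing low-value PriorityClass
+  containers:
+  - name: app
+    image: nginx:1.16.1
+```
+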
-#### Pods are preempted, but the preemptor is not scheduled +### Pods are preempted, but the preemptor is not scheduled When pods are preempted, they receive their requested graceful termination -period, which is by default 30 seconds, but it can be any different value as -specified in the PodSpec. If the victim Pods do not terminate within this period, -they are force-terminated. Once all the victims go away, the preemptor Pod can -be scheduled. +period, which is by default 30 seconds. If the victim Pods do not terminate within +this period, they are forcibly terminated. Once all the victims go away, the +preemptor Pod can be scheduled. While the preemptor Pod is waiting for the victims to go away, a higher priority -Pod may be created that fits on the same node. In this case, the scheduler will +Pod may be created that fits on the same Node. In this case, the scheduler will schedule the higher priority Pod instead of the preemptor. -In the absence of such a higher priority Pod, we expect the preemptor Pod to be -scheduled after the graceful termination period of the victims is over. +This is expected behavior: the Pod with the higher priority should take the place +of a Pod with a lower priority. Other controller actions, such as +[cluster autoscaling](/docs/tasks/administer-cluster/cluster-management/#cluster-autoscaling), +may eventually provide capacity to schedule the pending Pods. -#### Higher priority Pods are preempted before lower priority pods +### Higher priority Pods are preempted before lower priority pods -The scheduler tries to find nodes that can run a pending Pod and if no node is -found, it tries to remove Pods with lower priority from one node to make room -for the pending pod. If a node with low priority Pods is not feasible to run the -pending Pod, the scheduler may choose another node with higher priority Pods -(compared to the Pods on the other node) for preemption. The victims must still -have lower priority than the preemptor Pod. +The scheduler tries to find nodes that can run a pending Pod. If no node is +found, the scheduler tries to remove Pods with lower priority from an arbitrary +node in order to make room for the pending pod. +If a node with low priority Pods is not feasible to run the pending Pod, the scheduler +may choose another node with higher priority Pods (compared to the Pods on the +other node) for preemption. The victims must still have lower priority than the +preemptor Pod. When there are multiple nodes available for preemption, the scheduler tries to choose the node with a set of Pods with lowest priority. However, if such Pods @@ -404,13 +386,11 @@ have PodDisruptionBudget that would be violated if they are preempted then the scheduler may choose another node with higher priority Pods. When multiple nodes exist for preemption and none of the above scenarios apply, -we expect the scheduler to choose a node with the lowest priority. If that is -not the case, it may indicate a bug in the scheduler. +the scheduler chooses a node with the lowest priority. -## Interactions of Pod priority and QoS +## Interactions between Pod priority and quality of service {#interactions-of-pod-priority-and-qos} -Pod priority and -[QoS](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/resource-qos.md) +Pod priority and {{< glossary_tooltip text="QoS class" term_id="qos-class" >}} are two orthogonal features with few interactions and no default restrictions on setting the priority of a Pod based on its QoS classes. 
The scheduler's +preemption logic does not consider QoS when choosing preemption targets. @@ -421,15 +401,20 @@ to schedule the preemptor Pod, or if the lowest priority Pods are protected by `PodDisruptionBudget`. The only component that considers both QoS and Pod priority is -[Kubelet out-of-resource eviction](/docs/tasks/administer-cluster/out-of-resource/). +[kubelet out-of-resource eviction](/docs/tasks/administer-cluster/out-of-resource/). The kubelet ranks Pods for eviction first by whether or not their usage of the starved resource exceeds requests, then by Priority, and then by the consumption of the starved compute resource relative to the Pods’ scheduling requests. See -[Evicting end-user pods](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods) -for more details. Kubelet out-of-resource eviction does not evict Pods whose +[evicting end-user pods](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods) +for more details. + +kubelet out-of-resource eviction does not evict Pods when +their usage does not exceed their requests. If a Pod with lower priority is not exceeding its requests, it won't be evicted. Another Pod with higher priority that exceeds its requests may be evicted. {{% /capture %}} +{{% capture whatsnext %}} +* Read about using ResourceQuotas in connection with PriorityClasses: [limit Priority Class consumption by default](/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) +{{% /capture %}} diff --git a/content/en/docs/concepts/configuration/secret.md b/content/en/docs/concepts/configuration/secret.md index 61995234e3db7..dc9a214fa320d 100644 --- a/content/en/docs/concepts/configuration/secret.md +++ b/content/en/docs/concepts/configuration/secret.md @@ -68,6 +68,8 @@ echo -n '1f2d1e2e67df' > ./password.txt The `kubectl create secret` command packages these files into a Secret and creates the object on the API server. +The name of a Secret object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). ```shell kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt @@ -137,8 +139,10 @@ See [decoding a secret](#decoding-a-secret) to learn how to view the contents of #### Creating a Secret manually You can also create a Secret in a file first, in JSON or YAML format, -and then create that object. The -[Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core) +and then create that object. +The name of a Secret object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). +The [Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core) contains two maps: `data` and `stringData`. The `data` field is used to store arbitrary data, encoded using base64. The `stringData` field is provided for convenience, and allows you to provide @@ -672,6 +676,37 @@ A container using a Secret as a Secret updates. {{< /note >}} +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +The Kubernetes alpha feature _Immutable Secrets and ConfigMaps_ provides an option to set +individual Secrets and ConfigMaps as immutable. 
For clusters that extensively use Secrets +(at least tens of thousands of unique Secret to Pod mounts), preventing changes to their +data has the following advantages: + +- protects you from accidental (or unwanted) updates that could cause application outages +- improves performance of your cluster by significantly reducing load on kube-apiserver, by +closing watches for secrets marked as immutable. + +To use this feature, enable the `ImmutableEphemeralVolumes` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and set +your Secret or ConfigMap `immutable` field to `true`. For example: +```yaml +apiVersion: v1 +kind: Secret +metadata: +  ... +data: +  ... +immutable: true +``` + +{{< note >}} +Once a Secret or ConfigMap is marked as immutable, it is _not_ possible to revert this change +nor to mutate the contents of the `data` field. You can only delete and recreate the Secret. +Existing Pods maintain a mount point to the deleted Secret - it is recommended to recreate +these pods. +{{< /note >}} + ### Using Secrets as environment variables To use a secret in an {{< glossary_tooltip text="environment variable" term_id="container-env-variables" >}} diff --git a/content/en/docs/concepts/configuration/taint-and-toleration.md b/content/en/docs/concepts/configuration/taint-and-toleration.md index eac6267e799bd..2026390eff955 100644 --- a/content/en/docs/concepts/configuration/taint-and-toleration.md +++ b/content/en/docs/concepts/configuration/taint-and-toleration.md @@ -197,11 +197,13 @@ on the special hardware nodes. This will make sure that these special hardware nodes are dedicated for pods requesting such hardware and you don't have to manually add tolerations to your pods. -* **Taint based Evictions (beta feature)**: A per-pod-configurable eviction behavior +* **Taint based Evictions**: A per-pod-configurable eviction behavior when there are node problems, which is described in the next section. ## Taint based Evictions +{{< feature-state for_k8s_version="v1.18" state="stable" >}} + Earlier we mentioned the `NoExecute` taint effect, which affects pods that are already running on the node as follows @@ -229,9 +231,9 @@ certain condition is true. The following taints are built in: as unusable. After a controller from the cloud-controller-manager initializes this node, the kubelet removes this taint. -In version 1.13, the `TaintBasedEvictions` feature is promoted to beta and enabled by default, hence the taints are automatically -added by the NodeController (or kubelet) and the normal logic for evicting pods from nodes -based on the Ready NodeCondition is disabled. +In case a node is to be evicted, the node controller or the kubelet adds relevant taints +with `NoExecute` effect. If the fault condition returns to normal the kubelet or node +controller can remove the relevant taint(s). {{< note >}} To maintain the existing [rate limiting](/docs/concepts/architecture/nodes/) @@ -240,7 +242,7 @@ in a rate-limited way. This prevents massive pod evictions in scenarios such as the master becoming partitioned from the nodes. {{< /note >}} -This beta feature, in combination with `tolerationSeconds`, allows a pod +The feature, in combination with `tolerationSeconds`, allows a pod to specify how long it should stay bound to a node that has one or both of these problems. 
For example, an application with a lot of local state might want to stay @@ -277,15 +279,13 @@ admission controller](https://git.k8s.io/kubernetes/plugin/pkg/admission/default * `node.kubernetes.io/unreachable` * `node.kubernetes.io/not-ready` -This ensures that DaemonSet pods are never evicted due to these problems, -which matches the behavior when this feature is disabled. +This ensures that DaemonSet pods are never evicted due to these problems. ## Taint Nodes by Condition The node lifecycle controller automatically creates taints corresponding to -Node conditions. +Node conditions with `NoSchedule` effect. Similarly the scheduler does not check Node conditions; instead the scheduler checks taints. This assures that Node conditions don't affect what's scheduled onto the Node. The user can choose to ignore some of the Node's problems (represented as Node conditions) by adding appropriate Pod tolerations. -Note that `TaintNodesByCondition` only taints nodes with `NoSchedule` effect. `NoExecute` effect is controlled by `TaintBasedEviction` which is a beta feature and enabled by default since version 1.13. Starting in Kubernetes 1.8, the DaemonSet controller automatically adds the following `NoSchedule` tolerations to all daemons, to prevent DaemonSets from diff --git a/content/en/docs/concepts/containers/container-environment-variables.md b/content/en/docs/concepts/containers/container-environment.md similarity index 98% rename from content/en/docs/concepts/containers/container-environment-variables.md rename to content/en/docs/concepts/containers/container-environment.md index b8b3c28a6b138..86b595661d3cf 100644 --- a/content/en/docs/concepts/containers/container-environment-variables.md +++ b/content/en/docs/concepts/containers/container-environment.md @@ -2,7 +2,7 @@ reviewers: - mikedanese - thockin -title: Container Environment Variables +title: Container Environment content_template: templates/concept weight: 20 --- diff --git a/content/en/docs/concepts/containers/container-lifecycle-hooks.md b/content/en/docs/concepts/containers/container-lifecycle-hooks.md index 3d4f81152d20e..fe810d23c5ce9 100644 --- a/content/en/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/en/docs/concepts/containers/container-lifecycle-hooks.md @@ -116,7 +116,7 @@ Events: {{% capture whatsnext %}} -* Learn more about the [Container environment](/docs/concepts/containers/container-environment-variables/). +* Learn more about the [Container environment](/docs/concepts/containers/container-environment/). * Get hands-on experience [attaching handlers to Container lifecycle events](/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/). diff --git a/content/en/docs/concepts/containers/overview.md b/content/en/docs/concepts/containers/overview.md new file mode 100644 index 0000000000000..968ba7104a53d --- /dev/null +++ b/content/en/docs/concepts/containers/overview.md @@ -0,0 +1,45 @@ +--- +reviewers: +- erictune +- thockin +title: Containers overview +content_template: templates/concept +weight: 1 +--- + +{{% capture overview %}} + +Containers are a technology for packaging the (compiled) code for an +application along with the dependencies it needs at run time. Each +container that you run is repeatable; the standardisation from having +dependencies included means that you get the same behavior wherever you +run it. + +Containers decouple applications from underlying host infrastructure. +This makes deployment easier in different cloud or OS environments. 
+ +{{% /capture %}} + + +{{% capture body %}} + +## Container images +A [container image](/docs/concepts/containers/images/) is a ready-to-run +software package, containing everything needed to run an application: +the code and any runtime it requires, application and system libraries, +and default values for any essential settings. + +By design, a container is immutable: you cannot change the code of a +container that is already running. If you have a containerized application +and want to make changes, you need to build a new container that includes +the change, then recreate the container to start from the updated image. + +## Container runtimes + +{{< glossary_definition term_id="container-runtime" length="all" >}} + +{{% /capture %}} +{{% capture whatsnext %}} +* Read about [container images](/docs/concepts/containers/images/) +* Read about [Pods](/docs/concepts/workloads/pods/) +{{% /capture %}} diff --git a/content/en/docs/concepts/containers/runtime-class.md b/content/en/docs/concepts/containers/runtime-class.md index 00bd9fae34a4f..d29825d698a47 100644 --- a/content/en/docs/concepts/containers/runtime-class.md +++ b/content/en/docs/concepts/containers/runtime-class.md @@ -13,22 +13,14 @@ weight: 20 This page describes the RuntimeClass resource and runtime selection mechanism. -{{< warning >}} -RuntimeClass includes *breaking* changes in the beta upgrade in v1.14. If you were using -RuntimeClass prior to v1.14, see [Upgrading RuntimeClass from Alpha to -Beta](#upgrading-runtimeclass-from-alpha-to-beta). -{{< /warning >}} +RuntimeClass is a feature for selecting the container runtime configuration. The container runtime +configuration is used to run a Pod's containers. {{% /capture %}} {{% capture body %}} -## Runtime Class - -RuntimeClass is a feature for selecting the container runtime configuration. The container runtime -configuration is used to run a Pod's containers. - ## Motivation You can set a different RuntimeClass between different Pods to provide a balance of @@ -41,7 +33,7 @@ additional overhead. You can also use RuntimeClass to run different Pods with the same container runtime but with different settings. -### Set Up +## Setup Ensure the RuntimeClass feature gate is enabled (it is by default). See [Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/) for an explanation of enabling @@ -50,7 +42,7 @@ feature gates. The `RuntimeClass` feature gate must be enabled on apiservers _an 1. Configure the CRI implementation on nodes (runtime dependent) 2. Create the corresponding RuntimeClass resources -#### 1. Configure the CRI implementation on nodes +### 1. Configure the CRI implementation on nodes The configurations available through RuntimeClass are Container Runtime Interface (CRI) implementation dependent. See the corresponding documentation ([below](#cri-configuration)) for your @@ -65,7 +57,7 @@ heterogenous node configurations, see [Scheduling](#scheduling) below. The configurations have a corresponding `handler` name, referenced by the RuntimeClass. The handler must be a valid DNS 1123 label (alpha-numeric + `-` characters). -#### 2. Create the corresponding RuntimeClass resources +### 2. Create the corresponding RuntimeClass resources The configurations setup in step 1 should each have an associated `handler` name, which identifies the configuration. For each handler, create a corresponding RuntimeClass object. 
@@ -82,13 +74,16 @@ metadata: handler: myconfiguration # The name of the corresponding CRI configuration ``` +The name of a RuntimeClass object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + {{< note >}} It is recommended that RuntimeClass write operations (create/update/patch/delete) be restricted to the cluster administrator. This is typically the default. See [Authorization Overview](/docs/reference/access-authn-authz/authorization/) for more details. {{< /note >}} -### Usage +## Usage Once RuntimeClasses are configured for the cluster, using them is very simple. Specify a `runtimeClassName` in the Pod spec. For example: @@ -147,14 +142,14 @@ See CRI-O's [config documentation][100] for more details. [100]: https://raw.githubusercontent.com/cri-o/cri-o/9f11d1d/docs/crio.conf.5.md -### Scheduling +## Scheduling {{< feature-state for_k8s_version="v1.16" state="beta" >}} As of Kubernetes v1.16, RuntimeClass includes support for heterogenous clusters through its `scheduling` fields. Through the use of these fields, you can ensure that pods running with this RuntimeClass are scheduled to nodes that support it. To use the scheduling support, you must have -the RuntimeClass [admission controller][] enabled (the default, as of 1.16). +the [RuntimeClass admission controller][] enabled (the default, as of 1.16). To ensure pods land on nodes supporting a specific RuntimeClass, that set of nodes should have a common label which is then selected by the `runtimeclass.scheduling.nodeSelector` field. The @@ -170,50 +165,23 @@ by each. To learn more about configuring the node selector and tolerations, see [Assigning Pods to Nodes](/docs/concepts/configuration/assign-pod-node/). -[admission controller]: /docs/reference/access-authn-authz/admission-controllers/ +[RuntimeClass admission controller]: /docs/reference/access-authn-authz/admission-controllers/#runtimeclass ### Pod Overhead -{{< feature-state for_k8s_version="v1.16" state="alpha" >}} +{{< feature-state for_k8s_version="v1.18" state="beta" >}} -As of Kubernetes v1.16, RuntimeClass includes support for specifying overhead associated with -running a pod, as part of the [`PodOverhead`](/docs/concepts/configuration/pod-overhead/) feature. -To use `PodOverhead`, you must have the PodOverhead [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -enabled (it is off by default). +You can specify _overhead_ resources that are associated with running a Pod. Declaring overhead allows +the cluster (including the scheduler) to account for it when making decisions about Pods and resources. +To use Pod overhead, you must have the PodOverhead [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +enabled (it is on by default). - -Pod overhead is defined in RuntimeClass through the `Overhead` fields. Through the use of these fields, +Pod overhead is defined in RuntimeClass through the `overhead` fields. Through the use of these fields, you can specify the overhead of running pods utilizing this RuntimeClass and ensure these overheads are accounted for in Kubernetes. -### Upgrading RuntimeClass from Alpha to Beta - -The RuntimeClass Beta feature includes the following changes: - -- The `node.k8s.io` API group and `runtimeclasses.node.k8s.io` resource have been migrated to a - built-in API from a CustomResourceDefinition. -- The `spec` has been inlined in the RuntimeClass definition (i.e. there is no more - RuntimeClassSpec). 
-- The `runtimeHandler` field has been renamed `handler`. -- The `handler` field is now required in all API versions. This means the `runtimeHandler` field in - the Alpha API is also required. -- The `handler` field must be a valid DNS label ([RFC 1123](https://tools.ietf.org/html/rfc1123)), - meaning it can no longer contain `.` characters (in all versions). Valid handlers match the - following regular expression: `^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`. - -**Action Required:** The following actions are required to upgrade from the alpha version of the -RuntimeClass feature to the beta version: - -- RuntimeClass resources must be recreated *after* upgrading to v1.14, and the - `runtimeclasses.node.k8s.io` CRD should be manually deleted: - ``` - kubectl delete customresourcedefinitions.apiextensions.k8s.io runtimeclasses.node.k8s.io - ``` -- Alpha RuntimeClasses with an unspecified or empty `runtimeHandler` or those using a `.` character - in the handler are no longer valid, and must be migrated to a valid handler configuration (see - above). - -### Further Reading +{{% /capture %}} +{{% capture whatsnext %}} - [RuntimeClass Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class.md) - [RuntimeClass Scheduling Design](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/runtime-class-scheduling.md) diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md index bf2d5501920e9..8bc6e22861761 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md @@ -5,30 +5,34 @@ reviewers: - cheftako - chenopis content_template: templates/concept -weight: 10 +weight: 20 --- {{% capture overview %}} -The aggregation layer allows Kubernetes to be extended with additional APIs, beyond what is offered by the core Kubernetes APIs. +The aggregation layer allows Kubernetes to be extended with additional APIs, beyond what is offered by the core Kubernetes APIs. +The additional APIs can either be ready-made solutions such as [service-catalog](/docs/concepts/extend-kubernetes/service-catalog/), or APIs that you develop yourself. + +The aggregation layer is different from [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/), which are a way to make the {{< glossary_tooltip term_id="kube-apiserver" text="kube-apiserver" >}} recognise new kinds of object. {{% /capture %}} {{% capture body %}} -## Overview +## Aggregation layer + +The aggregation layer runs in-process with the kube-apiserver. Until an extension resource is registered, the aggregation layer will do nothing. To register an API, you add an _APIService_ object, which "claims" the URL path in the Kubernetes API. At that point, the aggregation layer will proxy anything sent to that API path (e.g. `/apis/myextension.mycompany.io/v1/…`) to the registered APIService. -The aggregation layer enables installing additional Kubernetes-style APIs in your cluster. These can either be pre-built, existing 3rd party solutions, such as [service-catalog](https://github.com/kubernetes-incubator/service-catalog/blob/master/README.md), or user-created APIs like [apiserver-builder](https://github.com/kubernetes-incubator/apiserver-builder/blob/master/README.md), which can get you started. 
+The most common way to implement the APIService is to run an *extension API server* in Pod(s) that run in your cluster. If you're using the extension API server to manage resources in your cluster, the extension API server (also written as "extension-apiserver") is typically paired with one or more {{< glossary_tooltip text="controllers" term_id="controller" >}}. The apiserver-builder library provides a skeleton for both extension API servers and the associated controller(s). -The aggregation layer runs in-process with the kube-apiserver. Until an extension resource is registered, the aggregation layer will do nothing. To register an API, users must add an APIService object, which "claims" the URL path in the Kubernetes API. At that point, the aggregation layer will proxy anything sent to that API path (e.g. /apis/myextension.mycompany.io/v1/…) to the registered APIService. +### Response latency -Ordinarily, the APIService will be implemented by an *extension-apiserver* in a pod running in the cluster. This extension-apiserver will normally need to be paired with one or more controllers if active management of the added resources is needed. As a result, the apiserver-builder will actually provide a skeleton for both. As another example, when the service-catalog is installed, it provides both the extension-apiserver and controller for the services it provides. +Extension API servers should have low latency networking to and from the kube-apiserver. +Discovery requests are required to round-trip from the kube-apiserver in five seconds or less. -Extension-apiservers should have low latency connections to and from the kube-apiserver. -In particular, discovery requests are required to round-trip from the kube-apiserver in five seconds or less. -If your deployment cannot achieve this, you should consider how to change it. For now, setting the -`EnableAggregatedDiscoveryTimeout=false` feature gate on the kube-apiserver -will disable the timeout restriction. It will be removed in a future release. +If your extension API server cannot achieve that latency requirement, consider making changes that let you meet it. You can also set the +`EnableAggregatedDiscoveryTimeout=false` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) on the kube-apiserver +to disable the timeout restriction. This deprecated feature gate will be removed in a future release. {{% /capture %}} @@ -37,7 +41,6 @@ will disable the timeout restriction. It will be removed in a future release. * To get the aggregator working in your environment, [configure the aggregation layer](/docs/tasks/access-kubernetes-api/configure-aggregation-layer/). * Then, [setup an extension api-server](/docs/tasks/access-kubernetes-api/setup-extension-api-server/) to work with the aggregation layer. * Also, learn how to [extend the Kubernetes API using Custom Resource Definitions](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/). 
+* Read the specification for [APIService](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#apiservice-v1-apiregistration-k8s-io) {{% /capture %}} - - diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index d0b990e0da6f6..14e0b899240d6 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -4,7 +4,7 @@ reviewers: - enisoc - deads2k content_template: templates/concept -weight: 20 +weight: 10 --- {{% capture overview %}} @@ -19,7 +19,7 @@ methods for adding custom resources and how to choose between them. ## Custom resources A *resource* is an endpoint in the [Kubernetes API](/docs/reference/using-api/api-overview/) that stores a collection of -[API objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/) of a certain kind. For example, the built-in *pods* resource contains a collection of Pod objects. +[API objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/) of a certain kind; for example, the built-in *pods* resource contains a collection of Pod objects. A *custom resource* is an extension of the Kubernetes API that is not necessarily available in a default Kubernetes installation. It represents a customization of a particular Kubernetes installation. However, @@ -105,7 +105,7 @@ Use a [secret](/docs/concepts/configuration/secret/) for sensitive data, which i Use a custom resource (CRD or Aggregated API) if most of the following apply: * You want to use Kubernetes client libraries and CLIs to create and update the new resource. -* You want top-level support from kubectl (for example: `kubectl get my-object object-name`). +* You want top-level support from `kubectl` (e.g. `kubectl get my-object object-name`). * You want to build new automation that watches for updates on the new object, and then CRUD other objects, or vice versa. * You want to write automation that handles updates to the object. * You want to use Kubernetes API conventions like `.spec`, `.status`, and `.metadata`. @@ -120,9 +120,9 @@ Kubernetes provides two ways to add custom resources to your cluster: Kubernetes provides these two options to meet the needs of different users, so that neither ease of use nor flexibility is compromised. -Aggregated APIs are subordinate APIServers that sit behind the primary API server, which acts as a proxy. This arrangement is called [API Aggregation](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) (AA). To users, it simply appears that the Kubernetes API is extended. +Aggregated APIs are subordinate API servers that sit behind the primary API server, which acts as a proxy. This arrangement is called [API Aggregation](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) (AA). To users, it simply appears that the Kubernetes API is extended. -CRDs allow users to create new types of resources without adding another APIserver. You do not need to understand API Aggregation to use CRDs. +CRDs allow users to create new types of resources without adding another API server. You do not need to understand API Aggregation to use CRDs. Regardless of how they are installed, the new resources are referred to as Custom Resources to distinguish them from built-in Kubernetes resources (like pods). @@ -167,43 +167,43 @@ CRDs are easier to create than Aggregated APIs. 
| CRDs | Aggregated API | | --------------------------- | -------------- | -| Do not require programming. Users can choose any language for a CRD controller. | Requires programming in Go and building binary and image. Users can choose any language for a CRD controller. | -| No additional service to run; CRs are handled by API Server. | An additional service to create and that could fail. | -| No ongoing support once the CRD is created. Any bug fixes are picked up as part of normal Kubernetes Master upgrades. | May need to periodically pickup bug fixes from upstream and rebuild and update the Aggregated APIserver. | -| No need to handle multiple versions of your API. For example: when you control the client for this resource, you can upgrade it in sync with the API. | You need to handle multiple versions of your API, for example: when developing an extension to share with the world. | +| Do not require programming. Users can choose any language for a CRD controller. | Requires programming in Go and building binary and image. | +| No additional service to run; CRDs are handled by API server. | An additional service to create and that could fail. | +| No ongoing support once the CRD is created. Any bug fixes are picked up as part of normal Kubernetes Master upgrades. | May need to periodically pickup bug fixes from upstream and rebuild and update the Aggregated API server. | +| No need to handle multiple versions of your API; for example, when you control the client for this resource, you can upgrade it in sync with the API. | You need to handle multiple versions of your API; for example, when developing an extension to share with the world. | ### Advanced features and flexibility -Aggregated APIs offer more advanced API features and customization of other features, for example: the storage layer. +Aggregated APIs offer more advanced API features and customization of other features; e.g. the storage layer. | Feature | Description | CRDs | Aggregated API | | ------- | ----------- | ---- | -------------- | | Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation). Any other validations supported by addition of a [Validating Webhook](/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook-alpha-in-1-8-beta-in-1-9). | Yes, arbitrary validation checks | -| Defaulting | See above | Yes, either via [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#defaulting) `default` keyword (GA in 1.17), or via a [Mutating Webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) (though this will not be run when reading from etcd for old objects) | Yes | +| Defaulting | See above | Yes, either via [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#defaulting) `default` keyword (GA in 1.17), or via a [Mutating Webhook](/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) (though this will not be run when reading from etcd for old objects). | Yes | | Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. 
Less important if you control your client versions. | [Yes](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning) | Yes | -| Custom Storage | If you need storage with a different performance mode (for example, time-series database instead of key-value store) or isolation for security (for example, encryption secrets or different | No | Yes | +| Custom Storage | If you need storage with a different performance mode (e.g. time-series database instead of key-value store) or isolation for security (e.g. encryption secrets or different | No | Yes | | Custom Business Logic | Perform arbitrary checks or actions when creating, reading, updating or deleting an object | Yes, using [Webhooks](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks). | Yes | | Scale Subresource | Allows systems like HorizontalPodAutoscaler and PodDisruptionBudget interact with your new resource | [Yes](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#scale-subresource) | Yes | | Status Subresource | Allows fine-grained access control where user writes the spec section and the controller writes the status section. Allows incrementing object Generation on custom resource data mutation (requires separate spec and status sections in the resource) | [Yes](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#status-subresource) | Yes | | Other Subresources | Add operations other than CRUD, such as "logs" or "exec". | No | Yes | | strategic-merge-patch | The new endpoints support PATCH with `Content-Type: application/strategic-merge-patch+json`. Useful for updating objects that may be modified both locally, and by the server. For more information, see ["Update API Objects in Place Using kubectl patch"](/docs/tasks/run-application/update-api-object-kubectl-patch/) | No | Yes | | Protocol Buffers | The new resource supports clients that want to use Protocol Buffers | No | Yes | -| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | Yes, based on the [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) schema (GA in 1.16) | Yes | +| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | Yes, based on the [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) schema (GA in 1.16). 
| Yes | ### Common Features -When you create a custom resource, either via a CRDs or an AA, you get many features for your API, compared to implementing it outside the Kubernetes platform: +When you create a custom resource, either via a CRD or an AA, you get many features for your API, compared to implementing it outside the Kubernetes platform: | Feature | What it does | | ------- | ------------ | | CRUD | The new endpoints support CRUD basic operations via HTTP and `kubectl` | | Watch | The new endpoints support Kubernetes Watch operations via HTTP | -| Discovery | Clients like kubectl and dashboard automatically offer list, display, and field edit operations on your resources | +| Discovery | Clients like `kubectl` and dashboard automatically offer list, display, and field edit operations on your resources | | json-patch | The new endpoints support PATCH with `Content-Type: application/json-patch+json` | | merge-patch | The new endpoints support PATCH with `Content-Type: application/merge-patch+json` | | HTTPS | The new endpoints uses HTTPS | -| Built-in Authentication | Access to the extension uses the core apiserver (aggregation layer) for authentication | -| Built-in Authorization | Access to the extension can reuse the authorization used by the core apiserver (e.g. RBAC) | +| Built-in Authentication | Access to the extension uses the core API server (aggregation layer) for authentication | +| Built-in Authorization | Access to the extension can reuse the authorization used by the core API server (e.g. RBAC) | | Finalizers | Block deletion of extension resources until external cleanup happens. | | Admission Webhooks | Set default values and validate extension resources during any create/update/delete operation. | | UI/CLI Display | Kubectl, dashboard can display extension resources. | @@ -217,9 +217,9 @@ There are several points to be aware of before adding a custom resource to your ### Third party code and new points of failure -While creating a CRD does not automatically add any new points of failure (for example, by causing third party code to run on your API server), packages (for example, Charts) or other installation bundles often include CRDs as well as a Deployment of third-party code that implements the business logic for a new custom resource. +While creating a CRD does not automatically add any new points of failure (e.g. by causing third party code to run on your API server), packages (e.g. Charts) or other installation bundles often include CRDs as well as a Deployment of third-party code that implements the business logic for a new custom resource. -Installing an Aggregated APIserver always involves running a new Deployment. +Installing an Aggregated API server always involves running a new Deployment. ### Storage @@ -229,7 +229,7 @@ Aggregated API servers may use the same storage as the main API server, in which ### Authentication, authorization, and auditing -CRDs always use the same authentication, authorization, and audit logging as the built-in resources of your API Server. +CRDs always use the same authentication, authorization, and audit logging as the built-in resources of your API server. If you use RBAC for authorization, most RBAC roles will not grant access to the new resources (except the cluster-admin role or any role created with wildcard rules). You'll need to explicitly grant access to the new resources. CRDs and Aggregated APIs often come bundled with new role definitions for the types they add. 
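As a hedged illustration of such a role definition (not taken from the page itself), an RBAC ClusterRole that grants read-only access to a hypothetical custom resource `crontabs` in a hypothetical `stable.example.com` API group could look like this sketch:

```yaml
# Sketch only: the API group and resource name are placeholders for your own custom resource.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: crontab-reader               # hypothetical role name
rules:
- apiGroups: ["stable.example.com"]  # hypothetical custom resource API group
  resources: ["crontabs"]            # hypothetical custom resource
  verbs: ["get", "list", "watch"]    # read-only access
```

You would still bind such a role to users or service accounts with a RoleBinding or ClusterRoleBinding, exactly as for built-in resources.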
@@ -237,11 +237,11 @@ Aggregated API servers may or may not use the same authentication, authorization ## Accessing a custom resource -Kubernetes [client libraries](/docs/reference/using-api/client-libraries/) can be used to access custom resources. Not all client libraries support custom resources. The go and python client libraries do. +Kubernetes [client libraries](/docs/reference/using-api/client-libraries/) can be used to access custom resources. Not all client libraries support custom resources. The _Go_ and _Python_ client libraries do. When you add a custom resource, you can access it using: -- kubectl +- `kubectl` - The kubernetes dynamic client. - A REST client that you write. - A client generated using [Kubernetes client generation tools](https://github.com/kubernetes/code-generator) (generating one is an advanced undertaking, but some projects may provide a client along with the CRD or AA). diff --git a/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md b/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md index 1afbc17c09b94..4c5ab12c03aae 100644 --- a/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md +++ b/content/en/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md @@ -1,117 +1,111 @@ --- -title: Poseidon-Firmament - An alternate scheduler +title: Poseidon-Firmament Scheduler content_template: templates/concept weight: 80 --- {{% capture overview %}} -**Current release of Poseidon-Firmament scheduler is an alpha release.** +{{< feature-state for_k8s_version="v1.6" state="alpha" >}} -Poseidon-Firmament scheduler is an alternate scheduler that can be deployed alongside the default Kubernetes scheduler. +The Poseidon-Firmament scheduler is an alternate scheduler that can be deployed alongside the default Kubernetes scheduler. {{% /capture %}} {{% capture body %}} -## Introduction +## Introduction -Poseidon is a service that acts as the integration glue for the [Firmament scheduler](https://github.com/Huawei-PaaS/firmament) with Kubernetes. Poseidon-Firmament scheduler augments the current Kubernetes scheduling capabilities. It incorporates novel flow network graph based scheduling capabilities alongside the default Kubernetes Scheduler. Firmament scheduler models workloads and clusters as flow networks and runs min-cost flow optimizations over these networks to make scheduling decisions. +Poseidon is a service that acts as the integration glue between the [Firmament scheduler](https://github.com/Huawei-PaaS/firmament) and Kubernetes. Poseidon-Firmament augments the current Kubernetes scheduling capabilities. It incorporates novel flow network graph based scheduling capabilities alongside the default Kubernetes scheduler. The Firmament scheduler models workloads and clusters as flow networks and runs min-cost flow optimizations over these networks to make scheduling decisions. -It models the scheduling problem as a constraint-based optimization over a flow network graph. This is achieved by reducing scheduling to a min-cost max-flow optimization problem. The Poseidon-Firmament scheduler dynamically refines the workload placements. +Firmament models the scheduling problem as a constraint-based optimization over a flow network graph. This is achieved by reducing scheduling to a min-cost max-flow optimization problem. The Poseidon-Firmament scheduler dynamically refines the workload placements. 
-Poseidon-Firmament scheduler runs alongside the default Kubernetes Scheduler as an alternate scheduler, so multiple schedulers run simultaneously.
+Poseidon-Firmament scheduler runs alongside the default Kubernetes scheduler as an alternate scheduler. You can simultaneously run multiple, different schedulers.
 
-## Key Advantages
+Flow graph scheduling with the Poseidon-Firmament scheduler provides the following advantages:
 
-### Flow graph scheduling based Poseidon-Firmament scheduler provides the following key advantages:
-- Workloads (pods) are bulk scheduled to enable scheduling at massive scale..
-- Based on the extensive performance test results, Poseidon-Firmament scales much better than the Kubernetes default scheduler as the number of nodes increase in a cluster. This is due to the fact that Poseidon-Firmament is able to amortize more and more work across workloads.
-- Poseidon-Firmament Scheduler outperforms the Kubernetes default scheduler by a wide margin when it comes to throughput performance numbers for scenarios where compute resource requirements are somewhat uniform across jobs (Replicasets/Deployments/Jobs). Poseidon-Firmament scheduler end-to-end throughput performance numbers, including bind time, consistently get better as the number of nodes in a cluster increase. For example, for a 2,700 node cluster (shown in the graphs [here](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/benchmark/README.md)), Poseidon-Firmament scheduler achieves a 7X or greater end-to-end throughput than the Kubernetes default scheduler, which includes bind time.
+- Workloads (Pods) are bulk scheduled to enable scheduling at massive scale.
+ The Poseidon-Firmament scheduler outperforms the Kubernetes default scheduler by a wide margin when it comes to throughput performance for scenarios where compute resource requirements are somewhat uniform across your workload (Deployments, ReplicaSets, Jobs).
+- The Poseidon-Firmament scheduler's end-to-end throughput performance and bind time improve as the number of nodes in a cluster increases. As you scale out, the Poseidon-Firmament scheduler is able to amortize more and more work across workloads.
+- Scheduling in Poseidon-Firmament is dynamic; it keeps cluster resources in a globally optimal state during every scheduling run.
+- The Poseidon-Firmament scheduler supports scheduling complex rule constraints.
 
-- Availability of complex rule constraints.
-- Scheduling in Poseidon-Firmament is dynamic; it keeps cluster resources in a global optimal state during every scheduling run.
-- Highly efficient resource utilizations.
 
-## Poseidon-Firmament Scheduler - How it works
+## How the Poseidon-Firmament scheduler works
 
+Kubernetes supports [using multiple schedulers](/docs/tasks/administer-cluster/configure-multiple-schedulers/). You can specify, for a particular Pod, that it is scheduled by a custom scheduler (“poseidon” in this case) by setting the `schedulerName` field in the PodSpec at the time of Pod creation. The default scheduler will ignore that Pod and allow the Poseidon-Firmament scheduler to schedule the Pod on a relevant node.
 
-As part of the Kubernetes multiple schedulers support, each new pod is typically scheduled by the default scheduler. Kubernetes can be instructed to use another scheduler by specifying the name of another custom scheduler (“poseidon” in our case) in the **schedulerName** field of the PodSpec at the time of pod creation.
In this case, the default scheduler will ignore that Pod and allow Poseidon scheduler to schedule the Pod on a relevant node. +For example: ```yaml apiVersion: v1 kind: Pod - ... spec: - schedulerName: poseidon -``` - - -{{< note >}} -For details about the design of this project see the [design document](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/design/README.md). -{{< /note >}} + schedulerName: poseidon +... +``` -## Possible Use Case Scenarios - When to use it +## Batch scheduling As mentioned earlier, Poseidon-Firmament scheduler enables an extremely high throughput scheduling environment at scale due to its bulk scheduling approach versus Kubernetes pod-at-a-time approach. In our extensive tests, we have observed substantial throughput benefits as long as resource requirements (CPU/Memory) for incoming Pods are uniform across jobs (Replicasets/Deployments/Jobs), mainly due to efficient amortization of work across jobs. Although, Poseidon-Firmament scheduler is capable of scheduling various types of workloads, such as service, batch, etc., the following are a few use cases where it excels the most: -1. For “Big Data/AI” jobs consisting of large number of tasks, throughput benefits are tremendous. -2. Service or batch jobs where workload resource requirements are uniform across jobs (Replicasets/Deployments/Jobs). +1. For “Big Data/AI” jobs consisting of large number of tasks, throughput benefits are tremendous. +2. Service or batch jobs where workload resource requirements are uniform across jobs (Replicasets/Deployments/Jobs). -## Current Project Stage +## Feature state -- **Alpha Release - Incubation repo.** at https://github.com/kubernetes-sigs/poseidon. -- Currently, Poseidon-Firmament scheduler **does not provide support for high availability**, our implementation assumes that the scheduler cannot fail. The [design document](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/design/README.md) describes possible ways to enable high availability, but we leave this to future work. -- We are **not aware of any production deployment** of Poseidon-Firmament scheduler at this time. -- Poseidon-Firmament is supported from Kubernetes release 1.6 and works with all subsequent releases. -- Release process for Poseidon and Firmament repos are in lock step. The current Poseidon release can be found [here](https://github.com/kubernetes-sigs/poseidon/releases) and the corresponding Firmament release can be found [here](https://github.com/Huawei-PaaS/firmament/releases). +Poseidon-Firmament is designed to work with Kubernetes release 1.6 and all subsequent releases. -## Features Comparison Matrix +{{< caution >}} +Poseidon-Firmament scheduler does not provide support for high availability; its implementation assumes that the scheduler cannot fail. +{{< /caution >}} +## Feature comparison {#feature-comparison-matrix} +{{< table caption="Feature comparison of Kubernetes and Poseidon-Firmament schedulers." >}} |Feature|Kubernetes Default Scheduler|Poseidon-Firmament Scheduler|Notes| |--- |--- |--- |--- | |Node Affinity/Anti-Affinity|Y|Y|| -|Pod Affinity/Anti-Affinity - including support for pod anti-affinity symmetry|Y|Y|Currently, the default scheduler outperforms the Poseidon-Firmament scheduler pod affinity/anti-affinity functionality. 
We are working towards resolving this.| +|Pod Affinity/Anti-Affinity - including support for pod anti-affinity symmetry|Y|Y|The default scheduler outperforms the Poseidon-Firmament scheduler pod affinity/anti-affinity functionality.| |Taints & Tolerations|Y|Y|| -|Baseline Scheduling capability in accordance to available compute resources (CPU & Memory) on a node|Y|Y**|Not all Predicates & Priorities are supported at this time.| -|Extreme Throughput at scale|Y**|Y|Bulk scheduling approach scales or increases workload placement. Substantial throughput benefits using Firmament scheduler as long as resource requirements (CPU/Memory) for incoming Pods is uniform across Replicasets/Deployments/Jobs. This is mainly due to efficient amortization of work across Replicasets/Deployments/Jobs . 1) For “Big Data/AI” jobs consisting of large no. of tasks, throughput benefits are tremendous. 2) Substantial throughput benefits also for service or batch job scenarios where workload resource requirements are uniform across Replicasets/Deployments/Jobs.| -|Optimal Scheduling|Pod-by-Pod scheduler, processes one pod at a time (may result into sub-optimal scheduling)|Bulk Scheduling (Optimal scheduling)|Pod-by-Pod Kubernetes default scheduler may assign tasks to a sub-optimal machine. By contrast, Firmament considers all unscheduled tasks at the same time together with their soft and hard constraints.| -|Colocation Interference Avoidance|N|N**|Planned in Poseidon-Firmament.| -|Priority Pre-emption|Y|N**|Partially exists in Poseidon-Firmament versus extensive support in Kubernetes default scheduler.| -|Inherent Re-Scheduling|N|Y**|Poseidon-Firmament scheduler supports workload re-scheduling. In each scheduling run it considers all the pods, including running pods, and as a result can migrate or evict pods – a globally optimal scheduling environment.| +|Baseline Scheduling capability in accordance to available compute resources (CPU & Memory) on a node|Y|Y†|**†** Not all Predicates & Priorities are supported with Poseidon-Firmament.| +|Extreme Throughput at scale|Y†|Y|**†** Bulk scheduling approach scales or increases workload placement. Firmament scheduler offers high throughput when resource requirements (CPU/Memory) for incoming Pods are uniform across ReplicaSets/Deployments/Jobs.| +|Colocation Interference Avoidance|N|N|| +|Priority Preemption|Y|N†|**†** Partially exists in Poseidon-Firmament versus extensive support in Kubernetes default scheduler.| +|Inherent Rescheduling|N|Y†|**†** Poseidon-Firmament scheduler supports workload re-scheduling. In each scheduling run, Poseidon-Firmament considers all Pods, including running Pods, and as a result can migrate or evict Pods – a globally optimal scheduling environment.| |Gang Scheduling|N|Y|| |Support for Pre-bound Persistence Volume Scheduling|Y|Y|| -|Support for Local Volume & Dynamic Persistence Volume Binding Scheduling|Y|N**|Planned.| -|High Availability|Y|N**|Planned.| -|Real-time metrics based scheduling|N|Y**|Initially supported using Heapster (now deprecated) for placing pods using actual cluster utilization statistics rather than reservations. 
Plans to switch over to "metric server".| +|Support for Local Volume & Dynamic Persistence Volume Binding Scheduling|Y|N|| +|High Availability|Y|N|| +|Real-time metrics based scheduling|N|Y†|**†** Partially supported in Poseidon-Firmament using Heapster (now deprecated) for placing Pods using actual cluster utilization statistics rather than reservations.| |Support for Max-Pod per node|Y|Y|Poseidon-Firmament scheduler seamlessly co-exists with Kubernetes default scheduler.| |Support for Ephemeral Storage, in addition to CPU/Memory|Y|Y|| +{{< /table >}} +## Installation -## Installation - -For in-cluster installation of Poseidon, please start at the [Installation instructions](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/install/README.md). - - -## Development +The [Poseidon-Firmament installation guide](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/install/README.md#Installation) explains how to deploy Poseidon-Firmament to your cluster. -For developers, please refer to the [Developer Setup instructions](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/devel/README.md). +## Performance comparison -## Latest Throughput Performance Testing Results +{{< note >}} + Please refer to the [latest benchmark results](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/benchmark/README.md) for detailed throughput performance comparison test results between Poseidon-Firmament scheduler and the Kubernetes default scheduler. +{{< /note >}} -Pod-by-pod schedulers, such as the Kubernetes default scheduler, typically process one pod at a time. These schedulers have the following crucial drawbacks: +Pod-by-pod schedulers, such as the Kubernetes default scheduler, process Pods in small batches (typically one at a time). These schedulers have the following crucial drawbacks: 1. The scheduler commits to a pod placement early and restricts the choices for other pods that wait to be placed. -2. There is limited opportunities for amortizing work across pods because they are considered for placement individually. +2. There is limited opportunities for amortizing work across pods because they are considered for placement individually. These downsides of pod-by-pod schedulers are addressed by batching or bulk scheduling in Poseidon-Firmament scheduler. Processing several pods in a batch allows the scheduler to jointly consider their placement, and thus to find the best trade-off for the whole batch instead of one pod. At the same time it amortizes work across pods resulting in much higher throughput. -{{< note >}} - Please refer to the [latest benchmark results](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/benchmark/README.md) for detailed throughput performance comparison test results between Poseidon-Firmament scheduler and the Kubernetes default scheduler. -{{< /note >}} - +{{% /capture %}} +{{% capture whatsnext %}} +* See [Poseidon-Firmament](https://github.com/kubernetes-sigs/poseidon#readme) on GitHub for more information. +* See the [design document](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/design/README.md) for Poseidon. +* Read [Firmament: Fast, Centralized Cluster Scheduling at Scale](https://www.usenix.org/system/files/conference/osdi16/osdi16-gog.pdf), the academic paper on the Firmament scheduling design. +* If you'd like to contribute to Poseidon-Firmament, refer to the [developer setup instructions](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/devel/README.md). 
{{% /capture %}} diff --git a/content/en/docs/concepts/overview/kubernetes-api.md b/content/en/docs/concepts/overview/kubernetes-api.md index 1c6066f4dbb9f..ad9569ede66f0 100644 --- a/content/en/docs/concepts/overview/kubernetes-api.md +++ b/content/en/docs/concepts/overview/kubernetes-api.md @@ -121,21 +121,22 @@ There are two supported paths to extending the API with [custom resources](/docs to make it seamless for clients. -## Enabling API groups +## Enabling or disabling API groups Certain resources and API groups are enabled by default. They can be enabled or disabled by setting `--runtime-config` -on apiserver. `--runtime-config` accepts comma separated values. For ex: to disable batch/v1, set +on apiserver. `--runtime-config` accepts comma separated values. For example: to disable batch/v1, set `--runtime-config=batch/v1=false`, to enable batch/v2alpha1, set `--runtime-config=batch/v2alpha1`. The flag accepts comma separated set of key=value pairs describing runtime configuration of the apiserver. -IMPORTANT: Enabling or disabling groups or resources requires restarting apiserver and controller-manager -to pick up the `--runtime-config` changes. +{{< note >}}Enabling or disabling groups or resources requires restarting apiserver and controller-manager +to pick up the `--runtime-config` changes.{{< /note >}} -## Enabling resources in the groups +## Enabling specific resources in the extensions/v1beta1 group -DaemonSets, Deployments, HorizontalPodAutoscalers, Ingresses, Jobs and ReplicaSets are enabled by default. -Other extensions resources can be enabled by setting `--runtime-config` on -apiserver. `--runtime-config` accepts comma separated values. For example: to disable deployments and ingress, set -`--runtime-config=extensions/v1beta1/deployments=false,extensions/v1beta1/ingresses=false` +DaemonSets, Deployments, StatefulSet, NetworkPolicies, PodSecurityPolicies and ReplicaSets in the `extensions/v1beta1` API group are disabled by default. +For example: to enable deployments and daemonsets, set +`--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/daemonsets=true`. + +{{< note >}}Individual resource enablement/disablement is only supported in the `extensions/v1beta1` API group for legacy reasons.{{< /note >}} {{% /capture %}} diff --git a/content/en/docs/concepts/overview/what-is-kubernetes.md b/content/en/docs/concepts/overview/what-is-kubernetes.md index 34e1ba2f8fadf..fbe74e4337921 100644 --- a/content/en/docs/concepts/overview/what-is-kubernetes.md +++ b/content/en/docs/concepts/overview/what-is-kubernetes.md @@ -2,7 +2,9 @@ reviewers: - bgrant0607 - mikedanese -title: What is Kubernetes +title: What is Kubernetes? +description: > + Kubernetes is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. Kubernetes services, support, and tools are widely available. content_template: templates/concept weight: 10 card: @@ -17,9 +19,10 @@ This page is an overview of Kubernetes. {{% capture body %}} Kubernetes is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates both declarative configuration and automation. It has a large, rapidly growing ecosystem. Kubernetes services, support, and tools are widely available. -The name Kubernetes originates from Greek, meaning helmsman or pilot. Google open-sourced the Kubernetes project in 2014. 
Kubernetes builds upon a [decade and a half of experience that Google has with running production workloads at scale](https://ai.google/research/pubs/pub43438), combined with best-of-breed ideas and practices from the community. +The name Kubernetes originates from Greek, meaning helmsman or pilot. Google open-sourced the Kubernetes project in 2014. Kubernetes combines [over 15 years of Google's experience](/blog/2015/04/borg-predecessor-to-kubernetes/) running production workloads at scale with best-of-breed ideas and practices from the community. ## Going back in time + Let's take a look at why Kubernetes is so useful by going back in time. ![Deployment evolution](/images/docs/Container_Evolution.svg) @@ -42,13 +45,13 @@ Containers have become popular because they provide extra benefits, such as: * Dev and Ops separation of concerns: create application container images at build/release time rather than deployment time, thereby decoupling applications from infrastructure. * Observability not only surfaces OS-level information and metrics, but also application health and other signals. * Environmental consistency across development, testing, and production: Runs the same on a laptop as it does in the cloud. -* Cloud and OS distribution portability: Runs on Ubuntu, RHEL, CoreOS, on-prem, Google Kubernetes Engine, and anywhere else. +* Cloud and OS distribution portability: Runs on Ubuntu, RHEL, CoreOS, on-premises, on major public clouds, and anywhere else. * Application-centric management: Raises the level of abstraction from running an OS on virtual hardware to running an application on an OS using logical resources. * Loosely coupled, distributed, elastic, liberated micro-services: applications are broken into smaller, independent pieces and can be deployed and managed dynamically – not a monolithic stack running on one big single-purpose machine. * Resource isolation: predictable application performance. * Resource utilization: high efficiency and density. -## Why you need Kubernetes and what can it do +## Why you need Kubernetes and what it can do {#why-you-need-kubernetes-and-what-can-it-do} Containers are a good way to bundle and run your applications. In a production environment, you need to manage the containers that run the applications and ensure that there is no downtime. For example, if a container goes down, another container needs to start. Wouldn't it be easier if this behavior was handled by a system? 
diff --git a/content/en/docs/concepts/overview/working-with-objects/annotations.md b/content/en/docs/concepts/overview/working-with-objects/annotations.md
index 7e2446ba5a607..f88c6a0003d90 100644
--- a/content/en/docs/concepts/overview/working-with-objects/annotations.md
+++ b/content/en/docs/concepts/overview/working-with-objects/annotations.md
@@ -82,7 +82,7 @@ metadata:
 spec:
   containers:
   - name: nginx
-    image: nginx:1.7.9
+    image: nginx:1.14.2
     ports:
     - containerPort: 80
 
diff --git a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md
index 0ce8d81bf6bfc..7364596306f63 100644
--- a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md
+++ b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md
@@ -26,10 +26,27 @@ To work with Kubernetes objects--whether to create, modify, or delete them--you'
 
 ### Object Spec and Status
 
-Every Kubernetes object includes two nested object fields that govern the object's configuration: the object *spec* and the object *status*. The *spec*, which you must provide, describes your desired state for the object--the characteristics that you want the object to have. The *status* describes the *actual state* of the object, and is supplied and updated by the Kubernetes system. At any given time, the Kubernetes Control Plane actively manages an object's actual state to match the desired state you supplied.
-
-
-For example, a Kubernetes Deployment is an object that can represent an application running on your cluster. When you create the Deployment, you might set the Deployment spec to specify that you want three replicas of the application to be running. The Kubernetes system reads the Deployment spec and starts three instances of your desired application--updating the status to match your spec. If any of those instances should fail (a status change), the Kubernetes system responds to the difference between spec and status by making a correction--in this case, starting a replacement instance.
+Almost every Kubernetes object includes two nested object fields that govern
+the object's configuration: the object *`spec`* and the object *`status`*.
+For objects that have a `spec`, you have to set this when you create the object,
+providing a description of the characteristics you want the resource to have:
+its _desired state_.
+
+The `status` describes the _current state_ of the object, supplied and updated
+by Kubernetes and its components. The Kubernetes
+{{< glossary_tooltip text="control plane" term_id="control-plane" >}} continually
+and actively manages every object's actual state to match the desired state you
+supplied.
+
+For example: in Kubernetes, a Deployment is an object that can represent an
+application running on your cluster. When you create the Deployment, you
+might set the Deployment `spec` to specify that you want three replicas of
+the application to be running. The Kubernetes system reads the Deployment
+spec and starts three instances of your desired application--updating
+the status to match your spec. If any of those instances should fail
+(a status change), the Kubernetes system responds to the difference
+between spec and status by making a correction--in this case, starting
+a replacement instance.
 
 For more information on the object spec, status, and metadata, see the [Kubernetes API Conventions](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md).
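To make the example above concrete, here is a minimal sketch (not taken from the page) of a Deployment whose `spec` declares a desired state of three replicas; the object name and labels are illustrative, and the image matches the one used in the surrounding examples:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment      # illustrative name
spec:
  replicas: 3                 # desired state: three running replicas
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2   # same image version used in the examples above
```

After you apply such a manifest, Kubernetes records the observed state under `status`, and the Deployment controller keeps starting or replacing Pods until the status matches the spec.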
diff --git a/content/en/docs/concepts/overview/working-with-objects/labels.md b/content/en/docs/concepts/overview/working-with-objects/labels.md index a74e21910303a..0de7f04a8cc0b 100644 --- a/content/en/docs/concepts/overview/working-with-objects/labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/labels.md @@ -69,7 +69,7 @@ metadata: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/docs/concepts/overview/working-with-objects/names.md b/content/en/docs/concepts/overview/working-with-objects/names.md index 60c07391a598f..01bb53b56d5ff 100644 --- a/content/en/docs/concepts/overview/working-with-objects/names.md +++ b/content/en/docs/concepts/overview/working-with-objects/names.md @@ -64,7 +64,7 @@ metadata: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 ``` diff --git a/content/en/docs/concepts/policy/limit-range.md b/content/en/docs/concepts/policy/limit-range.md index b4a9579a36308..971a7a372c9f8 100644 --- a/content/en/docs/concepts/policy/limit-range.md +++ b/content/en/docs/concepts/policy/limit-range.md @@ -9,56 +9,58 @@ weight: 10 {{% capture overview %}} By default, containers run with unbounded [compute resources](/docs/user-guide/compute-resources) on a Kubernetes cluster. -With Resource quotas, cluster administrators can restrict the resource consumption and creation on a namespace basis. -Within a namespace, a Pod or Container can consume as much CPU and memory as defined by the namespace's resource quota. There is a concern that one Pod or Container could monopolize all of the resources. Limit Range is a policy to constrain resource by Pod or Container in a namespace. +With resource quotas, cluster administrators can restrict resource consumption and creation on a namespace basis. +Within a namespace, a Pod or Container can consume as much CPU and memory as defined by the namespace's resource quota. There is a concern that one Pod or Container could monopolize all available resources. A LimitRange is a policy to constrain resource allocations (to Pods or Containers) in a namespace. {{% /capture %}} {{% capture body %}} -A limit range, defined by a `LimitRange` object, provides constraints that can: +A _LimitRange_ provides constraints that can: - Enforce minimum and maximum compute resources usage per Pod or Container in a namespace. - Enforce minimum and maximum storage request per PersistentVolumeClaim in a namespace. - Enforce a ratio between request and limit for a resource in a namespace. - Set default request/limit for compute resources in a namespace and automatically inject them to Containers at runtime. -## Enabling Limit Range +## Enabling LimitRange -Limit Range support is enabled by default for many Kubernetes distributions. It is +LimitRange support is enabled by default for many Kubernetes distributions. It is enabled when the apiserver `--enable-admission-plugins=` flag has `LimitRanger` admission controller as one of its arguments. -A limit range is enforced in a particular namespace when there is a -`LimitRange` object in that namespace. +A LimitRange is enforced in a particular namespace when there is a +LimitRange object in that namespace. -### Overview of Limit Range: +The name of a LimitRange object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +### Overview of Limit Range - The administrator creates one `LimitRange` in one namespace. 
- Users create resources like Pods, Containers, and PersistentVolumeClaims in the namespace.
-- The `LimitRanger` admission controller enforces defaults limits for all Pods and Container that do not set compute resource requirements and tracks usage to ensure it does not exceed resource minimum , maximum and ratio defined in any `LimitRange` present in the namespace.
-- If creating or updating a resource (Pod, Container, PersistentVolumeClaim) violates a limit range constraint, the request to the API server will fail with HTTP status code `403 FORBIDDEN` and a message explaining the constraint that would have been violated.
-- If limit range is activated in a namespace for compute resources like `cpu` and `memory`, users must specify
- requests or limits for those values; otherwise, the system may reject pod creation.
-- LimitRange validations occurs only at Pod Admission stage, not on Running pods.
-
+- The `LimitRanger` admission controller enforces defaults and limits for all Pods and Containers that do not set compute resource requirements and tracks usage to ensure it does not exceed resource minimum, maximum and ratio defined in any LimitRange present in the namespace.
+- If creating or updating a resource (Pod, Container, PersistentVolumeClaim) violates a LimitRange constraint, the request to the API server will fail with an HTTP status code `403 FORBIDDEN` and a message explaining the constraint that has been violated.
+- If a LimitRange is activated in a namespace for compute resources like `cpu` and `memory`, users must specify
+ requests or limits for those values. Otherwise, the system may reject Pod creation.
+- LimitRange validation occurs only at the Pod admission stage, not on running Pods.
 
 Examples of policies that could be created using limit range are:
 
-- In a 2 node cluster with a capacity of 8 GiB RAM, and 16 cores, constrain Pods in a namespace to request 100m and not exceeds 500m for CPU , request 200Mi and not exceed 600Mi
-- Define default CPU limits and request to 150m and Memory default request to 300Mi for containers started with no cpu and memory requests in their spec.
+- In a 2 node cluster with a capacity of 8 GiB RAM and 16 cores, constrain Pods in a namespace to request 100m of CPU with a max limit of 500m for CPU and request 200Mi for Memory with a max limit of 600Mi for Memory.
+- Define default CPU limit and request to 150m and memory default request to 300Mi for Containers started with no cpu and memory requests in their specs.
 
 In the case where the total limits of the namespace is less than the sum of the limits of the Pods/Containers,
-there may be contention for resources; The Containers or Pods will not be created.
+there may be contention for resources. In this case, the Containers or Pods will not be created.
 
-Neither contention nor changes to limitrange will affect already created resources.
+Neither contention nor changes to a LimitRange will affect already created resources.
 
 ## Limiting Container compute resources
 
 The following section discusses the creation of a LimitRange acting at Container Level.
-A Pod with 04 containers is first created; each container within the Pod has a specific `spec.resource` configuration
-each container within the pod is handled differently by the LimitRanger admission controller.
+A Pod with four Containers is first created. Each Container within the Pod has its own `resources` configuration.
+Each Container within the Pod is handled differently by the `LimitRanger` admission controller.
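Before walking through the demo, here is a hedged sketch of what a Container-scoped LimitRange such as the `limit-mem-cpu-per-container` object used below plausibly contains. The min/max/default values mirror the `kubectl describe` output shown later in this section; the authoritative manifest is `admin/resource/limit-mem-cpu-container.yaml`.

```yaml
# Sketch only; see limit-mem-cpu-container.yaml for the manifest actually used in this walkthrough.
apiVersion: v1
kind: LimitRange
metadata:
  name: limit-mem-cpu-per-container
spec:
  limits:
  - type: Container          # constraints apply to each individual Container
    min:
      cpu: "100m"
      memory: "99Mi"
    max:
      cpu: "800m"
      memory: "1Gi"
    defaultRequest:          # injected as requests when a Container sets none
      cpu: "110m"
      memory: "111Mi"
    default:                 # injected as limits when a Container sets none
      cpu: "700m"
      memory: "900Mi"
```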
Create a namespace `limitrange-demo` using the following kubectl command:
 
@@ -75,16 +77,16 @@ kubectl config set-context --current --namespace=limitrange-demo
 Here is the configuration file for a LimitRange object:
 {{< codenew file="admin/resource/limit-mem-cpu-container.yaml" >}}
 
-This object defines minimum and maximum Memory/CPU limits, default cpu/Memory requests and default limits for CPU/Memory resources to be apply to containers.
+This object defines minimum and maximum CPU/Memory limits, default CPU/Memory requests, and default limits for CPU/Memory resources to be applied to Containers.
 
-Create the `limit-mem-cpu-per-container` LimitRange in the `limitrange-demo` namespace with the following kubectl command:
+Create the `limit-mem-cpu-per-container` LimitRange with the following kubectl command:
 
 ```shell
-kubectl create -f https://k8s.io/examples/admin/resource/limit-mem-cpu-container.yaml -n limitrange-demo
+kubectl create -f https://k8s.io/examples/admin/resource/limit-mem-cpu-container.yaml
```
 
```shell
-kubectl describe limitrange/limit-mem-cpu-per-container -n limitrange-demo
+kubectl describe limitrange/limit-mem-cpu-per-container
```
 
```shell
@@ -94,13 +96,13 @@ Container cpu 100m 800m 110m 700m -
 Container memory 99Mi 1Gi 111Mi 900Mi -
 ```
 
-Here is the configuration file for a Pod with 04 containers to demonstrate LimitRange features :
+Here is the configuration file for a Pod with four Containers to demonstrate LimitRange features:
 
 {{< codenew file="admin/resource/limit-range-pod-1.yaml" >}}
 
 Create the `busybox1` Pod:
 
 ```shell
-kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-1.yaml -n limitrange-demo
+kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-1.yaml
```
 
 ### Container spec with valid CPU/Memory requests and limits
 
@@ -108,7 +110,7 @@ kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-1.yaml -
 View the `busybox-cnt01` resource configuration:
 
 ```shell
-kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[0].resources"
+kubectl get po/busybox1 -o json | jq ".spec.containers[0].resources"
```
 
```json
@@ -125,9 +127,9 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[0].res
 ```
 
 - The `busybox-cnt01` Container inside `busybox` Pod defined `requests.cpu=100m` and `requests.memory=100Mi`.
-- `100m <= 500m <= 800m` , The container cpu limit (500m) falls inside the authorized CPU limit range.
-- `99Mi <= 200Mi <= 1Gi` , The container memory limit (200Mi) falls inside the authorized Memory limit range.
-- No request/limits ratio validation for CPU/Memory , thus the container is valid and created.
+- `100m <= 500m <= 800m`, the Container cpu limit (500m) falls inside the authorized CPU limit range.
+- `99Mi <= 200Mi <= 1Gi`, the Container memory limit (200Mi) falls inside the authorized Memory limit range.
+- No request/limits ratio validation for CPU/Memory, so the Container is valid and created.
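For reference, here is a sketch of how the `busybox-cnt01` entry in `limit-range-pod-1.yaml` plausibly declares these values; the image is an assumption for this sketch, while the requests and limits match the JSON output above.

```yaml
# Sketch only: one entry under the Pod's spec.containers list.
# The authoritative Pod manifest is limit-range-pod-1.yaml.
- name: busybox-cnt01
  image: busybox            # assumed image for this sketch
  resources:
    requests:
      cpu: "100m"
      memory: "100Mi"
    limits:
      cpu: "500m"
      memory: "200Mi"
```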
### Container spec with a valid CPU/Memory requests but no limits
 
 View the `busybox-cnt02` resource configuration
 
 ```shell
-kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[1].resources"
+kubectl get po/busybox1 -o json | jq ".spec.containers[1].resources"
```
 
```json
@@ -151,17 +153,18 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[1].res
 }
 ```
 - The `busybox-cnt02` Container inside `busybox1` Pod defined `requests.cpu=100m` and `requests.memory=100Mi` but not limits for cpu and memory.
-- The container do not have a limits section, the default limits defined in the limit-mem-cpu-per-container LimitRange object are injected to this container `limits.cpu=700mi` and `limits.memory=900Mi`.
-- `100m <= 700m <= 800m` , The container cpu limit (700m) falls inside the authorized CPU limit range.
-- `99Mi <= 900Mi <= 1Gi` , The container memory limit (900Mi) falls inside the authorized Memory limit range.
-- No request/limits ratio set , thus the container is valid and created.
+- The Container does not have a limits section. The default limits defined in the `limit-mem-cpu-per-container` LimitRange object are injected into this Container: `limits.cpu=700m` and `limits.memory=900Mi`.
+- `100m <= 700m <= 800m`, the Container cpu limit (700m) falls inside the authorized CPU limit range.
+- `99Mi <= 900Mi <= 1Gi`, the Container memory limit (900Mi) falls inside the authorized Memory limit range.
+- No request/limits ratio set, so the Container is valid and created.
 
-### Container spec with a valid CPU/Memory limits but no requests
-View the `busybox-cnt03` resource configuration
+
+### Container spec with valid CPU/Memory limits but no requests
+
+View the `busybox-cnt03` resource configuration:
 
 ```shell
-kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[2].resources"
+kubectl get po/busybox1 -o json | jq ".spec.containers[2].resources"
```
```json
{
  "limits": {
    "cpu": "500m",
    "memory": "200Mi"
  },
  "requests": {
    "cpu": "500m",
    "memory": "200Mi"
  }
}
@@ -177,17 +180,17 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[2].res
 ```
 
 - The `busybox-cnt03` Container inside `busybox1` Pod defined `limits.cpu=500m` and `limits.memory=200Mi` but no `requests` for cpu and memory.
-- The container do not define a request section, the defaultRequest defined in the limit-mem-cpu-per-container LimitRange is not used to fill its limits section but the limits defined by the container are set as requests `limits.cpu=500m` and `limits.memory=200Mi`.
-- `100m <= 500m <= 800m` , The container cpu limit (500m) falls inside the authorized CPU limit range.
-- `99Mi <= 200Mi <= 1Gi` , The container memory limit (200Mi) falls inside the authorized Memory limit range.
-- No request/limits ratio set , thus the container is valid and created.
+- The Container does not define a requests section. The `defaultRequest` defined in the `limit-mem-cpu-per-container` LimitRange is not used to fill its requests section; instead, the limits defined by the Container are set as its requests: `requests.cpu=500m` and `requests.memory=200Mi`.
+- `100m <= 500m <= 800m`, the Container cpu limit (500m) falls inside the authorized CPU limit range.
+- `99Mi <= 200Mi <= 1Gi`, the Container memory limit (200Mi) falls inside the authorized Memory limit range.
+- No request/limits ratio set, so the Container is valid and created.
### Container spec with no CPU/Memory requests/limits
View the `busybox-cnt04` resource configuration:
```shell
-kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[3].resources"
+kubectl get po/busybox1 -o json | jq ".spec.containers[3].resources"
```
```json
@@ -204,27 +207,27 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[3].res
```
- The `busybox-cnt04` Container inside `busybox1` define neither `limits` nor `requests`.
-- The container do not define a limit section, the default limit defined in the limit-mem-cpu-per-container LimitRange is used to fill its request
+- The Container does not define a limits section; the default limit defined in the `limit-mem-cpu-per-container` LimitRange is used to fill its limits section
`limits.cpu=700m and` `limits.memory=900Mi` .
-- The container do not define a request section, the defaultRequest defined in the limit-mem-cpu-per-container LimitRange is used to fill its request section requests.cpu=110m and requests.memory=111Mi
-- `100m <= 700m <= 800m` , The container cpu limit (700m) falls inside the authorized CPU limit range.
-- `99Mi <= 900Mi <= 1Gi` , The container memory limit (900Mi) falls inside the authorized Memory limitrange .
-- No request/limits ratio set , thus the container is valid and created.
+- The Container does not define a request section; the `defaultRequest` defined in the `limit-mem-cpu-per-container` LimitRange is used to fill its request section: `requests.cpu=110m` and `requests.memory=111Mi`.
+- `100m <= 700m <= 800m`, the Container CPU limit (700m) falls inside the authorized CPU limit range.
+- `99Mi <= 900Mi <= 1Gi`, the Container memory limit (900Mi) falls inside the authorized Memory limit range.
+- No request/limits ratio set, so the Container is valid and created.
-All containers defined in the `busybox` Pod passed LimitRange validations, this the Pod is valid and create in the namespace.
+All Containers defined in the `busybox1` Pod passed LimitRange validations, so the Pod is valid and created in the namespace.
## Limiting Pod compute resources
-The following section discusses how to constrain resources at Pod level.
+The following section discusses how to constrain resources at the Pod level.
{{< codenew file="admin/resource/limit-mem-cpu-pod.yaml" >}} -Without having to delete `busybox1` Pod, create the `limit-mem-cpu-pod` LimitRange in the `limitrange-demo` namespace: +Without having to delete the `busybox1` Pod, create the `limit-mem-cpu-pod` LimitRange in the `limitrange-demo` namespace: ```shell -kubectl apply -f https://k8s.io/examples/admin/resource/limit-mem-cpu-pod.yaml -n limitrange-demo +kubectl apply -f https://k8s.io/examples/admin/resource/limit-mem-cpu-pod.yaml ``` -The limitrange is created and limits CPU to 2 Core and Memory to 2Gi per Pod: +The LimitRange is created and limits CPU to 2 Core and Memory to 2Gi per Pod: ```shell limitrange/limit-mem-cpu-per-pod created @@ -250,36 +253,36 @@ Now create the `busybox2` Pod: {{< codenew file="admin/resource/limit-range-pod-2.yaml" >}} ```shell -kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-2.yaml -n limitrange-demo +kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-2.yaml ``` -The `busybox2` Pod definition is identical to `busybox1` but an error is reported since Pod's resources are now limited: +The `busybox2` Pod definition is identical to `busybox1`, but an error is reported since the Pod's resources are now limited: ```shell Error from server (Forbidden): error when creating "limit-range-pod-2.yaml": pods "busybox2" is forbidden: [maximum cpu usage per Pod is 2, but limit is 2400m., maximum memory usage per Pod is 2Gi, but limit is 2306867200.] ``` ```shell -kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[].resources.limits.memory" +kubectl get po/busybox1 -o json | jq ".spec.containers[].resources.limits.memory" "200Mi" "900Mi" "200Mi" "900Mi" ``` -`busybox2` Pod will not be admitted on the cluster since the total memory limit of its container is greater than the limit defined in the LimitRange. +`busybox2` Pod will not be admitted on the cluster since the total memory limit of its Container is greater than the limit defined in the LimitRange. `busybox1` will not be evicted since it was created and admitted on the cluster before the LimitRange creation. 
## Limiting Storage resources -You can enforce minimum and maximum size of [storage resources](/docs/concepts/storage/persistent-volumes/) that can be requested by each PersistentVolumeClaim in a namespace using a LimitRange: +You can enforce minimum and maximum size of [storage resources](/docs/concepts/storage/persistent-volumes/) that can be requested by each PersistentVolumeClaim in a namespace using a LimitRange: {{< codenew file="admin/resource/storagelimits.yaml" >}} Apply the YAML using `kubectl create`: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/storagelimits.yaml -n limitrange-demo +kubectl create -f https://k8s.io/examples/admin/resource/storagelimits.yaml ``` ```shell @@ -305,7 +308,7 @@ PersistentVolumeClaim storage 1Gi 2Gi - - - {{< codenew file="admin/resource/pvc-limit-lower.yaml" >}} ```shell -kubectl create -f https://k8s.io/examples/admin/resource/pvc-limit-lower.yaml -n limitrange-demo +kubectl create -f https://k8s.io/examples/admin/resource/pvc-limit-lower.yaml ``` While creating a PVC with `requests.storage` lower than the Min value in the LimitRange, an Error thrown by the server: @@ -319,7 +322,7 @@ Same behaviour is noted if the `requests.storage` is greater than the Max value {{< codenew file="admin/resource/pvc-limit-greater.yaml" >}} ```shell -kubectl create -f https://k8s.io/examples/admin/resource/pvc-limit-greater.yaml -n limitrange-demo +kubectl create -f https://k8s.io/examples/admin/resource/pvc-limit-greater.yaml ``` ```shell @@ -328,9 +331,9 @@ Error from server (Forbidden): error when creating "pvc-limit-greater.yaml": per ## Limits/Requests Ratio -If `LimitRangeItem.maxLimitRequestRatio` is specified in the `LimitRangeSpec`, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value +If `LimitRangeItem.maxLimitRequestRatio` is specified in the `LimitRangeSpec`, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value. -The following `LimitRange` enforces memory limit to be at most twice the amount of the memory request for any pod in the namespace. +The following LimitRange enforces memory limit to be at most twice the amount of the memory request for any Pod in the namespace: {{< codenew file="admin/resource/limit-memory-ratio-pod.yaml" >}} @@ -352,7 +355,7 @@ Type Resource Min Max Default Request Default Limit Max Limit/Reques Pod memory - - - - 2 ``` -Let's create a pod with `requests.memory=100Mi` and `limits.memory=300Mi`: +Create a pod with `requests.memory=100Mi` and `limits.memory=300Mi`: {{< codenew file="admin/resource/limit-range-pod-3.yaml" >}} @@ -360,19 +363,24 @@ Let's create a pod with `requests.memory=100Mi` and `limits.memory=300Mi`: kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-3.yaml ``` -The pod creation failed as the ratio here (`3`) is greater than the enforced limit (`2`) in `limit-memory-ratio-pod` LimitRange +The pod creation failed as the ratio here (`3`) is greater than the enforced limit (`2`) in `limit-memory-ratio-pod` LimitRange: -```shell +``` Error from server (Forbidden): error when creating "limit-range-pod-3.yaml": pods "busybox3" is forbidden: memory max limit to request ratio per Pod is 2, but provided ratio is 3.000000. 
``` -### Clean up +## Clean up Delete the `limitrange-demo` namespace to free all resources: ```shell kubectl delete ns limitrange-demo ``` +Change your context to `default` namespace with the following command: + +```shell +kubectl config set-context --current --namespace=default +``` ## Examples diff --git a/content/en/docs/concepts/policy/pod-security-policy.md b/content/en/docs/concepts/policy/pod-security-policy.md index 45b48f62aea94..f482c5efb2200 100644 --- a/content/en/docs/concepts/policy/pod-security-policy.md +++ b/content/en/docs/concepts/policy/pod-security-policy.md @@ -197,6 +197,8 @@ alias kubectl-user='kubectl --as=system:serviceaccount:psp-example:fake-user -n Define the example PodSecurityPolicy object in a file. This is a policy that simply prevents the creation of privileged pods. +The name of a PodSecurityPolicy object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). {{< codenew file="policy/example-psp.yaml" >}} @@ -419,8 +421,10 @@ The **recommended minimum set** of allowed volumes for new PSPs are: - projected {{< warning >}} -PodSecurityPolicy does not limit the types of `PersistentVolume` objects that may be referenced by a `PersistentVolumeClaim`. -Only trusted users should be granted permission to create `PersistentVolume` objects. +PodSecurityPolicy does not limit the types of `PersistentVolume` objects that +may be referenced by a `PersistentVolumeClaim`, and hostPath type +`PersistentVolumes` do not support read-only access mode. Only trusted users +should be granted permission to create `PersistentVolume` objects. {{< /warning >}} **FSGroup** - Controls the supplemental group applied to some volumes. diff --git a/content/en/docs/concepts/policy/resource-quotas.md b/content/en/docs/concepts/policy/resource-quotas.md index 92f43fe3a3ba2..d48c2db88ade8 100644 --- a/content/en/docs/concepts/policy/resource-quotas.md +++ b/content/en/docs/concepts/policy/resource-quotas.md @@ -37,6 +37,9 @@ Resource quotas work like this: the `LimitRanger` admission controller to force defaults for pods that make no compute resource requirements. See the [walkthrough](/docs/tasks/administer-cluster/quota-memory-cpu-namespace/) for an example of how to avoid this problem. +The name of a `ResourceQuota` object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + Examples of policies that could be created using namespaces and quotas are: - In a cluster with a capacity of 32 GiB RAM, and 16 cores, let team A use 20 GiB and 10 cores, diff --git a/content/en/docs/concepts/scheduling/kube-scheduler.md b/content/en/docs/concepts/scheduling/kube-scheduler.md index 0fc46e48ee4a2..7e3074f1148c5 100644 --- a/content/en/docs/concepts/scheduling/kube-scheduler.md +++ b/content/en/docs/concepts/scheduling/kube-scheduler.md @@ -1,7 +1,7 @@ --- title: Kubernetes Scheduler content_template: templates/concept -weight: 60 +weight: 50 --- {{% capture overview %}} @@ -54,14 +54,12 @@ individual and collective resource requirements, hardware / software / policy constraints, affinity and anti-affinity specifications, data locality, inter-workload interference, and so on. -## Scheduling with kube-scheduler {#kube-scheduler-implementation} +### Node selection in kube-scheduler {#kube-scheduler-implementation} kube-scheduler selects a node for the pod in a 2-step operation: 1. Filtering - -2. Scoring - +1. 
Scoring The _filtering_ step finds the set of Nodes where it's feasible to schedule the Pod. For example, the PodFitsResources filter checks whether a @@ -78,105 +76,15 @@ Finally, kube-scheduler assigns the Pod to the Node with the highest ranking. If there is more than one node with equal scores, kube-scheduler selects one of these at random. +There are two supported ways to configure the filtering and scoring behavior +of the scheduler: -### Default policies - -kube-scheduler has a default set of scheduling policies. - -### Filtering - -- `PodFitsHostPorts`: Checks if a Node has free ports (the network protocol kind) - for the Pod ports the Pod is requesting. - -- `PodFitsHost`: Checks if a Pod specifies a specific Node by its hostname. - -- `PodFitsResources`: Checks if the Node has free resources (eg, CPU and Memory) - to meet the requirement of the Pod. - -- `PodMatchNodeSelector`: Checks if a Pod's Node {{< glossary_tooltip term_id="selector" >}} - matches the Node's {{< glossary_tooltip text="label(s)" term_id="label" >}}. - -- `NoVolumeZoneConflict`: Evaluate if the {{< glossary_tooltip text="Volumes" term_id="volume" >}} - that a Pod requests are available on the Node, given the failure zone restrictions for - that storage. - -- `NoDiskConflict`: Evaluates if a Pod can fit on a Node due to the volumes it requests, - and those that are already mounted. - -- `MaxCSIVolumeCount`: Decides how many {{< glossary_tooltip text="CSI" term_id="csi" >}} - volumes should be attached, and whether that's over a configured limit. - -- `CheckNodeMemoryPressure`: If a Node is reporting memory pressure, and there's no - configured exception, the Pod won't be scheduled there. - -- `CheckNodePIDPressure`: If a Node is reporting that process IDs are scarce, and - there's no configured exception, the Pod won't be scheduled there. - -- `CheckNodeDiskPressure`: If a Node is reporting storage pressure (a filesystem that - is full or nearly full), and there's no configured exception, the Pod won't be - scheduled there. - -- `CheckNodeCondition`: Nodes can report that they have a completely full filesystem, - that networking isn't available or that kubelet is otherwise not ready to run Pods. - If such a condition is set for a Node, and there's no configured exception, the Pod - won't be scheduled there. - -- `PodToleratesNodeTaints`: checks if a Pod's {{< glossary_tooltip text="tolerations" term_id="toleration" >}} - can tolerate the Node's {{< glossary_tooltip text="taints" term_id="taint" >}}. - -- `CheckVolumeBinding`: Evaluates if a Pod can fit due to the volumes it requests. - This applies for both bound and unbound - {{< glossary_tooltip text="PVCs" term_id="persistent-volume-claim" >}}. - -### Scoring - -- `SelectorSpreadPriority`: Spreads Pods across hosts, considering Pods that - belong to the same {{< glossary_tooltip text="Service" term_id="service" >}}, - {{< glossary_tooltip term_id="statefulset" >}} or - {{< glossary_tooltip term_id="replica-set" >}}. - -- `InterPodAffinityPriority`: Computes a sum by iterating through the elements - of weightedPodAffinityTerm and adding “weight” to the sum if the corresponding - PodAffinityTerm is satisfied for that node; the node(s) with the highest sum - are the most preferred. - -- `LeastRequestedPriority`: Favors nodes with fewer requested resources. In other - words, the more Pods that are placed on a Node, and the more resources those - Pods use, the lower the ranking this policy will give. 
- -- `MostRequestedPriority`: Favors nodes with most requested resources. This policy - will fit the scheduled Pods onto the smallest number of Nodes needed to run your - overall set of workloads. - -- `RequestedToCapacityRatioPriority`: Creates a requestedToCapacity based ResourceAllocationPriority using default resource scoring function shape. - -- `BalancedResourceAllocation`: Favors nodes with balanced resource usage. - -- `NodePreferAvoidPodsPriority`: Prioritizes nodes according to the node annotation - `scheduler.alpha.kubernetes.io/preferAvoidPods`. You can use this to hint that - two different Pods shouldn't run on the same Node. - -- `NodeAffinityPriority`: Prioritizes nodes according to node affinity scheduling - preferences indicated in PreferredDuringSchedulingIgnoredDuringExecution. - You can read more about this in [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/). - -- `TaintTolerationPriority`: Prepares the priority list for all the nodes, based on - the number of intolerable taints on the node. This policy adjusts a node's rank - taking that list into account. - -- `ImageLocalityPriority`: Favors nodes that already have the - {{< glossary_tooltip text="container images" term_id="image" >}} for that - Pod cached locally. - -- `ServiceSpreadingPriority`: For a given Service, this policy aims to make sure that - the Pods for the Service run on different nodes. It favours scheduling onto nodes - that don't have Pods for the service already assigned there. The overall outcome is - that the Service becomes more resilient to a single Node failure. - -- `CalculateAntiAffinityPriorityMap`: This policy helps implement - [pod anti-affinity](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). - -- `EqualPriorityMap`: Gives an equal weight of one to all nodes. +1. [Scheduling Policies](/docs/reference/scheduling/policies) allow you to + configure _Predicates_ for filtering and _Priorities_ for scoring. +1. [Scheduling Profiles](/docs/reference/scheduling/profiles) allow you to + configure Plugins that implement different scheduling stages, including: + `QueueSort`, `Filter`, `Score`, `Bind`, `Reserve`, `Permit`, and others. You + can also configure the kube-scheduler to run different profiles. {{% /capture %}} {{% capture whatsnext %}} diff --git a/content/en/docs/concepts/configuration/scheduling-framework.md b/content/en/docs/concepts/scheduling/scheduling-framework.md similarity index 56% rename from content/en/docs/concepts/configuration/scheduling-framework.md rename to content/en/docs/concepts/scheduling/scheduling-framework.md index 58fb36b192307..ddc2225cac28a 100644 --- a/content/en/docs/concepts/configuration/scheduling-framework.md +++ b/content/en/docs/concepts/scheduling/scheduling-framework.md @@ -3,14 +3,14 @@ reviewers: - ahg-g title: Scheduling Framework content_template: templates/concept -weight: 70 +weight: 60 --- {{% capture overview %}} {{< feature-state for_k8s_version="1.15" state="alpha" >}} -The scheduling framework is a new pluggable architecture for Kubernetes Scheduler +The scheduling framework is a pluggable architecture for Kubernetes Scheduler that makes scheduler customizations easy. It adds a new set of "plugin" APIs to the existing scheduler. Plugins are compiled into the scheduler. The APIs allow most scheduling features to be implemented as plugins, while keeping the @@ -56,16 +56,16 @@ stateful tasks. 
{{< figure src="/images/docs/scheduling-framework-extensions.png" title="scheduling framework extension points" >}} -### Queue sort +### QueueSort {#queue-sort} These plugins are used to sort Pods in the scheduling queue. A queue sort plugin -essentially will provide a "less(Pod1, Pod2)" function. Only one queue sort +essentially provides a `Less(Pod1, Pod2)` function. Only one queue sort plugin may be enabled at a time. -### Pre-filter +### PreFilter {#pre-filter} These plugins are used to pre-process info about the Pod, or to check certain -conditions that the cluster or the Pod must meet. If a pre-filter plugin returns +conditions that the cluster or the Pod must meet. If a PreFilter plugin returns an error, the scheduling cycle is aborted. ### Filter @@ -75,28 +75,25 @@ node, the scheduler will call filter plugins in their configured order. If any filter plugin marks the node as infeasible, the remaining plugins will not be called for that node. Nodes may be evaluated concurrently. -### Post-filter +### PreScore {#pre-score} -This is an informational extension point. Plugins will be called with a list of -nodes that passed the filtering phase. A plugin may use this data to update -internal state or to generate logs/metrics. +These plugins are used to perform "pre-scoring" work, which generates a sharable +state for Score plugins to use. If a PreScore plugin returns an error, the +scheduling cycle is aborted. -**Note:** Plugins wishing to perform "pre-scoring" work should use the -post-filter extension point. - -### Scoring +### Score {#scoring} These plugins are used to rank nodes that have passed the filtering phase. The scheduler will call each scoring plugin for each node. There will be a well defined range of integers representing the minimum and maximum scores. After the -[normalize scoring](#normalize-scoring) phase, the scheduler will combine node +[NormalizeScore](#normalize-scoring) phase, the scheduler will combine node scores from all plugins according to the configured plugin weights. -### Normalize scoring +### NormalizeScore {#normalize-scoring} These plugins are used to modify scores before the scheduler computes a final ranking of Nodes. A plugin that registers for this extension point will be -called with the [scoring](#scoring) results from the same plugin. This is called +called with the [Score](#scoring) results from the same plugin. This is called once per plugin per scheduling cycle. For example, suppose a plugin `BlinkingLightScorer` ranks Nodes based on how @@ -104,7 +101,7 @@ many blinking lights they have. ```go func ScoreNode(_ *v1.pod, n *v1.Node) (int, error) { - return getBlinkingLightCount(n) + return getBlinkingLightCount(n) } ``` @@ -114,21 +111,23 @@ extension point. ```go func NormalizeScores(scores map[string]int) { - highest := 0 - for _, score := range scores { - highest = max(highest, score) - } - for node, score := range scores { - scores[node] = score*NodeScoreMax/highest - } + highest := 0 + for _, score := range scores { + highest = max(highest, score) + } + for node, score := range scores { + scores[node] = score*NodeScoreMax/highest + } } ``` -If any normalize-scoring plugin returns an error, the scheduling cycle is +If any NormalizeScore plugin returns an error, the scheduling cycle is aborted. -**Note:** Plugins wishing to perform "pre-reserve" work should use the -normalize-scoring extension point. +{{< note >}} +Plugins wishing to perform "pre-reserve" work should use the +NormalizeScore extension point. 
+{{< /note >}} ### Reserve @@ -140,53 +139,53 @@ to prevent race conditions while the scheduler waits for the bind to succeed. This is the last step in a scheduling cycle. Once a Pod is in the reserved state, it will either trigger [Unreserve](#unreserve) plugins (on failure) or -[Post-bind](#post-bind) plugins (on success) at the end of the binding cycle. - -*Note: This concept used to be referred to as "assume".* +[PostBind](#post-bind) plugins (on success) at the end of the binding cycle. ### Permit -These plugins are used to prevent or delay the binding of a Pod. A permit plugin -can do one of three things. +_Permit_ plugins are invoked at the end of the scheduling cycle for each Pod, to +prevent or delay the binding to the candidate node. A permit plugin can do one of +the three things: 1. **approve** \ - Once all permit plugins approve a Pod, it is sent for binding. + Once all Permit plugins approve a Pod, it is sent for binding. 1. **deny** \ - If any permit plugin denies a Pod, it is returned to the scheduling queue. + If any Permit plugin denies a Pod, it is returned to the scheduling queue. This will trigger [Unreserve](#unreserve) plugins. 1. **wait** (with a timeout) \ - If a permit plugin returns "wait", then the Pod is kept in the permit phase - until a [plugin approves it](#frameworkhandle). If a timeout occurs, **wait** - becomes **deny** and the Pod is returned to the scheduling queue, triggering - [Unreserve](#unreserve) plugins. + If a Permit plugin returns "wait", then the Pod is kept in an internal "waiting" + Pods list, and the binding cycle of this Pod starts but directly blocks until it + gets [approved](#frameworkhandle). If a timeout occurs, **wait** becomes **deny** + and the Pod is returned to the scheduling queue, triggering [Unreserve](#unreserve) + plugins. -**Approving a Pod binding** +{{< note >}} +While any plugin can access the list of "waiting" Pods and approve them +(see [`FrameworkHandle`](#frameworkhandle)), we expect only the permit +plugins to approve binding of reserved Pods that are in "waiting" state. Once a Pod +is approved, it is sent to the [PreBind](#pre-bind) phase. +{{< /note >}} -While any plugin can access the list of "waiting" Pods from the cache and -approve them (see [`FrameworkHandle`](#frameworkhandle)) we expect only the permit -plugins to approve binding of reserved Pods that are in "waiting" state. Once a -Pod is approved, it is sent to the pre-bind phase. - -### Pre-bind +### PreBind {#pre-bind} These plugins are used to perform any work required before a Pod is bound. For example, a pre-bind plugin may provision a network volume and mount it on the target node before allowing the Pod to run there. -If any pre-bind plugin returns an error, the Pod is [rejected](#unreserve) and +If any PreBind plugin returns an error, the Pod is [rejected](#unreserve) and returned to the scheduling queue. ### Bind These plugins are used to bind a Pod to a Node. Bind plugins will not be called -until all pre-bind plugins have completed. Each bind plugin is called in the +until all PreBind plugins have completed. Each bind plugin is called in the configured order. A bind plugin may choose whether or not to handle the given Pod. If a bind plugin chooses to handle a Pod, **the remaining bind plugins are skipped**. -### Post-bind +### PostBind {#post-bind} This is an informational extension point. Post-bind plugins are called after a Pod is successfully bound. 
This is the end of a binding cycle, and can be used @@ -209,88 +208,35 @@ interfaces have the following form. ```go type Plugin interface { - Name() string + Name() string } type QueueSortPlugin interface { - Plugin - Less(*v1.pod, *v1.pod) bool + Plugin + Less(*v1.pod, *v1.pod) bool } type PreFilterPlugin interface { - Plugin - PreFilter(PluginContext, *v1.pod) error + Plugin + PreFilter(context.Context, *framework.CycleState, *v1.pod) error } // ... ``` -# Plugin Configuration - -Plugins can be enabled in the scheduler configuration. Also, default plugins can -be disabled in the configuration. In 1.15, there are no default plugins for the -scheduling framework. - -The scheduler configuration can include configuration for plugins as well. Such -configurations are passed to the plugins at the time the scheduler initializes -them. The configuration is an arbitrary value. The receiving plugin should -decode and process the configuration. - -The following example shows a scheduler configuration that enables some -plugins at `reserve` and `preBind` extension points and disables a plugin. It -also provides a configuration to plugin `foo`. - -```yaml -apiVersion: kubescheduler.config.k8s.io/v1alpha1 -kind: KubeSchedulerConfiguration - -... - -plugins: - reserve: - enabled: - - name: foo - - name: bar - disabled: - - name: baz - preBind: - enabled: - - name: foo - disabled: - - name: baz - -pluginConfig: -- name: foo - args: > - Arbitrary set of args to plugin foo -``` +## Plugin configuration -When an extension point is omitted from the configuration default plugins for -that extension points are used. When an extension point exists and `enabled` is -provided, the `enabled` plugins are called in addition to default plugins. -Default plugins are called first and then the additional enabled plugins are -called in the same order specified in the configuration. If a different order of -calling default plugins is desired, default plugins must be `disabled` and -`enabled` in the desired order. - -Assuming there is a default plugin called `foo` at `reserve` and we are adding -plugin `bar` that we want to be invoked before `foo`, we should disable `foo` -and enable `bar` and `foo` in order. The following example shows the -configuration that achieves this: - -```yaml -apiVersion: kubescheduler.config.k8s.io/v1alpha1 -kind: KubeSchedulerConfiguration - -... - -plugins: - reserve: - enabled: - - name: bar - - name: foo - disabled: - - name: foo -``` +You can enable or disable plugins in the scheduler configuration. If you are using +Kubernetes v1.18 or later, most scheduling +[plugins](/docs/reference/scheduling/profiles/#scheduling-plugins) are in use and +enabled by default. + +In addition to default plugins, you can also implement your own scheduling +plugins and get them configured along with default plugins. You can visit +[scheduler-plugins](https://github.com/kubernetes-sigs/scheduler-plugins) for more details. + +If you are using Kubernetes v1.18 or later, you can configure a set of plugins as +a scheduler profile and then define multiple profiles to fit various kinds of workload. +Learn more at [multiple profiles](/docs/reference/scheduling/profiles/#multiple-profiles). 
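As a rough illustration of the profile-based configuration mentioned above, the sketch below enables a second profile that disables scoring. The API group and field names follow the v1alpha2 `KubeSchedulerConfiguration` as of v1.18 and should be checked against the linked reference; the second profile name is made up:

```yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
  # Profile 1: keeps the default plugin set.
  - schedulerName: default-scheduler
  # Profile 2: a hypothetical profile with all scoring plugins disabled.
  - schedulerName: no-scoring-scheduler
    plugins:
      preScore:
        disabled:
          - name: '*'
      score:
        disabled:
          - name: '*'
```

A Pod opts into a particular profile by setting its `spec.schedulerName` to that profile's `schedulerName`.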
{{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/connect-applications-service.md b/content/en/docs/concepts/services-networking/connect-applications-service.md index 6a958ea31e8de..bc17b74d15b89 100644 --- a/content/en/docs/concepts/services-networking/connect-applications-service.md +++ b/content/en/docs/concepts/services-networking/connect-applications-service.md @@ -422,10 +422,8 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el {{% capture whatsnext %}} -Kubernetes also supports Federated Services, which can span multiple -clusters and cloud providers, to provide increased availability, -better fault tolerance and greater scalability for your services. See -the [Federated Services User Guide](/docs/concepts/cluster-administration/federation-service-discovery/) -for further information. +* Learn more about [Using a Service to Access an Application in a Cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) +* Learn more about [Connecting a Front End to a Back End Using a Service](/docs/tasks/access-application-cluster/connecting-frontend-backend/) +* Learn more about [Creating an External Load Balancer](/docs/tasks/access-application-cluster/create-external-load-balancer/) {{% /capture %}} diff --git a/content/en/docs/concepts/services-networking/endpoint-slices.md b/content/en/docs/concepts/services-networking/endpoint-slices.md index 99df54759277a..0525d84f87524 100644 --- a/content/en/docs/concepts/services-networking/endpoint-slices.md +++ b/content/en/docs/concepts/services-networking/endpoint-slices.md @@ -24,6 +24,21 @@ Endpoints. {{% capture body %}} +## Motivation + +The Endpoints API has provided a simple and straightforward way of +tracking network endpoints in Kubernetes. Unfortunately as Kubernetes clusters +and Services have gotten larger, limitations of that API became more visible. +Most notably, those included challenges with scaling to larger numbers of +network endpoints. + +Since all network endpoints for a Service were stored in a single Endpoints +resource, those resources could get quite large. That affected the performance +of Kubernetes components (notably the master control plane) and resulted in +significant amounts of network traffic and processing when Endpoints changed. +EndpointSlices help you mitigate those issues as well as provide an extensible +platform for additional features such as topological routing. + ## EndpointSlice resources {#endpointslice-resource} In Kubernetes, an EndpointSlice contains references to a set of network @@ -32,6 +47,8 @@ for a Kubernetes Service when a {{< glossary_tooltip text="selector" term_id="selector" >}} is specified. These EndpointSlices will include references to any Pods that match the Service selector. EndpointSlices group network endpoints together by unique Service and Port combinations. +The name of a EndpointSlice object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). As an example, here's a sample EndpointSlice resource for the `example` Kubernetes Service. @@ -163,21 +180,6 @@ necessary soon anyway. Rolling updates of Deployments also provide a natural repacking of EndpointSlices with all pods and their corresponding endpoints getting replaced. -## Motivation - -The Endpoints API has provided a simple and straightforward way of -tracking network endpoints in Kubernetes. 
Unfortunately as Kubernetes clusters -and Services have gotten larger, limitations of that API became more visible. -Most notably, those included challenges with scaling to larger numbers of -network endpoints. - -Since all network endpoints for a Service were stored in a single Endpoints -resource, those resources could get quite large. That affected the performance -of Kubernetes components (notably the master control plane) and resulted in -significant amounts of network traffic and processing when Endpoints changed. -EndpointSlices help you mitigate those issues as well as provide an extensible -platform for additional features such as topological routing. - {{% /capture %}} {{% capture whatsnext %}} diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index 6739cf1bd06ac..39e57ffdb0311 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -37,7 +37,7 @@ Traffic routing is controlled by rules defined on the Ingress resource. [ Services ] ``` -An Ingress can be configured to give Services externally-reachable URLs, load balance traffic, terminate SSL / TLS, and offer name based virtual hosting. An [Ingress controller](/docs/concepts/services-networking/ingress-controllers) is responsible for fulfilling the Ingress, usually with a load balancer, though it may also configure your edge router or additional frontends to help handle the traffic. +An Ingress may be configured to give Services externally-reachable URLs, load balance traffic, terminate SSL / TLS, and offer name based virtual hosting. An [Ingress controller](/docs/concepts/services-networking/ingress-controllers) is responsible for fulfilling the Ingress, usually with a load balancer, though it may also configure your edge router or additional frontends to help handle the traffic. An Ingress does not expose arbitrary ports or protocols. Exposing services other than HTTP and HTTPS to the internet typically uses a service of type [Service.Type=NodePort](/docs/concepts/services-networking/service/#nodeport) or @@ -73,16 +73,19 @@ spec: - http: paths: - path: /testpath + pathType: Prefix backend: serviceName: test servicePort: 80 ``` - As with all other Kubernetes resources, an Ingress needs `apiVersion`, `kind`, and `metadata` fields. - For general information about working with config files, see [deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/), [configuring containers](/docs/tasks/configure-pod-container/configure-pod-configmap/), [managing resources](/docs/concepts/cluster-administration/manage-deployment/). +As with all other Kubernetes resources, an Ingress needs `apiVersion`, `kind`, and `metadata` fields. +The name of an Ingress object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). +For general information about working with config files, see [deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/), [configuring containers](/docs/tasks/configure-pod-container/configure-pod-configmap/), [managing resources](/docs/concepts/cluster-administration/manage-deployment/). Ingress frequently uses annotations to configure some options depending on the Ingress controller, an example of which is the [rewrite-target annotation](https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/rewrite/README.md). 
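For example, with the NGINX Ingress controller (an assumption; other controllers use their own annotation keys), a rewrite could be requested with an annotation as in this sketch, which otherwise mirrors the minimal resource shown earlier; the object name is hypothetical:

```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: rewrite-example            # hypothetical name
  annotations:
    # Understood by the NGINX Ingress controller; other controllers differ.
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path: /testpath
        pathType: Prefix
        backend:
          serviceName: test
          servicePort: 80
```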
- Different [Ingress controller](/docs/concepts/services-networking/ingress-controllers) support different annotations. Review the documentation for +Different [Ingress controller](/docs/concepts/services-networking/ingress-controllers) support different annotations. Review the documentation for your choice of Ingress controller to learn which annotations are supported. The Ingress [spec](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) @@ -115,6 +118,84 @@ backend is typically a configuration option of the [Ingress controller](/docs/co If none of the hosts or paths match the HTTP request in the Ingress objects, the traffic is routed to your default backend. +### Path Types + +Each path in an Ingress has a corresponding path type. There are three supported +path types: + +* _`ImplementationSpecific`_ (default): With this path type, matching is up to + the IngressClass. Implementations can treat this as a separate `pathType` or + treat it identically to `Prefix` or `Exact` path types. + +* _`Exact`_: Matches the URL path exactly and with case sensitivity. + +* _`Prefix`_: Matches based on a URL path prefix split by `/`. Matching is case + sensitive and done on a path element by element basis. A path element refers + to the list of labels in the path split by the `/` separator. A request is a + match for path _p_ if every _p_ is an element-wise prefix of _p_ of the + request path. + {{< note >}} + If the last element of the path is a substring of the + last element in request path, it is not a match (for example: + `/foo/bar` matches`/foo/bar/baz`, but does not match `/foo/barbaz`). + {{< /note >}} + +#### Multiple Matches +In some cases, multiple paths within an Ingress will match a request. In those +cases precedence will be given first to the longest matching path. If two paths +are still equally matched, precedence will be given to paths with an exact path +type over prefix path type. + +## Ingress Class + +Ingresses can be implemented by different controllers, often with different +configuration. Each Ingress should specify a class, a reference to an +IngressClass resource that contains additional configuration including the name +of the controller that should implement the class. + +```yaml +apiVersion: networking.k8s.io/v1beta1 +kind: IngressClass +metadata: + name: external-lb +spec: + controller: example.com/ingress-controller + parameters: + apiGroup: k8s.example.com/v1alpha + kind: IngressParameters + name: external-lb +``` + +IngressClass resources contain an optional parameters field. This can be used to +reference additional configuration for this class. + +### Deprecated Annotation + +Before the IngressClass resource and `ingressClassName` field were added in +Kubernetes 1.18, Ingress classes were specified with a +`kubernetes.io/ingress.class` annotation on the Ingress. This annotation was +never formally defined, but was widely supported by Ingress controllers. + +The newer `ingressClassName` field on Ingresses is a replacement for that +annotation, but is not a direct equivalent. While the annotation was generally +used to reference the name of the Ingress controller that should implement the +Ingress, the field is a reference to an IngressClass resource that contains +additional Ingress configuration, including the name of the Ingress controller. + +### Default Ingress Class + +You can mark a particular IngressClass as default for your cluster. 
Setting the +`ingressclass.kubernetes.io/is-default-class` annotation to `true` on an +IngressClass resource will ensure that new Ingresses without an +`ingressClassName` field specified will be assigned this default IngressClass. + +{{< caution >}} +If you have more than one IngressClass marked as the default for your cluster, +the admission controller prevents creating new Ingress objects that don't have +an `ingressClassName` specified. You can resolve this by ensuring that at most 1 +IngressClasess are marked as default in your cluster. +{{< /caution >}} + ## Types of Ingress ### Single Service Ingress @@ -134,10 +215,10 @@ kubectl get ingress test-ingress ``` NAME HOSTS ADDRESS PORTS AGE -test-ingress * 107.178.254.228 80 59s +test-ingress * 203.0.113.123 80 59s ``` -Where `107.178.254.228` is the IP allocated by the Ingress controller to satisfy +Where `203.0.113.123` is the IP allocated by the Ingress controller to satisfy this Ingress. {{< note >}} diff --git a/content/en/docs/concepts/services-networking/network-policies.md b/content/en/docs/concepts/services-networking/network-policies.md index 71f53b554ee5d..3de1c87076629 100644 --- a/content/en/docs/concepts/services-networking/network-policies.md +++ b/content/en/docs/concepts/services-networking/network-policies.md @@ -207,7 +207,7 @@ This ensures that even pods that aren't selected by any other NetworkPolicy will {{< feature-state for_k8s_version="v1.12" state="alpha" >}} -To use this feature, you (or your cluster administrator) will need to enable the `SCTPSupport` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the API server with `--feature-gates=SCTPSupport=true,…`. +To use this feature, you (or your cluster administrator) will need to enable the `SCTPSupport` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the API server with `--feature-gates=SCTPSupport=true,…`. When the feature gate is enabled, you can set the `protocol` field of a NetworkPolicy to `SCTP`. {{< note >}} diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index c568b36231fde..a62faf1e0f32f 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -73,6 +73,8 @@ balancer in between your application and the backend Pods. A Service in Kubernetes is a REST object, similar to a Pod. Like all of the REST objects, you can `POST` a Service definition to the API server to create a new instance. +The name of a Service object must be a valid +[DNS label name](/docs/concepts/overview/working-with-objects/names#dns-label-names). For example, suppose you have a set of Pods that each listen on TCP port 9376 and carry a label `app=MyApp`: @@ -167,6 +169,9 @@ subsets: - port: 9376 ``` +The name of the Endpoints object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + {{< note >}} The endpoint IPs _must not_ be: loopback (127.0.0.0/8 for IPv4, ::1/128 for IPv6), or link-local (169.254.0.0/16 and 224.0.0.0/24 for IPv4, fe80::/64 for IPv6). @@ -197,6 +202,17 @@ endpoints. EndpointSlices provide additional attributes and functionality which is described in detail in [EndpointSlices](/docs/concepts/services-networking/endpoint-slices/). 
+### Application protocol + +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +The AppProtocol field provides a way to specify an application protocol to be +used for each Service port. + +As an alpha feature, this field is not enabled by default. To use this field, +enable the `ServiceAppProtocol` [feature +gate](/docs/reference/command-line-tools-reference/feature-gates/). + ## Virtual IPs and service proxies Every node in a Kubernetes cluster runs a `kube-proxy`. `kube-proxy` is @@ -1173,19 +1189,6 @@ SCTP is not supported on Windows based nodes. The kube-proxy does not support the management of SCTP associations when it is in userspace mode. {{< /warning >}} -## Future work - -In the future, the proxy policy for Services can become more nuanced than -simple round-robin balancing, for example master-elected or sharded. We also -envision that some Services will have "real" load balancers, in which case the -virtual IP address will simply transport the packets there. - -The Kubernetes project intends to improve support for L7 (HTTP) Services. - -The Kubernetes project intends to have more flexible ingress modes for Services -that encompass the current ClusterIP, NodePort, and LoadBalancer modes and more. - - {{% /capture %}} {{% capture whatsnext %}} diff --git a/content/en/docs/concepts/storage/dynamic-provisioning.md b/content/en/docs/concepts/storage/dynamic-provisioning.md index 6bdca6b8af93a..77885981f7cac 100644 --- a/content/en/docs/concepts/storage/dynamic-provisioning.md +++ b/content/en/docs/concepts/storage/dynamic-provisioning.md @@ -46,6 +46,9 @@ To enable dynamic provisioning, a cluster administrator needs to pre-create one or more StorageClass objects for users. StorageClass objects define which provisioner should be used and what parameters should be passed to that provisioner when dynamic provisioning is invoked. +The name of a StorageClass object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + The following manifest creates a storage class "slow" which provisions standard disk-like persistent disks. diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index c59cb2ee3c3e2..99c21105a934d 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -4,6 +4,7 @@ reviewers: - saad-ali - thockin - msau42 +- xing-yang title: Persistent Volumes feature: title: Storage orchestration @@ -16,7 +17,7 @@ weight: 20 {{% capture overview %}} -This document describes the current state of `PersistentVolumes` in Kubernetes. Familiarity with [volumes](/docs/concepts/storage/volumes/) is suggested. +This document describes the current state of _persistent volumes_ in Kubernetes. Familiarity with [volumes](/docs/concepts/storage/volumes/) is suggested. {{% /capture %}} @@ -25,23 +26,16 @@ This document describes the current state of `PersistentVolumes` in Kubernetes. ## Introduction -Managing storage is a distinct problem from managing compute instances. The `PersistentVolume` subsystem provides an API for users and administrators that abstracts details of how storage is provided from how it is consumed. To do this, we introduce two new API resources: `PersistentVolume` and `PersistentVolumeClaim`. +Managing storage is a distinct problem from managing compute instances. 
The PersistentVolume subsystem provides an API for users and administrators that abstracts details of how storage is provided from how it is consumed. To do this, we introduce two new API resources: PersistentVolume and PersistentVolumeClaim. -A `PersistentVolume` (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using [Storage Classes](/docs/concepts/storage/storage-classes/). It is a resource in the cluster just like a node is a cluster resource. PVs are volume plugins like Volumes, but have a lifecycle independent of any individual Pod that uses the PV. This API object captures the details of the implementation of the storage, be that NFS, iSCSI, or a cloud-provider-specific storage system. +A PersistentVolume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using [Storage Classes](/docs/concepts/storage/storage-classes/). It is a resource in the cluster just like a node is a cluster resource. PVs are volume plugins like Volumes, but have a lifecycle independent of any individual Pod that uses the PV. This API object captures the details of the implementation of the storage, be that NFS, iSCSI, or a cloud-provider-specific storage system. -A `PersistentVolumeClaim` (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g., they can be mounted once read/write or many times read-only). +A PersistentVolumeClaim (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g., they can be mounted once read/write or many times read-only). -While `PersistentVolumeClaims` allow a user to consume abstract storage -resources, it is common that users need `PersistentVolumes` with varying -properties, such as performance, for different problems. Cluster administrators -need to be able to offer a variety of `PersistentVolumes` that differ in more -ways than just size and access modes, without exposing users to the details of -how those volumes are implemented. For these needs, there is the `StorageClass` -resource. +While PersistentVolumeClaims allow a user to consume abstract storage resources, it is common that users need PersistentVolumes with varying properties, such as performance, for different problems. Cluster administrators need to be able to offer a variety of PersistentVolumes that differ in more ways than just size and access modes, without exposing users to the details of how those volumes are implemented. For these needs, there is the _StorageClass_ resource. See the [detailed walkthrough with working examples](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/). - ## Lifecycle of a volume and claim PVs are resources in the cluster. PVCs are requests for those resources and also act as claim checks to the resource. The interaction between PVs and PVCs follows this lifecycle: @@ -51,12 +45,14 @@ PVs are resources in the cluster. PVCs are requests for those resources and also There are two ways PVs may be provisioned: statically or dynamically. #### Static + A cluster administrator creates a number of PVs. 
They carry the details of the real storage, which is available for use by cluster users. They exist in the Kubernetes API and are available for consumption. #### Dynamic -When none of the static PVs the administrator created match a user's `PersistentVolumeClaim`, + +When none of the static PVs the administrator created match a user's PersistentVolumeClaim, the cluster may try to dynamically provision a volume specially for the PVC. -This provisioning is based on `StorageClasses`: the PVC must request a +This provisioning is based on StorageClasses: the PVC must request a [storage class](/docs/concepts/storage/storage-classes/) and the administrator must have created and configured that class for dynamic provisioning to occur. Claims that request the class `""` effectively disable @@ -79,10 +75,10 @@ Claims will remain unbound indefinitely if a matching volume does not exist. Cla Pods use claims as volumes. The cluster inspects the claim to find the bound volume and mounts that volume for a Pod. For volumes that support multiple access modes, the user specifies which mode is desired when using their claim as a volume in a Pod. -Once a user has a claim and that claim is bound, the bound PV belongs to the user for as long as they need it. Users schedule Pods and access their claimed PVs by including a `persistentVolumeClaim` in their Pod's volumes block. [See below for syntax details](#claims-as-volumes). +Once a user has a claim and that claim is bound, the bound PV belongs to the user for as long as they need it. Users schedule Pods and access their claimed PVs by including a `persistentVolumeClaim` section in a Pod's `volumes` block. See [Claims As Volumes](#claims-as-volumes) for more details on this. ### Storage Object in Use Protection -The purpose of the Storage Object in Use Protection feature is to ensure that Persistent Volume Claims (PVCs) in active use by a Pod and Persistent Volume (PVs) that are bound to PVCs are not removed from the system, as this may result in data loss. +The purpose of the Storage Object in Use Protection feature is to ensure that PersistentVolumeClaims (PVCs) in active use by a Pod and PersistentVolume (PVs) that are bound to PVCs are not removed from the system, as this may result in data loss. {{< note >}} PVC is in active use by a Pod when a Pod object exists that is using the PVC. @@ -130,19 +126,19 @@ Events: ### Reclaiming -When a user is done with their volume, they can delete the PVC objects from the API that allows reclamation of the resource. The reclaim policy for a `PersistentVolume` tells the cluster what to do with the volume after it has been released of its claim. Currently, volumes can either be Retained, Recycled, or Deleted. +When a user is done with their volume, they can delete the PVC objects from the API that allows reclamation of the resource. The reclaim policy for a PersistentVolume tells the cluster what to do with the volume after it has been released of its claim. Currently, volumes can either be Retained, Recycled, or Deleted. #### Retain -The `Retain` reclaim policy allows for manual reclamation of the resource. When the `PersistentVolumeClaim` is deleted, the `PersistentVolume` still exists and the volume is considered "released". But it is not yet available for another claim because the previous claimant's data remains on the volume. An administrator can manually reclaim the volume with the following steps. +The `Retain` reclaim policy allows for manual reclamation of the resource. 
When the PersistentVolumeClaim is deleted, the PersistentVolume still exists and the volume is considered "released". But it is not yet available for another claim because the previous claimant's data remains on the volume. An administrator can manually reclaim the volume with the following steps. -1. Delete the `PersistentVolume`. The associated storage asset in external infrastructure (such as an AWS EBS, GCE PD, Azure Disk, or Cinder volume) still exists after the PV is deleted. +1. Delete the PersistentVolume. The associated storage asset in external infrastructure (such as an AWS EBS, GCE PD, Azure Disk, or Cinder volume) still exists after the PV is deleted. 1. Manually clean up the data on the associated storage asset accordingly. -1. Manually delete the associated storage asset, or if you want to reuse the same storage asset, create a new `PersistentVolume` with the storage asset definition. +1. Manually delete the associated storage asset, or if you want to reuse the same storage asset, create a new PersistentVolume with the storage asset definition. #### Delete -For volume plugins that support the `Delete` reclaim policy, deletion removes both the `PersistentVolume` object from Kubernetes, as well as the associated storage asset in the external infrastructure, such as an AWS EBS, GCE PD, Azure Disk, or Cinder volume. Volumes that were dynamically provisioned inherit the [reclaim policy of their `StorageClass`](#reclaim-policy), which defaults to `Delete`. The administrator should configure the `StorageClass` according to users' expectations; otherwise, the PV must be edited or patched after it is created. See [Change the Reclaim Policy of a PersistentVolume](/docs/tasks/administer-cluster/change-pv-reclaim-policy/). +For volume plugins that support the `Delete` reclaim policy, deletion removes both the PersistentVolume object from Kubernetes, as well as the associated storage asset in the external infrastructure, such as an AWS EBS, GCE PD, Azure Disk, or Cinder volume. Volumes that were dynamically provisioned inherit the [reclaim policy of their StorageClass](#reclaim-policy), which defaults to `Delete`. The administrator should configure the StorageClass according to users' expectations; otherwise, the PV must be edited or patched after it is created. See [Change the Reclaim Policy of a PersistentVolume](/docs/tasks/administer-cluster/change-pv-reclaim-policy/). #### Recycle @@ -212,8 +208,8 @@ allowVolumeExpansion: true ``` To request a larger volume for a PVC, edit the PVC object and specify a larger -size. This triggers expansion of the volume that backs the underlying `PersistentVolume`. A -new `PersistentVolume` is never created to satisfy the claim. Instead, an existing volume is resized. +size. This triggers expansion of the volume that backs the underlying PersistentVolume. A +new PersistentVolume is never created to satisfy the claim. Instead, an existing volume is resized. #### CSI Volume expansion @@ -227,7 +223,7 @@ Support for expanding CSI volumes is enabled by default but it also requires a s You can only resize volumes containing a file system if the file system is XFS, Ext3, or Ext4. When a volume contains a file system, the file system is only resized when a new Pod is using -the `PersistentVolumeClaim` in ReadWrite mode. File system expansion is either done when a Pod is starting up +the PersistentVolumeClaim in `ReadWrite` mode. 
File system expansion is either done when a Pod is starting up or when a Pod is running and the underlying file system supports online expansion. FlexVolumes allow resize if the driver is set with the `RequiresFSResize` capability to `true`. @@ -260,7 +256,7 @@ Expanding EBS volumes is a time-consuming operation. Also, there is a per-volume ## Types of Persistent Volumes -`PersistentVolume` types are implemented as plugins. Kubernetes currently supports the following plugins: +PersistentVolume types are implemented as plugins. Kubernetes currently supports the following plugins: * GCEPersistentDisk * AWSElasticBlockStore @@ -286,6 +282,8 @@ Expanding EBS volumes is a time-consuming operation. Also, there is a per-volume ## Persistent Volumes Each PV contains a spec and status, which is the specification and status of the volume. +The name of a PersistentVolume object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). ```yaml apiVersion: v1 @@ -308,6 +306,10 @@ spec: server: 172.17.0.2 ``` +{{< note >}} +Helper programs relating to the volume type may be required for consumption of a PersistentVolume within a cluster. In this example, the PersistentVolume is of type NFS and the helper program /sbin/mount.nfs is required to support the mounting of NFS filesystems. +{{< /note >}} + ### Capacity Generally, a PV will have a specific storage capacity. This is set using the PV's `capacity` attribute. See the Kubernetes [Resource Model](https://git.k8s.io/community/contributors/design-proposals/scheduling/resources.md) to understand the units expected by `capacity`. @@ -316,16 +318,28 @@ Currently, storage size is the only resource that can be set or requested. Futu ### Volume Mode -{{< feature-state for_k8s_version="v1.13" state="beta" >}} +{{< feature-state for_k8s_version="v1.18" state="stable" >}} -Prior to Kubernetes 1.9, all volume plugins created a filesystem on the persistent volume. -Now, you can set the value of `volumeMode` to `block` to use a raw block device, or `filesystem` -to use a filesystem. `filesystem` is the default if the value is omitted. This is an optional API -parameter. +Kubernetes supports two `volumeModes` of PersistentVolumes: `Filesystem` and `Block`. + +`volumeMode` is an optional API parameter. +`Filesystem` is the default mode used when `volumeMode` parameter is omitted. + +A volume with `volumeMode: Filesystem` is *mounted* into Pods into a directory. If the volume +is backed by a block device and the device is empty, Kuberneretes creates a filesystem +on the device before mounting it for the first time. + +You can set the value of `volumeMode` to `Block` to use a volume as a raw block device. +Such volume is presented into a Pod as a block device, without any filesystem on it. +This mode is useful to provide a Pod the fastest possible way to access a volume, without +any filesystem layer between the Pod and the volume. On the other hand, the application +running in the Pod must know how to handle a raw block device. +See [Raw Block Volume Support](docs/concepts/storage/persistent-volumes/#raw-block-volume-support) +for an example on how to use a volume with `volumeMode: Block` in a Pod. ### Access Modes -A `PersistentVolume` can be mounted on a host in any way supported by the resource provider. As shown in the table below, providers will have different capabilities and each PV's access modes are set to the specific modes supported by that particular volume. 
For example, NFS can support multiple read/write clients, but a specific NFS PV might be exported on the server as read-only. Each PV gets its own set of access modes describing that specific PV's capabilities. +A PersistentVolume can be mounted on a host in any way supported by the resource provider. As shown in the table below, providers will have different capabilities and each PV's access modes are set to the specific modes supported by that particular volume. For example, NFS can support multiple read/write clients, but a specific NFS PV might be exported on the server as read-only. Each PV gets its own set of access modes describing that specific PV's capabilities. The access modes are: @@ -440,6 +454,8 @@ The CLI will show the name of the PVC bound to the PV. ## PersistentVolumeClaims Each PVC contains a spec and status, which is the specification and status of the claim. +The name of a PersistentVolumeClaim object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). ```yaml apiVersion: v1 @@ -499,22 +515,22 @@ by the cluster, depending on whether the is turned on. * If the admission plugin is turned on, the administrator may specify a - default `StorageClass`. All PVCs that have no `storageClassName` can be bound only to - PVs of that default. Specifying a default `StorageClass` is done by setting the + default StorageClass. All PVCs that have no `storageClassName` can be bound only to + PVs of that default. Specifying a default StorageClass is done by setting the annotation `storageclass.kubernetes.io/is-default-class` equal to `true` in - a `StorageClass` object. If the administrator does not specify a default, the + a StorageClass object. If the administrator does not specify a default, the cluster responds to PVC creation as if the admission plugin were turned off. If more than one default is specified, the admission plugin forbids the creation of all PVCs. * If the admission plugin is turned off, there is no notion of a default - `StorageClass`. All PVCs that have no `storageClassName` can be bound only to PVs that + StorageClass. All PVCs that have no `storageClassName` can be bound only to PVs that have no class. In this case, the PVCs that have no `storageClassName` are treated the same way as PVCs that have their `storageClassName` set to `""`. Depending on installation method, a default StorageClass may be deployed to a Kubernetes cluster by addon manager during installation. -When a PVC specifies a `selector` in addition to requesting a `StorageClass`, +When a PVC specifies a `selector` in addition to requesting a StorageClass, the requirements are ANDed together: only a PV of the requested class and with the requested labels may be bound to the PVC. @@ -528,7 +544,7 @@ it won't be supported in a future Kubernetes release. ## Claims As Volumes -Pods access storage by using the claim as a volume. Claims must exist in the same namespace as the Pod using the claim. The cluster finds the claim in the Pod's namespace and uses it to get the `PersistentVolume` backing the claim. The volume is then mounted to the host and into the Pod. +Pods access storage by using the claim as a volume. Claims must exist in the same namespace as the Pod using the claim. The cluster finds the claim in the Pod's namespace and uses it to get the PersistentVolume backing the claim. The volume is then mounted to the host and into the Pod. 
```yaml apiVersion: v1 @@ -550,30 +566,28 @@ spec: ### A Note on Namespaces -`PersistentVolumes` binds are exclusive, and since `PersistentVolumeClaims` are namespaced objects, mounting claims with "Many" modes (`ROX`, `RWX`) is only possible within one namespace. +PersistentVolumes binds are exclusive, and since PersistentVolumeClaims are namespaced objects, mounting claims with "Many" modes (`ROX`, `RWX`) is only possible within one namespace. ## Raw Block Volume Support -{{< feature-state for_k8s_version="v1.13" state="beta" >}} +{{< feature-state for_k8s_version="v1.18" state="stable" >}} The following volume plugins support raw block volumes, including dynamic provisioning where applicable: * AWSElasticBlockStore * AzureDisk +* CSI * FC (Fibre Channel) * GCEPersistentDisk * iSCSI * Local volume +* OpenStack Cinder * RBD (Ceph Block Device) -* VsphereVolume (alpha) +* VsphereVolume -{{< note >}} -Only FC and iSCSI volumes supported raw block volumes in Kubernetes 1.9. -Support for the additional plugins was added in 1.10. -{{< /note >}} +### PersistentVolume using a Raw Block Volume {#persistent-volume-using-a-raw-block-volume} -### Persistent Volumes using a Raw Block Volume ```yaml apiVersion: v1 kind: PersistentVolume @@ -591,7 +605,8 @@ spec: lun: 0 readOnly: false ``` -### Persistent Volume Claim requesting a Raw Block Volume +### PersistentVolumeClaim requesting a Raw Block Volume {#persistent-volume-claim-requesting-a-raw-block-volume} + ```yaml apiVersion: v1 kind: PersistentVolumeClaim @@ -605,7 +620,9 @@ spec: requests: storage: 10Gi ``` + ### Pod specification adding Raw Block Device path in container + ```yaml apiVersion: v1 kind: Pod @@ -632,7 +649,7 @@ When adding a raw block device for a Pod, you specify the device path in the con ### Binding Block Volumes -If a user requests a raw block volume by indicating this using the `volumeMode` field in the `PersistentVolumeClaim` spec, the binding rules differ slightly from previous releases that didn't consider this mode as part of the spec. +If a user requests a raw block volume by indicating this using the `volumeMode` field in the PersistentVolumeClaim spec, the binding rules differ slightly from previous releases that didn't consider this mode as part of the spec. Listed is a table of possible combinations the user and admin might specify for requesting a raw block device. The table indicates if the volume will be bound or not given the combinations: Volume binding matrix for statically provisioned volumes: @@ -654,14 +671,15 @@ Only statically provisioned volumes are supported for alpha release. Administrat ## Volume Snapshot and Restore Volume from Snapshot Support -{{< feature-state for_k8s_version="v1.12" state="alpha" >}} +{{< feature-state for_k8s_version="v1.17" state="beta" >}} Volume snapshot feature was added to support CSI Volume Plugins only. For details, see [volume snapshots](/docs/concepts/storage/volume-snapshots/). To enable support for restoring a volume from a volume snapshot data source, enable the `VolumeSnapshotDataSource` feature gate on the apiserver and controller-manager. -### Create Persistent Volume Claim from Volume Snapshot +### Create a PersistentVolumeClaim from a Volume Snapshot {#create-persistent-volume-claim-from-volume-snapshot} + ```yaml apiVersion: v1 kind: PersistentVolumeClaim @@ -682,14 +700,10 @@ spec: ## Volume Cloning -{{< feature-state for_k8s_version="v1.16" state="beta" >}} - -Volume clone feature was added to support CSI Volume Plugins only. 
For details, see [volume cloning](/docs/concepts/storage/volume-pvc-datasource/). +[Volume Cloning](/docs/concepts/storage/volume-pvc-datasource/) only available for CSI volume plugins. -To enable support for cloning a volume from a PVC data source, enable the -`VolumePVCDataSource` feature gate on the apiserver and controller-manager. +### Create PersistentVolumeClaim from an existing PVC {#create-persistent-volume-claim-from-an-existing-pvc} -### Create Persistent Volume Claim from an existing pvc ```yaml apiVersion: v1 kind: PersistentVolumeClaim @@ -732,5 +746,17 @@ and need persistent storage, it is recommended that you use the following patter dynamic storage support (in which case the user should create a matching PV) or the cluster has no storage system (in which case the user cannot deploy config requiring PVCs). +{{% /capture %}} + {{% capture whatsnext %}} + +* Learn more about [Creating a PersistentVolume](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolume). +* Learn more about [Creating a PersistentVolumeClaim](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim). +* Read the [Persistent Storage design document](https://git.k8s.io/community/contributors/design-proposals/storage/persistent-storage.md). + +### Reference +* [PersistentVolume](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolume-v1-core) +* [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumespec-v1-core) +* [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) +* [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core) {{% /capture %}} diff --git a/content/en/docs/concepts/storage/storage-classes.md b/content/en/docs/concepts/storage/storage-classes.md index 5a55665db36ff..694ea7742fa75 100644 --- a/content/en/docs/concepts/storage/storage-classes.md +++ b/content/en/docs/concepts/storage/storage-classes.md @@ -21,7 +21,7 @@ with [volumes](/docs/concepts/storage/volumes/) and ## Introduction -A `StorageClass` provides a way for administrators to describe the "classes" of +A StorageClass provides a way for administrators to describe the "classes" of storage they offer. Different classes might map to quality-of-service levels, or to backup policies, or to arbitrary policies determined by the cluster administrators. Kubernetes itself is unopinionated about what classes @@ -30,18 +30,18 @@ systems. ## The StorageClass Resource -Each `StorageClass` contains the fields `provisioner`, `parameters`, and -`reclaimPolicy`, which are used when a `PersistentVolume` belonging to the +Each StorageClass contains the fields `provisioner`, `parameters`, and +`reclaimPolicy`, which are used when a PersistentVolume belonging to the class needs to be dynamically provisioned. -The name of a `StorageClass` object is significant, and is how users can +The name of a StorageClass object is significant, and is how users can request a particular class. Administrators set the name and other parameters -of a class when first creating `StorageClass` objects, and the objects cannot +of a class when first creating StorageClass objects, and the objects cannot be updated once they are created. 
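As a sketch that ties the `provisioner`, `parameters`, `reclaimPolicy`, and `allowVolumeExpansion` fields together, the manifest below also carries the `storageclass.kubernetes.io/is-default-class` annotation mentioned in the PersistentVolumeClaim discussion. The class name `standard-example` and the choice of the AWS EBS provisioner are assumptions for illustration only.

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: standard-example                                  # illustrative name
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"   # marks this class as the cluster default
provisioner: kubernetes.io/aws-ebs                        # in-tree EBS provisioner, as an example
parameters:
  type: gp2
reclaimPolicy: Delete             # also the default when the field is omitted
allowVolumeExpansion: true
volumeBindingMode: Immediate
```

Only one StorageClass should be marked as the default; as noted earlier, the admission plugin refuses to create PVCs when more than one default exists.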
-Administrators can specify a default `StorageClass` just for PVCs that don't +Administrators can specify a default StorageClass just for PVCs that don't request any particular class to bind to: see the -[`PersistentVolumeClaim` section](/docs/concepts/storage/persistent-volumes/#class-1) +[PersistentVolumeClaim section](/docs/concepts/storage/persistent-volumes/#class-1) for details. ```yaml @@ -61,7 +61,7 @@ volumeBindingMode: Immediate ### Provisioner -Storage classes have a provisioner that determines what volume plugin is used +Each StorageClass has a provisioner that determines what volume plugin is used for provisioning PVs. This field must be specified. | Volume Plugin | Internal Provisioner| Config Example | @@ -104,23 +104,23 @@ vendors provide their own external provisioner. ### Reclaim Policy -Persistent Volumes that are dynamically created by a storage class will have the +PersistentVolumes that are dynamically created by a StorageClass will have the reclaim policy specified in the `reclaimPolicy` field of the class, which can be either `Delete` or `Retain`. If no `reclaimPolicy` is specified when a -`StorageClass` object is created, it will default to `Delete`. +StorageClass object is created, it will default to `Delete`. -Persistent Volumes that are created manually and managed via a storage class will have +PersistentVolumes that are created manually and managed via a StorageClass will have whatever reclaim policy they were assigned at creation. ### Allow Volume Expansion {{< feature-state for_k8s_version="v1.11" state="beta" >}} -Persistent Volumes can be configured to be expandable. This feature when set to `true`, +PersistentVolumes can be configured to be expandable. This feature when set to `true`, allows the users to resize the volume by editing the corresponding PVC object. The following types of volumes support volume expansion, when the underlying -Storage Class has the field `allowVolumeExpansion` set to true. +StorageClass has the field `allowVolumeExpansion` set to true. {{< table caption = "Table of Volume types and the version of Kubernetes they require" >}} @@ -146,7 +146,7 @@ You can only use the volume expansion feature to grow a Volume, not to shrink it ### Mount Options -Persistent Volumes that are dynamically created by a storage class will have the +PersistentVolumes that are dynamically created by a StorageClass will have the mount options specified in the `mountOptions` field of the class. If the volume plugin does not support mount options but mount options are @@ -219,7 +219,7 @@ allowedTopologies: ## Parameters -Storage classes have parameters that describe volumes belonging to the storage +Storage Classes have parameters that describe volumes belonging to the storage class. Different parameters may be accepted depending on the `provisioner`. For example, the value `io1`, for the parameter `type`, and the parameter `iopsPerGB` are specific to EBS. When a parameter is omitted, some default is @@ -367,7 +367,7 @@ parameters: `"8452344e2becec931ece4e33c4674e4e,42982310de6c63381718ccfa6d8cf397"`. This is an optional parameter. * `gidMin`, `gidMax` : The minimum and maximum value of GID range for the - storage class. A unique value (GID) in this range ( gidMin-gidMax ) will be + StorageClass. A unique value (GID) in this range ( gidMin-gidMax ) will be used for dynamically provisioned volumes. These are optional values. 
If not specified, the volume will be provisioned with a value between 2000-2147483647 which are defaults for gidMin and gidMax respectively. @@ -441,7 +441,7 @@ This internal provisioner of OpenStack is deprecated. Please use [the external c ``` `datastore`: The user can also specify the datastore in the StorageClass. - The volume will be created on the datastore specified in the storage class, + The volume will be created on the datastore specified in the StorageClass, which in this case is `VSANDatastore`. This field is optional. If the datastore is not specified, then the volume will be created on the datastore specified in the vSphere config file used to initialize the vSphere Cloud @@ -580,7 +580,7 @@ parameters: ### Azure Disk -#### Azure Unmanaged Disk Storage Class +#### Azure Unmanaged Disk storage class {#azure-unmanaged-disk-storage-class} ```yaml apiVersion: storage.k8s.io/v1 @@ -601,7 +601,7 @@ parameters: ignored. If a storage account is not provided, a new storage account will be created in the same resource group as the cluster. -#### New Azure Disk Storage Class (starting from v1.7.2) +#### Azure Disk storage class (starting from v1.7.2) {#azure-disk-storage-class} ```yaml apiVersion: storage.k8s.io/v1 diff --git a/content/en/docs/concepts/storage/volume-pvc-datasource.md b/content/en/docs/concepts/storage/volume-pvc-datasource.md index 85774f4eb6fac..2f29fb9bb907a 100644 --- a/content/en/docs/concepts/storage/volume-pvc-datasource.md +++ b/content/en/docs/concepts/storage/volume-pvc-datasource.md @@ -11,7 +11,6 @@ weight: 30 {{% capture overview %}} -{{< feature-state for_k8s_version="v1.16" state="beta" >}} This document describes the concept of cloning existing CSI Volumes in Kubernetes. Familiarity with [Volumes](/docs/concepts/storage/volumes) is suggested. {{% /capture %}} @@ -36,6 +35,7 @@ Users need to be aware of the following when using this feature: * Cloning is only supported within the same Storage Class. - Destination volume must be the same storage class as the source - Default storage class can be used and storageClassName omitted in the spec +* Cloning can only be performed between two volumes that use the same VolumeMode setting (if you request a block mode volume, the source MUST also be block mode) ## Provisioning @@ -60,6 +60,10 @@ spec: name: pvc-1 ``` +{{< note >}} +You must specify a capacity value for `spec.resources.requests.storage`, and the value you specify must be the same or larger than the capacity of the source volume. +{{< /note >}} + The result is a new PVC with the name `clone-of-pvc-1` that has the exact same content as the specified source `pvc-1`. ## Usage diff --git a/content/en/docs/concepts/storage/volume-snapshots.md b/content/en/docs/concepts/storage/volume-snapshots.md index b68c83d8f95e4..d29f5b52bf9bb 100644 --- a/content/en/docs/concepts/storage/volume-snapshots.md +++ b/content/en/docs/concepts/storage/volume-snapshots.md @@ -29,7 +29,7 @@ A `VolumeSnapshotContent` is a snapshot taken from a volume in the cluster that A `VolumeSnapshot` is a request for snapshot of a volume by a user. It is similar to a PersistentVolumeClaim. -`VolumeSnapshotClass` allows you to specify different attributes belonging to a `VolumeSnapshot`. These attibutes may differ among snapshots taken from the same volume on the storage system and therefore cannot be expressed by using the same `StorageClass` of a `PersistentVolumeClaim`. +`VolumeSnapshotClass` allows you to specify different attributes belonging to a `VolumeSnapshot`. 
These attributes may differ among snapshots taken from the same volume on the storage system and therefore cannot be expressed by using the same `StorageClass` of a `PersistentVolumeClaim`. Users need to be aware of the following when using this feature: diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index cbace1139288b..537a813ded93e 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -605,6 +605,38 @@ spec: type: Directory ``` +{{< caution >}} +It should be noted that the `FileOrCreate` mode does not create the parent directory of the file. If the parent directory of the mounted file does not exist, the pod fails to start. To ensure that this mode works, you can try to mount directories and files separately, as shown below. +{{< /caution >}} + +#### Example Pod FileOrCreate + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: test-webserver +spec: + containers: + - name: test-webserver + image: k8s.gcr.io/test-webserver:latest + volumeMounts: + - mountPath: /var/local/aaa + name: mydir + - mountPath: /var/local/aaa/1.txt + name: myfile + volumes: + - name: mydir + hostPath: + # Ensure the file directory is created. + path: /var/local/aaa + type: DirectoryOrCreate + - name: myfile + hostPath: + path: /var/local/aaa/1.txt + type: FileOrCreate +``` + ### iscsi {#iscsi} An `iscsi` volume allows an existing iSCSI (SCSI over IP) volume to be mounted @@ -1302,19 +1334,13 @@ persistent volume: #### CSI raw block volume support -{{< feature-state for_k8s_version="v1.14" state="beta" >}} - -Starting with version 1.11, CSI introduced support for raw block volumes, which -relies on the raw block volume feature that was introduced in a previous version of -Kubernetes. This feature will make it possible for vendors with external CSI drivers to -implement raw block volumes support in Kubernetes workloads. +{{< feature-state for_k8s_version="v1.18" state="stable" >}} -CSI block volume support is feature-gated, but enabled by default. The two -feature gates which must be enabled for this feature are `BlockVolume` and -`CSIBlockVolume`. +Vendors with external CSI drivers can implement raw block volumes support +in Kubernetes workloads. -Learn how to -[setup your PV/PVC with raw block volume support](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support). +You can [setup your PV/PVC with raw block volume support](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support) +as usual, without any CSI specific changes. #### CSI ephemeral volumes diff --git a/content/en/docs/concepts/workloads/controllers/cron-jobs.md b/content/en/docs/concepts/workloads/controllers/cron-jobs.md index c56467322b183..6464b6ed040aa 100644 --- a/content/en/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/en/docs/concepts/workloads/controllers/cron-jobs.md @@ -17,17 +17,13 @@ A _Cron Job_ creates [Jobs](/docs/concepts/workloads/controllers/jobs-run-to-com One CronJob object is like one line of a _crontab_ (cron table) file. It runs a job periodically on a given schedule, written in [Cron](https://en.wikipedia.org/wiki/Cron) format. -{{< caution >}} -All **CronJob** `schedule:` times are based on the timezone of the -{{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}}. 
- -If your control plane runs the kube-controller-manager in Pods or bare -containers, the timezone set for the kube-controller-manager container determines the timezone -that the cron job controller uses. -{{< /caution >}} +{{< note >}} +All **CronJob** `schedule:` times are denoted in UTC. +{{< /note >}} When creating the manifest for a CronJob resource, make sure the name you provide -is no longer than 52 characters. This is because the CronJob controller will automatically +is a valid [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). +The name must be no longer than 52 characters. This is because the CronJob controller will automatically append 11 characters to the job name provided and there is a constraint that the maximum length of a Job name is no more than 63 characters. diff --git a/content/en/docs/concepts/workloads/controllers/deployment.md b/content/en/docs/concepts/workloads/controllers/deployment.md index 03c58c6525ab8..a66cac7bb333f 100644 --- a/content/en/docs/concepts/workloads/controllers/deployment.md +++ b/content/en/docs/concepts/workloads/controllers/deployment.md @@ -64,7 +64,7 @@ In this example: * The Pods are labeled `app: nginx`using the `labels` field. * The Pod template's specification, or `.template.spec` field, indicates that the Pods run one container, `nginx`, which runs the `nginx` - [Docker Hub](https://hub.docker.com/) image at version 1.7.9. + [Docker Hub](https://hub.docker.com/) image at version 1.14.2. * Create one container and name it `nginx` using the `name` field. Follow the steps given below to create the above Deployment: @@ -153,15 +153,15 @@ is changed, for example if the labels or container images of the template are up Follow the steps given below to update your Deployment: -1. Let's update the nginx Pods to use the `nginx:1.9.1` image instead of the `nginx:1.7.9` image. +1. Let's update the nginx Pods to use the `nginx:1.16.1` image instead of the `nginx:1.14.2` image. ```shell - kubectl --record deployment.apps/nginx-deployment set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 + kubectl --record deployment.apps/nginx-deployment set image deployment.v1.apps/nginx-deployment nginx=nginx:1.16.1 ``` or simply use the following command: ```shell - kubectl set image deployment/nginx-deployment nginx=nginx:1.9.1 --record + kubectl set image deployment/nginx-deployment nginx=nginx:1.16.1 --record ``` The output is similar to this: @@ -169,7 +169,7 @@ Follow the steps given below to update your Deployment: deployment.apps/nginx-deployment image updated ``` - Alternatively, you can `edit` the Deployment and change `.spec.template.spec.containers[0].image` from `nginx:1.7.9` to `nginx:1.9.1`: + Alternatively, you can `edit` the Deployment and change `.spec.template.spec.containers[0].image` from `nginx:1.14.2` to `nginx:1.16.1`: ```shell kubectl edit deployment.v1.apps/nginx-deployment @@ -265,7 +265,7 @@ up to 3 replicas, as well as scaling down the old ReplicaSet to 0 replicas. Labels: app=nginx Containers: nginx: - Image: nginx:1.9.1 + Image: nginx:1.16.1 Port: 80/TCP Environment: Mounts: @@ -306,11 +306,11 @@ If you update a Deployment while an existing rollout is in progress, the Deploym as per the update and start scaling that up, and rolls over the ReplicaSet that it was scaling up previously -- it will add it to its list of old ReplicaSets and start scaling it down. 
-For example, suppose you create a Deployment to create 5 replicas of `nginx:1.7.9`, -but then update the Deployment to create 5 replicas of `nginx:1.9.1`, when only 3 -replicas of `nginx:1.7.9` had been created. In that case, the Deployment immediately starts -killing the 3 `nginx:1.7.9` Pods that it had created, and starts creating -`nginx:1.9.1` Pods. It does not wait for the 5 replicas of `nginx:1.7.9` to be created +For example, suppose you create a Deployment to create 5 replicas of `nginx:1.14.2`, +but then update the Deployment to create 5 replicas of `nginx:1.16.1`, when only 3 +replicas of `nginx:1.14.2` had been created. In that case, the Deployment immediately starts +killing the 3 `nginx:1.14.2` Pods that it had created, and starts creating +`nginx:1.16.1` Pods. It does not wait for the 5 replicas of `nginx:1.14.2` to be created before changing course. ### Label selector updates @@ -347,10 +347,10 @@ This means that when you roll back to an earlier revision, only the Deployment's rolled back. {{< /note >}} -* Suppose that you made a typo while updating the Deployment, by putting the image name as `nginx:1.91` instead of `nginx:1.9.1`: +* Suppose that you made a typo while updating the Deployment, by putting the image name as `nginx:1.161` instead of `nginx:1.16.1`: ```shell - kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.91 --record=true + kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.161 --record=true ``` The output is similar to this: @@ -427,7 +427,7 @@ rolled back. Labels: app=nginx Containers: nginx: - Image: nginx:1.91 + Image: nginx:1.161 Port: 80/TCP Host Port: 0/TCP Environment: @@ -468,13 +468,13 @@ Follow the steps given below to check the rollout history: deployments "nginx-deployment" REVISION CHANGE-CAUSE 1 kubectl apply --filename=https://k8s.io/examples/controllers/nginx-deployment.yaml --record=true - 2 kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 --record=true - 3 kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.91 --record=true + 2 kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.16.1 --record=true + 3 kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.161 --record=true ``` `CHANGE-CAUSE` is copied from the Deployment annotation `kubernetes.io/change-cause` to its revisions upon creation. You can specify the`CHANGE-CAUSE` message by: - * Annotating the Deployment with `kubectl annotate deployment.v1.apps/nginx-deployment kubernetes.io/change-cause="image updated to 1.9.1"` + * Annotating the Deployment with `kubectl annotate deployment.v1.apps/nginx-deployment kubernetes.io/change-cause="image updated to 1.16.1"` * Append the `--record` flag to save the `kubectl` command that is making changes to the resource. * Manually editing the manifest of the resource. 
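For the last option in the list above (manually editing the manifest), a sketch of where the message lives: `kubernetes.io/change-cause` is an ordinary annotation in the Deployment's metadata, so the excerpt below, with an illustrative message, is all that needs to change.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  annotations:
    # Copied into the revision history and shown in the CHANGE-CAUSE column
    kubernetes.io/change-cause: "image updated to 1.16.1"
```

The rest of the Deployment spec is unchanged; only the annotation is added or edited.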
@@ -488,10 +488,10 @@ Follow the steps given below to check the rollout history: deployments "nginx-deployment" revision 2 Labels: app=nginx pod-template-hash=1159050644 - Annotations: kubernetes.io/change-cause=kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 --record=true + Annotations: kubernetes.io/change-cause=kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.16.1 --record=true Containers: nginx: - Image: nginx:1.9.1 + Image: nginx:1.16.1 Port: 80/TCP QoS Tier: cpu: BestEffort @@ -549,7 +549,7 @@ Follow the steps given below to rollback the Deployment from the current version CreationTimestamp: Sun, 02 Sep 2018 18:17:55 -0500 Labels: app=nginx Annotations: deployment.kubernetes.io/revision=4 - kubernetes.io/change-cause=kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 --record=true + kubernetes.io/change-cause=kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.16.1 --record=true Selector: app=nginx Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable StrategyType: RollingUpdate @@ -559,7 +559,7 @@ Follow the steps given below to rollback the Deployment from the current version Labels: app=nginx Containers: nginx: - Image: nginx:1.9.1 + Image: nginx:1.16.1 Port: 80/TCP Host Port: 0/TCP Environment: @@ -722,7 +722,7 @@ apply multiple fixes in between pausing and resuming without triggering unnecess * Then update the image of the Deployment: ```shell - kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.9.1 + kubectl set image deployment.v1.apps/nginx-deployment nginx=nginx:1.16.1 ``` The output is similar to this: @@ -1076,7 +1076,7 @@ All existing Pods are killed before new ones are created when `.spec.strategy.ty #### Rolling Update Deployment -The Deployment updates Pods in a [rolling update](/docs/tasks/run-application/rolling-update-replication-controller/) +The Deployment updates Pods in a rolling update fashion when `.spec.strategy.type==RollingUpdate`. You can specify `maxUnavailable` and `maxSurge` to control the rolling update process. @@ -1143,12 +1143,4 @@ a paused Deployment and one that is not paused, is that any changes into the Pod Deployment will not trigger new rollouts as long as it is paused. A Deployment is not paused by default when it is created. -## Alternative to Deployments - -### kubectl rolling-update - -[`kubectl rolling-update`](/docs/reference/generated/kubectl/kubectl-commands#rolling-update) updates Pods and ReplicationControllers -in a similar fashion. But Deployments are recommended, since they are declarative, server side, and have -additional features, such as rolling back to any previous revision even after the rolling update is done. - {{% /capture %}} diff --git a/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md index 70f5c7e0faa94..8848774103e46 100644 --- a/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -39,7 +39,7 @@ It takes around 10s to complete. You can run the example with this command: ```shell -kubectl apply -f https://k8s.io/examples/controllers/job.yaml +kubectl apply -f https://kubernetes.io/examples/controllers/job.yaml ``` ``` job.batch/pi created @@ -114,6 +114,7 @@ The output is similar to this: ## Writing a Job Spec As with all other Kubernetes config, a Job needs `apiVersion`, `kind`, and `metadata` fields. 
+Its name must be a valid [DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). A Job also needs a [`.spec` section](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). diff --git a/content/en/docs/concepts/workloads/controllers/replicaset.md b/content/en/docs/concepts/workloads/controllers/replicaset.md index 5e8c6d67f399a..fe7a96c1387d5 100644 --- a/content/en/docs/concepts/workloads/controllers/replicaset.md +++ b/content/en/docs/concepts/workloads/controllers/replicaset.md @@ -26,7 +26,7 @@ it should create to meet the number of replicas criteria. A ReplicaSet then fulf and deleting Pods as needed to reach the desired number. When a ReplicaSet needs to create new Pods, it uses its Pod template. -The link a ReplicaSet has to its Pods is via the Pods' [metadata.ownerReferences](/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents) +A ReplicaSet is linked to its Pods via the Pods' [metadata.ownerReferences](/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents) field, which specifies what resource the current object is owned by. All Pods acquired by a ReplicaSet have their owning ReplicaSet's identifying information within their ownerReferences field. It's through this link that the ReplicaSet knows of the state of the Pods it is maintaining and plans accordingly. diff --git a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md index d214fca6120cd..fe20980ce6e6f 100644 --- a/content/en/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/en/docs/concepts/workloads/controllers/replicationcontroller.md @@ -116,6 +116,8 @@ specifies an expression that just gets the name from each pod in the returned li ## Writing a ReplicationController Spec As with all other Kubernetes config, a ReplicationController needs `apiVersion`, `kind`, and `metadata` fields. +The name of a ReplicationController object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). For general information about working with config files, see [object management ](/docs/concepts/overview/working-with-objects/object-management/). A ReplicationController also needs a [`.spec` section](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). @@ -218,9 +220,6 @@ Ideally, the rolling update controller would take application readiness into acc The two ReplicationControllers would need to create pods with at least one differentiating label, such as the image tag of the primary container of the pod, since it is typically image updates that motivate rolling updates. -Rolling update is implemented in the client tool -[`kubectl rolling-update`](/docs/reference/generated/kubectl/kubectl-commands#rolling-update). Visit [`kubectl rolling-update` task](/docs/tasks/run-application/rolling-update-replication-controller/) for more concrete examples. - ### Multiple release tracks In addition to running multiple releases of an application while a rolling update is in progress, it's common to run multiple releases for an extended period of time, or even continuously, using multiple release tracks. The tracks would be differentiated by labels. 
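As a sketch of how release tracks can be modelled with labels, the two hypothetical ReplicationControllers below differ only in their `track` label, replica count, and image tag, while a Service that selects on `app: guestbook` alone sends traffic to both tracks. All names and images here are placeholders, not taken from the surrounding examples.

```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: guestbook-stable            # hypothetical stable track
spec:
  replicas: 3
  selector:
    app: guestbook
    track: stable
  template:
    metadata:
      labels:
        app: guestbook
        track: stable
    spec:
      containers:
      - name: guestbook
        image: nginx:1.14.2         # placeholder image for the stable track
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: guestbook-canary            # hypothetical canary track
spec:
  replicas: 1
  selector:
    app: guestbook
    track: canary
  template:
    metadata:
      labels:
        app: guestbook
        track: canary
    spec:
      containers:
      - name: guestbook
        image: nginx:1.16.1         # placeholder image for the canary track
---
apiVersion: v1
kind: Service
metadata:
  name: guestbook
spec:
  selector:
    app: guestbook                  # no track label, so both tracks receive traffic
  ports:
  - port: 80
    targetPort: 80
```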
@@ -244,7 +243,7 @@ The ReplicationController simply ensures that the desired number of pods matches The ReplicationController is forever constrained to this narrow responsibility. It itself will not perform readiness nor liveness probes. Rather than performing auto-scaling, it is intended to be controlled by an external auto-scaler (as discussed in [#492](http://issue.k8s.io/492)), which would change its `replicas` field. We will not add scheduling policies (for example, [spreading](http://issue.k8s.io/367#issuecomment-48428019)) to the ReplicationController. Nor should it verify that the pods controlled match the currently specified template, as that would obstruct auto-sizing and other automated processes. Similarly, completion deadlines, ordering dependencies, configuration expansion, and other features belong elsewhere. We even plan to factor out the mechanism for bulk pod creation ([#170](http://issue.k8s.io/170)). -The ReplicationController is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, scale, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing ReplicationControllers, auto-scalers, services, scheduling policies, canaries, etc. +The ReplicationController is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, scale) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing ReplicationControllers, auto-scalers, services, scheduling policies, canaries, etc. ## API Object @@ -264,9 +263,7 @@ Note that we recommend using Deployments instead of directly using Replica Sets, ### Deployment (Recommended) -[`Deployment`](/docs/concepts/workloads/controllers/deployment/) is a higher-level API object that updates its underlying Replica Sets and their Pods -in a similar fashion as `kubectl rolling-update`. Deployments are recommended if you want this rolling update functionality, -because unlike `kubectl rolling-update`, they are declarative, server-side, and have additional features. +[`Deployment`](/docs/concepts/workloads/controllers/deployment/) is a higher-level API object that updates its underlying Replica Sets and their Pods. Deployments are recommended if you want this rolling update functionality because, they are declarative, server-side, and have additional features. ### Bare Pods diff --git a/content/en/docs/concepts/workloads/pods/ephemeral-containers.md b/content/en/docs/concepts/workloads/pods/ephemeral-containers.md index 492027bd00d53..c6506df69c2c2 100644 --- a/content/en/docs/concepts/workloads/pods/ephemeral-containers.md +++ b/content/en/docs/concepts/workloads/pods/ephemeral-containers.md @@ -12,16 +12,15 @@ weight: 80 {{< feature-state state="alpha" for_k8s_version="v1.16" >}} This page provides an overview of ephemeral containers: a special type of container -that runs temporarily in an existing {{< glossary_tooltip term_id="pod" >}} to accomplish user-initiated actions such -as troubleshooting. 
You use ephemeral containers to inspect services rather than -to build applications. +that runs temporarily in an existing {{< glossary_tooltip term_id="pod" >}} to +accomplish user-initiated actions such as troubleshooting. You use ephemeral +containers to inspect services rather than to build applications. {{< warning >}} Ephemeral containers are in early alpha state and are not suitable for production -clusters. You should expect the feature not to work in some situations, such as -when targeting the namespaces of a container. In accordance with the [Kubernetes -Deprecation Policy](/docs/reference/using-api/deprecation-policy/), this alpha -feature could change significantly in the future or be removed entirely. +clusters. In accordance with the [Kubernetes Deprecation Policy]( +/docs/reference/using-api/deprecation-policy/), this alpha feature could change +significantly in the future or be removed entirely. {{< /warning >}} {{% /capture %}} @@ -78,7 +77,11 @@ When using ephemeral containers, it's helpful to enable [process namespace sharing](/docs/tasks/configure-pod-container/share-process-namespace/) so you can view processes in other containers. -### Examples +See [Debugging with Ephemeral Debug Container]( +/docs/tasks/debug-application-cluster/debug-running-pod/#debugging-with-ephemeral-debug-container) +for examples of troubleshooting using ephemeral containers. + +## Ephemeral containers API {{< note >}} The examples in this section require the `EphemeralContainers` [feature @@ -87,8 +90,9 @@ enabled, and Kubernetes client and server version v1.16 or later. {{< /note >}} The examples in this section demonstrate how ephemeral containers appear in -the API. You would normally use a `kubectl` plugin for troubleshooting that -automates these steps. +the API. You would normally use `kubectl alpha debug` or another `kubectl` +[plugin](/docs/tasks/extend-kubectl/kubectl-plugins/) to automate these steps +rather than invoking the API directly. Ephemeral containers are created using the `ephemeralcontainers` subresource of Pod, which can be demonstrated using `kubectl --raw`. First describe @@ -180,35 +184,12 @@ Ephemeral Containers: ... ``` -You can attach to the new ephemeral container using `kubectl attach`: +You can interact with the new ephemeral container in the same way as other +containers using `kubectl attach`, `kubectl exec`, and `kubectl logs`, for +example: ```shell kubectl attach -it example-pod -c debugger ``` -If process namespace sharing is enabled, you can see processes from all the containers in that Pod. 
-For example, after attaching, you run `ps` in the debugger container: - -```shell -# Run this in a shell inside the "debugger" ephemeral container -ps auxww -``` -The output is similar to: -``` -PID USER TIME COMMAND - 1 root 0:00 /pause - 6 root 0:00 nginx: master process nginx -g daemon off; - 11 101 0:00 nginx: worker process - 12 101 0:00 nginx: worker process - 13 101 0:00 nginx: worker process - 14 101 0:00 nginx: worker process - 15 101 0:00 nginx: worker process - 16 101 0:00 nginx: worker process - 17 101 0:00 nginx: worker process - 18 101 0:00 nginx: worker process - 19 root 0:00 /pause - 24 root 0:00 sh - 29 root 0:00 ps auxww -``` - {{% /capture %}} diff --git a/content/en/docs/concepts/workloads/pods/init-containers.md b/content/en/docs/concepts/workloads/pods/init-containers.md index ef4c5eeed99aa..14e7054a86908 100644 --- a/content/en/docs/concepts/workloads/pods/init-containers.md +++ b/content/en/docs/concepts/workloads/pods/init-containers.md @@ -122,10 +122,10 @@ spec: initContainers: - name: init-myservice image: busybox:1.28 - command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + command: ['sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"] - name: init-mydb image: busybox:1.28 - command: ['sh', '-c', 'until nslookup mydb; do echo waiting for mydb; sleep 2; done;'] + command: ['sh', '-c', "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done"] ``` You can start this Pod by running: diff --git a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index cca337b32b268..35a373473b514 100644 --- a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -6,7 +6,7 @@ weight: 50 {{% capture overview %}} -{{< feature-state for_k8s_version="v1.16" state="alpha" >}} +{{< feature-state for_k8s_version="v1.18" state="beta" >}} You can use _topology spread constraints_ to control how {{< glossary_tooltip text="Pods" term_id="Pod" >}} are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains. This can help to achieve high availability as well as efficient resource utilization. @@ -18,9 +18,8 @@ You can use _topology spread constraints_ to control how {{< glossary_tooltip te ### Enable Feature Gate -Ensure the `EvenPodsSpread` feature gate is enabled (it is disabled by default -in 1.16). See [Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/) -for an explanation of enabling feature gates. The `EvenPodsSpread` feature gate must be enabled for the +The `EvenPodsSpread` [feature gate] (/docs/reference/command-line-tools-reference/feature-gates/) +must be enabled for the {{< glossary_tooltip text="API Server" term_id="kube-apiserver" >}} **and** {{< glossary_tooltip text="scheduler" term_id="kube-scheduler" >}}. @@ -183,6 +182,46 @@ There are some implicit conventions worth noting here: and you know that "zoneC" must be excluded. In this case, you can compose the yaml as below, so that "mypod" will be placed onto "zoneB" instead of "zoneC". Similarly `spec.nodeSelector` is also respected. 
{{< codenew file="pods/topology-spread-constraints/one-constraint-with-nodeaffinity.yaml" >}} + +### Cluster-level default constraints + +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +It is possible to set default topology spread constraints for a cluster. Default +topology spread constraints are applied to a Pod if, and only if: + +- It doesn't define any constraints in its `.spec.topologySpreadConstraints`. +- It belongs to a service, replication controller, replica set or stateful set. + +Default constraints can be set as part of the `PodTopologySpread` plugin args +in a [scheduling profile](/docs/reference/scheduling/profiles). +The constraints are specified with the same [API above](#api), except that +`labelSelector` must be empty. The selectors are calculated from the services, +replication controllers, replica sets or stateful sets that the Pod belongs to. + +An example configuration might look like follows: + +```yaml +apiVersion: kubescheduler.config.k8s.io/v1alpha2 +kind: KubeSchedulerConfiguration + +profiles: + pluginConfig: + - name: PodTopologySpread + args: + defaultConstraints: + - maxSkew: 1 + topologyKey: failure-domain.beta.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway +``` + +{{< note >}} +The score produced by default scheduling constraints might conflict with the +score produced by the +[`DefaultPodTopologySpread` plugin](/docs/reference/scheduling/profiles/#scheduling-plugins). +It is recommended that you disable this plugin in the scheduling profile when +using default constraints for `PodTopologySpread`. +{{< /note >}} ## Comparison with PodAffinity/PodAntiAffinity @@ -201,9 +240,9 @@ See [Motivation](https://github.com/kubernetes/enhancements/blob/master/keps/sig ## Known Limitations -As of 1.16, at which this feature is Alpha, there are some known limitations: +As of 1.18, at which this feature is Beta, there are some known limitations: -- Scaling down a `Deployment` may result in imbalanced Pods distribution. +- Scaling down a Deployment may result in imbalanced Pods distribution. - Pods matched on tainted nodes are respected. See [Issue 80921](https://github.com/kubernetes/kubernetes/issues/80921) {{% /capture %}} diff --git a/content/en/docs/concepts/workloads/pods/pod.md b/content/en/docs/concepts/workloads/pods/pod.md index 7dff25cbb5d41..d64227be48edc 100644 --- a/content/en/docs/concepts/workloads/pods/pod.md +++ b/content/en/docs/concepts/workloads/pods/pod.md @@ -175,7 +175,7 @@ An example flow: 1. The Pod in the API server is updated with the time beyond which the Pod is considered "dead" along with the grace period. 1. Pod shows up as "Terminating" when listed in client commands 1. (simultaneous with 3) When the Kubelet sees that a Pod has been marked as terminating because the time in 2 has been set, it begins the Pod shutdown process. - 1. If one of the Pod's containers has defined a [preStop hook](/docs/concepts/containers/container-lifecycle-hooks/#hook-details), it is invoked inside of the container. If the `preStop` hook is still running after the grace period expires, step 2 is then invoked with a small (2 second) extended grace period. + 1. If one of the Pod's containers has defined a [preStop hook](/docs/concepts/containers/container-lifecycle-hooks/#hook-details), it is invoked inside of the container. If the `preStop` hook is still running after the grace period expires, step 2 is then invoked with a small (2 second) one-time extended grace period. 
You must modify `terminationGracePeriodSeconds` if the `preStop` hook needs longer to complete. 1. The container is sent the TERM signal. Note that not all containers in the Pod will receive the TERM signal at the same time and may each require a `preStop` hook if the order in which they shut down matters. 1. (simultaneous with 3) Pod is removed from endpoints list for service, and are no longer considered part of the set of running Pods for replication controllers. Pods that shutdown slowly cannot continue to serve traffic as load balancers (like the service proxy) remove them from their rotations. 1. When the grace period expires, any processes still running in the Pod are killed with SIGKILL. @@ -203,5 +203,7 @@ Your container runtime must support the concept of a privileged container for th Pod is a top-level resource in the Kubernetes REST API. The [Pod API object](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) definition describes the object in detail. +When creating the manifest for a Pod object, make sure the name specified is a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). {{% /capture %}} diff --git a/content/en/docs/contribute/generate-ref-docs/kubectl.md b/content/en/docs/contribute/generate-ref-docs/kubectl.md index 797a0f537144d..5930a1f452304 100644 --- a/content/en/docs/contribute/generate-ref-docs/kubectl.md +++ b/content/en/docs/contribute/generate-ref-docs/kubectl.md @@ -47,7 +47,7 @@ Get a local clone of the following repositories: go get -u github.com/spf13/pflag go get -u github.com/spf13/cobra go get -u gopkg.in/yaml.v2 -go get -u kubernetes-sigs/reference-docs +go get -u github.com/kubernetes-sigs/reference-docs ``` If you don't already have the kubernetes/website repository, get it now: @@ -140,15 +140,15 @@ need to work with someone who can set the label and milestone for you. Go to ``. On you command line, set the following environment variables. * Set `K8S_ROOT` to ``. -* Set `WEB_ROOT` to ``. +* Set `K8S_WEBROOT` to ``. * Set `K8S_RELEASE` to the version of the docs you want to build. For example, if you want to build docs for Kubernetes 1.17, set `K8S_RELEASE` to 1.17. For example: ```shell -export WEB_ROOT=$(GOPATH)/src/github.com//website -export K8S_ROOT=$(GOPATH)/src/k8s.io/kubernetes +export K8S_WEBROOT=$GOPATH/src/github.com//website +export K8S_ROOT=$GOPATH/src/k8s.io/kubernetes export K8S_RELEASE=1.17 ``` diff --git a/content/en/docs/contribute/intermediate.md b/content/en/docs/contribute/intermediate.md index 2da6ccf5232d5..9e477a90a4df1 100644 --- a/content/en/docs/contribute/intermediate.md +++ b/content/en/docs/contribute/intermediate.md @@ -911,8 +911,8 @@ deadlines. Some deadlines related to documentation are: If your feature is an Alpha feature and is behind a feature gate, make sure you add it to [Feature gates](/docs/reference/command-line-tools-reference/feature-gates/) -as part of your pull request. If your feature is moving out of Alpha, make sure to -remove it from that file. +as part of your pull request. If your feature is moving to Beta +or to General Availability, update the feature gates file. 
## Contribute to other repos diff --git a/content/en/docs/contribute/participating.md b/content/en/docs/contribute/participating.md index dbc44b586777f..ac384c8eed046 100644 --- a/content/en/docs/contribute/participating.md +++ b/content/en/docs/contribute/participating.md @@ -222,7 +222,7 @@ Approvers improve the documentation by reviewing and merging pull requests into - Visit the Netlify page preview for a PR to make sure things look good before approving. -- Participate in the [PR Wrangler rotation scheduler](https://github.com/kubernetes/website/wiki/PR-Wranglers) for weekly rotations. SIG Docs expects all approvers to participate in this +- Participate in the [PR Wrangler rotation schedule](https://github.com/kubernetes/website/wiki/PR-Wranglers) for weekly rotations. SIG Docs expects all approvers to participate in this rotation. See [Be the PR Wrangler for a week](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) for more details. @@ -298,7 +298,7 @@ SIG Docs approvers. Here's how it works. - Any Kubernetes member can add the `lgtm` label by adding a `/lgtm` comment. - Only SIG Docs approvers can merge a pull request by adding an `/approve` comment. Some approvers also perform additional - specific roles, such as [PR Wrangler](#pr-wrangler) or + specific roles, such as [PR Wrangler](/docs/contribute/advanced#be-the-pr-wrangler-for-a-week) or [SIG Docs chairperson](#sig-docs-chairperson). {{% /capture %}} diff --git a/content/en/docs/contribute/start.md b/content/en/docs/contribute/start.md index acd5a5bfdf2e9..181e35968229e 100644 --- a/content/en/docs/contribute/start.md +++ b/content/en/docs/contribute/start.md @@ -209,7 +209,7 @@ to base your work on. Use these guidelines to make the decision: - Some localization teams work with a series of long-lived branches, and periodically merge these to `master`. This kind of branch has a name like dev-\-\.\; for example: - `dev-{{< release-branch >}}-ja.1`. + `dev-{{< latest-semver >}}-ja.1` - If you're writing or updating documentation for a feature change release, then you need to know the major and minor version of Kubernetes that the change will first appear in. @@ -217,8 +217,8 @@ to base your work on. Use these guidelines to make the decision: to beta in the next minor version, you need to know what the next minor version number is. - Find the release branch named for that version. For example, features that - changed in the v{{< release-branch >}} release got documented in the branch - named `dev-{{< release-branch >}}`. + changed in the {{< latest-version >}} release got documented in the branch + named `dev-{{< latest-semver >}}`. If you're still not sure which branch to choose, ask in `#sig-docs` on Slack or attend a weekly SIG Docs meeting to get clarity. diff --git a/content/en/docs/contribute/style/content-guide.md b/content/en/docs/contribute/style/content-guide.md index 5d3f5790c7988..18d469510c48f 100644 --- a/content/en/docs/contribute/style/content-guide.md +++ b/content/en/docs/contribute/style/content-guide.md @@ -10,107 +10,68 @@ card: --- {{% capture overview %}} -This page contains guidelines for adding content to the Kubernetes documentation. -If you have questions about allowed content, join the [Kubernetes Slack](http://slack.k8s.io/) #sig-docs channel and ask! Use your best judgment, and feel free to -propose changes to this document in a pull request. 
-For additional information on creating new content for the Kubernetes -docs, follow the instructions in the [Style guide](/docs/contribute/style/style-guide). +This page contains guidelines for Kubernetes documentation. + +If you have questions about what's allowed, join the #sig-docs channel in +[Kubernetes Slack](http://slack.k8s.io/) and ask! + +You can register for Kubernetes Slack at http://slack.k8s.io/. + +For information on creating new content for the Kubernetes +docs, follow the [style guide](/docs/contribute/style/style-guide). + {{% /capture %}} {{% capture body %}} -## Contributing content -The Kubernetes documentation comprises the content of the -[kubernetes/website](https://github.com/kubernetes/website) source repository. +## Overview + +Source for the Kubernetes website, including the docs, resides in the +[kubernetes/website](https://github.com/kubernetes/website) repository. + Located in the `kubernetes/website/content//docs` folder, the -majority of the Kubernetes documentation is specific to the [Kubernetes -project](https://github.com/kubernetes/kubernetes). The Kubernetes -documentation may also include content from projects in the -[kubernetes](https://github.com/kubernetes) and -[kubernetes-sigs](https://github.com/kubernetes-sigs) GitHub organizations if -those projects do not have their own documentation. Linking to active kubernetes, -kubernetes-sigs, and ({{< glossary_tooltip text="CNCF" term_id="cncf" >}}) projects from the Kubernetes documentation is always -allowed, but linking to vendor-specific products is not. Check the CNCF project lists -([Graduated/Incubating](https://www.cncf.io/projects/), -[Sandbox](https://www.cncf.io/sandbox-projects/), -[Archived](https://www.cncf.io/archived-projects/)) if you are unsure of a -project's CNCF status. - -### Dual-sourced content - -Kubernetes documentation does not include duplicate content sourced from multiple -locations (*dual-sourced* content). Dual-sourced content requires duplicated -effort from project maintainers and tends to become outdated more quickly. -Before adding content, ask yourself this: - -- Is the content about an active CNCF project OR a project in the kubernetes or kubernetes-sigs GitHub organizations? - - If yes, then: - - Does the project have its own documentation? - - if yes, link to the project's documentation from the Kubernetes documentation - - if no, add the content to the project's repository if possible and then link to it from the Kubernetes documentation - - If no, then: - - Stop! - - Adding content about vendor-specific products is not allowed - - Linking to vendor-specific documentation and websites is not allowed - -### What is and isn't allowed - -There are some scenarios in which the Kubernetes documentation includes content from non-Kubernetes projects. -Below are general categories of non-Kubernetes project content along with guidelines of what is and is not allowed: - -1. 
Instructional content involving non-Kubernetes projects during setup or operation of Kubernetes - - Allowed: - - Referring to or linking to existing documentation about a CNCF project or a project in the kubernetes or kubernetes-sigs GitHub organizations - - Example: for installating Kubernetes in a learning environment, including a prerequisite stating that successful installation and configuration of minikube is required and linking to the relevant minikube documentation - - Adding content for kubernetes or kubernetes-sigs projects that don't have their own instructional content - - Example: including [kubeadm](https://github.com/kubernetes/kubeadm) installation and troubleshooting instructions - - Not Allowed: - - Adding content that duplicates documentation in another repository - - Examples: - - Including minikube installation and configuration instructions; minikube has its own [documentation](https://minikube.sigs.k8s.io/docs/) that provides those instructions - - Including instructions for installing Docker, CRI-O, containerd, and other container runtimes on various operating systems - - Including instructions for installing Kubernetes on production environments using various projects: - - Kubernetes Rebar Integrated Bootstrap (KRIB) is a vendor-specific project and content belongs in the vendor's documentation - - [Kubernetes Operations (kops)](https://github.com/kubernetes/kops) has installation instructions and tutorials in its GitHub repository - - [Kubespray](https://kubespray.io) has its own documentation - - Adding a tutorial that explains how to perform a task using a vendor-specific product or an open source project that is not a CNCF project or a project in the kubernetes or kubnetes-sigs GitHub organizations - - Adding a tutorial on how to use a CNCF project or a project in the kubernetes or kubnetes-sigs GitHub organizations if the project has its own documentation -1. Detailed technical content about how to use a non-Kubernetes project or how that project is designed - - Adding this type of content to the Kubernetes documentation is not allowed. -1. Content that describes a non-Kubernetes project - - Allowed: - - Adding a brief introductory paragraph about a CNCF project or a project in the kubernetes or kubernetes-sigs GitHub organizations; the paragraph may contain links to the project - - Not Allowed: - - Adding content describing a vendor-specific product - - Adding content describing an open source project that is not a CNCF project or a project in the kubernetes or kubnetes-sigs GitHub organizations - - Adding content that duplicates documentation from another project, regardless of source repository - - Example: adding [Kubernetes in Docker (KinD)](https://kind.sigs.k8s.io) documentation to the Kubernetes documentation -1. 
Content that simply links to information about a non-Kubernetes project - - Allowed: - - Linking to projects in the kubernetes and kubernetes-sigs GitHub organizations - - Example: linking to Kubernetes in Docker (KinD) [documentation](https://kind.sigs.k8s.io/docs/user/quick-start), which resides in the kubernetes-sigs GitHub organization - - Linking to active CNCF projects - - Example: linking to the Prometheus [documentation](https://prometheus.io/docs/introduction/overview/); Prometheus is an active CNCF project - - Not Allowed: - - Linking to vendor-specific products - - Linking to archived CNCF projects - - Linking to inactive projects in the kubernetes and kubernetes-sigs GitHub organizations - - Linking to open source projects that are not CNCF projects or do not reside in the kubernetes or kubernetes-sigs GitHub organizations -1. Content about training courses - - Allowed: - - Linking to vendor-neutral Kubernetes training courses offered by the [CNCF](https://www.cncf.io/), the [Linux Foundation](https://www.linuxfoundation.org/), and the [Linux Academy](https://linuxacademy.com/), which is a partner of the Linux Foundation - - Example: linking to Linux Academy courses such as [Kubernetes Quick Start](https://linuxacademy.com/course/kubernetes-quick-start/) and [Kubernetes Security](https://linuxacademy.com/course/kubernetes-security/) - - Not Allowed: - - Linking to online training outside of the CNCF, the Linux Foundation, or the Linux Academy; the Kubernetes documentation does not link to third-party content - - Example: linking to Kubernetes tutorials or courses on Medium, KodeKloud, Udacity, Coursera, learnk8s, and similar websites - - Linking to vendor-specific tutorials regardless of the training provider - - Example: linking to Linux Academy courses such as [Google Kubernetes Engine Deep Dive](https://linuxacademy.com/google-cloud-platform/training/course/name/google-kubernetes-engine-deep-dive) and [Amazon EKS Deep Dive](https://linuxacademy.com/course/amazon-eks-deep-dive/) +majority of Kubernetes documentation is specific to the [Kubernetes +project](https://github.com/kubernetes/kubernetes). + +## What's allowed + +Kubernetes docs permit only some kinds of content. + +### Third party content +Kubernetes documentation includes applied examples of projects in the Kubernetes project—projects that live in the [kubernetes](https://github.com/kubernetes) and +[kubernetes-sigs](https://github.com/kubernetes-sigs) GitHub organizations. + +Links to active content in the Kubernetes project are always allowed. + +Kubernetes requires some third party content to function. Examples include container runtimes (containerd, CRI-O, Docker), +[networking policy](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) (CNI plugins), [Ingress controllers](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/), and [logging](https://kubernetes.io/docs/concepts/cluster-administration/logging/). + +Docs can link to third-party open source software (OSS) outside the Kubernetes project if it's necessary for Kubernetes to function. + +### Dual sourced content + +Wherever possible, Kubernetes docs link to canonical sources instead of hosting +dual-sourced content. + +Dual-sourced content requires double the effort (or more!) to maintain +and grows stale more quickly. 
+ +{{< note >}} + +If you're a maintainer for a Kubernetes project and need help hosting your own docs, +ask for help in [#sig-docs on Kubernetes Slack](https://kubernetes.slack.com/messages/C1J0BPD2M/). + +{{< /note >}} + +### More information If you have questions about allowed content, join the [Kubernetes Slack](http://slack.k8s.io/) #sig-docs channel and ask! + {{% /capture %}} {{% capture whatsnext %}} + * Read the [Style guide](/docs/contribute/style/style-guide). + {{% /capture %}} diff --git a/content/en/docs/contribute/style/content-organization.md b/content/en/docs/contribute/style/content-organization.md index 55997dcaf5ab7..e93cf8126edc4 100644 --- a/content/en/docs/contribute/style/content-organization.md +++ b/content/en/docs/contribute/style/content-organization.md @@ -107,7 +107,6 @@ Another widely used example is the `includes` bundle. It sets `headless: true` i ```bash en/includes ├── default-storage-class-prereqs.md -├── federated-task-tutorial-prereqs.md ├── index.md ├── partner-script.js ├── partner-style.css diff --git a/content/en/docs/contribute/style/style-guide.md b/content/en/docs/contribute/style/style-guide.md index 30865c49e8ef3..26722e607f18e 100644 --- a/content/en/docs/contribute/style/style-guide.md +++ b/content/en/docs/contribute/style/style-guide.md @@ -14,10 +14,10 @@ This page gives writing style guidelines for the Kubernetes documentation. These are guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request. -For additional information on creating new content for the Kubernetes -documentation, read the [Documentation Content -Guide](/docs/contribute/style/content-guide/) and follow the instructions on -[using page templates](/docs/contribute/style/page-templates/) and [creating a +For additional information on creating new content for the Kubernetes +documentation, read the [Documentation Content +Guide](/docs/contribute/style/content-guide/) and follow the instructions on +[using page templates](/docs/contribute/style/page-templates/) and [creating a documentation pull request](/docs/contribute/start/#improve-existing-content). {{% /capture %}} @@ -58,11 +58,11 @@ leads to an awkward construction. {{< table caption = "Do and Don't - API objects" >}} Do | Don't :--| :----- -The Pod has two containers. | The pod has two containers. +The Pod has two containers. | The pod has two containers. The Deployment is responsible for ... | The Deployment object is responsible for ... A PodList is a list of Pods. | A Pod List is a list of pods. -The two ContainerPorts ... | The two ContainerPort objects ... -The two ContainerStateTerminated objects ... | The two ContainerStateTerminateds ... +The two ContainerPorts ... | The two ContainerPort objects ... +The two ContainerStateTerminated objects ... | The two ContainerStateTerminateds ... {{< /table >}} @@ -83,11 +83,11 @@ represents. Do | Don't :--| :----- Click **Fork**. | Click "Fork". -Select **Other**. | Select "Other". +Select **Other**. | Select "Other". {{< /table >}} ### Use italics to define or introduce new terms - + {{< table caption = "Do and Don't - Use italics for new terms" >}} Do | Don't :--| :----- @@ -102,7 +102,7 @@ Do | Don't :--| :----- Open the `envars.yaml` file. | Open the envars.yaml file. Go to the `/docs/tutorials` directory. | Go to the /docs/tutorials directory. -Open the `/_data/concepts.yaml` file. | Open the /_data/concepts.yaml file. +Open the `/_data/concepts.yaml` file. | Open the /\_data/concepts.yaml file. 
{{< /table >}} ### Use the international standard for punctuation inside quotes @@ -119,18 +119,18 @@ The copy is called a "fork". | The copy is called a "fork." ### Use code style for inline code and commands For inline code in an HTML document, use the `` tag. In a Markdown -document, use the backtick (`). +document, use the backtick (`` ` ``). {{< table caption = "Do and Don't - Use code style for inline code and commands" >}} Do | Don't :--| :----- The `kubectl run`command creates a Deployment. | The "kubectl run" command creates a Deployment. For declarative management, use `kubectl apply`. | For declarative management, use "kubectl apply". -Enclose code samples with triple backticks. `(```)`| Enclose code samples with any other syntax. -Use single backticks to enclose inline code. For example, `var example = true`. | Use two asterisks (**) or an underscore (_) to enclose inline code. For example, **var example = true**. +Enclose code samples with triple backticks. (\`\`\`)| Enclose code samples with any other syntax. +Use single backticks to enclose inline code. For example, `var example = true`. | Use two asterisks (`**`) or an underscore (`_`) to enclose inline code. For example, **var example = true**. Use triple backticks before and after a multi-line block of code for fenced code blocks. | Use multi-line blocks of code to create diagrams, flowcharts, or other illustrations. Use meaningful variable names that have a context. | Use variable names such as 'foo','bar', and 'baz' that are not meaningful and lack context. -Remove trailing spaces in the code. | Add trailing spaces in the code, where these are important, because the screen reader will read out the spaces as well. +Remove trailing spaces in the code. | Add trailing spaces in the code, where these are important, because the screen reader will read out the spaces as well. {{< /table >}} {{< note >}} @@ -183,9 +183,9 @@ For field values of type string or integer, use normal style without quotation m Do | Don't :--| :----- Set the value of `imagePullPolicy` to Always. | Set the value of `imagePullPolicy` to "Always". -Set the value of `image` to nginx:1.8. | Set the value of `image` to `nginx:1.8`. +Set the value of `image` to nginx:1.16. | Set the value of `image` to `nginx:1.16`. Set the value of the `replicas` field to 2. | Set the value of the `replicas` field to `2`. -{{< /table >}} +{{< /table >}} ## Code snippet formatting @@ -196,7 +196,7 @@ Set the value of the `replicas` field to 2. | Set the value of the `replicas` fi Do | Don't :--| :----- kubectl get pods | $ kubectl get pods -{{< /table >}} +{{< /table >}} ### Separate commands from output @@ -214,7 +214,7 @@ The output is similar to this: Code examples and configuration examples that include version information should be consistent with the accompanying text. -If the information is version specific, the Kubernetes version needs to be defined in the `prerequisites` section of the [Task template](/docs/contribute/style/page-templates/#task-template) or the [Tutorial template] (/docs/contribute/style/page-templates/#tutorial-template). Once the page is saved, the `prerequisites` section is shown as **Before you begin**. +If the information is version specific, the Kubernetes version needs to be defined in the `prerequisites` section of the [Task template](/docs/contribute/style/page-templates/#task-template) or the [Tutorial template](/docs/contribute/style/page-templates/#tutorial-template). 
Once the page is saved, the `prerequisites` section is shown as **Before you begin**. To specify the Kubernetes version for a task or tutorial page, include `min-kubernetes-server-version` in the front matter of the page. @@ -251,11 +251,11 @@ Kubernetes | Kubernetes should always be capitalized. Docker | Docker should always be capitalized. SIG Docs | SIG Docs rather than SIG-DOCS or other variations. On-premises | On-premises or On-prem rather than On-premise or other variations. -{{< /table >}} +{{< /table >}} ## Shortcodes -Hugo [Shortcodes](https://gohugo.io/content-management/shortcodes) help create different rhetorical appeal levels. Our documentation supports three different shortcodes in this category: **Note** {{}}, **Caution** {{}}, and **Warning** {{}}. +Hugo [Shortcodes](https://gohugo.io/content-management/shortcodes) help create different rhetorical appeal levels. Our documentation supports three different shortcodes in this category: **Note** `{{}}`, **Caution** `{{}}`, and **Warning** `{{}}`. 1. Surround the text with an opening and closing shortcode. @@ -275,7 +275,7 @@ The prefix you choose is the same text for the tag. ### Note -Use {{}} to highlight a tip or a piece of information that may be helpful to know. +Use `{{}}` to highlight a tip or a piece of information that may be helpful to know. For example: @@ -291,7 +291,7 @@ The output is: You can _still_ use Markdown inside these callouts. {{< /note >}} -You can use a {{}} in a list: +You can use a `{{}}` in a list: ``` 1. Use the note shortcode in a list @@ -323,7 +323,7 @@ The output is: ### Caution -Use {{}} to call attention to an important piece of information to avoid pitfalls. +Use `{{}}` to call attention to an important piece of information to avoid pitfalls. For example: @@ -341,7 +341,7 @@ The callout style only applies to the line directly above the tag. ### Warning -Use {{}} to indicate danger or a piece of information that is crucial to follow. +Use `{{}}` to indicate danger or a piece of information that is crucial to follow. For example: @@ -359,11 +359,11 @@ Beware. ### Katacoda Embedded Live Environment -This button lets users run Minikube in their browser using the [Katacoda Terminal](https://www.katacoda.com/embed/panel). -It lowers the barrier of entry by allowing users to use Minikube with one click instead of going through the complete +This button lets users run Minikube in their browser using the [Katacoda Terminal](https://www.katacoda.com/embed/panel). +It lowers the barrier of entry by allowing users to use Minikube with one click instead of going through the complete Minikube and Kubectl installation process locally. -The Embedded Live Environment is configured to run `minikube start` and lets users complete tutorials in the same window +The Embedded Live Environment is configured to run `minikube start` and lets users complete tutorials in the same window as the documentation. {{< caution >}} @@ -376,7 +376,7 @@ For example: {{}} ``` -The output is: +The output is: {{< kat-button >}} @@ -391,7 +391,7 @@ For example: 1. Preheat oven to 350˚F 1. Prepare the batter, and pour into springform pan. - {{}}Grease the pan for best results.{{}} + `{{}}Grease the pan for best results.{{}}` 1. Bake for 20-25 minutes or until set. @@ -429,9 +429,9 @@ Do | Don't :--| :----- Update the title in the front matter of the page or blog post. | Use first level heading, as Hugo automatically converts the title in the front matter of the page into a first-level heading. 
Use ordered headings to provide a meaningful high-level outline of your content. | Use headings level 4 through 6, unless it is absolutely necessary. If your content is that detailed, it may need to be broken into separate articles. -Use pound or hash signs (#) for non-blog post content. | Use underlines (--- or ===) to designate first-level headings. +Use pound or hash signs (`#`) for non-blog post content. | Use underlines (`---` or `===`) to designate first-level headings. Use sentence case for headings. For example, **Extend kubectl with plugins** | Use title case for headings. For example, **Extend Kubectl With Plugins** -{{< /table >}} +{{< /table >}} ### Paragraphs @@ -439,8 +439,8 @@ Use sentence case for headings. For example, **Extend kubectl with plugins** | U Do | Don't :--| :----- Try to keep paragraphs under 6 sentences. | Indent the first paragraph with space characters. For example, ⋅⋅⋅Three spaces before a paragraph will indent it. -Use three hyphens (---) to create a horizontal rule. Use horizontal rules for breaks in paragraph content. For example, a change of scene in a story, or a shift of topic within a section. | Use horizontal rules for decoration. -{{< /table >}} +Use three hyphens (`---`) to create a horizontal rule. Use horizontal rules for breaks in paragraph content. For example, a change of scene in a story, or a shift of topic within a section. | Use horizontal rules for decoration. +{{< /table >}} ### Links @@ -449,7 +449,7 @@ Do | Don't :--| :----- Write hyperlinks that give you context for the content they link to. For example: Certain ports are open on your machines. See Check required ports for more details. | Use ambiguous terms such as “click here”. For example: Certain ports are open on your machines. See here for more details. Write Markdown-style links: `[link text](URL)`. For example: `[Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/#table-captions)` and the output is [Hugo shortcodes](/docs/contribute/style/hugo-shortcodes/#table-captions). | Write HTML-style links: `Visit our tutorial!`, or create links that open in new tabs or windows. For example: `[example website](https://example.com){target="_blank"}` -{{< /table >}} +{{< /table >}} ### Lists @@ -457,17 +457,17 @@ Group items in a list that are related to each other and need to appear in a spe Website navigation links can also be marked up as list items; after all they are nothing but a group of related links. - End each item in a list with a period if one or more items in the list are complete sentences. For the sake of consistency, normally either all items or none should be complete sentences. - + {{< note >}} Ordered lists that are part of an incomplete introductory sentence can be in lowercase and punctuated as if each item was a part of the introductory sentence.{{< /note >}} - - - Use the number one (1.) for ordered lists. - - - Use (+), (* ), or (-) for unordered lists. - - - Leave a blank line after each list. - - - Indent nested lists with four spaces (for example, ⋅⋅⋅⋅). - + + - Use the number one (`1.`) for ordered lists. + + - Use (`+`), (`*`), or (`-`) for unordered lists. + + - Leave a blank line after each list. + + - Indent nested lists with four spaces (for example, ⋅⋅⋅⋅). + - List items may consist of multiple paragraphs. Each subsequent paragraph in a list item must be indented by either four spaces or one tab. 
### Tables @@ -486,7 +486,7 @@ This section contains suggested best practices for clear, concise, and consisten Do | Don't :--| :----- This command starts a proxy. | This command will start a proxy. - {{< /table >}} + {{< /table >}} Exception: Use future or past tense if it is required to convey the correct @@ -512,7 +512,7 @@ Use simple and direct language. Avoid using unnecessary phrases, such as saying Do | Don't :--| :----- To create a ReplicaSet, ... | In order to create a ReplicaSet, ... -See the configuration file. | Please see the configuration file. +See the configuration file. | Please see the configuration file. View the Pods. | With this next command, we'll view the Pods. {{< /table >}} @@ -522,7 +522,7 @@ View the Pods. | With this next command, we'll view the Pods. Do | Don't :--| :----- You can create a Deployment by ... | We'll create a Deployment by ... -In the preceding output, you can see... | In the preceding output, we can see ... +In the preceding output, you can see... | In the preceding output, we can see ... {{< /table >}} @@ -583,7 +583,7 @@ considered new in a few months. Do | Don't :--| :----- In version 1.4, ... | In the current version, ... -The Federation feature provides ... | The new Federation feature provides ... +The Federation feature provides ... | The new Federation feature provides ... {{< /table >}} diff --git a/content/en/docs/home/_index.md b/content/en/docs/home/_index.md index 31f37880ff631..dcaf6930395d0 100644 --- a/content/en/docs/home/_index.md +++ b/content/en/docs/home/_index.md @@ -5,7 +5,7 @@ title: Kubernetes Documentation noedit: true cid: docsHome layout: docsportal_home -class: gridPage +class: gridPage gridPageHome linkTitle: "Home" main_menu: true weight: 10 @@ -16,6 +16,8 @@ menu: weight: 20 post: >

Learn how to use Kubernetes with conceptual, tutorial, and reference documentation. You can even help contribute to the docs!

+description: > + Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. The open source project is hosted by the Cloud Native Computing Foundation. overview: > Kubernetes is an open source container orchestration engine for automating deployment, scaling, and management of containerized applications. The open source project is hosted by the Cloud Native Computing Foundation (CNCF). cards: @@ -36,9 +38,14 @@ cards: button_path: "/docs/setup" - name: tasks title: "Learn how to use Kubernetes" - description: "Look up common tasks and how to perform them using a short sequence of steps." + description: "Look up common tasks and how to perform them using a short sequence of steps." button: "View Tasks" button_path: "/docs/tasks" +- name: training + title: "Training" + description: "Get certified in Kubernetes and make your cloud native projects successful!" + button: "View training" + button_path: "/training" - name: reference title: Look up reference information description: Browse terminology, command line syntax, API resource types, and setup tool documentation. diff --git a/content/en/docs/reference/_index.md b/content/en/docs/reference/_index.md index 7efffa4ef683b..8b0faf5e91f8a 100644 --- a/content/en/docs/reference/_index.md +++ b/content/en/docs/reference/_index.md @@ -38,13 +38,15 @@ client libraries: * [JSONPath](/docs/reference/kubectl/jsonpath/) - Syntax guide for using [JSONPath expressions](http://goessner.net/articles/JsonPath/) with kubectl. * [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) - CLI tool to easily provision a secure Kubernetes cluster. -## Config Reference +## Components Reference * [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - The primary *node agent* that runs on each node. The kubelet takes a set of PodSpecs and ensures that the described containers are running and healthy. * [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - REST API that validates and configures data for API objects such as pods, services, replication controllers. * [kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) - Daemon that embeds the core control loops shipped with Kubernetes. * [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) - Can do simple TCP/UDP stream forwarding or round-robin TCP/UDP forwarding across a set of back-ends. * [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/) - Scheduler that manages availability, performance, and capacity. + * [kube-scheduler Policies](/docs/reference/scheduling/policies) + * [kube-scheduler Profiles](/docs/reference/scheduling/profiles) ## Design Docs diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index 3d85194ed7fc8..dfd9e69428f89 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -115,6 +115,30 @@ required. Rejects all requests. AlwaysDeny is DEPRECATED as no real meaning. 
+### CertificateApproval {#certificateapproval} + +This admission controller observes requests to 'approve' CertificateSigningRequest resources and performs additional +authorization checks to ensure the approving user has permission to `approve` certificate requests with the +`spec.signerName` requested on the CertificateSigningRequest resource. + +See [Certificate Signing Requests](/docs/reference/access-authn-authz/certificate-signing-requests/) for more +information on the permissions required to perform different actions on CertificateSigningRequest resources. + +### CertificateSigning {#certificatesigning} + +This admission controller observes updates to the `status.certificate` field of CertificateSigningRequest resources +and performs additional authorization checks to ensure the signing user has permission to `sign` certificate +requests with the `spec.signerName` requested on the CertificateSigningRequest resource. + +See [Certificate Signing Requests](/docs/reference/access-authn-authz/certificate-signing-requests/) for more +information on the permissions required to perform different actions on CertificateSigningRequest resources. + +### CertificateSubjectRestrictions {#certificatesubjectrestrictions} + +This admission controller observes creation of CertificateSigningRequest resources that have a `spec.signerName` +of `kubernetes.io/kube-apiserver-client`. It rejects any request that specifies a 'group' (or 'organization attribute') +of `system:masters`. + ### DefaultStorageClass {#defaultstorageclass} This admission controller observes creation of `PersistentVolumeClaim` objects that do not request any specific storage class @@ -645,21 +669,30 @@ for more information. ### PodTolerationRestriction {#podtolerationrestriction} -This admission controller first verifies any conflict between a pod's tolerations and its -namespace's tolerations, and rejects the pod request if there is a conflict. -It then merges the namespace's tolerations into the pod's tolerations. -The resulting tolerations are checked against the namespace's whitelist of -tolerations. If the check succeeds, the pod request is admitted otherwise -rejected. - -If the pod's namespace does not have any associated default or whitelist of -tolerations, then the cluster-level default or whitelist of tolerations are used -instead if specified. - -Tolerations to a namespace are assigned via the -`scheduler.alpha.kubernetes.io/defaultTolerations` and -`scheduler.alpha.kubernetes.io/tolerationsWhitelist` -annotation keys. +The PodTolerationRestriction admission controller verifies any conflict between tolerations of a pod and the tolerations of its namespace. +It rejects the pod request if there is a conflict. +It then merges the tolerations annotated on the namespace into the tolerations of the pod. +The resulting tolerations are checked against a whitelist of tolerations annotated to the namespace. +If the check succeeds, the pod request is admitted; otherwise it is rejected. + +If the namespace of the pod does not have any associated default tolerations or a whitelist of +tolerations annotated, the cluster-level default tolerations or cluster-level whitelist of tolerations are used +instead if they are specified. + +Tolerations to a namespace are assigned via the `scheduler.alpha.kubernetes.io/defaultTolerations` annotation key. +The whitelist can be added via the `scheduler.alpha.kubernetes.io/tolerationsWhitelist` annotation key.
+ +Example for namespace annotations: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: apps-that-need-nodes-exclusively + annotations: + scheduler.alpha.kubernetes.io/defaultTolerations: '{"operator": "Exists", "effect": "NoSchedule", "key": "dedicated-node"}' + scheduler.alpha.kubernetes.io/tolerationsWhitelist: '{"operator": "Exists", "effect": "NoSchedule", "key": "dedicated-node"}' +``` ### Priority {#priority} diff --git a/content/en/docs/reference/access-authn-authz/authentication.md b/content/en/docs/reference/access-authn-authz/authentication.md index 0065bf7abe102..31ae3642227b0 100644 --- a/content/en/docs/reference/access-authn-authz/authentication.md +++ b/content/en/docs/reference/access-authn-authz/authentication.md @@ -33,7 +33,7 @@ stored as `Secrets`, which are mounted into pods allowing in-cluster processes to talk to the Kubernetes API. API requests are tied to either a normal user or a service account, or are treated -as anonymous requests. This means every process inside or outside the cluster, from +as [anonymous requests](#anonymous-requests). This means every process inside or outside the cluster, from a human user typing `kubectl` on a workstation, to `kubelets` on nodes, to members of the control plane, must authenticate when making requests to the API server, or be treated as an anonymous user. @@ -120,7 +120,7 @@ Authorization: Bearer 31ada4fd-adec-460c-809a-9e56ceb75269 ### Bootstrap Tokens -This feature is currently in **beta**. +{{< feature-state for_k8s_version="v1.18" state="stable" >}} To allow for streamlined bootstrapping for new clusters, Kubernetes includes a dynamically-managed Bearer token type called a *Bootstrap Token*. These tokens @@ -208,7 +208,7 @@ spec: serviceAccountName: bob-the-bot containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ``` Service account bearer tokens are perfectly valid to use outside the cluster and diff --git a/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md b/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md index 48d09fd47e437..c8c55c08d68a1 100644 --- a/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md +++ b/content/en/docs/reference/access-authn-authz/bootstrap-tokens.md @@ -7,6 +7,9 @@ weight: 20 --- {{% capture overview %}} + +{{< feature-state for_k8s_version="v1.18" state="stable" >}} + Bootstrap tokens are a simple bearer token that is meant to be used when creating new clusters or joining new nodes to an existing cluster. It was built to support [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/), but can be used in other contexts @@ -26,8 +29,6 @@ Controller Manager. The tokens are also used to create a signature for a specific ConfigMap used in a "discovery" process through a BootstrapSigner controller. -{{< feature-state state="beta" >}} - ## Token Format Bootstrap Tokens take the form of `abcdef.0123456789abcdef`. More formally, @@ -115,7 +116,7 @@ authenticate to the API server as a bearer token. `cluster-info` ConfigMap as described below. The `expiration` field controls the expiry of the token. Expired tokens are -rejected when used for authentication and ignored during ConfigMap signing. +rejected when used for authentication and ignored during ConfigMap signing. The expiry value is encoded as an absolute UTC time using RFC3339. Enable the `tokencleaner` controller to automatically delete expired tokens. 
diff --git a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md new file mode 100644 index 0000000000000..bdb1bfbb9c27e --- /dev/null +++ b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md @@ -0,0 +1,330 @@ +--- +reviewers: +- liggitt +- mikedanese +- munnerz +title: Certificate Signing Requests +content_template: templates/concept +weight: 20 +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="v1.18" state="beta" >}} + +The Certificates API enables automation of +[X.509](https://www.itu.int/rec/T-REC-X.509) credential provisioning by providing +a programmatic interface for clients of the Kubernetes API to request and obtain +X.509 {{< glossary_tooltip term_id="certificate" text="certificates" >}} from a Certificate Authority (CA). + +A CertificateSigningRequest (CSR) resource is used to request that a certificate be signed +by a denoted signer, after which the request may be approved or denied before +finally being signed. + +{{% /capture %}} + +{{% capture body %}} +## Request signing process + +The _CertificateSigningRequest_ resource type allows a client to ask for an X.509 certificate +be issued, based on a signing request. +The CertificateSigningRequest object includes a PEM-encoded PKCS#10 signing request in +the `spec.request` field. The CertificateSigningRequest denotes the _signer_ (the +recipient that the request is being made to) using the `spec.signerName` field. + +Once created, a CertificateSigningRequest must be approved before it can be signed. +Depending on the signer selected, a CertificateSigningRequest may be automatically approved +by a {{< glossary_tooltip text="controller" term_id="controller" >}}. +Otherwise, a CertificateSigningRequest must be manually approved either via the REST API (or client-go) +or by running `kubectl certificate approve`. Likewise, a CertificateSigningRequest may also be denied, +which tells the configured signer that it must not sign the request. + +For certificates that have been approved, the next step is signing. The relevant signing controller +first validates that the signing conditions are met and then creates a certificate. +The signing controller then updates the CertificateSigningRequest, storing the new certificate into +the `status.certificate` field of the existing CertificateSigningRequest object. The +`status.certificate` field is either empty or contains a X.509 certificate, encoded in PEM format. +The CertificateSigningRequest `status.certificate` field is empty until the signer does this. + +Once the `status.certificate` field has been populated, the request has been completed and clients can now +fetch the signed certificate PEM data from the CertificateSigningRequest resource. +Signers can instead deny certificate signing if the approval conditions are not met. + +In order to reduce the number of old CertificateSigningRequest resources left in a cluster, a garbage collection +controller runs periodically. The garbage collection removes CertificateSigningRequests that have not changed +state for some duration: + +* Approved requests: automatically deleted after 1 hour +* Denied requests: automatically deleted after 1 hour +* Pending requests: automatically deleted after 1 hour + +## Signers + +All signers should provide information about how they work so that clients can predict what will happen to their CSRs. +This includes: + +1. 
**Trust distribution**: how trust (CA bundles) is distributed. +1. **Permitted subjects**: any restrictions on and behavior when a disallowed subject is requested. +1. **Permitted x509 extensions**: including IP subjectAltNames, DNS subjectAltNames, Email subjectAltNames, URI subjectAltNames, etc., and behavior when a disallowed extension is requested. +1. **Permitted key usages / extended key usages**: any restrictions on and behavior when usages different than the signer-determined usages are specified in the CSR. +1. **Expiration/certificate lifetime**: whether it is fixed by the signer, configurable by the admin, determined by the CSR object, etc., and behavior if an expiration different than the signer-determined expiration is specified in the CSR. +1. **CA bit allowed/disallowed**: and behavior if a CSR contains a request for a CA certificate when the signer does not permit it. + +Commonly, the `status.certificate` field contains a single PEM-encoded X.509 certificate once the CSR is approved and the certificate is issued. Some signers store multiple certificates into the `status.certificate` field. In that case, the documentation for the signer should specify the meaning of additional certificates; for example, this might be the certificate plus intermediates to be presented during TLS handshakes. + +### Kubernetes signers + +Kubernetes provides built-in signers that each have a well-known `signerName`: + +1. `kubernetes.io/kube-apiserver-client`: signs certificates that will be honored as client-certs by the kube-apiserver. + Never auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}. + 1. Trust distribution: signed certificates must be honored as client-certificates by the kube-apiserver. The CA bundle is not distributed by any other means. + 1. Permitted subjects - no subject restrictions, but approvers and signers may choose not to approve or sign. Certain subjects like cluster-admin level users or groups vary between distributions and installations, but deserve additional scrutiny before approval and signing. The `CertificateSubjectRestriction` admission plugin is available and enabled by default to restrict `system:masters`, but it is often not the only cluster-admin subject in a cluster. + 1. Permitted x509 extensions - honors subjectAltName and key usage extensions and discards other extensions. + 1. Permitted key usages - must include `[]string{"client auth"}`. Must not include key usages beyond `[]string{"digital signature", "key encipherment", "client auth"}` + 1. Expiration/certificate lifetime - minimum of CSR signer or request. The signer is responsible for checking that the certificate lifetime is valid and permissible. + 1. CA bit allowed/disallowed - not allowed. + +1. `kubernetes.io/kube-apiserver-client-kubelet`: signs client certificates that will be honored as client-certs by the + kube-apiserver. + May be auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}. + 1. Trust distribution: signed certificates must be honored as client-certificates by the kube-apiserver. The CA bundle + is not distributed by any other means. + 1. Permitted subjects - organizations are exactly `[]string{"system:nodes"}`, common name starts with `"system:node:"` + 1. Permitted x509 extensions - honors key usage extensions, forbids subjectAltName extensions, drops other extensions. + 1. Permitted key usages - exactly `[]string{"key encipherment", "digital signature", "client auth"}` + 1.
Expiration/certificate lifetime - minimum of CSR signer or request. The signer is responsible for checking that the certificate lifetime is valid and permissible. + 1. CA bit allowed/disallowed - not allowed. + +1. `kubernetes.io/kubelet-serving`: signs serving certificates that are honored as a valid kubelet serving certificate + by the kube-apiserver, but with no other guarantees. + Never auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}. + 1. Trust distribution: signed certificates must be honored by the kube-apiserver as valid to terminate connections to a kubelet. + The CA bundle is not distributed by any other means. + 1. Permitted subjects - organizations are exactly `[]string{"system:nodes"}`, common name starts with `"system:node:"` + 1. Permitted x509 extensions - honors key usage and DNSName/IPAddress subjectAltName extensions, forbids EmailAddress and URI subjectAltName extensions, drops other extensions. At least one DNS or IP subjectAltName must be present. + 1. Permitted key usages - exactly `[]string{"key encipherment", "digital signature", "server auth"}` + 1. Expiration/certificate lifetime - minimum of CSR signer or request. + 1. CA bit allowed/disallowed - not allowed. + +1. `kubernetes.io/legacy-unknown`: has no guarantees for trust at all. Some distributions may honor these as client + certs, but that behavior is not standard Kubernetes behavior. + Never auto-approved by {{< glossary_tooltip term_id="kube-controller-manager" >}}. + 1. Trust distribution: None. There is no standard trust or distribution for this signer in a Kubernetes cluster. + 1. Permitted subjects - any + 1. Permitted x509 extensions - honors subjectAltName and key usage extensions and discards other extensions. + 1. Permitted key usages - any + 1. Expiration/certificate lifetime - minimum of CSR signer or request. The signer is responsible for checking that the certificate lifetime is valid and permissible. + 1. CA bit allowed/disallowed - not allowed. + +{{< note >}} +Failures for all of these are only reported in kube-controller-manager logs. +{{< /note >}} + +Distribution of trust happens out of band for these signers. Any trust outside of those described above is strictly +coincidental. For instance, some distributions may honor `kubernetes.io/legacy-unknown` as client certificates for the +kube-apiserver, but this is not a standard. +None of these usages are related to ServiceAccount token secrets `.data[ca.crt]` in any way. That CA bundle is only +guaranteed to verify a connection to the kube-apiserver using the default service (`kubernetes.default.svc`).
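+To tie the request process and the built-in signers together, here is a minimal, hypothetical sketch of a CertificateSigningRequest manifest that targets one of the signers above. The object name is an arbitrary example and the `request` value is a truncated placeholder for a base64-encoded PKCS#10 signing request, not real data:
+
+```yaml
+apiVersion: certificates.k8s.io/v1beta1
+kind: CertificateSigningRequest
+metadata:
+  name: my-user-client-csr        # arbitrary example name
+spec:
+  # Base64-encoded PKCS#10 certificate signing request (truncated placeholder).
+  request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0uLi4=
+  # One of the built-in signers described above.
+  signerName: kubernetes.io/kube-apiserver-client
+  usages:
+  - digital signature
+  - key encipherment
+  - client auth
+```
+
+After such an object is created, it still has to be approved and then signed, as covered in the sections that follow.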
+ +## Authorization + +To allow creating a CertificateSigningRequest and retrieving any CertificateSigningRequest: + +* Verbs: `create`, `get`, `list`, `watch`, group: `certificates.k8s.io`, resource: `certificatesigningrequests` + +For example: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csr-creator +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - get + - list + - watch +``` + +To allow approving a CertificateSigningRequest: + +* Verbs: `get`, `list`, `watch`, group: `certificates.k8s.io`, resource: `certificatesigningrequests` +* Verbs: `update`, group: `certificates.k8s.io`, resource: `certificatesigningrequests/approval` +* Verbs: `approve`, group: `certificates.k8s.io`, resource: `signers`, resourceName: `<signerNameDomain>/<signerNamePath>` or `<signerNameDomain>/*` + +For example: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csr-approver +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - update +- apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - example.com/my-signer-name # example.com/* can be used to authorize for all signers in the 'example.com' domain + verbs: + - approve +``` + +To allow signing a CertificateSigningRequest: + +* Verbs: `get`, `list`, `watch`, group: `certificates.k8s.io`, resource: `certificatesigningrequests` +* Verbs: `update`, group: `certificates.k8s.io`, resource: `certificatesigningrequests/status` +* Verbs: `sign`, group: `certificates.k8s.io`, resource: `signers`, resourceName: `<signerNameDomain>/<signerNamePath>` or `<signerNameDomain>/*` + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csr-signer +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - update +- apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - example.com/my-signer-name # example.com/* can be used to authorize for all signers in the 'example.com' domain + verbs: + - sign +``` + +## Approval & rejection + +### Control plane automated approval {#approval-rejection-control-plane} + +The kube-controller-manager ships with a built-in approver for certificates with +a signerName of `kubernetes.io/kube-apiserver-client-kubelet` that delegates various +permissions on CSRs for node credentials to authorization. +The kube-controller-manager POSTs SubjectAccessReview resources to the API server +in order to check authorization for certificate approval. + +### Approval & rejection using `kubectl` {#approval-rejection-kubectl} + +A Kubernetes administrator (with appropriate permissions) can manually approve +(or deny) CertificateSigningRequests by using the `kubectl certificate +approve` and `kubectl certificate deny` commands. + +To approve a CSR with kubectl: + +```bash +kubectl certificate approve <certificate-signing-request-name> +``` + +Likewise, to deny a CSR: + +```bash +kubectl certificate deny <certificate-signing-request-name> +``` + +### Approval & rejection using the Kubernetes API {#approval-rejection-api-client} + +Users of the REST API can approve CSRs by submitting an UPDATE request to the `approval` +subresource of the CSR to be approved.
For example, you could write an +{{< glossary_tooltip term_id="operator-pattern" text="operator" >}} that watches for a particular +kind of CSR and then sends an UPDATE to approve it. + +When you make an approval or rejection request, set either the `Approved` or `Denied` +status condition based on the state you determine: + +For `Approved` CSRs: + +```yaml +apiVersion: certificates.k8s.io/v1beta1 +kind: CertificateSigningRequest +... +status: + conditions: + - lastUpdateTime: "2020-02-08T11:37:35Z" + message: Approved by my custom approver controller + reason: ApprovedByMyPolicy # You can set this to any string + type: Approved +``` + +For `Denied` CSRs: + +```yaml +apiVersion: certificates.k8s.io/v1beta1 +kind: CertificateSigningRequest +... +status: + conditions: + - lastUpdateTime: "2020-02-08T11:37:35Z" + message: Denied by my custom approver controller + reason: DeniedByMyPolicy # You can set this to any string + type: Denied +``` + +It's usual to set `status.conditions.reason` to a machine-friendly reason +code using TitleCase; this is a convention but you can set it to anything +you like. If you want to add a note just for human consumption, use the +`status.conditions.message` field. + +## Signing + +### Control plane signer {#signer-control-plane} + +The Kubernetes control plane implements each of the [Kubernetes signers](/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers), +as part of the kube-controller-manager. + +{{< note >}} +Prior to Kubernetes v1.18, the kube-controller-manager would sign any CSRs that +were marked as approved. +{{< /note >}} + +### API-based signers {#signer-api} + +Users of the REST API can sign CSRs by submitting an UPDATE request to the `status` +subresource of the CSR to be signed. + +As part of this request, the `status.certificate` field should be set to contain the +signed certificate.
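+As a rough, illustrative sketch only (the field layout mirrors the approval examples above, and the `certificate` value is a truncated placeholder rather than real data), the signer's update might look like:
+
+```yaml
+apiVersion: certificates.k8s.io/v1beta1
+kind: CertificateSigningRequest
+...
+status:
+  # PEM certificate data, base64-encoded in the API representation (truncated placeholder).
+  certificate: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tLi4u
+```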
+ +{{% /capture %}} + +{{% capture whatsnext %}} + +* Read [Manage TLS Certificates in a Cluster](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/) +* View the source code for the kube-controller-manager built in [signer](https://github.com/kubernetes/kubernetes/blob/32ec6c212ec9415f604ffc1f4c1f29b782968ff1/pkg/controller/certificates/signer/cfssl_signer.go) +* View the source code for the kube-controller-manager built in [approver](https://github.com/kubernetes/kubernetes/blob/32ec6c212ec9415f604ffc1f4c1f29b782968ff1/pkg/controller/certificates/approver/sarapprove.go) +* For details of X.509 itself, refer to [RFC 5280](https://tools.ietf.org/html/rfc5280#section-3.1) section 3.1 +* For information on the syntax of PKCS#10 certificate signing requests, refer to [RFC 2986](https://tools.ietf.org/html/rfc2986) + +{{% /capture %}} diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md index 4da9bc951c13c..c57bcdaf341fe 100644 --- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md @@ -114,7 +114,7 @@ webhooks: service: namespace: "example-namespace" name: "example-service" - caBundle: "Ci0tLS0tQk......tLS0K" + caBundle: "Ci0tLS0tQk...<`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.>...tLS0K" admissionReviewVersions: ["v1", "v1beta1"] sideEffects: None timeoutSeconds: 5 @@ -139,7 +139,7 @@ webhooks: service: namespace: "example-namespace" name: "example-service" - caBundle: "Ci0tLS0tQk......tLS0K" + caBundle: "Ci0tLS0tQk...<`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate>...tLS0K" admissionReviewVersions: ["v1beta1"] timeoutSeconds: 5 ``` @@ -1122,7 +1122,7 @@ kind: MutatingWebhookConfiguration webhooks: - name: my-webhook.example.com clientConfig: - caBundle: "Ci0tLS0tQk......tLS0K" + caBundle: "Ci0tLS0tQk...<`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate>...tLS0K" service: namespace: my-service-namespace name: my-service-name diff --git a/content/en/docs/reference/access-authn-authz/rbac.md b/content/en/docs/reference/access-authn-authz/rbac.md index 852e73fd799bc..1ca0b98b7c393 100644 --- a/content/en/docs/reference/access-authn-authz/rbac.md +++ b/content/en/docs/reference/access-authn-authz/rbac.md @@ -10,35 +10,61 @@ weight: 70 --- {{% capture overview %}} -Role-based access control (RBAC) is a method of regulating access to computer or network resources based on the roles of individual users within an enterprise. +Role-based access control (RBAC) is a method of regulating access to computer or +network resources based on the roles of individual users within your organization. {{% /capture %}} {{% capture body %}} -`RBAC` uses the `rbac.authorization.k8s.io` {{< glossary_tooltip text="API Group" term_id="api-group" >}} -to drive authorization decisions, allowing admins to dynamically configure policies -through the Kubernetes API. - -As of 1.8, RBAC mode is stable and backed by the rbac.authorization.k8s.io/v1 API. +RBAC authorization uses the `rbac.authorization.k8s.io` +{{< glossary_tooltip text="API group" term_id="api-group" >}} to drive authorization +decisions, allowing you to dynamically configure policies through the Kubernetes API. 
+ +To enable RBAC, start the {{< glossary_tooltip text="API server" term_id="kube-apiserver" >}} +with the `--authorization-mode` flag set to a comma-separated list that includes `RBAC`; +for example: +```shell +kube-apiserver --authorization-mode=Example,RBAC --other-options --more-options +``` -To enable RBAC, start the apiserver with `--authorization-mode=RBAC`. +## API objects {#api-overview} -## API Overview +The RBAC API declares four kinds of Kubernetes object: _Role_, _ClusterRole_, +_RoleBinding_ and _ClusterRoleBinding_. You can +[describe objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/#understanding-kubernetes-objects), +or amend them, using tools such as `kubectl,` just like any other Kubernetes object. -The RBAC API declares four top-level types which will be covered in this -section. Users can interact with these resources as they would with any other -API resource (via `kubectl`, API calls, etc.). For instance, -`kubectl apply -f (resource).yml` can be used with any of these examples, -though readers who wish to follow along should review the section on -bootstrapping first. +{{< caution >}} +These objects, by design, impose access restrictions. If you are making changes +to a cluster as you learn, see +[privilege escalation prevention and bootstrapping](#privilege-escalation-prevention-and-bootstrapping) +to understand how those restrictions can prevent you making some changes. +{{< /caution >}} ### Role and ClusterRole -In the RBAC API, a role contains rules that represent a set of permissions. +An RBAC _Role_ or _ClusterRole_ contains rules that represent a set of permissions. Permissions are purely additive (there are no "deny" rules). -A role can be defined within a namespace with a `Role`, or cluster-wide with a `ClusterRole`. -A `Role` can only be used to grant access to resources within a single namespace. -Here's an example `Role` in the "default" namespace that can be used to grant read access to pods: +A Role always sets permissions within a particular {{< glossary_tooltip text="namespace" term_id="namespace" >}}; +when you create a Role, you have to specify the namespace it belongs in. + +ClusterRole, by contrast, is a non-namespaced resource. The resources have different names (Role +and ClusterRole) because a Kubernetes object always has to be either namespaced or not namespaced; +it can't be both. + +ClusterRoles have several uses. You can use a ClusterRole to: + +1. define permissions on namespaced resources and be granted within individual namespace(s) +1. define permissions on namespaced resources and be granted across all namespaces +1. define permissions on cluster-scoped resources + +If you want to define a role within a namespace, use a Role; if you want to define +a role cluster-wide, use a ClusterRole. + +#### Role example + +Here's an example Role in the "default" namespace that can be used to grant read access to +{{< glossary_tooltip text="pods" term_id="pod" >}}: ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -52,14 +78,19 @@ rules: verbs: ["get", "watch", "list"] ``` -A `ClusterRole` can be used to grant the same permissions as a `Role`, -but because they are cluster-scoped, they can also be used to grant access to: +#### ClusterRole example + +A ClusterRole can be used to grant the same permissions as a Role. 
+Because ClusterRoles are cluster-scoped, you can also use them to grant access to: -* cluster-scoped resources (like nodes) -* non-resource endpoints (like "/healthz") -* namespaced resources (like pods) across all namespaces (needed to run `kubectl get pods --all-namespaces`, for example) +* cluster-scoped resources (like {{< glossary_tooltip text="nodes" term_id="node" >}}) +* non-resource endpoints (like `/healthz`) +* namespaced resources (like Pods), across all namespaces + For example: you can use a ClusterRole to allow a particular user to run + `kubectl get pods --all-namespaces`. -The following `ClusterRole` can be used to grant read access to secrets in any particular namespace, +Here is an example of a ClusterRole that can be used to grant read access to +{{< glossary_tooltip text="secrets" term_id="secret" >}} in any particular namespace, or across all namespaces (depending on how it is [bound](#rolebinding-and-clusterrolebinding)): ```yaml @@ -70,55 +101,78 @@ metadata: name: secret-reader rules: - apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing Secret + # objects is "secrets" resources: ["secrets"] verbs: ["get", "watch", "list"] ``` +The name of a Role or a ClusterRole object must be a valid +[path segment name](/docs/concepts/overview/working-with-objects/names#path-segment-names). + ### RoleBinding and ClusterRoleBinding A role binding grants the permissions defined in a role to a user or set of users. -It holds a list of subjects (users, groups, or service accounts), and a reference to the role being granted. -Permissions can be granted within a namespace with a `RoleBinding`, or cluster-wide with a `ClusterRoleBinding`. +It holds a list of *subjects* (users, groups, or service accounts), and a reference to the +role being granted. +A RoleBinding grants permissions within a specific namespace whereas a ClusterRoleBinding +grants that access cluster-wide. -A `RoleBinding` may reference a `Role` in the same namespace. -The following `RoleBinding` grants the "pod-reader" role to the user "jane" within the "default" namespace. -This allows "jane" to read pods in the "default" namespace. +A RoleBinding may reference any Role in the same namespace. Alternatively, a RoleBinding +can reference a ClusterRole and bind that ClusterRole to the namespace of the RoleBinding. +If you want to bind a ClusterRole to all the namespaces in your cluster, you use a +ClusterRoleBinding. + +The name of a RoleBinding or ClusterRoleBinding object must be a valid +[path segment name](/docs/concepts/overview/working-with-objects/names#path-segment-names). -`roleRef` is how you will actually create the binding. The `kind` will be either `Role` or `ClusterRole`, and the `name` will reference the name of the specific `Role` or `ClusterRole` you want. In the example below, this RoleBinding is using `roleRef` to bind the user "jane" to the `Role` created above named `pod-reader`. +#### RoleBinding examples {#rolebinding-example} + +Here is an example of a RoleBinding that grants the "pod-reader" Role to the user "jane" +within the "default" namespace. +This allows "jane" to read pods in the "default" namespace. ```yaml apiVersion: rbac.authorization.k8s.io/v1 # This role binding allows "jane" to read pods in the "default" namespace. +# You need to already have a Role named "pod-reader" in that namespace. 
kind: RoleBinding metadata: name: read-pods namespace: default subjects: +# You can specify more than one "subject" - kind: User - name: jane # Name is case sensitive + name: jane # "name" is case sensitive apiGroup: rbac.authorization.k8s.io roleRef: + # "roleRef" specifies the binding to a Role / ClusterRole kind: Role #this must be Role or ClusterRole name: pod-reader # this must match the name of the Role or ClusterRole you wish to bind to apiGroup: rbac.authorization.k8s.io ``` -A `RoleBinding` may also reference a `ClusterRole` to grant the permissions to namespaced -resources defined in the `ClusterRole` within the `RoleBinding`'s namespace. -This allows administrators to define a set of common roles for the entire cluster, -then reuse them within multiple namespaces. +A RoleBinding can also reference a ClusterRole to grant the permissions defined in that +ClusterRole to resources inside the RoleBinding's namespace. This kind of reference +lets you define a set of common roles across your cluster, then reuse them within +multiple namespaces. -For instance, even though the following `RoleBinding` refers to a `ClusterRole`, -"dave" (the subject, case sensitive) will only be able to read secrets in the "development" -namespace (the namespace of the `RoleBinding`). +For instance, even though the following RoleBinding refers to a ClusterRole, +"dave" (the subject, case sensitive) will only be able to read Secrets in the "development" +namespace, because the RoleBinding's namespace (in its metadata) is "development". ```yaml apiVersion: rbac.authorization.k8s.io/v1 # This role binding allows "dave" to read secrets in the "development" namespace. +# You need to already have a ClusterRole named "secret-reader". kind: RoleBinding metadata: name: read-secrets - namespace: development # This only grants permissions within the "development" namespace. + # + # The namespace of the RoleBinding determines where the permissions are granted. + # This only grants permissions within the "development" namespace. + namespace: development subjects: - kind: User name: dave # Name is case sensitive @@ -129,8 +183,10 @@ roleRef: apiGroup: rbac.authorization.k8s.io ``` -Finally, a `ClusterRoleBinding` may be used to grant permission at the cluster level and in all -namespaces. The following `ClusterRoleBinding` allows any user in the group "manager" to read +#### ClusterRoleBinding example + +To grant permissions across a whole cluster, you can use a ClusterRoleBinding. +The following ClusterRoleBinding allows any user in the group "manager" to read secrets in any namespace. ```yaml @@ -149,37 +205,43 @@ roleRef: apiGroup: rbac.authorization.k8s.io ``` -You cannot modify which `Role` or `ClusterRole` a binding object refers to. -Attempts to change the `roleRef` field of a binding object will result in a validation error. -To change the `roleRef` field on an existing binding object, the binding object must be deleted and recreated. -There are two primary reasons for this restriction: +After you create a binding, you cannot change the Role or ClusterRole that it refers to. +If you try to change a binding's `roleRef`, you get a validation error. If you do want +to change the `roleRef` for a binding, you need to remove the binding object and create +a replacement. + +There are two reasons for this restriction: +1. 
Making `roleRef` immutable allows granting someone `update` permission on an existing binding +object, so that they can manage the list of subjects, without being able to change +the role that is granted to those subjects. 1. A binding to a different role is a fundamentally different binding. Requiring a binding to be deleted/recreated in order to change the `roleRef` ensures the full list of subjects in the binding is intended to be granted -the new role (as opposed to enabling accidentally modifying just the roleRef -without verifying all of the existing subjects should be given the new role's permissions). -2. Making `roleRef` immutable allows giving `update` permission on an existing binding object -to a user, which lets them manage the list of subjects, without being able to change the -role that is granted to those subjects. +the new role (as opposed to enabling accidentally modifying just the roleRef +without verifying all of the existing subjects should be given the new role's +permissions). The `kubectl auth reconcile` command-line utility creates or updates a manifest file containing RBAC objects, and handles deleting and recreating binding objects if required to change the role they refer to. See [command usage and examples](#kubectl-auth-reconcile) for more information. -### Referring to Resources +### Referring to resources -Most resources are represented by a string representation of their name, such as "pods", just as it -appears in the URL for the relevant API endpoint. However, some Kubernetes APIs involve a -"subresource", such as the logs for a pod. The URL for the pods logs endpoint is: +In the Kubernetes API, most resources are represented and accessed using a string representation of +their object name, such as `pods` for a Pod. RBAC refers to resources using exactly the same +name that appears in the URL for the relevant API endpoint. +Some Kubernetes APIs involve a +_subresource_, such as the logs for a Pod. A request for a Pod's logs looks like: ```http GET /api/v1/namespaces/{namespace}/pods/{name}/log ``` -In this case, "pods" is the namespaced resource, and "log" is a subresource of pods. To represent -this in an RBAC role, use a slash to delimit the resource and subresource. To allow a subject -to read both pods and pod logs, you would write: +In this case, `pods` is the namespaced resource for Pod resources, and `log` is a +subresource of `pods`. To represent this in an RBAC role, use a slash (`/`) to +delimit the resource and subresource. To allow a subject to read `pods` and +also access the `log` subresource for each of those Pods, you write: ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -193,9 +255,11 @@ rules: verbs: ["get", "list"] ``` -Resources can also be referred to by name for certain requests through the `resourceNames` list. -When specified, requests can be restricted to individual instances of a resource. To restrict a -subject to only "get" and "update" a single configmap, you would write: +You can also refer to resources by name for certain requests through the `resourceNames` list. +When specified, requests can be restricted to individual instances of a resource. 
+Here is an example that restricts its subject to only `get` or `update` a +{{< glossary_tooltip term_id="ConfigMap" >}} named `my-configmap`: + ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -205,19 +269,30 @@ metadata: name: configmap-updater rules: - apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing ConfigMap + # objects is "configmaps" resources: ["configmaps"] resourceNames: ["my-configmap"] verbs: ["update", "get"] ``` -Note that `create` requests cannot be restricted by resourceName, as the object name is not known at -authorization time. The other exception is `deletecollection`. +{{< note >}} +You cannot restrict `create` or `deletecollection` requests by resourceName. For `create`, this +limitation is because the object name is not known at authorization time. +{{< /note >}} + ### Aggregated ClusterRoles -As of 1.9, ClusterRoles can be created by combining other ClusterRoles using an `aggregationRule`. The -permissions of aggregated ClusterRoles are controller-managed, and filled in by unioning the rules of any -ClusterRole that matches the provided label selector. An example aggregated ClusterRole: +You can _aggregate_ several ClusterRoles into one combined ClusterRole. +A controller, running as part of the cluster control plane, watches for ClusterRole +objects with an `aggregationRule` set. The `aggregationRule` defines a label +{{< glossary_tooltip text="selector" term_id="selector" >}} that the controller +uses to match other ClusterRole objects that should be combined into the `rules` +field of this one. + +Here is an example aggregated ClusterRole: ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -228,12 +303,13 @@ aggregationRule: clusterRoleSelectors: - matchLabels: rbac.example.com/aggregate-to-monitoring: "true" -rules: [] # Rules are automatically filled in by the controller manager. +rules: [] # The control plane automatically fills in the rules ``` -Creating a ClusterRole that matches the label selector will add rules to the aggregated ClusterRole. In this case -rules can be added to the "monitoring" ClusterRole by creating another ClusterRole that has the label -`rbac.example.com/aggregate-to-monitoring: true`. +If you create a new ClusterRole that matches the label selector of an existing aggregated ClusterRole, +that change triggers adding the new rules into the aggregated ClusterRole. +Here is an example that adds rules to the "monitoring" ClusterRole, by creating another +ClusterRole labeled `rbac.example.com/aggregate-to-monitoring: true`. ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -242,19 +318,22 @@ metadata: name: monitoring-endpoints labels: rbac.example.com/aggregate-to-monitoring: "true" -# These rules will be added to the "monitoring" role. +# When you create the "monitoring-endpoints" ClusterRole, +# the rules below will be added to the "monitoring" ClusterRole. rules: - apiGroups: [""] resources: ["services", "endpoints", "pods"] verbs: ["get", "list", "watch"] ``` -The default user-facing roles (described below) use ClusterRole aggregation. This lets admins include rules -for custom resources, such as those served by CustomResourceDefinitions or Aggregated API servers, on the -default roles. +The [default user-facing roles](#default-roles-and-role-bindings) use ClusterRole aggregation. 
This lets you, +as a cluster administrator, include rules for custom resources, such as those served by +{{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinitions" >}} +or aggregated API servers, to extend the default roles. -For example, the following ClusterRoles let the "admin" and "edit" default roles manage the custom resource -"CronTabs" and the "view" role perform read-only actions on the resource. +For example: the following ClusterRoles let the "admin" and "edit" default roles manage the custom resource +named CronTab, whereas the "view" role can perform just read actions on CronTab resources. +You can assume that CronTab objects are named `"crontabs"` in URLs as seen by the API server. ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -283,60 +362,87 @@ rules: verbs: ["get", "list", "watch"] ``` -#### Role Examples +#### Role examples -Only the `rules` section is shown in the following examples. +The following examples are excerpts from Role or ClusterRole objects, showing only +the `rules` section. -Allow reading the resource "pods" in the core {{< glossary_tooltip text="API Group" term_id="api-group" >}}: +Allow reading `"pods"` resources in the core +{{< glossary_tooltip text="API Group" term_id="api-group" >}}: ```yaml rules: - apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing Pod + # objects is "pods" resources: ["pods"] verbs: ["get", "list", "watch"] ``` -Allow reading/writing "deployments" in both the "extensions" and "apps" API groups: +Allow reading/writing Deployments (at the HTTP level: objects with `"deployments"` +in the resource part of their URL) in both the `"extensions"` and `"apps"` API groups: ```yaml rules: - apiGroups: ["extensions", "apps"] + # + # at the HTTP level, the name of the resource for accessing Deployment + # objects is "deployments" resources: ["deployments"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] ``` -Allow reading "pods" and reading/writing "jobs": +Allow reading Pods in the core API group, as well as reading or writing Job +resources in the `"batch"` or `"extensions"` API groups: ```yaml rules: - apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing Pod + # objects is "pods" resources: ["pods"] verbs: ["get", "list", "watch"] - apiGroups: ["batch", "extensions"] + # + # at the HTTP level, the name of the resource for accessing Job + # objects is "jobs" resources: ["jobs"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] ``` -Allow reading a `ConfigMap` named "my-config" (must be bound with a `RoleBinding` to limit to a single `ConfigMap` in a single namespace): +Allow reading a ConfigMap named "my-config" (must be bound with a +RoleBinding to limit to a single ConfigMap in a single namespace): ```yaml rules: - apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing ConfigMap + # objects is "configmaps" resources: ["configmaps"] resourceNames: ["my-config"] verbs: ["get"] ``` -Allow reading the resource "nodes" in the core group (because a `Node` is cluster-scoped, this must be in a `ClusterRole` bound with a `ClusterRoleBinding` to be effective): +Allow reading the resource `"nodes"` in the core group (because a +Node is cluster-scoped, this must be in a ClusterRole bound with a +ClusterRoleBinding to be effective): ```yaml rules: - apiGroups: [""] + # + # at the HTTP level, the name of the resource for accessing Node + # objects is "nodes" resources: ["nodes"] verbs: 
["get", "list", "watch"] ``` -Allow "GET" and "POST" requests to the non-resource endpoint "/healthz" and all subpaths (must be in a `ClusterRole` bound with a `ClusterRoleBinding` to be effective): +Allow GET and POST requests to the non-resource endpoint `/healthz` and +all subpaths (must be in a ClusterRole bound with a ClusterRoleBinding +to be effective): ```yaml rules: @@ -344,32 +450,44 @@ rules: verbs: ["get", "post"] ``` -### Referring to Subjects +### Referring to subjects + +A RoleBinding or ClusterRoleBinding binds a role to subjects. +Subjects can be groups, users or +{{< glossary_tooltip text="ServiceAccounts" term_id="service-account" >}}. + +Kubernetes represents usernames as strings. +These can be: plain names, such as "alice"; email-style names, like "bob@example.com"; +or numeric user IDs represented as a string. It is up to you as a cluster administrator +to configure the [authentication modules](/docs/reference/access-authn-authz/authentication/) +so that authentication produces usernames in the format you want. -A `RoleBinding` or `ClusterRoleBinding` binds a role to *subjects*. -Subjects can be groups, users or service accounts. +{{< caution >}} +The prefix `system:` is reserved for Kubernetes system use, so you should ensure +that you don't have users or groups with names that start with `system:` by +accident. +Other than this special prefix, the RBAC authorization system does not require any format +for usernames. +{{< /caution >}} -Users are represented by strings. These can be plain usernames, like -"alice", email-style names, like "bob@example.com", or numeric IDs -represented as a string. It is up to the Kubernetes admin to configure -the [authentication modules](/docs/reference/access-authn-authz/authentication/) to produce -usernames in the desired format. The RBAC authorization system does -not require any particular format. However, the prefix `system:` is -reserved for Kubernetes system use, and so the admin should ensure -usernames do not contain this prefix by accident. +In Kubernetes, Authenticator modules provide group information. +Groups, like users, are represented as strings, and that string has no format requirements, +other than that the prefix `system:` is reserved. -Group information in Kubernetes is currently provided by the Authenticator -modules. Groups, like users, are represented as strings, and that string -has no format requirements, other than that the prefix `system:` is reserved. +[ServiceAccounts](/docs/tasks/configure-pod-container/configure-service-account/) have names prefixed +with `system:serviceaccount:`, and belong to groups that have names prefixed with `system:serviceaccounts:`. -[Service Accounts](/docs/tasks/configure-pod-container/configure-service-account/) have usernames with the `system:serviceaccount:` prefix and belong -to groups with the `system:serviceaccounts:` prefix. +{{< note >}} +- `system:serviceaccount:` (singular) is the prefix for service account usernames. +- `system:serviceaccounts:` (plural) is the prefix for service account groups. +{{< /note >}} -#### Role Binding Examples +#### RoleBinding examples {#role-binding-examples} -Only the `subjects` section of a `RoleBinding` is shown in the following examples. +The following examples are `RoleBinding` excerpts that only +show the `subjects` section. 
-For a user named "alice@example.com": +For a user named `alice@example.com`: ```yaml subjects: @@ -378,7 +496,7 @@ subjects: apiGroup: rbac.authorization.k8s.io ``` -For a group named "frontend-admins": +For a group named `frontend-admins`: ```yaml subjects: @@ -387,7 +505,7 @@ subjects: apiGroup: rbac.authorization.k8s.io ``` -For the default service account in the kube-system namespace: +For the default service account in the "kube-system" namespace: ```yaml subjects: @@ -405,7 +523,7 @@ subjects: apiGroup: rbac.authorization.k8s.io ``` -For all service accounts everywhere: +For all service accounts in any namespace: ```yaml subjects: @@ -414,7 +532,7 @@ subjects: apiGroup: rbac.authorization.k8s.io ``` -For all authenticated users (version 1.5+): +For all authenticated users: ```yaml subjects: @@ -423,7 +541,7 @@ subjects: apiGroup: rbac.authorization.k8s.io ``` -For all unauthenticated users (version 1.5+): +For all unauthenticated users: ```yaml subjects: @@ -432,7 +550,7 @@ subjects: apiGroup: rbac.authorization.k8s.io ``` -For all users (version 1.5+): +For all users: ```yaml subjects: @@ -444,42 +562,51 @@ subjects: apiGroup: rbac.authorization.k8s.io ``` -## Default Roles and Role Bindings +## Default roles and role bindings -API servers create a set of default `ClusterRole` and `ClusterRoleBinding` objects. -Many of these are `system:` prefixed, which indicates that the resource is "owned" by the infrastructure. -Modifications to these resources can result in non-functional clusters. One example is the `system:node` ClusterRole. -This role defines permissions for kubelets. If the role is modified, it can prevent kubelets from working. +API servers create a set of default ClusterRole and ClusterRoleBinding objects. +Many of these are `system:` prefixed, which indicates that the resource is directly +managed by the cluster control plane. +All of the default ClusterRoles and ClusterRoleBindings are labeled with `kubernetes.io/bootstrapping=rbac-defaults`. -All of the default cluster roles and rolebindings are labeled with `kubernetes.io/bootstrapping=rbac-defaults`. +{{< caution >}} +Take care when modifying ClusterRoles and ClusterRoleBindings with names +that have a `system:` prefix. +Modifications to these resources can result in non-functional clusters. +{{< /caution >}} ### Auto-reconciliation At each start-up, the API server updates default cluster roles with any missing permissions, and updates default cluster role bindings with any missing subjects. -This allows the cluster to repair accidental modifications, -and to keep roles and rolebindings up-to-date as permissions and subjects change in new releases. +This allows the cluster to repair accidental modifications, and helps to keep roles and role bindings +up-to-date as permissions and subjects change in new Kubernetes releases. -To opt out of this reconciliation, set the `rbac.authorization.kubernetes.io/autoupdate` +To opt out of this reconciliation, set the `rbac.authorization.kubernetes.io/autoupdate` annotation on a default cluster role or rolebinding to `false`. Be aware that missing default permissions and subjects can result in non-functional clusters. -Auto-reconciliation is enabled in Kubernetes version 1.6+ when the RBAC authorizer is active. +Auto-reconciliation is enabled by default if the RBAC authorizer is active. 
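For example, to exclude one of the default ClusterRoles or ClusterRoleBindings from auto-reconciliation, you can set that annotation on the object itself. The snippet below is only a sketch of the relevant `metadata` fields; the rest of the object stays exactly as the API server created it:

```yaml
metadata:
  labels:
    # all default RBAC objects carry this label
    kubernetes.io/bootstrapping: rbac-defaults
  annotations:
    # opt this object out of auto-reconciliation at API server start-up
    rbac.authorization.kubernetes.io/autoupdate: "false"
```

You can make a change like this with `kubectl edit` or `kubectl annotate`, but keep in mind the warning above: missing default permissions and subjects can result in non-functional clusters.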
-### Discovery Roles +### API discovery roles {#discovery-roles} -Default role bindings authorize unauthenticated and authenticated users to read API information that is deemed safe to be publicly accessible (including CustomResourceDefinitions). To disable anonymous unauthenticated access add `--anonymous-auth=false` to the API server configuration. +Default role bindings authorize unauthenticated and authenticated users to read API information that is deemed safe to be publicly accessible (including CustomResourceDefinitions). To disable anonymous unauthenticated access, add `--anonymous-auth=false` to the API server configuration. To view the configuration of these roles via `kubectl` run: -``` +```shell kubectl get clusterroles system:discovery -o yaml ``` -NOTE: editing the role is not recommended as changes will be overwritten on API server restart via auto-reconciliation (see above). +{{< note >}} +If you edit that ClusterRole, your changes will be overwritten on API server restart +via [auto-reconciliation](#auto-reconciliation). To avoid that overwriting, +either do not manually edit the role, or disable auto-reconciliation. +{{< /note >}} - + + @@ -488,30 +615,30 @@ NOTE: editing the role is not recommended as changes will be overwritten on API - + - + - +
Kubernetes RBAC API discovery roles
Default ClusterRole Default ClusterRoleBinding
system:basic-user system:authenticated groupAllows a user read-only access to basic information about themselves. Prior to 1.14, this role was also bound to `system:unauthenticated` by default.Allows a user read-only access to basic information about themselves. Prior to v1.14, this role was also bound to system:unauthenticated by default.
system:discovery system:authenticated groupAllows read-only access to API discovery endpoints needed to discover and negotiate an API level. Prior to 1.14, this role was also bound to `system:unauthenticated` by default.Allows read-only access to API discovery endpoints needed to discover and negotiate an API level. Prior to v1.14, this role was also bound to system:unauthenticated by default.
system:public-info-viewer system:authenticated and system:unauthenticated groupsAllows read-only access to non-sensitive information about the cluster. Introduced in 1.14.Allows read-only access to non-sensitive information about the cluster. Introduced in Kubernetes v1.14.
-### User-facing Roles +### User-facing roles -Some of the default roles are not `system:` prefixed. These are intended to be user-facing roles. -They include super-user roles (`cluster-admin`), -roles intended to be granted cluster-wide using ClusterRoleBindings (`cluster-status`), -and roles intended to be granted within particular namespaces using RoleBindings (`admin`, `edit`, `view`). +Some of the default ClusterRoles are not `system:` prefixed. These are intended to be user-facing roles. +They include super-user roles (`cluster-admin`), roles intended to be granted cluster-wide +using ClusterRoleBindings, and roles intended to be granted within particular +namespaces using RoleBindings (`admin`, `edit`, `view`). -As of 1.9, user-facing roles use [ClusterRole Aggregation](#aggregated-clusterroles) to allow admins to include -rules for custom resources on these roles. To add rules to the "admin", "edit", or "view" role, create a -ClusterRole with one or more of the following labels: +User-facing ClusterRoles use [ClusterRole aggregation](#aggregated-clusterroles) to allow admins to include +rules for custom resources on these ClusterRoles. To add rules to the `admin`, `edit`, or `view` roles, create +a ClusterRole with one or more of the following labels: ```yaml metadata: @@ -533,32 +660,40 @@ metadata: system:masters group Allows super-user access to perform any action on any resource. When used in a ClusterRoleBinding, it gives full control over every resource in the cluster and in all namespaces. -When used in a RoleBinding, it gives full control over every resource in the rolebinding's namespace, including the namespace itself. +When used in a RoleBinding, it gives full control over every resource in the role binding's namespace, including the namespace itself. admin None Allows admin access, intended to be granted within a namespace using a RoleBinding. If used in a RoleBinding, allows read/write access to most resources in a namespace, -including the ability to create roles and rolebindings within the namespace. -It does not allow write access to resource quota or to the namespace itself. +including the ability to create roles and role bindings within the namespace. +This role does not allow write access to resource quota or to the namespace itself. edit None Allows read/write access to most objects in a namespace. -It does not allow viewing or modifying roles or rolebindings. + +This role does not allow viewing or modifying roles or role bindings. +However, this role allows accessing Secrets and running Pods as any ServiceAccount in +the namespace, so it can be used to gain the API access levels of any ServiceAccount in +the namespace. view None Allows read-only access to see most objects in a namespace. -It does not allow viewing roles or rolebindings. -It does not allow viewing secrets, since those are escalating. +It does not allow viewing roles or role bindings. + +This role does not allow viewing Secrets, since reading +the contents of Secrets enables access to ServiceAccount credentials +in the namespace, which would allow API access as any ServiceAccount +in the namespace (a form of privilege escalation). -### Core Component Roles +### Core component roles @@ -570,7 +705,7 @@ It does not allow viewing secrets, since those are escalating. - + @@ -580,28 +715,27 @@ It does not allow viewing secrets, since those are escalating. - + - - + - +
system:kube-scheduler system:kube-scheduler userAllows access to the resources required by the kube-scheduler component.Allows access to the resources required by the {{< glossary_tooltip term_id="kube-scheduler" text="scheduler" >}} component.
system:volume-scheduler
system:kube-controller-manager system:kube-controller-manager userAllows access to the resources required by the kube-controller-manager component. -The permissions required by individual control loops are contained in the controller roles.Allows access to the resources required by the {{< glossary_tooltip term_id="kube-controller-manager" text="controller manager" >}} component. +The permissions required by individual controllers are detailed in the controller roles.
system:nodeNone in 1.8+Allows access to resources required by the kubelet component, including read access to all secrets, and write access to all pod status objects. +NoneAllows access to resources required by the kubelet, including read access to all secrets, and write access to all pod status objects. + +You should use the Node authorizer and NodeRestriction admission plugin instead of the system:node role, and allow granting API access to kubelets based on the Pods scheduled to run on them. -As of 1.7, use of the Node authorizer and NodeRestriction admission plugin is recommended instead of this role, and allow granting API access to kubelets based on the pods scheduled to run on them. -Prior to 1.7, this role was automatically bound to the `system:nodes` group. -In 1.7, this role was automatically bound to the `system:nodes` group if the `Node` authorization mode is not enabled. -In 1.8+, no binding is automatically created. +The system:node role only exists for compatibility with Kubernetes clusters upgraded from versions prior to v1.8.
system:node-proxier system:kube-proxy userAllows access to the resources required by the kube-proxy component.Allows access to the resources required by the {{< glossary_tooltip term_id="kube-proxy" text="kube-proxy" >}} component.
-### Other Component Roles +### Other component roles @@ -619,7 +753,7 @@ This is commonly used by add-on API servers for unified authentication and autho - + @@ -635,12 +769,12 @@ This is commonly used by add-on API servers for unified authentication and autho - + +kubelet TLS bootstrapping. @@ -654,73 +788,80 @@ This is commonly used by add-on API servers for unified authentication and autho
system:heapster NoneRole for the Heapster component.Role for the Heapster component (deprecated).
system:kube-aggregatorsystem:kubelet-api-admin None Allows full access to the kubelet API.
system:node-bootstrapper None Allows access to the resources required to perform -Kubelet TLS bootstrapping.
system:node-problem-detector
-### Controller Roles +### Roles for built-in controllers {#controller-roles} -The [Kubernetes controller manager](/docs/admin/kube-controller-manager/) runs core control loops. -When invoked with `--use-service-account-credentials`, each control loop is started using a separate service account. -Corresponding roles exist for each control loop, prefixed with `system:controller:`. -If the controller manager is not started with `--use-service-account-credentials`, -it runs all control loops using its own credential, which must be granted all the relevant roles. +The Kubernetes {{< glossary_tooltip term_id="kube-controller-manager" text="controller manager" >}} runs +{{< glossary_tooltip term_id="controller" text="controllers" >}} that are built in to the Kubernetes +control plane. +When invoked with `--use-service-account-credentials`, kube-controller-manager starts each controller +using a separate service account. +Corresponding roles exist for each built-in controller, prefixed with `system:controller:`. +If the controller manager is not started with `--use-service-account-credentials`, it runs all control loops +using its own credential, which must be granted all the relevant roles. These roles include: -* system:controller:attachdetach-controller -* system:controller:certificate-controller -* system:controller:clusterrole-aggregation-controller -* system:controller:cronjob-controller -* system:controller:daemon-set-controller -* system:controller:deployment-controller -* system:controller:disruption-controller -* system:controller:endpoint-controller -* system:controller:expand-controller -* system:controller:generic-garbage-collector -* system:controller:horizontal-pod-autoscaler -* system:controller:job-controller -* system:controller:namespace-controller -* system:controller:node-controller -* system:controller:persistent-volume-binder -* system:controller:pod-garbage-collector -* system:controller:pv-protection-controller -* system:controller:pvc-protection-controller -* system:controller:replicaset-controller -* system:controller:replication-controller -* system:controller:resourcequota-controller -* system:controller:root-ca-cert-publisher -* system:controller:route-controller -* system:controller:service-account-controller -* system:controller:service-controller -* system:controller:statefulset-controller -* system:controller:ttl-controller - -## Privilege Escalation Prevention and Bootstrapping +* `system:controller:attachdetach-controller` +* `system:controller:certificate-controller` +* `system:controller:clusterrole-aggregation-controller` +* `system:controller:cronjob-controller` +* `system:controller:daemon-set-controller` +* `system:controller:deployment-controller` +* `system:controller:disruption-controller` +* `system:controller:endpoint-controller` +* `system:controller:expand-controller` +* `system:controller:generic-garbage-collector` +* `system:controller:horizontal-pod-autoscaler` +* `system:controller:job-controller` +* `system:controller:namespace-controller` +* `system:controller:node-controller` +* `system:controller:persistent-volume-binder` +* `system:controller:pod-garbage-collector` +* `system:controller:pv-protection-controller` +* `system:controller:pvc-protection-controller` +* `system:controller:replicaset-controller` +* `system:controller:replication-controller` +* `system:controller:resourcequota-controller` +* `system:controller:root-ca-cert-publisher` +* `system:controller:route-controller` +* `system:controller:service-account-controller` +* 
`system:controller:service-controller` +* `system:controller:statefulset-controller` +* `system:controller:ttl-controller` + +## Privilege escalation prevention and bootstrapping The RBAC API prevents users from escalating privileges by editing roles or role bindings. Because this is enforced at the API level, it applies even when the RBAC authorizer is not in use. -A user can only create/update a role if at least one of the following things is true: +### Restrictions on role creation or update + +You can only create/update a role if at least one of the following things is true: -1. They already have all the permissions contained in the role, at the same scope as the object being modified -(cluster-wide for a `ClusterRole`, within the same namespace or cluster-wide for a `Role`) -2. They are given explicit permission to perform the `escalate` verb on the `roles` or `clusterroles` resource in the `rbac.authorization.k8s.io` API group (Kubernetes 1.12 and newer) +1. You already have all the permissions contained in the role, at the same scope as the object being modified +(cluster-wide for a ClusterRole, within the same namespace or cluster-wide for a Role). +2. You are granted explicit permission to perform the `escalate` verb on the `roles` or `clusterroles` resource in the `rbac.authorization.k8s.io` API group. -For example, if "user-1" does not have the ability to list secrets cluster-wide, they cannot create a `ClusterRole` +For example, if `user-1` does not have the ability to list Secrets cluster-wide, they cannot create a ClusterRole containing that permission. To allow a user to create/update roles: -1. Grant them a role that allows them to create/update `Role` or `ClusterRole` objects, as desired. -2. Grant them permission to include specific permissions in the roles the create/update: - * implicitly, by giving them those permissions (if they attempt to create or modify a `Role` or `ClusterRole` with permissions they themselves have not been granted, the API request will be forbidden) - * or explicitly allow specifying any permission in a `Role` or `ClusterRole` by giving them permission to perform the `escalate` verb on `roles` or `clusterroles` resources in the `rbac.authorization.k8s.io` API group (Kubernetes 1.12 and newer) +1. Grant them a role that allows them to create/update Role or ClusterRole objects, as desired. +2. Grant them permission to include specific permissions in the roles they create/update: + * implicitly, by giving them those permissions (if they attempt to create or modify a Role or ClusterRole with permissions they themselves have not been granted, the API request will be forbidden) + * or explicitly allow specifying any permission in a `Role` or `ClusterRole` by giving them permission to perform the `escalate` verb on `roles` or `clusterroles` resources in the `rbac.authorization.k8s.io` API group + +### Restrictions on role binding creation or update -A user can only create/update a role binding if they already have all the permissions contained in the referenced role -(at the same scope as the role binding) *or* if they've been given explicit permission to perform the `bind` verb on the referenced role. 
-For example, if "user-1" does not have the ability to list secrets cluster-wide, they cannot create a `ClusterRoleBinding` +You can only create/update a role binding if you already have all the permissions contained in the referenced role +(at the same scope as the role binding) *or* if you have been authorized to perform the `bind` verb on the referenced role. +For example, if `user-1` does not have the ability to list Secrets cluster-wide, they cannot create a ClusterRoleBinding to a role that grants that permission. To allow a user to create/update role bindings: -1. Grant them a role that allows them to create/update `RoleBinding` or `ClusterRoleBinding` objects, as desired. +1. Grant them a role that allows them to create/update RoleBinding or ClusterRoleBinding objects, as desired. 2. Grant them permissions needed to bind a particular role: * implicitly, by giving them the permissions contained in the role. - * explicitly, by giving them permission to perform the `bind` verb on the particular role (or cluster role). + * explicitly, by giving them permission to perform the `bind` verb on the particular Role (or ClusterRole). -For example, this cluster role and role binding would allow "user-1" to grant other users the `admin`, `edit`, and `view` roles in the "user-1-namespace" namespace: +For example, this ClusterRole and RoleBinding would allow `user-1` to grant other users the `admin`, `edit`, and `view` roles in the namespace `user-1-namespace`: ```yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -754,126 +895,126 @@ subjects: When bootstrapping the first roles and role bindings, it is necessary for the initial user to grant permissions they do not yet have. To bootstrap initial roles and role bindings: -* Use a credential with the `system:masters` group, which is bound to the `cluster-admin` super-user role by the default bindings. +* Use a credential with the "system:masters" group, which is bound to the "cluster-admin" super-user role by the default bindings. * If your API server runs with the insecure port enabled (`--insecure-port`), you can also make API calls via that port, which does not enforce authentication or authorization. -## Command-line Utilities +## Command-line utilities ### `kubectl create role` -Creates a `Role` object defining permissions within a single namespace. Examples: +Creates a Role object defining permissions within a single namespace. 
Examples: -* Create a `Role` named "pod-reader" that allows user to perform "get", "watch" and "list" on pods: +* Create a Role named "pod-reader" that allows users to perform `get`, `watch` and `list` on pods: - ``` + ```shell kubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods ``` -* Create a `Role` named "pod-reader" with resourceNames specified: +* Create a Role named "pod-reader" with resourceNames specified: - ``` + ```shell kubectl create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod ``` -* Create a `Role` named "foo" with apiGroups specified: +* Create a Role named "foo" with apiGroups specified: - ``` + ```shell kubectl create role foo --verb=get,list,watch --resource=replicasets.apps ``` -* Create a `Role` named "foo" with subresource permissions: +* Create a Role named "foo" with subresource permissions: - ``` + ```shell kubectl create role foo --verb=get,list,watch --resource=pods,pods/status ``` -* Create a `Role` named "my-component-lease-holder" with permissions to get/update a resource with a specific name: +* Create a Role named "my-component-lease-holder" with permissions to get/update a resource with a specific name: - ``` + ```shell kubectl create role my-component-lease-holder --verb=get,list,watch,update --resource=lease --resource-name=my-component ``` ### `kubectl create clusterrole` -Creates a `ClusterRole` object. Examples: +Creates a ClusterRole. Examples: -* Create a `ClusterRole` named "pod-reader" that allows user to perform "get", "watch" and "list" on pods: +* Create a ClusterRole named "pod-reader" that allows user to perform `get`, `watch` and `list` on pods: - ``` + ```shell kubectl create clusterrole pod-reader --verb=get,list,watch --resource=pods ``` -* Create a `ClusterRole` named "pod-reader" with resourceNames specified: +* Create a ClusterRole named "pod-reader" with resourceNames specified: - ``` + ```shell kubectl create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod ``` -* Create a `ClusterRole` named "foo" with apiGroups specified: +* Create a ClusterRole named "foo" with apiGroups specified: - ``` + ```shell kubectl create clusterrole foo --verb=get,list,watch --resource=replicasets.apps ``` -* Create a `ClusterRole` named "foo" with subresource permissions: +* Create a ClusterRole named "foo" with subresource permissions: - ``` + ```shell kubectl create clusterrole foo --verb=get,list,watch --resource=pods,pods/status ``` -* Create a `ClusterRole` name "foo" with nonResourceURL specified: +* Create a ClusterRole named "foo" with nonResourceURL specified: - ``` + ```shell kubectl create clusterrole "foo" --verb=get --non-resource-url=/logs/* ``` -* Create a `ClusterRole` name "monitoring" with aggregationRule specified: +* Create a ClusterRole named "monitoring" with an aggregationRule specified: - ``` + ```shell kubectl create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true" ``` ### `kubectl create rolebinding` -Grants a `Role` or `ClusterRole` within a specific namespace. Examples: +Grants a Role or ClusterRole within a specific namespace. 
Examples: -* Within the namespace "acme", grant the permissions in the `admin` `ClusterRole` to a user named "bob": +* Within the namespace "acme", grant the permissions in the "admin" ClusterRole to a user named "bob": - ``` + ```shell kubectl create rolebinding bob-admin-binding --clusterrole=admin --user=bob --namespace=acme ``` -* Within the namespace "acme", grant the permissions in the `view` `ClusterRole` to the service account in the namespace "acme" named "myapp" : +* Within the namespace "acme", grant the permissions in the "view" ClusterRole to the service account in the namespace "acme" named "myapp": - ``` + ```shell kubectl create rolebinding myapp-view-binding --clusterrole=view --serviceaccount=acme:myapp --namespace=acme ``` -* Within the namespace "acme", grant the permissions in the `view` `ClusterRole` to a service account in the namespace "myappnamespace" named "myapp": +* Within the namespace "acme", grant the permissions in the "view" ClusterRole to a service account in the namespace "myappnamespace" named "myapp": - ``` + ```shell kubectl create rolebinding myappnamespace-myapp-view-binding --clusterrole=view --serviceaccount=myappnamespace:myapp --namespace=acme ``` ### `kubectl create clusterrolebinding` -Grants a `ClusterRole` across the entire cluster, including all namespaces. Examples: +Grants a ClusterRole across the entire cluster (all namespaces). Examples: -* Across the entire cluster, grant the permissions in the `cluster-admin` `ClusterRole` to a user named "root": +* Across the entire cluster, grant the permissions in the "cluster-admin" ClusterRole to a user named "root": - ``` + ```shell kubectl create clusterrolebinding root-cluster-admin-binding --clusterrole=cluster-admin --user=root ``` -* Across the entire cluster, grant the permissions in the `system:node-proxier ` `ClusterRole` to a user named "system:kube-proxy": +* Across the entire cluster, grant the permissions in the "system:node-proxier" ClusterRole to a user named "system:kube-proxy": - ``` + ```shell kubectl create clusterrolebinding kube-proxy-binding --clusterrole=system:node-proxier --user=system:kube-proxy ``` -* Across the entire cluster, grant the permissions in the `view` `ClusterRole` to a service account named "myapp" in the namespace "acme": +* Across the entire cluster, grant the permissions in the "view" ClusterRole to a service account named "myapp" in the namespace "acme": - ``` + ```shell kubectl create clusterrolebinding myapp-view-binding --clusterrole=view --serviceaccount=acme:myapp ``` @@ -894,32 +1035,31 @@ Examples: * Test applying a manifest file of RBAC objects, displaying changes that would be made: ``` - kubectl auth reconcile -f my-rbac-rules.yaml --dry-run + kubectl auth reconcile -f my-rbac-rules.yaml --dry-run=client ``` * Apply a manifest file of RBAC objects, preserving any extra permissions (in roles) and any extra subjects (in bindings): - ``` + ```shell kubectl auth reconcile -f my-rbac-rules.yaml ``` * Apply a manifest file of RBAC objects, removing any extra permissions (in roles) and any extra subjects (in bindings): - ``` + ```shell kubectl auth reconcile -f my-rbac-rules.yaml --remove-extra-subjects --remove-extra-permissions ``` -See the CLI help for detailed usage. 
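The manifest file you pass to `kubectl auth reconcile` is an ordinary multi-document YAML file of RBAC objects. As a minimal sketch of what a file such as `my-rbac-rules.yaml` might contain (the object names here simply reuse the "pod-reader" Role and "read-pods" RoleBinding from the examples earlier on this page):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: default
  name: pod-reader
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  namespace: default
  name: read-pods
subjects:
- kind: User
  name: jane # "name" is case sensitive
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
```

Reconciling a file like this creates any missing objects and adds missing permissions and subjects; with the `--remove-extra-permissions` and `--remove-extra-subjects` flags shown above, it also prunes anything not listed in the file.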
- -## Service Account Permissions +## ServiceAccount permissions {#service-account-permissions} Default RBAC policies grant scoped permissions to control-plane components, nodes, and controllers, but grant *no permissions* to service accounts outside the `kube-system` namespace (beyond discovery permissions given to all authenticated users). -This allows you to grant particular roles to particular service accounts as needed. +This allows you to grant particular roles to particular ServiceAccounts as needed. Fine-grained role bindings provide greater security, but require more effort to administrate. -Broader grants can give unnecessary (and potentially escalating) API access to service accounts, but are easier to administrate. +Broader grants can give unnecessary (and potentially escalating) API access to +ServiceAccounts, but are easier to administrate. In order from most secure to least secure, the approaches are: @@ -941,9 +1081,10 @@ In order from most secure to least secure, the approaches are: If an application does not specify a `serviceAccountName`, it uses the "default" service account. - {{< note >}}Permissions given to the "default" service - account are available to any pod in the namespace that does not - specify a `serviceAccountName`.{{< /note >}} + {{< note >}} + Permissions given to the "default" service account are available to any pod + in the namespace that does not specify a `serviceAccountName`. + {{< /note >}} For example, grant read-only permission within "my-namespace" to the "default" service account: @@ -954,12 +1095,15 @@ In order from most secure to least secure, the approaches are: --namespace=my-namespace ``` - Many [add-ons](/docs/concepts/cluster-administration/addons/) currently run as the "default" service account in the `kube-system` namespace. - To allow those add-ons to run with super-user access, grant cluster-admin permissions to the "default" service account in the `kube-system` namespace. + Many [add-ons](/docs/concepts/cluster-administration/addons/) run as the + "default" service account in the `kube-system` namespace. + To allow those add-ons to run with super-user access, grant cluster-admin + permissions to the "default" service account in the `kube-system` namespace. - {{< note >}}Enabling this means the `kube-system` - namespace contains secrets that grant super-user access to the - API.{{< /note >}} + {{< caution >}} + Enabling this means the `kube-system` namespace contains Secrets + that grant super-user access to your cluster's API. + {{< /caution >}} ```shell kubectl create clusterrolebinding add-on-cluster-admin \ @@ -998,9 +1142,9 @@ In order from most secure to least secure, the approaches are: If you don't care about partitioning permissions at all, you can grant super-user access to all service accounts. {{< warning >}} - This allows any user with read access - to secrets or the ability to create a pod to access super-user - credentials. + This allows any application full access to your cluster, and also grants + any user with read access to Secrets (or the ability to create any pod) + full access to your cluster. {{< /warning >}} ```shell @@ -1009,10 +1153,11 @@ In order from most secure to least secure, the approaches are: --group=system:serviceaccounts ``` -## Upgrading from 1.5 +## Upgrading from ABAC -Prior to Kubernetes 1.6, many deployments used very permissive ABAC policies, -including granting full API access to all service accounts. 
+Clusters that originally ran older Kubernetes versions often used
+permissive ABAC policies, including granting full API access to all
+service accounts.

Default RBAC policies grant scoped permissions to control-plane components, nodes,
and controllers, but grant *no permissions* to service accounts outside the `kube-system` namespace
@@ -1021,28 +1166,31 @@ and controllers, but grant *no permissions* to service accounts outside the `kub
While far more secure, this can be disruptive to existing workloads expecting to automatically receive API permissions. Here are two approaches for managing this transition:

-### Parallel Authorizers
+### Parallel authorizers

Run both the RBAC and ABAC authorizers, and specify a policy file that contains
-[the legacy ABAC policy](/docs/reference/access-authn-authz/abac/#policy-file-format):
+the [legacy ABAC policy](/docs/reference/access-authn-authz/abac/#policy-file-format):

```
---authorization-mode=RBAC,ABAC --authorization-policy-file=mypolicy.json
+--authorization-mode=...,RBAC,ABAC --authorization-policy-file=mypolicy.json
```

-The RBAC authorizer will attempt to authorize requests first. If it denies an API request,
-the ABAC authorizer is then run. This means that any request allowed by *either* the RBAC
-or ABAC policies is allowed.
+To explain that first command line option in detail: if earlier authorizers, such as Node,
+deny a request, then the RBAC authorizer attempts to authorize the API request. If RBAC
+also denies that API request, the ABAC authorizer is then run. This means that any request
+allowed by *either* the RBAC or ABAC policies is allowed.

-When the apiserver is run with a log level of 5 or higher for the RBAC component (`--vmodule=rbac*=5` or `--v=5`),
-you can see RBAC denials in the apiserver log (prefixed with `RBAC DENY:`).
+When the kube-apiserver is run with a log level of 5 or higher for the RBAC component
+(`--vmodule=rbac*=5` or `--v=5`), you can see RBAC denials in the API server log
+(prefixed with `RBAC DENY:`).
You can use that information to determine which roles need to be granted to which users, groups, or service accounts.

-Once you have [granted roles to service accounts](#service-account-permissions) and workloads are running with no RBAC denial messages
-in the server logs, you can remove the ABAC authorizer.
-## Permissive RBAC Permissions
+Once you have [granted roles to service accounts](#service-account-permissions) and workloads
+are running with no RBAC denial messages in the server logs, you can remove the ABAC authorizer.
+
+### Permissive RBAC permissions

-You can replicate a permissive policy using RBAC role bindings.
+You can replicate a permissive ABAC policy using RBAC role bindings.

{{< warning >}}
The following policy allows **ALL** service accounts to act as cluster administrators.
@@ -1050,7 +1198,7 @@ Any application running in a container receives service account credentials auto
and could perform any action against the API, including viewing secrets and modifying
permissions. This is not a recommended policy.

-```
+```shell
kubectl create clusterrolebinding permissive-binding \
  --clusterrole=cluster-admin \
  --user=admin \
@@ -1059,4 +1207,7 @@ kubectl create clusterrolebinding permissive-binding \
```
{{< /warning >}}

+After you have transitioned to use RBAC, you should adjust the access controls
+for your cluster to ensure that these meet your information security needs.
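For example, rather than a cluster-wide grant like the `permissive-binding` above, you might give each namespace's service accounts only read access by binding the default `view` ClusterRole to that namespace's `system:serviceaccounts:<namespace>` group. A sketch, using a placeholder namespace called `my-namespace` (the binding name is also illustrative):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: serviceaccounts-view  # illustrative name
  namespace: my-namespace     # placeholder namespace
subjects:
# every ServiceAccount in "my-namespace" belongs to this group automatically
- kind: Group
  name: system:serviceaccounts:my-namespace
  apiGroup: rbac.authorization.k8s.io
roleRef:
  # the default user-facing "view" ClusterRole described earlier
  kind: ClusterRole
  name: view
  apiGroup: rbac.authorization.k8s.io
```

Once each workload has the narrower access it actually needs, you can delete the permissive ClusterRoleBinding.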
+ {{% /capture %}} diff --git a/content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md index ccc5f775a1872..17d1bee09cfb0 100644 --- a/content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md @@ -50,7 +50,7 @@ cloud-controller-manager [flags] --authentication-kubeconfig string - kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenaccessreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster. + kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster. @@ -113,7 +113,7 @@ cloud-controller-manager [flags] --bind-address ip     Default: 0.0.0.0 - The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces). + The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. @@ -225,7 +225,7 @@ cloud-controller-manager [flags] --feature-gates mapStringBool - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (ALPHA - default=false)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumePVCDataSource=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (BETA - default=true)
WindowsRunAsUserName=true|false (BETA - default=true) + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultIngressClass=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceProxying=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
SelectorIndex=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (ALPHA - default=false)
ServiceAppProtocol=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (BETA - default=true)
StorageVersionHash=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false) @@ -390,7 +390,7 @@ cloud-controller-manager [flags] - --profiling + --profiling     Default: true Enable profiling via web interface host:port/debug/pprof/ @@ -442,7 +442,7 @@ cloud-controller-manager [flags] --secure-port int     Default: 10258 - The port on which to serve HTTPS with authentication and authorization.If 0, don't serve HTTPS at all. + The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. @@ -498,7 +498,7 @@ cloud-controller-manager [flags] --tls-sni-cert-key namedCertKey     Default: [] - A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". + A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index b0b24ba517857..64ab4d45bfdcd 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -48,23 +48,18 @@ different Kubernetes components. | Feature | Default | Stage | Since | Until | |---------|---------|-------|-------|-------| +| `AnyVolumeDataSource` | `false` | Alpha | 1.18 | | | `APIListChunking` | `false` | Alpha | 1.8 | 1.8 | | `APIListChunking` | `true` | Beta | 1.9 | | | `APIPriorityAndFairness` | `false` | Alpha | 1.17 | | | `APIResponseCompression` | `false` | Alpha | 1.7 | | | `AppArmor` | `true` | Beta | 1.4 | | | `BalanceAttachedNodeVolumes` | `false` | Alpha | 1.11 | | -| `BlockVolume` | `false` | Alpha | 1.9 | 1.12 | -| `BlockVolume` | `true` | Beta | 1.13 | - | | `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | | | `CPUManager` | `false` | Alpha | 1.8 | 1.9 | | `CPUManager` | `true` | Beta | 1.10 | | | `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 | | `CRIContainerLogRotation` | `true` | Beta| 1.11 | | -| `CSIBlockVolume` | `false` | Alpha | 1.11 | 1.13 | -| `CSIBlockVolume` | `true` | Beta | 1.14 | | -| `CSIDriverRegistry` | `false` | Alpha | 1.12 | 1.13 | -| `CSIDriverRegistry` | `true` | Beta | 1.14 | | | `CSIInlineVolume` | `false` | Alpha | 1.15 | 1.15 | | `CSIInlineVolume` | `true` | Beta | 1.16 | - | | `CSIMigration` | `false` | Alpha | 1.14 | 1.16 | @@ -81,6 +76,7 @@ different Kubernetes components. 
| `CSIMigrationGCEComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationOpenStack` | `false` | Alpha | 1.14 | | | `CSIMigrationOpenStackComplete` | `false` | Alpha | 1.17 | | +| `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | | | `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | | | `CustomResourceDefaulting` | `false` | Alpha| 1.15 | 1.15 | | `CustomResourceDefaulting` | `true` | Beta | 1.16 | | @@ -93,6 +89,8 @@ different Kubernetes components. | `DynamicKubeletConfig` | `true` | Beta | 1.11 | | | `EndpointSlice` | `false` | Alpha | 1.16 | 1.16 | | `EndpointSlice` | `false` | Beta | 1.17 | | +| `EndpointSlice` | `true` | Beta | 1.18 | | +| `EndpointSliceProxying` | `false` | Alpha | 1.18 | | | `EphemeralContainers` | `false` | Alpha | 1.16 | | | `ExpandCSIVolumes` | `false` | Alpha | 1.14 | 1.15 | | `ExpandCSIVolumes` | `true` | Beta | 1.16 | | @@ -101,9 +99,12 @@ different Kubernetes components. | `ExpandPersistentVolumes` | `false` | Alpha | 1.8 | 1.10 | | `ExpandPersistentVolumes` | `true` | Beta | 1.11 | | | `ExperimentalHostUserNamespaceDefaulting` | `false` | Beta | 1.5 | | -| `EvenPodsSpread` | `false` | Alpha | 1.16 | | +| `EvenPodsSpread` | `false` | Alpha | 1.16 | 1.17 | +| `EvenPodsSpread` | `true` | Beta | 1.18 | | | `HPAScaleToZero` | `false` | Alpha | 1.16 | | +| `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | | | `HyperVContainer` | `false` | Alpha | 1.10 | | +| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | | | `KubeletPodResources` | `false` | Alpha | 1.13 | 1.14 | | `KubeletPodResources` | `true` | Beta | 1.15 | | | `LegacyNodeRoleBehavior` | `true` | Alpha | 1.16 | | @@ -125,9 +126,11 @@ different Kubernetes components. | `RuntimeClass` | `false` | Alpha | 1.12 | 1.13 | | `RuntimeClass` | `true` | Beta | 1.14 | | | `SCTPSupport` | `false` | Alpha | 1.12 | | +| `ServiceAppProtocol` | `false` | Alpha | 1.18 | | | `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | | `ServerSideApply` | `true` | Beta | 1.16 | | | `ServiceNodeExclusion` | `false` | Alpha | 1.8 | | +| `ServiceTopology` | `false` | Alpha | 1.17 | | | `StartupProbe` | `false` | Alpha | 1.16 | | | `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 | | `StorageVersionHash` | `true` | Beta | 1.15 | | @@ -138,8 +141,6 @@ different Kubernetes components. | `SupportPodPidsLimit` | `false` | Alpha | 1.10 | 1.13 | | `SupportPodPidsLimit` | `true` | Beta | 1.14 | | | `Sysctls` | `true` | Beta | 1.11 | | -| `TaintBasedEvictions` | `false` | Alpha | 1.6 | 1.12 | -| `TaintBasedEvictions` | `true` | Beta | 1.13 | | | `TokenRequest` | `false` | Alpha | 1.10 | 1.11 | | `TokenRequest` | `true` | Beta | 1.12 | | | `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 | @@ -148,8 +149,6 @@ different Kubernetes components. | `TopologyManager` | `false` | Alpha | 1.16 | | | `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 | | `ValidateProxyRedirects` | `true` | Beta | 1.14 | | -| `VolumePVCDataSource` | `false` | Alpha | 1.15 | 1.15 | -| `VolumePVCDataSource` | `true` | Beta | 1.16 | | | `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 | | `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | - | | `WindowsGMSA` | `false` | Alpha | 1.14 | | @@ -173,6 +172,15 @@ different Kubernetes components. 
| `AffinityInAnnotations` | - | Deprecated | 1.8 | - | | `AllowExtTrafficLocalEndpoints` | `false` | Beta | 1.4 | 1.6 | | `AllowExtTrafficLocalEndpoints` | `true` | GA | 1.7 | - | +| `BlockVolume` | `false` | Alpha | 1.9 | 1.12 | +| `BlockVolume` | `true` | Beta | 1.13 | 1.17 | +| `BlockVolume` | `true` | GA | 1.18 | - | +| `CSIBlockVolume` | `false` | Alpha | 1.11 | 1.13 | +| `CSIBlockVolume` | `true` | Beta | 1.14 | 1.17 | +| `CSIBlockVolume` | `true` | GA | 1.18 | - | +| `CSIDriverRegistry` | `false` | Alpha | 1.12 | 1.13 | +| `CSIDriverRegistry` | `true` | Beta | 1.14 | 1.17 | +| `CSIDriverRegistry` | `true` | GA | 1.18 | | | `CSINodeInfo` | `false` | Alpha | 1.12 | 1.13 | | `CSINodeInfo` | `true` | Beta | 1.14 | 1.16 | | `CSINodeInfo` | `true` | GA | 1.17 | | @@ -253,9 +261,15 @@ different Kubernetes components. | `SupportIPVSProxyMode` | `false` | Beta | 1.9 | 1.9 | | `SupportIPVSProxyMode` | `true` | Beta | 1.10 | 1.10 | | `SupportIPVSProxyMode` | `true` | GA | 1.11 | - | +| `TaintBasedEvictions` | `false` | Alpha | 1.6 | 1.12 | +| `TaintBasedEvictions` | `true` | Beta | 1.13 | 1.17 | +| `TaintBasedEvictions` | `true` | GA | 1.18 | - | | `TaintNodesByCondition` | `false` | Alpha | 1.8 | 1.11 | | `TaintNodesByCondition` | `true` | Beta | 1.12 | 1.16 | | `TaintNodesByCondition` | `true` | GA | 1.17 | - | +| `VolumePVCDataSource` | `false` | Alpha | 1.15 | 1.15 | +| `VolumePVCDataSource` | `true` | Beta | 1.16 | 1.17 | +| `VolumePVCDataSource` | `true` | GA | 1.18 | - | | `VolumeScheduling` | `false` | Alpha | 1.9 | 1.9 | | `VolumeScheduling` | `true` | Beta | 1.10 | 1.12 | | `VolumeScheduling` | `true` | GA | 1.13 | - | @@ -266,6 +280,12 @@ different Kubernetes components. | `WatchBookmark` | `false` | Alpha | 1.15 | 1.15 | | `WatchBookmark` | `true` | Beta | 1.16 | 1.16 | | `WatchBookmark` | `true` | GA | 1.17 | - | +| `WindowsGMSA` | `false` | Alpha | 1.14 | 1.15 | +| `WindowsGMSA` | `true` | Beta | 1.16 | 1.17 | +| `WindowsGMSA` | `true` | GA | 1.18 | - | +| `WindowsRunAsUserName` | `false` | Alpha | 1.16 | 1.16 | +| `WindowsRunAsUserName` | `true` | Beta | 1.17 | 1.17 | +| `WindowsRunAsUserName` | `true` | GA | 1.18 | - | {{< /table >}} ## Using a feature @@ -315,6 +335,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `AdvancedAuditing`: Enable [advanced auditing](/docs/tasks/debug-application-cluster/audit/#advanced-audit) - `AffinityInAnnotations`(*deprecated*): Enable setting [Pod affinity or anti-affinity](/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). - `AllowExtTrafficLocalEndpoints`: Enable a service to route external requests to node local endpoints. +- `AnyVolumeDataSource`: Enable use of any custom resource as the `DataSource` of a + {{< glossary_tooltip text="PVC" term_id="persistent-volume-claim" >}}. - `APIListChunking`: Enable the API clients to retrieve (`LIST` or `GET`) resources from API server in chunks. - `APIPriorityAndFairness`: Enable managing request concurrency with prioritization and fairness at each server. (Renamed from `RequestManagement`) - `APIResponseCompression`: Compress the API responses for `LIST` or `GET` requests. @@ -333,6 +355,7 @@ Each feature gate is designed for enabling/disabling a specific feature: ServiceAccountTokenVolumeProjection. Check [Service Account Token Volumes](https://git.k8s.io/community/contributors/design-proposals/storage/svcacct-token-volume-source.md) for more details. 
+- `ConfigurableFSGroupPolicy`: Allows users to configure volume permission change policy for fsGroups when mounting a volume in a Pod. See [Configure volume permission and ownership change policy for Pods](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods) for more details. - `CPUManager`: Enable container level CPU affinity support, see [CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/). - `CRIContainerLogRotation`: Enable container log rotation for cri container runtime. - `CSIBlockVolume`: Enable external CSI volume drivers to support block storage. See the [`csi` raw block volume support](/docs/concepts/storage/volumes/#csi-raw-block-volume-support) documentation for more details. @@ -391,12 +414,16 @@ Each feature gate is designed for enabling/disabling a specific feature: capabilities (e.g. `MKNODE`, `SYS_MODULE` etc.). This should only be enabled if user namespace remapping is enabled in the Docker daemon. - `EndpointSlice`: Enables Endpoint Slices for more scalable and extensible - network endpoints. Requires corresponding API and Controller to be enabled. - See [Enabling Endpoint Slices](/docs/tasks/administer-cluster/enabling-endpointslices/). + network endpoints. See [Enabling Endpoint Slices](/docs/tasks/administer-cluster/enabling-endpointslices/). +- `EndpointSliceProxying`: When this feature gate is enabled, kube-proxy will + use EndpointSlices as the primary data source instead of Endpoints, enabling + scalability and performance improvements. See [Enabling Endpoint Slices](/docs/tasks/administer-cluster/enabling-endpointslices/). - `GCERegionalPersistentDisk`: Enable the regional PD feature on GCE. - `HugePages`: Enable the allocation and consumption of pre-allocated [huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/). +- `HugePageStorageMediumSize`: Enable support for multiple sizes of pre-allocated [huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/). - `HyperVContainer`: Enable [Hyper-V isolation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container) for Windows containers. - `HPAScaleToZero`: Enables setting `minReplicas` to 0 for `HorizontalPodAutoscaler` resources when using custom or external metrics. +- `ImmutableEphemeralVolumes`: Allows for marking individual Secrets and ConfigMaps as immutable for better safety and performance. - `KubeletConfigFile`: Enable loading kubelet configuration from a file specified using a config file. See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) for more details. - `KubeletPluginsWatcher`: Enable probe-based plugin watcher utility to enable kubelet @@ -441,9 +468,11 @@ Each feature gate is designed for enabling/disabling a specific feature: - `ScheduleDaemonSetPods`: Enable DaemonSet Pods to be scheduled by the default scheduler instead of the DaemonSet controller. - `SCTPSupport`: Enables the usage of SCTP as `protocol` value in `Service`, `Endpoint`, `NetworkPolicy` and `Pod` definitions - `ServerSideApply`: Enables the [Server Side Apply (SSA)](/docs/reference/using-api/api-concepts/#server-side-apply) path at the API Server. +- `ServiceAppProtocol`: Enables the `AppProtocol` field on Services and Endpoints. - `ServiceLoadBalancerFinalizer`: Enable finalizer protection for Service load balancers. - `ServiceNodeExclusion`: Enable the exclusion of nodes from load balancers created by a cloud provider. 
A node is eligible for exclusion if labelled with "`alpha.service-controller.kubernetes.io/exclude-balancer`" key or `node.kubernetes.io/exclude-from-external-load-balancers`. +- `ServiceTopology`: Enable a Service to route traffic based upon the Node topology of the cluster. See [ServiceTopology](https://kubernetes.io/docs/concepts/services-networking/service-topology/) for more details. - `StartupProbe`: Enable the [startup](/docs/concepts/workloads/pods/pod-lifecycle/#when-should-you-use-a-startup-probe) probe in the kubelet. - `StorageObjectInUseProtection`: Postpone the deletion of PersistentVolume or PersistentVolumeClaim objects if they are still being used. @@ -473,6 +502,8 @@ Each feature gate is designed for enabling/disabling a specific feature: - `VolumeSubpathEnvExpansion`: Enable `subPathExpr` field for expanding environment variables into a `subPath`. - `WatchBookmark`: Enable support for watch bookmark events. - `WindowsGMSA`: Enables passing of GMSA credential specs from pods to container runtimes. +- `WindowsRunAsUserName`: Enable support for running applications in Windows containers as a non-default user. + See [Configuring RunAsUserName](/docs/tasks/configure-pod-container/configure-runasusername) for more details. - `WinDSR`: Allows kube-proxy to create DSR loadbalancers for Windows. - `WinOverlay`: Allows kube-proxy to run in overlay mode for Windows. diff --git a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md index 52a264fabc0ca..e952a53ce392b 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md @@ -73,7 +73,7 @@ kube-apiserver [flags] --api-audiences stringSlice - Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL . + Identifiers of the API. The service account token authenticator will validate that tokens used against the API are bound to at least one of these audiences. If the --service-account-issuer flag is configured and this flag is not, this field defaults to a single element list containing the issuer URL. @@ -374,7 +374,7 @@ kube-apiserver [flags] --bind-address ip     Default: 0.0.0.0 - The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces). + The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. @@ -458,7 +458,7 @@ kube-apiserver [flags] --disable-admission-plugins stringSlice - admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, RuntimeClass, ResourceQuota). 
Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. + admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. @@ -472,7 +472,7 @@ kube-apiserver [flags] --enable-admission-plugins stringSlice - admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, RuntimeClass, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. 
+ admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter. @@ -497,10 +497,10 @@ kube-apiserver [flags] - --enable-inflight-quota-handler + --enable-priority-and-fairness     Default: true - If true, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness + If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness @@ -584,14 +584,21 @@ kube-apiserver [flags] --external-hostname string - The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs). + The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery). --feature-gates mapStringBool - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (ALPHA - default=false)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumePVCDataSource=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (BETA - default=true)
WindowsRunAsUserName=true|false (BETA - default=true) + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultIngressClass=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceProxying=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
SelectorIndex=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (ALPHA - default=false)
ServiceAppProtocol=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (BETA - default=true)
StorageVersionHash=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false) + + + + --goaway-chance float + + + To prevent HTTP/2 clients from getting stuck on a single apiserver, randomly close a connection (GOAWAY). The client's other in-flight requests won't be affected, and the client will reconnect, likely landing on a different apiserver after going through the load balancer again. This argument sets the fraction of requests that will be sent a GOAWAY. Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. Min is 0 (off), Max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. @@ -710,7 +717,7 @@ kube-apiserver [flags] --master-service-namespace string     Default: "default" - DEPRECATED: the namespace from which the kubernetes master services should be injected into pods. + DEPRECATED: the namespace from which the Kubernetes master services should be injected into pods. @@ -878,14 +885,21 @@ kube-apiserver [flags] --secure-port int     Default: 6443 - The port on which to serve HTTPS with authentication and authorization.It cannot be switched off with 0. + The port on which to serve HTTPS with authentication and authorization. It cannot be switched off with 0. + + + + --service-account-issuer {service-account-issuer}/.well-known/openid-configuration + + + Identifier of the service account token issuer. The issuer will assert this identifier in "iss" claim of issued tokens. This value is a string or URI. If this option is not a valid URI per the OpenID Discovery 1.0 spec, the ServiceAccountIssuerDiscovery feature will remain disabled, even if the feature gate is set to true. It is highly recommended that this value comply with the OpenID spec: https://openid.net/specs/openid-connect-discovery-1_0.html. In practice, this means that service-account-issuer must be an https URL. It is also highly recommended that this URL be capable of serving OpenID discovery documents at {service-account-issuer}/.well-known/openid-configuration. - --service-account-issuer string + --service-account-jwks-uri string - Identifier of the service account token issuer. The issuer will assert this identifier in "iss" claim of issued tokens. This value is a string or URI. + Overrides the URI for the JSON Web Key Set in the discovery doc served at /.well-known/openid-configuration. This flag is useful if the discovery docand key set are served to relying parties from a URL other than the API server's external (as auto-detected or overridden with external-hostname). Only valid if the ServiceAccountIssuerDiscovery feature gate is enabled. @@ -1018,7 +1032,7 @@ kube-apiserver [flags] --tls-sni-cert-key namedCertKey     Default: [] - A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". + A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. 
If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". diff --git a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md index 3e0a66be528d0..99595543a9cc1 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md @@ -63,7 +63,7 @@ kube-controller-manager [flags] --authentication-kubeconfig string - kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenaccessreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster. + kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster. @@ -126,7 +126,7 @@ kube-controller-manager [flags] --bind-address ip     Default: 0.0.0.0 - The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces). + The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used. @@ -353,6 +353,13 @@ kube-controller-manager [flags] The length of endpoint updates batching period. Processing of pod changes will be delayed by this duration to join them with potential upcoming updates and reduce the overall number of endpoints updates. Larger number = higher endpoint programming latency, but lower number of endpoints revision generated + + --endpointslice-updates-batch-period duration + + + The length of endpoint slice updates batching period. Processing of pod changes will be delayed by this duration to join them with potential upcoming updates and reduce the overall number of endpoints updates. Larger number = higher endpoint programming latency, but lower number of endpoints revision generated + + --experimental-cluster-signing-duration duration     Default: 8760h0m0s @@ -371,7 +378,7 @@ kube-controller-manager [flags] --feature-gates mapStringBool - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (ALPHA - default=false)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumePVCDataSource=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (BETA - default=true)
WindowsRunAsUserName=true|false (BETA - default=true) + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultIngressClass=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceProxying=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
SelectorIndex=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (ALPHA - default=false)
ServiceAppProtocol=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (BETA - default=true)
StorageVersionHash=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false) @@ -641,7 +648,7 @@ kube-controller-manager [flags] - --profiling + --profiling     Default: true Enable profiling via web interface host:port/debug/pprof/ @@ -763,7 +770,7 @@ kube-controller-manager [flags] --secure-port int     Default: 10257 - The port on which to serve HTTPS with authentication and authorization.If 0, don't serve HTTPS at all. + The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. @@ -780,6 +787,13 @@ kube-controller-manager [flags] CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true + + --show-hidden-metrics-for-version string + + + The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is <major>.<minor>, e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that. + + --skip-headers @@ -840,7 +854,7 @@ kube-controller-manager [flags] --tls-sni-cert-key namedCertKey     Default: [] - A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". + A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". diff --git a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md index 224cb1c64bdd3..c8780d4626270 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md @@ -38,10 +38,10 @@ kube-proxy [flags] - --bind-address 0.0.0.0     Default: 0.0.0.0 + --bind-address ip     Default: 0.0.0.0 - The IP address for the proxy server to serve on (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces) + The IP address for the proxy server to serve on (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces) @@ -101,24 +101,24 @@ kube-proxy [flags] - --feature-gates mapStringBool + --detect-local-mode LocalMode - A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (ALPHA - default=false)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumePVCDataSource=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (BETA - default=true)
WindowsRunAsUserName=true|false (BETA - default=true) + Mode to use to detect local traffic - --healthz-bind-address 0.0.0.0     Default: 0.0.0.0:10256 + --feature-gates mapStringBool - The IP address for the health check server to serve on (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces) + A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultIngressClass=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceProxying=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
SelectorIndex=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (ALPHA - default=false)
ServiceAppProtocol=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (BETA - default=true)
StorageVersionHash=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false) - --healthz-port int32     Default: 10256 + --healthz-bind-address ipport     Default: 0.0.0.0:10256 - The port to bind the health check server. Use 0 to disable. + The IP address with port for the health check server to serve on (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. @@ -191,6 +191,27 @@ kube-proxy [flags] The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. + + --ipvs-tcp-timeout duration + + + The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + + + + --ipvs-tcpfin-timeout duration + + + The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + + + + --ipvs-udp-timeout duration + + + The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m'). + + --kube-api-burst int32     Default: 10 @@ -241,17 +262,10 @@ kube-proxy [flags] - --metrics-bind-address 0.0.0.0     Default: 127.0.0.1:10249 + --metrics-bind-address ipport     Default: 127.0.0.1:10249 - The IP address for the metrics server to serve on (set to 0.0.0.0 for all IPv4 interfaces and `::` for all IPv6 interfaces) - - - - --metrics-port int32     Default: 10249 - - - The port to bind the metrics server. Use 0 to disable. + The IP address with port for the metrics server to serve on (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. @@ -289,6 +303,13 @@ kube-proxy [flags] Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If (unspecified, 0, or 0-0) then ports will be randomly chosen. + + --show-hidden-metrics-for-version string + + + The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is <major>.<minor>, e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that. + + --udp-timeout duration     Default: 250ms diff --git a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md index 29afb09966eed..1a4b0882addc9 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md @@ -13,7 +13,8 @@ and capacity. The scheduler needs to take into account individual and collective resource requirements, quality of service requirements, hardware/software/policy constraints, affinity and anti-affinity specifications, data locality, inter-workload interference, deadlines, and so on. Workload-specific requirements will be exposed -through the API as necessary. +through the API as necessary. See [scheduling](https://kubernetes.io/docs/concepts/scheduling/) +for more information about scheduling and the kube-scheduler component. ``` kube-scheduler [flags] @@ -62,7 +63,7 @@ kube-scheduler [flags] --authentication-kubeconfig string - kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenaccessreviews.authentication.k8s.io. This is optional. 
If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster.
+ kubeconfig file pointing at the 'core' kubernetes server with enough rights to create tokenreviews.authentication.k8s.io. This is optional. If empty, all token requests are considered to be anonymous and no client CA is looked up in the cluster.
@@ -125,7 +126,7 @@ kube-scheduler [flags]
--bind-address ip     Default: 0.0.0.0
- The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank, all interfaces will be used (0.0.0.0 for all IPv4 interfaces and :: for all IPv6 interfaces).
+ The IP address on which to listen for the --secure-port port. The associated interface(s) must be reachable by the rest of the cluster, and by CLI/web clients. If blank or an unspecified address (0.0.0.0 or ::), all interfaces will be used.
@@ -160,7 +161,7 @@ kube-scheduler [flags]
--feature-gates mapStringBool
- A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BlockVolume=true|false (BETA - default=true)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIBlockVolume=true|false (BETA - default=true)
CSIDriverRegistry=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (ALPHA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
StreamingProxyRedirects=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TaintBasedEvictions=true|false (BETA - default=true)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (ALPHA - default=false)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumePVCDataSource=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false)
WindowsGMSA=true|false (BETA - default=true)
WindowsRunAsUserName=true|false (BETA - default=true)
+ A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (ALPHA - default=false)
APIResponseCompression=true|false (BETA - default=true)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (ALPHA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultIngressClass=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DryRun=true|false (BETA - default=true)
DynamicAuditing=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceProxying=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
EvenPodsSpread=true|false (BETA - default=true)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (ALPHA - default=false)
HyperVContainer=true|false (ALPHA - default=false)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (ALPHA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (ALPHA - default=false)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (ALPHA - default=false)
ResourceLimitsPriorityFunction=true|false (ALPHA - default=false)
RotateKubeletClientCertificate=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
RuntimeClass=true|false (BETA - default=true)
SCTPSupport=true|false (ALPHA - default=false)
SelectorIndex=true|false (ALPHA - default=false)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (ALPHA - default=false)
ServiceAppProtocol=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
StartupProbe=true|false (BETA - default=true)
StorageVersionHash=true|false (BETA - default=true)
SupportNodePidsLimit=true|false (BETA - default=true)
SupportPodPidsLimit=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TokenRequest=true|false (BETA - default=true)
TokenRequestProjection=true|false (BETA - default=true)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeSnapshotDataSource=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (ALPHA - default=false) @@ -405,7 +406,14 @@ kube-scheduler [flags] --secure-port int     Default: 10259 - The port on which to serve HTTPS with authentication and authorization.If 0, don't serve HTTPS at all. + The port on which to serve HTTPS with authentication and authorization. If 0, don't serve HTTPS at all. + + + + --show-hidden-metrics-for-version string + + + The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. Accepted format of version is <major>.<minor>, e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that. @@ -461,7 +469,7 @@ kube-scheduler [flags] --tls-sni-cert-key namedCertKey     Default: [] - A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". + A pair of x509 certificate and private key file paths, optionally suffixed with a list of domain patterns which are fully qualified domain names, possibly with prefixed wildcard segments. The domain patterns also allow IP addresses, but IPs should only be used if the apiserver has visibility to the IP address requested by a client. If no domain patterns are provided, the names of the certificate are extracted. Non-wildcard matches trump over wildcard matches, explicit domain patterns trump over extracted names. For multiple key/certificate pairs, use the --tls-sni-cert-key multiple times. Examples: "example.crt,example.key" or "foo.crt,foo.key:*.foo.com,foo.com". diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md b/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md index 97c38b438b440..6269a3ec5ae07 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping.md @@ -62,7 +62,7 @@ In the bootstrap initialization process, the following occurs: 4. kubelet reads its bootstrap file, retrieving the URL of the API server and a limited usage "token" 5. kubelet connects to the API server, authenticates using the token 6. kubelet now has limited credentials to create and retrieve a certificate signing request (CSR) -7. kubelet creates a CSR for itself +7. kubelet creates a CSR for itself with the signerName set to `kubernetes.io/kube-apiserver-client-kubelet` 8. CSR is approved in one of two ways: * If configured, kube-controller-manager automatically approves the CSR * If configured, an outside process, possibly a person, approves the CSR using the Kubernetes API or via `kubectl` @@ -117,7 +117,7 @@ While any authentication strategy can be used for the kubelet's initial bootstrap credentials, the following two authenticators are recommended for ease of provisioning. -1. [Bootstrap Tokens](#bootstrap-tokens) - __beta__ +1. [Bootstrap Tokens](#bootstrap-tokens) 2. 
[Token authentication file](#token-authentication-file) Bootstrap tokens are a simpler and more easily managed method to authenticate kubelets, and do not require any additional flags when starting kube-apiserver. @@ -292,35 +292,6 @@ roleRef: apiGroup: rbac.authorization.k8s.io ``` -**Note: Kubernetes Below 1.8**: If you are running an earlier version of Kubernetes, notably a version below 1.8, then the cluster roles referenced above do not ship by default. You will have to create them yourself _in addition to_ the `ClusterRoleBindings` listed. - -To create the `ClusterRole`s: - -```yml -# A ClusterRole which instructs the CSR approver to approve a user requesting -# node client credentials. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:certificates.k8s.io:certificatesigningrequests:nodeclient -rules: -- apiGroups: ["certificates.k8s.io"] - resources: ["certificatesigningrequests/nodeclient"] - verbs: ["create"] ---- -# A ClusterRole which instructs the CSR approver to approve a node renewing its -# own client credentials. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient -rules: -- apiGroups: ["certificates.k8s.io"] - resources: ["certificatesigningrequests/selfnodeclient"] - verbs: ["create"] -``` - - The `csrapproving` controller that ships as part of [kube-controller-manager](/docs/admin/kube-controller-manager/) and is enabled by default. The controller uses the [`SubjectAccessReview` diff --git a/content/en/docs/reference/glossary/annotation.md b/content/en/docs/reference/glossary/annotation.md index 03feb69a058d3..474573ef16100 100755 --- a/content/en/docs/reference/glossary/annotation.md +++ b/content/en/docs/reference/glossary/annotation.md @@ -14,5 +14,5 @@ tags: -The metadata in an annotation can be small or large, structured or unstructured, and can include characters not permitted by labels. Clients such as tools and libraries can retrieve this metadata. +The metadata in an annotation can be small or large, structured or unstructured, and can include characters not permitted by {{< glossary_tooltip text="labels" term_id="label" >}}. Clients such as tools and libraries can retrieve this metadata. diff --git a/content/en/docs/reference/glossary/cluster-infrastructure.md b/content/en/docs/reference/glossary/cluster-infrastructure.md index 0b8a5377b4fa9..d8a6fe7280d71 100644 --- a/content/en/docs/reference/glossary/cluster-infrastructure.md +++ b/content/en/docs/reference/glossary/cluster-infrastructure.md @@ -8,6 +8,6 @@ short_description: > aka: tags: -- operations +- operation --- -The infrastructure layer provides and maintains VMs, networking, security groups and others. + The infrastructure layer provides and maintains VMs, networking, security groups and others. diff --git a/content/en/docs/reference/glossary/cluster-operations.md b/content/en/docs/reference/glossary/cluster-operations.md index 424e6579fcefc..cf4831d935f6a 100644 --- a/content/en/docs/reference/glossary/cluster-operations.md +++ b/content/en/docs/reference/glossary/cluster-operations.md @@ -4,10 +4,18 @@ id: cluster-operations date: 2019-05-12 full_link: short_description: > - Activities such as upgrading the clusters, implementing security, storage, ingress, networking, logging and monitoring, and other operations involved in managing a Kubernetes cluster. + The work involved in managing a Kubernetes cluster. 
aka: tags: -- operations +- operation --- - Activities such as upgrading the clusters, implementing security, storage, ingress, networking, logging and monitoring, and other operations involved in managing a Kubernetes cluster. + The work involved in managing a Kubernetes cluster: managing +day-to-day operations, and co-ordinating upgrades. + + + + Examples of cluster operations work include: deploying new Nodes to +scale the cluster; performing software upgrades; implementing security +controls; adding or removing storage; configuring cluster networking; +managing cluster-wide observability; and responding to events. diff --git a/content/en/docs/reference/glossary/cluster.md b/content/en/docs/reference/glossary/cluster.md index 2e8aecb23e902..8f2659459b827 100755 --- a/content/en/docs/reference/glossary/cluster.md +++ b/content/en/docs/reference/glossary/cluster.md @@ -11,7 +11,13 @@ tags: - fundamental - operation --- -A set of worker machines, called nodes, that run containerized applications. Every cluster has at least one worker node. +A set of worker machines, called {{< glossary_tooltip text="nodes" term_id="node" >}}, +that run containerized applications. Every cluster has at least one worker node. -The worker node(s) host the pods that are the components of the application. The Control Plane manages the worker nodes and the pods in the cluster. In production environments, the Control Plane usually runs across multiple computers and a cluster usually runs multiple nodes, providing fault-tolerance and high availability. +The worker node(s) host the {{< glossary_tooltip text="Pods" term_id="pod" >}} that are +the components of the application workload. The +{{< glossary_tooltip text="control plane" term_id="control-plane" >}} manages the worker +nodes and the Pods in the cluster. In production environments, the control plane usually +runs across multiple computers and a cluster usually runs multiple nodes, providing +fault-tolerance and high availability. diff --git a/content/en/docs/reference/glossary/container-env-variables.md b/content/en/docs/reference/glossary/container-env-variables.md index a4682f57191d1..5e19a1dfa2f6e 100755 --- a/content/en/docs/reference/glossary/container-env-variables.md +++ b/content/en/docs/reference/glossary/container-env-variables.md @@ -10,8 +10,8 @@ aka: tags: - fundamental --- - Container environment variables are name=value pairs that provide useful information into containers running in a Pod. + Container environment variables are name=value pairs that provide useful information into containers running in a {{< glossary_tooltip text="pod" term_id="pod" >}} -Container environment variables provide information that is required by the running containerized applications along with information about important resources to the {{< glossary_tooltip text="Containers" term_id="container" >}}. For example, file system details, information about the container itself, and other cluster resources such as service endpoints. \ No newline at end of file +Container environment variables provide information that is required by the running containerized applications along with information about important resources to the {{< glossary_tooltip text="containers" term_id="container" >}}. For example, file system details, information about the container itself, and other cluster resources such as service endpoints. 
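To make the container environment variables glossary entry above concrete, here is a minimal sketch of a Pod that passes both a literal name=value pair and a value resolved from the Pod's own spec to its container; the Pod, container, and variable names are hypothetical and not taken from this change.

```shell
# Minimal sketch: a Pod whose container receives environment variables,
# one literal value and one derived from the Pod's own spec (downward API).
# "env-demo", "GREETING" and "MY_NODE_NAME" are placeholder names.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: env-demo
spec:
  containers:
  - name: app
    image: busybox:1.31
    command: ["sh", "-c", "env && sleep 3600"]
    env:
    - name: GREETING
      value: "hello"
    - name: MY_NODE_NAME
      valueFrom:
        fieldRef:
          fieldPath: spec.nodeName
EOF
```

Inside the container, `env` prints both variables alongside the Service endpoint variables that Kubernetes injects automatically, which is the kind of cluster resource information the glossary entry refers to.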
diff --git a/content/en/docs/reference/glossary/deployment.md b/content/en/docs/reference/glossary/deployment.md index 89a4f7b9b2b9c..b1ea465746151 100755 --- a/content/en/docs/reference/glossary/deployment.md +++ b/content/en/docs/reference/glossary/deployment.md @@ -16,5 +16,5 @@ tags: -Each replica is represented by a {{< glossary_tooltip term_id="pod" >}}, and the Pods are distributed among the nodes of a cluster. +Each replica is represented by a {{< glossary_tooltip term_id="pod" >}}, and the Pods are distributed among the {{< glossary_tooltip text="nodes" term_id="node" >}} of a cluster. diff --git a/content/en/docs/reference/glossary/horizontal-pod-autoscaler.md b/content/en/docs/reference/glossary/horizontal-pod-autoscaler.md index 4e3ace729e33f..d90009583c63a 100755 --- a/content/en/docs/reference/glossary/horizontal-pod-autoscaler.md +++ b/content/en/docs/reference/glossary/horizontal-pod-autoscaler.md @@ -11,9 +11,9 @@ aka: tags: - operation --- - An API resource that automatically scales the number of pod replicas based on targeted CPU utilization or custom metric targets. + An API resource that automatically scales the number of {{< glossary_tooltip term_id="pod" >}} replicas based on targeted CPU utilization or custom metric targets. -HPA is typically used with {{< glossary_tooltip text="Replication Controllers" term_id="replication-controller" >}}, {{< glossary_tooltip text="Deployments" term_id="deployment" >}}, or Replica Sets. It cannot be applied to objects that cannot be scaled, for example {{< glossary_tooltip text="DaemonSets" term_id="daemonset" >}}. +HPA is typically used with {{< glossary_tooltip text="ReplicationControllers" term_id="replication-controller" >}}, {{< glossary_tooltip text="Deployments" term_id="deployment" >}}, or {{< glossary_tooltip text="ReplicaSets" term_id="replica-set" >}}. It cannot be applied to objects that cannot be scaled, for example {{< glossary_tooltip text="DaemonSets" term_id="daemonset" >}}. diff --git a/content/en/docs/reference/glossary/host-aliases.md b/content/en/docs/reference/glossary/host-aliases.md index fa92be21f4027..47bd22d433d11 100644 --- a/content/en/docs/reference/glossary/host-aliases.md +++ b/content/en/docs/reference/glossary/host-aliases.md @@ -10,7 +10,7 @@ aka: tags: - operation --- - A HostAliases is a mapping between the IP address and hostname to be injected into a Pod's hosts file. + A HostAliases is a mapping between the IP address and hostname to be injected into a {{< glossary_tooltip text="Pod" term_id="pod" >}}'s hosts file. diff --git a/content/en/docs/reference/glossary/image.md b/content/en/docs/reference/glossary/image.md index cdb2c7e02c6b2..d185fc2eede80 100755 --- a/content/en/docs/reference/glossary/image.md +++ b/content/en/docs/reference/glossary/image.md @@ -10,9 +10,9 @@ aka: tags: - fundamental --- - Stored instance of a container that holds a set of software needed to run an application. + Stored instance of a {{< glossary_tooltip term_id="container" >}} that holds a set of software needed to run an application. - + A way of packaging software that allows it to be stored in a container registry, pulled to a local system, and run as an application. Meta data is included in the image that can indicate what executable to run, who built it, and other information. 
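As a hedged illustration of the image glossary entry above, the following sketch shows how a Pod references a stored image by registry, repository, and tag; the registry and names are placeholders, not values from this change.

```shell
# Sketch only: referencing a container image as registry/repository:tag.
# "registry.example.com/team/app:1.2.3" is a hypothetical image reference.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: image-demo
spec:
  containers:
  - name: app
    image: registry.example.com/team/app:1.2.3
    imagePullPolicy: IfNotPresent   # pull from the registry only if the image is not already cached on the node
EOF
```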
diff --git a/content/en/docs/reference/glossary/ingress.md b/content/en/docs/reference/glossary/ingress.md index 0a8342800259e..6fe935fb4edc3 100755 --- a/content/en/docs/reference/glossary/ingress.md +++ b/content/en/docs/reference/glossary/ingress.md @@ -16,5 +16,5 @@ tags: -Ingress can provide load balancing, SSL termination and name-based virtual hosting. +Ingress may provide load balancing, SSL termination and name-based virtual hosting. diff --git a/content/en/docs/reference/glossary/init-container.md b/content/en/docs/reference/glossary/init-container.md index 2ea793661ca93..a999042e3056f 100755 --- a/content/en/docs/reference/glossary/init-container.md +++ b/content/en/docs/reference/glossary/init-container.md @@ -4,15 +4,14 @@ id: init-container date: 2018-04-12 full_link: short_description: > - One or more initialization containers that must run to completion before any app containers run. + One or more initialization containers that must run to completion before any app containers run. aka: tags: - fundamental --- - One or more initialization containers that must run to completion before any app containers run. + One or more initialization {{< glossary_tooltip text="containers" term_id="container" >}} that must run to completion before any app containers run. -Initialization (init) containers are like regular app containers, with one difference: init containers must run to completion before any app containers can start. Init containers run in series: each init container must run to completion before the next init container begins. - +Initialization (init) containers are like regular app containers, with one difference: init containers must run to completion before any app containers can start. Init containers run in series: each init container must run to completion before the next init container begins. diff --git a/content/en/docs/reference/glossary/kube-proxy.md b/content/en/docs/reference/glossary/kube-proxy.md index 87be7e68acbb4..3b2a572504d38 100755 --- a/content/en/docs/reference/glossary/kube-proxy.md +++ b/content/en/docs/reference/glossary/kube-proxy.md @@ -11,15 +11,17 @@ tags: - fundamental - networking --- - [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) is a -network proxy that runs on each node in your cluster, implementing part of -the Kubernetes {{< glossary_tooltip term_id="service">}} concept. + kube-proxy is a network proxy that runs on each +{{< glossary_tooltip text="node" term_id="node" >}} in your cluster, +implementing part of the Kubernetes +{{< glossary_tooltip term_id="service">}} concept. -kube-proxy maintains network rules on nodes. These network rules allow -network communication to your Pods from network sessions inside or outside -of your cluster. +[kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) +maintains network rules on nodes. These network rules allow network +communication to your Pods from network sessions inside or outside of +your cluster. kube-proxy uses the operating system packet filtering layer if there is one and it's available. Otherwise, kube-proxy forwards the traffic itself. 
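A quick way to see the kube-proxy component described above at work, assuming the common layout where it runs as a DaemonSet labelled `k8s-app=kube-proxy` in the `kube-system` namespace (an assumption about the cluster, not something the glossary entry guarantees):

```shell
# List the kube-proxy Pods (one per node in the assumed DaemonSet layout).
kubectl -n kube-system get pods -l k8s-app=kube-proxy -o wide

# Tail the logs of those Pods to see which proxy mode (iptables or ipvs) was selected.
kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=20
```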
diff --git a/content/en/docs/reference/glossary/kube-scheduler.md b/content/en/docs/reference/glossary/kube-scheduler.md index 7094a6982a0af..a1a91a1527d94 100755 --- a/content/en/docs/reference/glossary/kube-scheduler.md +++ b/content/en/docs/reference/glossary/kube-scheduler.md @@ -4,15 +4,20 @@ id: kube-scheduler date: 2018-04-12 full_link: /docs/reference/generated/kube-scheduler/ short_description: > - Control Plane component that watches for newly created pods with no assigned node, and selects a node for them to run on. + Control plane component that watches for newly created pods with no assigned node, and selects a node for them to run on. aka: tags: - architecture --- - Control Plane component that watches for newly created pods with no assigned node, and selects a node for them to run on. +Control plane component that watches for newly created +{{< glossary_tooltip term_id="pod" text="Pods" >}} with no assigned +{{< glossary_tooltip term_id="node" text="node">}}, and selects a node for them +to run on. - - -Factors taken into account for scheduling decisions include individual and collective resource requirements, hardware/software/policy constraints, affinity and anti-affinity specifications, data locality, inter-workload interference and deadlines. + +Factors taken into account for scheduling decisions include: +individual and collective resource requirements, hardware/software/policy +constraints, affinity and anti-affinity specifications, data locality, +inter-workload interference, and deadlines. diff --git a/content/en/docs/reference/glossary/kubeadm.md b/content/en/docs/reference/glossary/kubeadm.md index ce26062270819..74cc0d1f6aaeb 100755 --- a/content/en/docs/reference/glossary/kubeadm.md +++ b/content/en/docs/reference/glossary/kubeadm.md @@ -15,5 +15,5 @@ tags: -You can use kubeadm to install both the control plane and the worker node components. +You can use kubeadm to install both the control plane and the {{< glossary_tooltip text="worker node" term_id="node" >}} components. diff --git a/content/en/docs/reference/glossary/kubelet.md b/content/en/docs/reference/glossary/kubelet.md index 0c4ea9425a1b9..0c95ac846e3d9 100755 --- a/content/en/docs/reference/glossary/kubelet.md +++ b/content/en/docs/reference/glossary/kubelet.md @@ -11,9 +11,8 @@ tags: - fundamental - core-object --- - An agent that runs on each node in the cluster. It makes sure that containers are running in a pod. + An agent that runs on each {{< glossary_tooltip text="node" term_id="node" >}} in the cluster. It makes sure that {{< glossary_tooltip text="containers" term_id="container" >}} are running in a {{< glossary_tooltip text="Pod" term_id="pod" >}}. The kubelet takes a set of PodSpecs that are provided through various mechanisms and ensures that the containers described in those PodSpecs are running and healthy. The kubelet doesn’t manage containers which were not created by Kubernetes. - diff --git a/content/en/docs/reference/glossary/persistent-volume-claim.md b/content/en/docs/reference/glossary/persistent-volume-claim.md index dc6314d8873d9..6af54d60f5cc3 100755 --- a/content/en/docs/reference/glossary/persistent-volume-claim.md +++ b/content/en/docs/reference/glossary/persistent-volume-claim.md @@ -11,9 +11,8 @@ tags: - core-object - storage --- - Claims storage resources defined in a PersistentVolume so that it can be mounted as a volume in a container. 
+ Claims storage resources defined in a {{< glossary_tooltip text="PersistentVolume" term_id="persistent-volume" >}} so that it can be mounted as a volume in a {{< glossary_tooltip text="container" term_id="container" >}}. -Specifies the amount of storage, how the storage will be accessed (read-only, read-write and/or exclusive) and how it is reclaimed (retained, recycled or deleted). Details of the storage itself are in the PersistentVolume specification. - +Specifies the amount of storage, how the storage will be accessed (read-only, read-write and/or exclusive) and how it is reclaimed (retained, recycled or deleted). Details of the storage itself are described in the PersistentVolume object. diff --git a/content/en/docs/reference/glossary/pod-priority.md b/content/en/docs/reference/glossary/pod-priority.md index 0b80602b35fde..994f8bc4d8b55 100644 --- a/content/en/docs/reference/glossary/pod-priority.md +++ b/content/en/docs/reference/glossary/pod-priority.md @@ -10,7 +10,7 @@ aka: tags: - operation --- - Pod Priority indicates the importance of a Pod relative to other Pods. + Pod Priority indicates the importance of a {{< glossary_tooltip term_id="pod" >}} relative to other Pods. diff --git a/content/en/docs/reference/glossary/podpreset.md b/content/en/docs/reference/glossary/podpreset.md index c60f03cd9571c..f63187ff714d5 100755 --- a/content/en/docs/reference/glossary/podpreset.md +++ b/content/en/docs/reference/glossary/podpreset.md @@ -10,9 +10,9 @@ aka: tags: - operation --- - An API object that injects information such as secrets, volume mounts, and environment variables into pods at creation time. + An API object that injects information such as secrets, volume mounts, and environment variables into {{< glossary_tooltip text="Pods" term_id="pod" >}} at creation time. -This object chooses the pods to inject information into using standard selectors. This allows the podspec definitions to be nonspecific, decoupling the podspec from environment specific configuration. +This object chooses the Pods to inject information into using standard selectors. This allows the podspec definitions to be nonspecific, decoupling the podspec from environment specific configuration. diff --git a/content/en/docs/reference/glossary/preemption.md b/content/en/docs/reference/glossary/preemption.md index 0810acfcfcb00..f27e36c66fa84 100644 --- a/content/en/docs/reference/glossary/preemption.md +++ b/content/en/docs/reference/glossary/preemption.md @@ -10,7 +10,7 @@ aka: tags: - operation --- - Preemption logic in Kubernetes helps a pending Pod to find a suitable Node by evicting low priority Pods existing on that Node. + Preemption logic in Kubernetes helps a pending {{< glossary_tooltip term_id="pod" >}} to find a suitable {{< glossary_tooltip term_id="node" >}} by evicting low priority Pods existing on that Node. diff --git a/content/en/docs/reference/glossary/replication-controller.md b/content/en/docs/reference/glossary/replication-controller.md index b564c29691e47..0fae40842fc03 100755 --- a/content/en/docs/reference/glossary/replication-controller.md +++ b/content/en/docs/reference/glossary/replication-controller.md @@ -1,19 +1,25 @@ --- -title: Replication Controller +title: ReplicationController id: replication-controller date: 2018-04-12 full_link: short_description: > - Kubernetes service that ensures a specific number of instances of a pod are always running. + A (deprecated) API object that manages a replicated application. 
aka: tags: - workload - core-object --- - Kubernetes service that ensures a specific number of instances of a pod are always running. + A workload resource that manages a replicated application, ensuring that +a specific number of instances of a {{< glossary_tooltip text="Pod" term_id="pod" >}} are running. - + -Will automatically add or remove running instances of a pod, based on a set value for that pod. Allows the pod to return to the defined number of instances if pods are deleted or if too many are started by mistake. +The control plane ensures that the defined number of Pods are running, even if some +Pods fail, if you delete Pods manually, or if too many are started by mistake. +{{< note >}} +ReplicationController is deprecated. See +{{< glossary_tooltip text="Deployment" term_id="deployment" >}}, which is similar. +{{< /note >}} diff --git a/content/en/docs/reference/glossary/security-context.md b/content/en/docs/reference/glossary/security-context.md index 9812304e4dd2a..c53e96aa190f7 100755 --- a/content/en/docs/reference/glossary/security-context.md +++ b/content/en/docs/reference/glossary/security-context.md @@ -4,14 +4,20 @@ id: security-context date: 2018-04-12 full_link: /docs/tasks/configure-pod-container/security-context/ short_description: > - The securityContext field defines privilege and access control settings for a Pod or Container, including the runtime UID and GID. + The securityContext field defines privilege and access control settings for a Pod or container. aka: tags: - security --- - The securityContext field defines privilege and access control settings for a Pod or Container, including the runtime UID and GID. + The `securityContext` field defines privilege and access control settings for +a {{< glossary_tooltip text="Pod" term_id="pod" >}} or +{{< glossary_tooltip text="container" term_id="container" >}}. - + -The securityContext field in a {{< glossary_tooltip term_id="pod" >}} (applying to all containers) or container is used to set the user, groups, capabilities, privilege settings, and security policies (SELinux/AppArmor/Seccomp) and more that container processes use. +In a `securityContext`, you can define: the user that processes run as, +the group that processes run as, and privilege settings. +You can also configure security policies (for example: SELinux, AppArmor or seccomp). + +The `PodSpec.securityContext` setting applies to all containers in a Pod. diff --git a/content/en/docs/reference/glossary/selector.md b/content/en/docs/reference/glossary/selector.md index 9b8a0202524e6..622b24694d335 100755 --- a/content/en/docs/reference/glossary/selector.md +++ b/content/en/docs/reference/glossary/selector.md @@ -10,9 +10,9 @@ aka: tags: - fundamental --- - Allows users to filter a list of resources based on labels. + Allows users to filter a list of resources based on {{< glossary_tooltip text="labels" term_id="label" >}}. -Selectors are applied when querying lists of resources to filter them by {{< glossary_tooltip text="Labels" term_id="label" >}}. +Selectors are applied when querying lists of resources to filter them by labels. 
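The selector glossary entry above can be illustrated with `kubectl`; the label keys and values here are hypothetical examples.

```shell
# Equality-based selector: list Pods whose "app" label equals "nginx".
kubectl get pods -l app=nginx

# Set-based selector: list Pods whose "environment" label is either "staging" or "qa".
kubectl get pods -l 'environment in (staging,qa)'

# The same flag in long form, applied to another resource type.
kubectl get services --selector=app=nginx
```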
diff --git a/content/en/docs/reference/glossary/shuffle-sharding.md b/content/en/docs/reference/glossary/shuffle-sharding.md new file mode 100644 index 0000000000000..7d1a128762a7e --- /dev/null +++ b/content/en/docs/reference/glossary/shuffle-sharding.md @@ -0,0 +1,45 @@ +--- +title: shuffle sharding +id: shuffle-sharding +date: 2020-03-04 +full_link: +short_description: > + A technique for assigning requests to queues that provides better isolation than hashing modulo the number of queues. + +aka: +tags: +- fundamental +--- +A technique for assigning requests to queues that provides better isolation than hashing modulo the number of queues. + + + +We are often concerned with insulating different flows of requests +from each other, so that a high-intensity flow does not crowd out low-intensity flows. +A simple way to put requests into queues is to hash some +characteristics of the request, modulo the number of queues, to get +the index of the queue to use. The hash function uses as input +characteristics of the request that align with flows. For example, in +the Internet this is often the 5-tuple of source and destination +address, protocol, and source and destination port. + +That simple hash-based scheme has the property that any high-intensity flow +will crowd out all the low-intensity flows that hash to the same queue. +Providing good insulation for a large number of flows requires a large +number of queues, which is problematic. Shuffle sharding is a more +nimble technique that can do a better job of insulating the low-intensity +flows from the high-intensity flows. The terminology of shuffle sharding uses +the metaphor of dealing a hand from a deck of cards; each queue is a +metaphorical card. The shuffle sharding technique starts with hashing +the flow-identifying characteristics of the request, to produce a hash +value with dozens or more of bits. Then the hash value is used as a +source of entropy to shuffle the deck and deal a hand of cards +(queues). All the dealt queues are examined, and the request is put +into one of the examined queues with the shortest length. With a +modest hand size, it does not cost much to examine all the dealt cards +and a given low-intensity flow has a good chance to dodge the effects of a +given high-intensity flow. With a large hand size it is expensive to examine +the dealt queues and more difficult for the low-intensity flows to dodge the +collective effects of a set of high-intensity flows. Thus, the hand size +should be chosen judiciously. + diff --git a/content/en/docs/reference/glossary/taint.md b/content/en/docs/reference/glossary/taint.md index 9faaee579be93..c1aaaf5f657ac 100644 --- a/content/en/docs/reference/glossary/taint.md +++ b/content/en/docs/reference/glossary/taint.md @@ -11,8 +11,8 @@ tags: - core-object - fundamental --- - A core object consisting of three required properties: key, value, and effect. Taints prevent the scheduling of pods on nodes or node groups. + A core object consisting of three required properties: key, value, and effect. Taints prevent the scheduling of {{< glossary_tooltip text="Pods" term_id="pod" >}} on {{< glossary_tooltip text="nodes" term_id="node" >}} or node groups. -Taints and {{< glossary_tooltip text="tolerations" term_id="toleration" >}} work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a {{< glossary_tooltip text="node" term_id="node" >}}. A node should only schedule a pod with the matching tolerations for the configured taints. 
+Taints and {{< glossary_tooltip text="tolerations" term_id="toleration" >}} work together to ensure that pods are not scheduled onto inappropriate nodes. One or more taints are applied to a node. A node should only schedule a Pod with the matching tolerations for the configured taints. diff --git a/content/en/docs/reference/glossary/volume.md b/content/en/docs/reference/glossary/volume.md index f162ce0010c0e..2076378bb33a4 100755 --- a/content/en/docs/reference/glossary/volume.md +++ b/content/en/docs/reference/glossary/volume.md @@ -11,9 +11,10 @@ tags: - core-object - fundamental --- - A directory containing data, accessible to the containers in a {{< glossary_tooltip text="pod" term_id="pod" >}}. + A directory containing data, accessible to the {{< glossary_tooltip text="containers" term_id="container" >}} in a {{< glossary_tooltip term_id="pod" >}}. -A Kubernetes volume lives as long as the {{< glossary_tooltip text="pod" term_id="pod" >}} that encloses it. Consequently, a volume outlives any {{< glossary_tooltip text="containers" term_id="container" >}} that run within the {{< glossary_tooltip text="pod" term_id="pod" >}}, and data is preserved across {{< glossary_tooltip text="container" term_id="container" >}} restarts. +A Kubernetes volume lives as long as the Pod that encloses it. Consequently, a volume outlives any containers that run within the Pod, and data in the volume is preserved across container restarts. +See [storage](https://kubernetes.io/docs/concepts/storage/) for more information. diff --git a/content/en/docs/reference/issues-security/security.md b/content/en/docs/reference/issues-security/security.md index e66cad55d159c..709f26ffe1b56 100644 --- a/content/en/docs/reference/issues-security/security.md +++ b/content/en/docs/reference/issues-security/security.md @@ -17,7 +17,7 @@ This page describes Kubernetes security and disclosure information. {{% capture body %}} ## Security Announcements -Join the [kubernetes-announce](https://groups.google.com/forum/#!forum/kubernetes-announce) group for emails about security and major API announcements. +Join the [kubernetes-security-announce](https://groups.google.com/forum/#!forum/kubernetes-security-announce) group for emails about security and major API announcements. You can also subscribe to an RSS feed of the above using [this link](https://groups.google.com/forum/feed/kubernetes-announce/msgs/rss_v2_0.xml?num=50). 
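Tying together the taint and toleration glossary entries above, here is a hedged sketch; the node name `node-1` and the `dedicated=experimental` key/value are placeholders, not values taken from this change.

```shell
# Taint a node so that only Pods tolerating the taint are scheduled onto it.
kubectl taint nodes node-1 dedicated=experimental:NoSchedule

# A Pod that carries a matching toleration and so remains schedulable on node-1.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: tolerant-pod
spec:
  tolerations:
  - key: "dedicated"
    operator: "Equal"
    value: "experimental"
    effect: "NoSchedule"
  containers:
  - name: app
    image: nginx:1.17
EOF
```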
diff --git a/content/en/docs/reference/kubectl/cheatsheet.md b/content/en/docs/reference/kubectl/cheatsheet.md index adb7fb8b6f339..5e10e88ece1b6 100644 --- a/content/en/docs/reference/kubectl/cheatsheet.md +++ b/content/en/docs/reference/kubectl/cheatsheet.md @@ -42,7 +42,7 @@ complete -F __start_kubectl k ```bash source <(kubectl completion zsh) # setup autocomplete in zsh into the current shell -echo "if [ $commands[kubectl] ]; then source <(kubectl completion zsh); fi" >> ~/.zshrc # add autocomplete permanently to your zsh shell +echo "[[ $commands[kubectl] ]] && source <(kubectl completion zsh)" >> ~/.zshrc # add autocomplete permanently to your zsh shell ``` ## Kubectl Context and Configuration @@ -95,7 +95,7 @@ kubectl apply -f ./my1.yaml -f ./my2.yaml # create from multiple files kubectl apply -f ./dir # create resource(s) in all manifest files in dir kubectl apply -f https://git.io/vPieo # create resource(s) from url kubectl create deployment nginx --image=nginx # start a single instance of nginx -kubectl explain pods,svc # get the documentation for pod and svc manifests +kubectl explain pods # get the documentation for pod manifests # Create multiple YAML objects from stdin cat <database username + + --tls-server-name string + + + Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used + + --token string @@ -518,6 +525,7 @@ kubectl [flags] {{% capture seealso %}} +* [kubectl alpha](/docs/reference/generated/kubectl/kubectl-commands#alpha) - Commands for features in alpha * [kubectl annotate](/docs/reference/generated/kubectl/kubectl-commands#annotate) - Update the annotations on a resource * [kubectl api-resources](/docs/reference/generated/kubectl/kubectl-commands#api-resources) - Print the supported API resources on the server * [kubectl api-versions](/docs/reference/generated/kubectl/kubectl-commands#api-versions) - Print the supported API versions on the server, in the form of "group/version" diff --git a/content/en/docs/reference/kubectl/overview.md b/content/en/docs/reference/kubectl/overview.md index 7b98ff7326ce0..1bb82cf96237a 100644 --- a/content/en/docs/reference/kubectl/overview.md +++ b/content/en/docs/reference/kubectl/overview.md @@ -69,31 +69,30 @@ The following table includes short descriptions and the general syntax for all o Operation | Syntax | Description -------------------- | -------------------- | -------------------- -`annotate` | `kubectl annotate (-f FILENAME | TYPE NAME | TYPE/NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--overwrite] [--all] [--resource-version=version] [flags]` | Add or update the annotations of one or more resources. +`annotate` | kubectl annotate (-f FILENAME | TYPE NAME | TYPE/NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--overwrite] [--all] [--resource-version=version] [flags] | Add or update the annotations of one or more resources. `api-versions` | `kubectl api-versions [flags]` | List the API versions that are available. `apply` | `kubectl apply -f FILENAME [flags]`| Apply a configuration change to a resource from a file or stdin. `attach` | `kubectl attach POD -c CONTAINER [-i] [-t] [flags]` | Attach to a running container either to view the output stream or interact with the container (stdin). -`autoscale` | `kubectl autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU] [flags]` | Automatically scale the set of pods that are managed by a replication controller. 
+`autoscale` | kubectl autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MAXPODS [--cpu-percent=CPU] [flags] | Automatically scale the set of pods that are managed by a replication controller. `cluster-info` | `kubectl cluster-info [flags]` | Display endpoint information about the master and services in the cluster. `config` | `kubectl config SUBCOMMAND [flags]` | Modifies kubeconfig files. See the individual subcommands for details. `create` | `kubectl create -f FILENAME [flags]` | Create one or more resources from a file or stdin. -`delete` | `kubectl delete (-f FILENAME | TYPE [NAME | /NAME | -l label | --all]) [flags]` | Delete resources either from a file, stdin, or specifying label selectors, names, resource selectors, or resources. -`describe` | `kubectl describe (-f FILENAME | TYPE [NAME_PREFIX | /NAME | -l label]) [flags]` | Display the detailed state of one or more resources. -`diff` | `kubectl diff -f FILENAME [flags]`| Diff file or stdin against live configuration (**BETA**) -`edit` | `kubectl edit (-f FILENAME | TYPE NAME | TYPE/NAME) [flags]` | Edit and update the definition of one or more resources on the server by using the default editor. +`delete` | kubectl delete (-f FILENAME | TYPE [NAME | /NAME | -l label | --all]) [flags] | Delete resources either from a file, stdin, or specifying label selectors, names, resource selectors, or resources. +`describe` | kubectl describe (-f FILENAME | TYPE [NAME_PREFIX | /NAME | -l label]) [flags] | Display the detailed state of one or more resources. +`diff` | `kubectl diff -f FILENAME [flags]`| Diff file or stdin against live configuration. +`edit` | kubectl edit (-f FILENAME | TYPE NAME | TYPE/NAME) [flags] | Edit and update the definition of one or more resources on the server by using the default editor. `exec` | `kubectl exec POD [-c CONTAINER] [-i] [-t] [flags] [-- COMMAND [args...]]` | Execute a command against a container in a pod. `explain` | `kubectl explain [--recursive=false] [flags]` | Get documentation of various resources. For instance pods, nodes, services, etc. -`expose` | `kubectl expose (-f FILENAME | TYPE NAME | TYPE/NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type] [flags]` | Expose a replication controller, service, or pod as a new Kubernetes service. -`get` | `kubectl get (-f FILENAME | TYPE [NAME | /NAME | -l label]) [--watch] [--sort-by=FIELD] [[-o | --output]=OUTPUT_FORMAT] [flags]` | List one or more resources. -`label` | `kubectl label (-f FILENAME | TYPE NAME | TYPE/NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--overwrite] [--all] [--resource-version=version] [flags]` | Add or update the labels of one or more resources. +`expose` | kubectl expose (-f FILENAME | TYPE NAME | TYPE/NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type] [flags] | Expose a replication controller, service, or pod as a new Kubernetes service. +`get` | kubectl get (-f FILENAME | TYPE [NAME | /NAME | -l label]) [--watch] [--sort-by=FIELD] [[-o | --output]=OUTPUT_FORMAT] [flags] | List one or more resources. +`label` | kubectl label (-f FILENAME | TYPE NAME | TYPE/NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--overwrite] [--all] [--resource-version=version] [flags] | Add or update the labels of one or more resources. `logs` | `kubectl logs POD [-c CONTAINER] [--follow] [flags]` | Print the logs for a container in a pod. 
-`patch` | `kubectl patch (-f FILENAME | TYPE NAME | TYPE/NAME) --patch PATCH [flags]` | Update one or more fields of a resource by using the strategic merge patch process. +`patch` | kubectl patch (-f FILENAME | TYPE NAME | TYPE/NAME) --patch PATCH [flags] | Update one or more fields of a resource by using the strategic merge patch process. `port-forward` | `kubectl port-forward POD [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N] [flags]` | Forward one or more local ports to a pod. `proxy` | `kubectl proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-prefix=prefix] [flags]` | Run a proxy to the Kubernetes API server. `replace` | `kubectl replace -f FILENAME` | Replace a resource from a file or stdin. -`rolling-update` | `kubectl rolling-update OLD_CONTROLLER_NAME ([NEW_CONTROLLER_NAME] --image=NEW_CONTAINER_IMAGE | -f NEW_CONTROLLER_SPEC) [flags]` | Perform a rolling update by gradually replacing the specified replication controller and its pods. -`run` | `kubectl run NAME --image=image [--env="key=value"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] [flags]` | Run a specified image on the cluster. -`scale` | `kubectl scale (-f FILENAME | TYPE NAME | TYPE/NAME) --replicas=COUNT [--resource-version=version] [--current-replicas=count] [flags]` | Update the size of the specified replication controller. +`run` | `kubectl run NAME --image=image [--env="key=value"] [--port=port] [--replicas=replicas] [--dry-run=server|client|none] [--overrides=inline-json] [flags]` | Run a specified image on the cluster. +`scale` | kubectl scale (-f FILENAME | TYPE NAME | TYPE/NAME) --replicas=COUNT [--resource-version=version] [--current-replicas=count] [flags] | Update the size of the specified replication controller. `version` | `kubectl version [--client] [flags]` | Display the Kubernetes version running on the client and server. Remember: For more about command operations, see the [kubectl](/docs/user-guide/kubectl/) reference documentation. @@ -370,6 +369,16 @@ kubectl logs kubectl logs -f ``` +`kubectl diff` - View a diff of the proposed updates to a cluster. + +```shell +# Diff resources included in "pod.json". +kubectl diff -f pod.json + +# Diff file read from stdin. 
+cat service.yaml | kubectl diff -f - +``` + ## Examples: Creating and using plugins Use the following set of examples to help you familiarize yourself with writing and using `kubectl` plugins: diff --git a/content/en/docs/reference/kubernetes-api/api-index.md b/content/en/docs/reference/kubernetes-api/api-index.md index 2d1a45b225e46..60d24e906b771 100644 --- a/content/en/docs/reference/kubernetes-api/api-index.md +++ b/content/en/docs/reference/kubernetes-api/api-index.md @@ -1,6 +1,6 @@ --- -title: v1.17 +title: v1.18 weight: 50 --- -[Kubernetes API v1.17](/docs/reference/generated/kubernetes-api/v1.17/) +[Kubernetes API v1.18](/docs/reference/generated/kubernetes-api/v1.18/) diff --git a/content/en/docs/reference/scheduling/_index.md b/content/en/docs/reference/scheduling/_index.md new file mode 100644 index 0000000000000..316b774081953 --- /dev/null +++ b/content/en/docs/reference/scheduling/_index.md @@ -0,0 +1,5 @@ +--- +title: Scheduling +weight: 70 +toc-hide: true +--- diff --git a/content/en/docs/reference/scheduling/policies.md b/content/en/docs/reference/scheduling/policies.md new file mode 100644 index 0000000000000..23d0bc915efc9 --- /dev/null +++ b/content/en/docs/reference/scheduling/policies.md @@ -0,0 +1,125 @@ +--- +title: Scheduling Policies +content_template: templates/concept +weight: 10 +--- + +{{% capture overview %}} + +A scheduling Policy can be used to specify the *predicates* and *priorities* +that the {{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}} +runs to [filter and score nodes](/docs/concepts/scheduling/kube-scheduler/#kube-scheduler-implementation), +respectively. + +You can set a scheduling policy by running +`kube-scheduler --policy-config-file <filename>` or +`kube-scheduler --policy-configmap <ConfigMap>` +and using the [Policy type](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1?tab=doc#Policy). + +{{% /capture %}} + +{{% capture body %}} + +## Predicates + +The following *predicates* implement filtering: + +- `PodFitsHostPorts`: Checks if a Node has free ports (the network protocol kind) + for the Pod ports the Pod is requesting. + +- `PodFitsHost`: Checks if a Pod specifies a specific Node by its hostname. + +- `PodFitsResources`: Checks if the Node has free resources (e.g., CPU and Memory) + to meet the requirement of the Pod. + +- `PodMatchNodeSelector`: Checks if a Pod's Node {{< glossary_tooltip term_id="selector" >}} + matches the Node's {{< glossary_tooltip text="label(s)" term_id="label" >}}. + +- `NoVolumeZoneConflict`: Evaluates if the {{< glossary_tooltip text="Volumes" term_id="volume" >}} + that a Pod requests are available on the Node, given the failure zone restrictions for + that storage. + +- `NoDiskConflict`: Evaluates if a Pod can fit on a Node due to the volumes it requests, + and those that are already mounted. + +- `MaxCSIVolumeCount`: Decides how many {{< glossary_tooltip text="CSI" term_id="csi" >}} + volumes should be attached, and whether that's over a configured limit. + +- `CheckNodeMemoryPressure`: If a Node is reporting memory pressure, and there's no + configured exception, the Pod won't be scheduled there. + +- `CheckNodePIDPressure`: If a Node is reporting that process IDs are scarce, and + there's no configured exception, the Pod won't be scheduled there. + +- `CheckNodeDiskPressure`: If a Node is reporting storage pressure (a filesystem that + is full or nearly full), and there's no configured exception, the Pod won't be + scheduled there.
+ +- `CheckNodeCondition`: Nodes can report that they have a completely full filesystem, + that networking isn't available, or that kubelet is otherwise not ready to run Pods. + If such a condition is set for a Node, and there's no configured exception, the Pod + won't be scheduled there. + +- `PodToleratesNodeTaints`: Checks if a Pod's {{< glossary_tooltip text="tolerations" term_id="toleration" >}} + can tolerate the Node's {{< glossary_tooltip text="taints" term_id="taint" >}}. + +- `CheckVolumeBinding`: Evaluates if a Pod can fit due to the volumes it requests. + This applies for both bound and unbound + {{< glossary_tooltip text="PVCs" term_id="persistent-volume-claim" >}}. + +## Priorities + +The following *priorities* implement scoring: + +- `SelectorSpreadPriority`: Spreads Pods across hosts, considering Pods that + belong to the same {{< glossary_tooltip text="Service" term_id="service" >}}, + {{< glossary_tooltip term_id="statefulset" >}} or + {{< glossary_tooltip term_id="replica-set" >}}. + +- `InterPodAffinityPriority`: Implements preferred + [inter-Pod affinity and anti-affinity](/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity). + +- `LeastRequestedPriority`: Favors nodes with fewer requested resources. In other + words, the more Pods that are placed on a Node, and the more resources those + Pods use, the lower the ranking this policy will give. + +- `MostRequestedPriority`: Favors nodes with the most requested resources. This policy + will fit the scheduled Pods onto the smallest number of Nodes needed to run your + overall set of workloads. + +- `RequestedToCapacityRatioPriority`: Creates a requestedToCapacity-based ResourceAllocationPriority using the default resource scoring function shape. + +- `BalancedResourceAllocation`: Favors nodes with balanced resource usage. + +- `NodePreferAvoidPodsPriority`: Prioritizes nodes according to the node annotation + `scheduler.alpha.kubernetes.io/preferAvoidPods`. You can use this to hint that + two different Pods shouldn't run on the same Node. + +- `NodeAffinityPriority`: Prioritizes nodes according to node affinity scheduling + preferences indicated in PreferredDuringSchedulingIgnoredDuringExecution. + You can read more about this in [Assigning Pods to Nodes](/docs/concepts/configuration/assign-pod-node/). + +- `TaintTolerationPriority`: Prepares the priority list for all the nodes, based on + the number of intolerable taints on the node. This policy adjusts a node's rank + taking that list into account. + +- `ImageLocalityPriority`: Favors nodes that already have the + {{< glossary_tooltip text="container images" term_id="image" >}} for that + Pod cached locally. + +- `ServiceSpreadingPriority`: For a given Service, this policy aims to make sure that + the Pods for the Service run on different nodes. It favors scheduling onto nodes + that don't have Pods for the Service already assigned there. The overall outcome is + that the Service becomes more resilient to a single Node failure. + +- `EqualPriority`: Gives an equal weight of one to all nodes. + +- `EvenPodsSpreadPriority`: Implements preferred + [pod topology spread constraints](/docs/concepts/workloads/pods/pod-topology-spread-constraints/).
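+
+As an illustration, a minimal Policy file that wires a few of these predicates and
+priorities together might look like the following sketch. The particular selection
+and weights shown here are only an example, not a recommended configuration:
+
+```json
+{
+  "kind": "Policy",
+  "apiVersion": "v1",
+  "predicates": [
+    {"name": "PodFitsHostPorts"},
+    {"name": "PodFitsResources"},
+    {"name": "PodToleratesNodeTaints"}
+  ],
+  "priorities": [
+    {"name": "LeastRequestedPriority", "weight": 1},
+    {"name": "NodeAffinityPriority", "weight": 1}
+  ]
+}
+```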
+ +{{% /capture %}} + +{{% capture whatsnext %}} +* Learn about [scheduling](/docs/concepts/scheduling/kube-scheduler/) +* Learn about [kube-scheduler profiles](/docs/reference/scheduling/profiles/) +{{% /capture %}} diff --git a/content/en/docs/reference/scheduling/profiles.md b/content/en/docs/reference/scheduling/profiles.md new file mode 100644 index 0000000000000..f5595f8480bdf --- /dev/null +++ b/content/en/docs/reference/scheduling/profiles.md @@ -0,0 +1,181 @@ +--- +title: Scheduling Profiles +content_template: templates/concept +weight: 20 +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +A scheduling Profile allows you to configure the different stages of scheduling +in the {{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}}. +Each stage is exposed in an extension point. Plugins provide scheduling behaviors +by implementing one or more of these extension points. + +You can specify scheduling profiles by running `kube-scheduler --config <filename>`, +using the component config APIs +([`v1alpha1`](https://pkg.go.dev/k8s.io/kube-scheduler@{{< param "fullversion" >}}/config/v1alpha1?tab=doc#KubeSchedulerConfiguration) +or [`v1alpha2`](https://pkg.go.dev/k8s.io/kube-scheduler@{{< param "fullversion" >}}/config/v1alpha2?tab=doc#KubeSchedulerConfiguration)). +The `v1alpha2` API allows you to configure kube-scheduler to run +[multiple profiles](#multiple-profiles). + +{{% /capture %}} + +{{% capture body %}} + +## Extension points + +Scheduling happens in a series of stages that are exposed through the following +extension points: + +1. `QueueSort`: These plugins provide an ordering function that is used to + sort pending Pods in the scheduling queue. Exactly one queue sort plugin + may be enabled at a time. +1. `PreFilter`: These plugins are used to pre-process or check information + about a Pod or the cluster before filtering. +1. `Filter`: These plugins are the equivalent of Predicates in a scheduling + Policy and are used to filter out nodes that cannot run the Pod. Filters + are called in the configured order. +1. `PreScore`: This is an informational extension point that can be used + for doing pre-scoring work. +1. `Score`: These plugins provide a score to each node that has passed the + filtering phase. The scheduler will then select the node with the highest + sum of weighted scores. +1. `Reserve`: This is an informational extension point that notifies plugins + when resources have been reserved for a given Pod. +1. `Permit`: These plugins can prevent or delay the binding of a Pod. +1. `PreBind`: These plugins perform any work required before a Pod is bound. +1. `Bind`: These plugins bind a Pod to a Node. Bind plugins are called in order + and once one has done the binding, the remaining plugins are skipped. At + least one bind plugin is required. +1. `PostBind`: This is an informational extension point that is called after + a Pod has been bound. +1. `UnReserve`: This is an informational extension point that is called if + a Pod is rejected after being reserved and put on hold by a `Permit` plugin.
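+
+As a sketch, the file you pass to `--config` is a `KubeSchedulerConfiguration` object.
+A minimal `v1alpha2` example that simply declares one profile could look like the
+following; the file path and profile name are illustrative:
+
+```yaml
+# Minimal sketch of a scheduler configuration file (v1alpha2).
+# Save it, for example, as /etc/kubernetes/scheduler-config.yaml (path is illustrative)
+# and start the scheduler with: kube-scheduler --config <path-to-this-file>
+apiVersion: kubescheduler.config.k8s.io/v1alpha2
+kind: KubeSchedulerConfiguration
+profiles:
+  - schedulerName: default-scheduler
+```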
+ +## Scheduling plugins + +The following plugins, enabled by default, implement one or more of these +extension points: + +- `DefaultTopologySpread`: Favors spreading across nodes for Pods that belong to + {{< glossary_tooltip text="Services" term_id="service" >}}, + {{< glossary_tooltip text="ReplicaSets" term_id="replica-set" >}} and + {{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}}. + Extension points: `PreScore`, `Score`. +- `ImageLocality`: Favors nodes that already have the container images that the + Pod runs. + Extension points: `Score`. +- `TaintToleration`: Implements + [taints and tolerations](/docs/concepts/configuration/taint-and-toleration/). + Extension points: `Filter`, `PreScore`, `Score`. +- `NodeName`: Checks if a Pod spec node name matches the current node. + Extension points: `Filter`. +- `NodePorts`: Checks if a node has free ports for the requested Pod ports. + Extension points: `PreFilter`, `Filter`. +- `NodePreferAvoidPods`: Scores nodes according to the node + {{< glossary_tooltip text="annotation" term_id="annotation" >}} + `scheduler.alpha.kubernetes.io/preferAvoidPods`. + Extension points: `Score`. +- `NodeAffinity`: Implements + [node selectors](/docs/concepts/configuration/assign-pod-node/#nodeselector) + and [node affinity](/docs/concepts/configuration/assign-pod-node/#node-affinity). + Extension points: `Filter`, `Score`. +- `PodTopologySpread`: Implements + [Pod topology spread](/docs/concepts/workloads/pods/pod-topology-spread-constraints/). + Extension points: `PreFilter`, `Filter`, `PreScore`, `Score`. +- `NodeUnschedulable`: Filters out nodes that have `.spec.unschedulable` set to + true. + Extension points: `Filter`. +- `NodeResourcesFit`: Checks if the node has all the resources that the Pod is + requesting. + Extension points: `PreFilter`, `Filter`. +- `NodeResourcesBalancedAllocation`: Favors nodes that would obtain a more + balanced resource usage if the Pod is scheduled there. + Extension points: `Score`. +- `NodeResourcesLeastAllocated`: Favors nodes that have a low allocation of + resources. + Extension points: `Score`. +- `VolumeBinding`: Checks if the node has, or can bind, the requested + {{< glossary_tooltip text="volumes" term_id="volume" >}}. + Extension points: `Filter`. +- `VolumeRestrictions`: Checks that volumes mounted in the node satisfy + restrictions that are specific to the volume provider. + Extension points: `Filter`. +- `VolumeZone`: Checks that volumes requested satisfy any zone requirements they + might have. + Extension points: `Filter`. +- `NodeVolumeLimits`: Checks that CSI volume limits can be satisfied for the + node. + Extension points: `Filter`. +- `EBSLimits`: Checks that AWS EBS volume limits can be satisfied for the node. + Extension points: `Filter`. +- `GCEPDLimits`: Checks that GCP-PD volume limits can be satisfied for the node. + Extension points: `Filter`. +- `AzureDiskLimits`: Checks that Azure disk volume limits can be satisfied for + the node. + Extension points: `Filter`. +- `InterPodAffinity`: Implements + [inter-Pod affinity and anti-affinity](/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity). + Extension points: `PreFilter`, `Filter`, `PreScore`, `Score`. +- `PrioritySort`: Provides the default priority-based sorting. + Extension points: `QueueSort`. +- `DefaultBinder`: Provides the default binding mechanism. + Extension points: `Bind`.
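+
+To show how these plugin names and extension points appear in a profile, here is a
+hedged sketch that disables the default `NodeResourcesLeastAllocated` Score plugin
+and enables `NodeResourcesMostAllocated` (one of the optional plugins listed below)
+in its place; the weight shown is arbitrary:
+
+```yaml
+apiVersion: kubescheduler.config.k8s.io/v1alpha2
+kind: KubeSchedulerConfiguration
+profiles:
+  - schedulerName: default-scheduler
+    plugins:
+      score:
+        disabled:
+          - name: NodeResourcesLeastAllocated
+        enabled:
+          - name: NodeResourcesMostAllocated
+            weight: 2
+```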
+ +You can also enable the following plugins, through the component config APIs; +they are not enabled by default: + +- `NodeResourcesMostAllocated`: Favors nodes that have a high allocation of + resources. + Extension points: `Score`. +- `RequestedToCapacityRatio`: Favors nodes according to a configured function of + the allocated resources. + Extension points: `Score`. +- `NodeResourceLimits`: Favors nodes that satisfy the Pod resource limits. + Extension points: `PreScore`, `Score`. +- `CinderVolume`: Checks that OpenStack Cinder volume limits can be satisfied + for the node. + Extension points: `Filter`. +- `NodeLabel`: Filters and/or scores a node according to configured + {{< glossary_tooltip text="label(s)" term_id="label" >}}. + Extension points: `Filter`, `Score`. +- `ServiceAffinity`: Checks that Pods that belong to a + {{< glossary_tooltip term_id="service" >}} fit in a set of nodes defined by + configured labels. This plugin also favors spreading the Pods belonging to a + Service across nodes. + Extension points: `PreFilter`, `Filter`, `Score`. + +## Multiple profiles + +When using the component config API v1alpha2, a scheduler can be configured to +run more than one profile. Each profile has an associated scheduler name. +Pods that want to be scheduled according to a specific profile can include +the corresponding scheduler name in their `.spec.schedulerName`. + +By default, one profile with the scheduler name `default-scheduler` is created. +This profile includes the default plugins described above. When declaring more +than one profile, a unique scheduler name for each of them is required. + +If a Pod doesn't specify a scheduler name, kube-apiserver will set it to +`default-scheduler`. Therefore, a profile with this scheduler name should exist +to get those Pods scheduled. + +{{< note >}} +A Pod's scheduling events have `.spec.schedulerName` as the ReportingController. +Events for leader election use the scheduler name of the first profile in the +list. +{{< /note >}} + +{{< note >}} +All profiles must use the same plugin in the QueueSort extension point and have +the same configuration parameters (if applicable). This is because the scheduler +only has one queue of pending Pods. +{{< /note >}} + +{{% /capture %}} + +{{% capture whatsnext %}} +* Learn about [scheduling](/docs/concepts/scheduling/kube-scheduler/) +{{% /capture %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_admin.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_admin.conf.md index cb532d9b98026..bed37769d03e0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_admin.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_admin.conf.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew admin.conf [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
- - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md index dc10f4190fc33..be586b8e4b2ab 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_all.md @@ -59,13 +59,6 @@ kubeadm alpha certs renew all [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md index 0ce4b3aac9133..33113474a3968 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-etcd-client.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew apiserver-etcd-client [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md index c1b9777480834..5123a9a0e10c8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver-kubelet-client.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew apiserver-kubelet-client [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md index 63dc1b4fc2724..7dda656795560 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_apiserver.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew apiserver [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. 
- - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_controller-manager.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_controller-manager.conf.md index bb208fa1b4f1b..9e33b47bc45ba 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_controller-manager.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_controller-manager.conf.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew controller-manager.conf [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md index 57f86e1874037..12c57913dcad2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-healthcheck-client.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew etcd-healthcheck-client [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md index 2b86d657b6d41..3fa0f3fd52946 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-peer.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew etcd-peer [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md index 827febf1a9cc1..3484542725b4f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_etcd-server.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew etcd-server [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. 
- - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md index 2945b4dafa1c5..1bfc2f1d312fb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_front-proxy-client.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew front-proxy-client [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_scheduler.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_scheduler.conf.md index f4970fde9cb42..77537a7452548 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_scheduler.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_certs_renew_scheduler.conf.md @@ -65,13 +65,6 @@ kubeadm alpha certs renew scheduler.conf [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --use-api - - - Use the Kubernetes certificate API to renew certificates - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md index 379a01f535c0d..88fb003f6833e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubelet_config_enable-dynamic.md @@ -16,7 +16,7 @@ kubeadm alpha kubelet config enable-dynamic [flags] ``` # Enable dynamic kubelet configuration for a Node. - kubeadm alpha phase kubelet enable-dynamic-config --node-name node-1 --kubelet-version 1.17.0 + kubeadm alpha phase kubelet enable-dynamic-config --node-name node-1 --kubelet-version 1.18.0 WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it may have surprising side-effects at this stage. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md index 61894d48dddc3..c0b924e5d9a04 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md @@ -17,6 +17,13 @@ kubeadm config images list [flags] + + --allow-missing-template-keys     Default: true + + + If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. + + --config string @@ -24,11 +31,18 @@ kubeadm config images list [flags] Path to a kubeadm configuration file. + + -o, --experimental-output string     Default: "text" + + + Output format. 
One of: text|json|yaml|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-file. + + --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md index 2c5cbaca25c11..2a03893d45a40 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md @@ -35,7 +35,7 @@ kubeadm config images pull [flags] --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md index 7cc5bbb078c74..d19bb01a99c18 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md @@ -132,7 +132,7 @@ kubeadm init [flags] --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md index f649bd04d8d9c..ff285596d5204 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md @@ -49,7 +49,7 @@ kubeadm init phase addon all [flags] --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md index 9da2cf2bd355d..40bc2e8101724 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md @@ -28,7 +28,7 @@ kubeadm init phase addon coredns [flags] --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md index c22fc6141a7f1..fa735c27effc9 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md @@ -88,7 +88,7 @@ kubeadm init phase control-plane all [flags] --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md index 9444b664cd1b8..06348123864a5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md @@ -70,7 +70,7 @@ kubeadm init phase control-plane apiserver [flags] --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md index debdc2485e7ad..b6b9f6d261895 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md @@ -70,7 +70,7 @@ kubeadm upgrade apply [version] --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md index 0f7e472655efe..7ec3ff6bbc719 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md @@ -66,13 +66,6 @@ kubeadm upgrade node [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --kubelet-version string - - - The *desired* version for the kubelet config after the upgrade. If not specified, the KubernetesVersion from the kubeadm-config ConfigMap will be used - - --skip-phases stringSlice diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md index 47ba9ada499a0..4b90ef8f344da 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md @@ -2,7 +2,7 @@ ### Synopsis -Download the kubelet configuration from a ConfigMap of the form "kubelet-config-1.X" in the cluster, where X is the minor version of the kubelet. kubeadm uses the KuberneteVersion field in the kubeadm-config ConfigMap to determine what the _desired_ kubelet version is, but the user can override this by using the --kubelet-version parameter. +Download the kubelet configuration from a ConfigMap of the form "kubelet-config-1.X" in the cluster, where X is the minor version of the kubelet. kubeadm uses the KuberneteVersion field in the kubeadm-config ConfigMap to determine what the _desired_ kubelet version is. ``` kubeadm upgrade node phase kubelet-config [flags] @@ -38,13 +38,6 @@ kubeadm upgrade node phase kubelet-config [flags] The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. - - --kubelet-version string - - - The *desired* version for the kubelet config after the upgrade. If not specified, the KubernetesVersion from the kubeadm-config ConfigMap will be used - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md index d69233d4fc182..569e2bf8ae25d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md @@ -42,7 +42,7 @@ kubeadm upgrade plan [version] [flags] --feature-gates string - A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false) + A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) diff --git a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md index ca02a61dac5ca..7186f28071179 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md +++ b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md @@ -447,21 +447,11 @@ A ServiceAccount for `kube-proxy` is created in the `kube-system` namespace; the #### DNS -Note that: - +- In Kubernetes version 1.18 kube-dns usage with kubeadm is deprecated and will be removed in a future release - The CoreDNS service is named `kube-dns`. This is done to prevent any interruption in service when the user is switching the cluster DNS from kube-dns to CoreDNS or vice-versa -- In Kubernetes version 1.10 and earlier, you must enable CoreDNS with `--feature-gates=CoreDNS=true` -- In Kubernetes version 1.11 and 1.12, CoreDNS is the default DNS server and you must -invoke kubeadm with `--feature-gates=CoreDNS=false` to install kube-dns instead -- In Kubernetes version 1.13 and later, the `CoreDNS` feature gate is no longer available and kube-dns can be installed using the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon) - - -A ServiceAccount for CoreDNS/kube-dns is created in the `kube-system` namespace. - -Deploy the `kube-dns` Deployment and Service: - -- It's the upstream CoreDNS deployment relatively unmodified +the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon) +- A ServiceAccount for CoreDNS/kube-dns is created in the `kube-system` namespace. - The `kube-dns` ServiceAccount is bound to the privileges in the `system:kube-dns` ClusterRole ## kubeadm join phases internal design diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md index 5db402766d4bf..c6374e54e8689 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md @@ -157,6 +157,8 @@ dns: type: "kube-dns" ``` +Please note that kube-dns usage with kubeadm is deprecated as of v1.18 and will be removed in a future release. + For more details on each field in the `v1beta2` configuration you can navigate to our [API reference pages.] (https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md index 5c10b0ce73303..9b006d15c012d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -67,10 +67,14 @@ following steps: 1. Installs a DNS server (CoreDNS) and the kube-proxy addon components via the API server. In Kubernetes version 1.11 and later CoreDNS is the default DNS server. - To install kube-dns instead of CoreDNS, the DNS addon has to be configured in the kubeadm `ClusterConfiguration`. For more information about the configuration see the section - `Using kubeadm init with a configuration file` below. + To install kube-dns instead of CoreDNS, the DNS addon has to be configured in the kubeadm `ClusterConfiguration`. + For more information about the configuration see the section `Using kubeadm init with a configuration file` below. 
Please note that although the DNS server is deployed, it will not be scheduled until CNI is installed. + {{< warning >}} + kube-dns usage with kubeadm is deprecated as of v1.18 and will be removed in a future release. + {{< /warning >}} + ### Using init phases with kubeadm {#init-phases} Kubeadm allows you to create a control-plane node in phases using the `kubeadm init phase` command. diff --git a/content/en/docs/reference/tools.md b/content/en/docs/reference/tools.md index 4264ae873f6ed..349ce58f2c372 100644 --- a/content/en/docs/reference/tools.md +++ b/content/en/docs/reference/tools.md @@ -18,11 +18,6 @@ Kubernetes contains several built-in tools to help you work with the Kubernetes [`kubeadm`](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) is the command line tool for easily provisioning a secure Kubernetes cluster on top of physical or cloud servers or virtual machines (currently in alpha). -## Kubefed - -[`kubefed`](/docs/tasks/federation/set-up-cluster-federation-kubefed/) is the command line tool -to help you administrate your federated clusters. - ## Minikube [`minikube`](/docs/tasks/tools/install-minikube/) is a tool that makes it diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index 8c7464d6948b2..fbdbf14f878d5 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -336,12 +336,14 @@ Once the last finalizer is removed, the resource is actually removed from etcd. ## Dry-run -{{< feature-state for_k8s_version="v1.13" state="beta" >}} In version 1.13, the dry-run beta feature is enabled by default. The modifying verbs (`POST`, `PUT`, `PATCH`, and `DELETE`) can accept requests in a dry-run mode. DryRun mode helps to evaluate a request through the typical request stages (admission chain, validation, merge conflicts) up until persisting objects to storage. The response body for the request is as close as possible to a non-dry-run response. The system guarantees that dry-run requests will not be persisted in storage or have any other side effects. + {{< feature-state for_k8s_version="v1.18" state="stable" >}} + +The modifying verbs (`POST`, `PUT`, `PATCH`, and `DELETE`) can accept requests in a _dry run_ mode. Dry run mode helps to evaluate a request through the typical request stages (admission chain, validation, merge conflicts) up until persisting objects to storage. The response body for the request is as close as possible to a non-dry-run response. The system guarantees that dry-run requests will not be persisted in storage or have any other side effects. ### Make a dry-run request -Dry-run is triggered by setting the `dryRun` query parameter. This parameter is a string, working as an enum, and in 1.13 the only accepted values are: +Dry-run is triggered by setting the `dryRun` query parameter. This parameter is a string, working as an enum, and the only accepted values are: * `All`: Every stage runs as normal, except for the final storage stage. Admission controllers are run to check that the request is valid, mutating controllers mutate the request, merge is performed on `PATCH`, fields are defaulted, and schema validation occurs. The changes are not persisted to the underlying storage, but the final object which would have been persisted is still returned to the user, along with the normal status code. 
If the request would trigger an admission controller which would have side effects, the request will be failed rather than risk an unwanted side effect. All built in admission control plugins support dry-run. Additionally, admission webhooks can declare in their [configuration object](/docs/reference/generated/kubernetes-api/v1.13/#webhook-v1beta1-admissionregistration-k8s-io) that they do not have side effects by setting the sideEffects field to "None". If a webhook actually does have side effects, then the sideEffects field should be set to "NoneOnDryRun", and the webhook should also be modified to understand the `DryRun` field in AdmissionReview, and prevent side effects on dry-run requests. * Leave the value empty, which is also the default: Keep the default modifying behavior. @@ -386,6 +388,8 @@ Some values of an object are typically generated before the object is persisted. {{< feature-state for_k8s_version="v1.16" state="beta" >}} +{{< note >}}Starting from Kubernetes v1.18, if you have Server Side Apply enabled then the control plane tracks managed fields for all newly created objects.{{< /note >}} + ### Introduction Server Side Apply helps users and controllers manage their resources via @@ -515,6 +519,13 @@ content type `application/apply-patch+yaml`) and `Update` (all other operations which modify the object). Both operations update the `managedFields`, but behave a little differently. +{{< note >}} +Whether you are submitting JSON data or YAML data, use `application/apply-patch+yaml` as the +Content-Type header value. + +All JSON documents are valid YAML. +{{< /note >}} + For instance, only the apply operation fails on conflicts while update does not. Also, apply operations are required to identify themselves by providing a `fieldManager` query parameter, while the query parameter is optional for update @@ -626,8 +637,9 @@ case. With the Server Side Apply feature enabled, the `PATCH` endpoint accepts the additional `application/apply-patch+yaml` content type. Users of Server Side -Apply can send partially specified objects to this endpoint. An applied config -should always include every field that the applier has an opinion about. +Apply can send partially specified objects as YAML to this endpoint. +When applying a configuration, one should always include all the fields +that they have an opinion about. ### Clearing ManagedFields @@ -661,6 +673,11 @@ the managedFields, this will result in the managedFields being reset first and the other changes being processed afterwards. As a result the applier takes ownership of any fields updated in the same request. +{{< caution >}} Server Side Apply does not correctly track ownership on +sub-resources that don't receive the resource object type. If you are +using Server Side Apply with such a sub-resource, the changed fields +won't be tracked. {{< /caution >}} + ### Disabling the feature Server Side Apply is a beta feature, so it is enabled by default. 
To turn this diff --git a/content/en/docs/reference/using-api/api-overview.md b/content/en/docs/reference/using-api/api-overview.md index a004c863537fe..3820085e6bb35 100644 --- a/content/en/docs/reference/using-api/api-overview.md +++ b/content/en/docs/reference/using-api/api-overview.md @@ -91,12 +91,14 @@ The two paths that support extending the API with [custom resources](/docs/conce - [aggregator](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/aggregated-api-servers.md) for a full set of Kubernetes API semantics to implement their own apiserver. -## Enabling API groups +## Enabling or disabling API groups Certain resources and API groups are enabled by default. You can enable or disable them by setting `--runtime-config` on the apiserver. `--runtime-config` accepts comma separated values. For example: + - to disable batch/v1, set `--runtime-config=batch/v1=false` - to enable batch/v2alpha1, set `--runtime-config=batch/v2alpha1` + The flag accepts comma separated set of key=value pairs describing runtime configuration of the apiserver. {{< note >}} @@ -104,12 +106,10 @@ When you enable or disable groups or resources, you need to restart the apiserve to pick up the `--runtime-config` changes. {{< /note >}} -## Enabling resources in the groups - -DaemonSets, Deployments, HorizontalPodAutoscalers, Ingress, Jobs and ReplicaSets are enabled by default. -You can enable other extensions resources by setting `--runtime-config` on -apiserver. `--runtime-config` accepts comma separated values. For example, to disable deployments and jobs, set -`--runtime-config=extensions/v1beta1/deployments=false,extensions/v1beta1/jobs=false` -{{% /capture %}} +## Enabling specific resources in the extensions/v1beta1 group +DaemonSets, Deployments, StatefulSet, NetworkPolicies, PodSecurityPolicies and ReplicaSets in the `extensions/v1beta1` API group are disabled by default. +For example: to enable deployments and daemonsets, set +`--runtime-config=extensions/v1beta1/deployments=true,extensions/v1beta1/daemonsets=true`. +{{< note >}}Individual resource enablement/disablement is only supported in the `extensions/v1beta1` API group for legacy reasons.{{< /note >}} diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index 093490b3453e1..4f76e16352ecd 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -60,6 +60,7 @@ their authors, not the Kubernetes team. | PHP | [github.com/allansun/kubernetes-php-client](https://github.com/allansun/kubernetes-php-client) | | PHP | [github.com/travisghansen/kubernetes-client-php](https://github.com/travisghansen/kubernetes-client-php) | | Python | [github.com/eldarion-gondor/pykube](https://github.com/eldarion-gondor/pykube) | +| Python | [github.com/fiaas/k8s](https://github.com/fiaas/k8s) | | Python | [github.com/mnubo/kubernetes-py](https://github.com/mnubo/kubernetes-py) | | Python | [github.com/tomplus/kubernetes_asyncio](https://github.com/tomplus/kubernetes_asyncio) | | Ruby | [github.com/Ch00k/kuber](https://github.com/Ch00k/kuber) | diff --git a/content/en/docs/setup/_index.md b/content/en/docs/setup/_index.md index 6c903d4e60f15..880dd460241aa 100644 --- a/content/en/docs/setup/_index.md +++ b/content/en/docs/setup/_index.md @@ -24,7 +24,7 @@ This section covers different options to set up and run Kubernetes. 
Different Kubernetes solutions meet different requirements: ease of maintenance, security, control, available resources, and expertise required to operate and manage a cluster. -You can deploy a Kubernetes cluster on a local machine, cloud, on-prem datacenter; or choose a managed Kubernetes cluster. You can also create custom solutions across a wide range of cloud providers, or bare metal environments. +You can deploy a Kubernetes cluster on a local machine, cloud, on-prem datacenter, or choose a managed Kubernetes cluster. You can also create custom solutions across a wide range of cloud providers, or bare metal environments. More simply, you can create a Kubernetes cluster in learning and production environments. @@ -53,63 +53,6 @@ If you're learning Kubernetes, use the Docker-based solutions: tools supported b When evaluating a solution for a production environment, consider which aspects of operating a Kubernetes cluster (or _abstractions_) you want to manage yourself or offload to a provider. -Some possible abstractions of a Kubernetes cluster are {{< glossary_tooltip text="applications" term_id="applications" >}}, {{< glossary_tooltip text="data plane" term_id="data-plane" >}}, {{< glossary_tooltip text="control plane" term_id="control-plane" >}}, {{< glossary_tooltip text="cluster infrastructure" term_id="cluster-infrastructure" >}}, and {{< glossary_tooltip text="cluster operations" term_id="cluster-operations" >}}. - -The following diagram lists the possible abstractions of a Kubernetes cluster and whether an abstraction is self-managed or managed by a provider. - -Production environment solutions![Production environment solutions](/images/docs/KubernetesSolutions.svg) - -{{< table caption="Production environment solutions table lists the providers and the solutions." >}} -The following production environment solutions table lists the providers and the solutions that they offer. 
- -|Providers | Managed | Turnkey cloud | On-prem datacenter | Custom (cloud) | Custom (On-premises VMs)| Custom (Bare Metal) | -| --------- | ------ | ------ | ------ | ------ | ------ | ----- | -| [Agile Stacks](https://www.agilestacks.com/products/kubernetes)| | ✔ | ✔ | | | -| [Alibaba Cloud](https://www.alibabacloud.com/product/kubernetes)| | ✔ | | | | -| [Amazon](https://aws.amazon.com) | [Amazon EKS](https://aws.amazon.com/eks/) |[Amazon EC2](https://aws.amazon.com/ec2/) | | | | -| [AppsCode](https://appscode.com/products/pharmer/) | ✔ | | | | | -| [APPUiO](https://appuio.ch/)  | ✔ | ✔ | ✔ | | | | -| [Banzai Cloud Pipeline Kubernetes Engine (PKE)](https://banzaicloud.com/products/pke/) | | ✔ | | ✔ | ✔ | ✔ | -| [CenturyLink Cloud](https://www.ctl.io/) | | ✔ | | | | -| [Cisco Container Platform](https://cisco.com/go/containers) | | | ✔ | | | -| [Cloud Foundry Container Runtime (CFCR)](https://docs-cfcr.cfapps.io/) | | | | ✔ |✔ | -| [CloudStack](https://cloudstack.apache.org/) | | | | | ✔| -| [Canonical](https://ubuntu.com/kubernetes) | ✔ | ✔ | ✔ | ✔ |✔ | ✔ -| [Containership](https://containership.io) | ✔ |✔ | | | | -| [D2iQ](https://d2iq.com/) | | [Kommander](https://d2iq.com/solutions/ksphere) | [Konvoy](https://d2iq.com/solutions/ksphere/konvoy) | [Konvoy](https://d2iq.com/solutions/ksphere/konvoy) | [Konvoy](https://d2iq.com/solutions/ksphere/konvoy) | [Konvoy](https://d2iq.com/solutions/ksphere/konvoy) | -| [Digital Rebar](https://provision.readthedocs.io/en/tip/README.html) | | | | | | ✔ -| [DigitalOcean](https://www.digitalocean.com/products/kubernetes/) | ✔ | | | | | -| [Docker Enterprise](https://www.docker.com/products/docker-enterprise) | |✔ | ✔ | | | ✔ -| [Gardener](https://gardener.cloud/) | ✔ | ✔ | ✔ | ✔ | ✔ | [Custom Extensions](https://github.com/gardener/gardener/blob/master/docs/extensions/overview.md) | -| [Giant Swarm](https://www.giantswarm.io/) | ✔ | ✔ | ✔ | | -| [Google](https://cloud.google.com/) | [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/) | [Google Compute Engine (GCE)](https://cloud.google.com/compute/)|[GKE On-Prem](https://cloud.google.com/gke-on-prem/) | | | | | | | | -| [Hidora](https://hidora.com/) | ✔ | ✔| ✔ | | | | | | | | -| [IBM](https://www.ibm.com/in-en/cloud) | [IBM Cloud Kubernetes Service](https://cloud.ibm.com/kubernetes/catalog/cluster)| |[IBM Cloud Private](https://www.ibm.com/in-en/cloud/private) | | -| [Ionos](https://www.ionos.com/enterprise-cloud) | [Ionos Managed Kubernetes](https://www.ionos.com/enterprise-cloud/managed-kubernetes) | [Ionos Enterprise Cloud](https://www.ionos.com/enterprise-cloud) | | -| [Kontena Pharos](https://www.kontena.io/pharos/) | |✔| ✔ | | | -| [KubeOne](https://kubeone.io/) | | ✔ | ✔ | ✔ | ✔ | ✔ | -| [Kubermatic](https://kubermatic.io/) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | -| [KubeSail](https://kubesail.com/) | ✔ | | | | | -| [Kubespray](https://kubespray.io/#/) | | | |✔ | ✔ | ✔ | -| [Kublr](https://kublr.com/) |✔ | ✔ |✔ |✔ |✔ |✔ | -| [Microsoft Azure](https://azure.microsoft.com) | [Azure Kubernetes Service (AKS)](https://azure.microsoft.com/en-us/services/kubernetes-service/) | | | | | -| [Mirantis Cloud Platform](https://www.mirantis.com/software/kubernetes/) | | | ✔ | | | -| [NetApp Kubernetes Service (NKS)](https://cloud.netapp.com/kubernetes-service) | ✔ | ✔ | ✔ | | | -| [Nirmata](https://www.nirmata.com/) | | ✔ | ✔ | | | -| [Nutanix](https://www.nutanix.com/en) | [Nutanix Karbon](https://www.nutanix.com/products/karbon) | [Nutanix Karbon](https://www.nutanix.com/products/karbon) | 
| | [Nutanix AHV](https://www.nutanix.com/products/acropolis/virtualization) | -| [OpenNebula](https://www.opennebula.org) |[OpenNebula Kubernetes](https://marketplace.opennebula.systems/docs/service/kubernetes.html) | | | | | -| [OpenShift](https://www.openshift.com) |[OpenShift Dedicated](https://www.openshift.com/products/dedicated/) and [OpenShift Online](https://www.openshift.com/products/online/) | | [OpenShift Container Platform](https://www.openshift.com/products/container-platform/) | | [OpenShift Container Platform](https://www.openshift.com/products/container-platform/) |[OpenShift Container Platform](https://www.openshift.com/products/container-platform/) -| [Oracle Cloud Infrastructure Container Engine for Kubernetes (OKE)](https://docs.cloud.oracle.com/iaas/Content/ContEng/Concepts/contengoverview.htm) | ✔ | ✔ | | | | -| [oVirt](https://www.ovirt.org/) | | | | | ✔ | -| [Pivotal](https://pivotal.io/) | | [Enterprise Pivotal Container Service (PKS)](https://pivotal.io/platform/pivotal-container-service) | [Enterprise Pivotal Container Service (PKS)](https://pivotal.io/platform/pivotal-container-service) | | | -| [Platform9](https://platform9.com/) | [Platform9 Managed Kubernetes](https://platform9.com/managed-kubernetes/) | | [Platform9 Managed Kubernetes](https://platform9.com/managed-kubernetes/) | ✔ | ✔ | ✔ -| [Rancher](https://rancher.com/) | | [Rancher 2.x](https://rancher.com/docs/rancher/v2.x/en/) | | [Rancher Kubernetes Engine (RKE)](https://rancher.com/docs/rke/latest/en/) | | [k3s](https://k3s.io/) -| [Supergiant](https://supergiant.io/) | |✔ | | | | -| [SUSE](https://www.suse.com/) | | ✔ | | | | -| [SysEleven](https://www.syseleven.io/) | ✔ | | | | | -| [Tencent Cloud](https://intl.cloud.tencent.com/) | [Tencent Kubernetes Engine](https://intl.cloud.tencent.com/product/tke) | ✔ | ✔ | | | ✔ | -| [VEXXHOST](https://vexxhost.com/) | ✔ | ✔ | | | | -| [VMware](https://cloud.vmware.com/) | [VMware Cloud PKS](https://cloud.vmware.com/vmware-cloud-pks) |[VMware Enterprise PKS](https://cloud.vmware.com/vmware-enterprise-pks) | [VMware Enterprise PKS](https://cloud.vmware.com/vmware-enterprise-pks) | [VMware Essential PKS](https://cloud.vmware.com/vmware-essential-pks) | |[VMware Essential PKS](https://cloud.vmware.com/vmware-essential-pks) -| [Z.A.R.V.I.S.](https://zarvis.ai/) | ✔ | | | | | | +For a list of [Certified Kubernetes](https://github.com/cncf/k8s-conformance/#certified-kubernetes) providers, see "[Partners](https://kubernetes.io/partners/#conformance)". {{% /capture %}} diff --git a/content/en/docs/setup/best-practices/multiple-zones.md b/content/en/docs/setup/best-practices/multiple-zones.md index b01cbceedf4bc..ba58df028f55f 100644 --- a/content/en/docs/setup/best-practices/multiple-zones.md +++ b/content/en/docs/setup/best-practices/multiple-zones.md @@ -188,7 +188,7 @@ kubernetes-minion-wf8i Ready 2m v1.13.0 Create a volume using the dynamic volume creation (only PersistentVolumes are supported for zone affinity): -```json +```bash kubectl apply -f - <}}, you would run the following minikube start --kubernetes-version {{< param "fullversion" >}} ``` #### Specifying the VM driver -You can change the VM driver by adding the `--vm-driver=` flag to `minikube start`. +You can change the VM driver by adding the `--driver=` flag to `minikube start`. For example the command would be. 
```shell -minikube start --vm-driver= +minikube start --driver= ``` Minikube supports the following drivers: {{< note >}} - See [DRIVERS](https://git.k8s.io/minikube/docs/drivers.md) for details on supported drivers and how to install + See [DRIVERS](https://minikube.sigs.k8s.io/docs/reference/drivers/) for details on supported drivers and how to install plugins. {{< /note >}} * virtualbox * vmwarefusion -* kvm2 ([driver installation](https://git.k8s.io/minikube/docs/drivers.md#kvm2-driver)) -* hyperkit ([driver installation](https://git.k8s.io/minikube/docs/drivers.md#hyperkit-driver)) -* hyperv ([driver installation](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperv-driver)) +* docker (EXPERIMENTAL) +* kvm2 ([driver installation](https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/)) +* hyperkit ([driver installation](https://minikube.sigs.k8s.io/docs/reference/drivers/hyperkit/)) +* hyperv ([driver installation](https://minikube.sigs.k8s.io/docs/reference/drivers/hyperv/)) Note that the IP below is dynamic and can change. It can be retrieved with `minikube ip`. -* vmware ([driver installation](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#vmware-unified-driver)) (VMware unified driver) +* vmware ([driver installation](https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/)) (VMware unified driver) +* parallels ([driver installation](https://minikube.sigs.k8s.io/docs/reference/drivers/parallels/)) * none (Runs the Kubernetes components on the host and not in a virtual machine. You need to be running Linux and to have {{< glossary_tooltip term_id="docker" >}} installed.) {{< caution >}} @@ -330,8 +332,8 @@ Starting the cluster again will restore it to its previous state. The `minikube delete` command can be used to delete your cluster. This command shuts down and deletes the Minikube Virtual Machine. No data or state is preserved. -### Upgrading minikube -See [upgrade minikube](https://minikube.sigs.k8s.io/docs/start/macos/) +### Upgrading Minikube +If you are using macOS, see [Upgrading Minikube](https://minikube.sigs.k8s.io/docs/start/macos/#upgrading-minikube) to upgrade your existing minikube installation. ## Interacting with Your Cluster diff --git a/content/en/docs/setup/production-environment/container-runtimes.md b/content/en/docs/setup/production-environment/container-runtimes.md index 493e2ae5efe6e..972bf1810bc7d 100644 --- a/content/en/docs/setup/production-environment/container-runtimes.md +++ b/content/en/docs/setup/production-environment/container-runtimes.md @@ -64,7 +64,7 @@ is to drain the Node from its workloads, remove it from the cluster and re-join ## Docker On each of your machines, install Docker. -Version 19.03.4 is recommended, but 1.13.1, 17.03, 17.06, 17.09, 18.06 and 18.09 are known to work as well. +Version 19.03.8 is recommended, but 1.13.1, 17.03, 17.06, 17.09, 18.06 and 18.09 are known to work as well. Keep track of the latest verified Docker version in the Kubernetes release notes. Use the following commands to install Docker on your system: @@ -88,9 +88,9 @@ add-apt-repository \ ## Install Docker CE. apt-get update && apt-get install -y \ - containerd.io=1.2.10-3 \ - docker-ce=5:19.03.4~3-0~ubuntu-$(lsb_release -cs) \ - docker-ce-cli=5:19.03.4~3-0~ubuntu-$(lsb_release -cs) + containerd.io=1.2.13-1 \ + docker-ce=5:19.03.8~3-0~ubuntu-$(lsb_release -cs) \ + docker-ce-cli=5:19.03.8~3-0~ubuntu-$(lsb_release -cs) # Setup daemon. 
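# Configuring the Docker daemon at this point matters because Docker and the
# kubelet must agree on a single cgroup manager; on systemd-based hosts that
# is normally the systemd cgroup driver, together with log rotation and a
# sane storage driver. (Explanatory note only; the exact daemon.json contents
# are defined by the guide above, not by this comment.)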
cat > /etc/docker/daemon.json <}} -{{< tab name="Ubuntu 16.04" codelang="bash" >}} +{{< tab name="Debian" codelang="bash" >}} +# Debian Unstable/Sid +echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Unstable/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Unstable/Release.key -O- | sudo apt-key add - -# Install prerequisites -apt-get update -apt-get install -y software-properties-common +# Debian Testing +echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_Testing/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_Testing/Release.key -O- | sudo apt-key add - + +# Debian 10 +echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Debian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Debian_10/Release.key -O- | sudo apt-key add - -add-apt-repository ppa:projectatomic/ppa -apt-get update +# Raspbian 10 +echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/Raspbian_10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/Raspbian_10/Release.key -O- | sudo apt-key add - # Install CRI-O -apt-get install -y cri-o-1.15 +sudo apt-get install cri-o-1.17 +{{< /tab >}} + +{{< tab name="Ubuntu 18.04, 19.04 and 19.10" codelang="bash" >}} +# Setup repository +. /etc/os-release +sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/x${NAME}_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" +wget -nv https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/x${NAME}_${VERSION_ID}/Release.key -O- | sudo apt-key add - +sudo apt-get update +# Install CRI-O +sudo apt-get install cri-o-1.17 {{< /tab >}} -{{< tab name="CentOS/RHEL 7.4+" codelang="bash" >}} +{{< tab name="CentOS/RHEL 7.4+" codelang="bash" >}} # Install prerequisites yum-config-manager --add-repo=https://cbs.centos.org/repos/paas7-crio-115-release/x86_64/os/ # Install CRI-O yum install --nogpgcheck -y cri-o +{{< /tab >}} +{{< tab name="openSUSE Tumbleweed" codelang="bash" >}} +sudo zypper install cri-o {{< /tab >}} {{< /tabs >}} diff --git a/content/en/docs/setup/production-environment/tools/kops.md b/content/en/docs/setup/production-environment/tools/kops.md index 03d45f7827a13..10ae6dfa65285 100644 --- a/content/en/docs/setup/production-environment/tools/kops.md +++ b/content/en/docs/setup/production-environment/tools/kops.md @@ -49,14 +49,12 @@ Download the latest release with the command: curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-darwin-amd64 ``` -To download a specific version, replace the +To download a specific version, replace the following portion of the command with the specific kops version. ```shell $(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) ``` -portion of the command with the specific version. 
- For example, to download kops version v1.15.0 type: ```shell diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index 889b85f64d7dc..9d35fa2c5a0df 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -344,10 +344,6 @@ Please refer to this installation guide: [Contiv-VPP Manual Installation](https: For `flannel` to work correctly, you must pass `--pod-network-cidr=10.244.0.0/16` to `kubeadm init`. -Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` -to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information -please see [Network Plugin Requirements](/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). - Make sure that your firewall rules allow UDP ports 8285 and 8472 traffic for all hosts participating in the overlay network. The [Firewall](https://coreos.com/flannel/docs/latest/troubleshooting.html#firewalls) section of Flannel's troubleshooting guide explains about this in more detail. Flannel works on `amd64`, `arm`, `arm64`, `ppc64le` and `s390x` architectures under Linux. @@ -362,9 +358,6 @@ For more information about `flannel`, see [the CoreOS flannel repository on GitH {{% /tab %}} {{% tab name="Kube-router" %}} -Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` -to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information -please see [Network Plugin Requirements](/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). Kube-router relies on kube-controller-manager to allocate Pod CIDR for the nodes. Therefore, use `kubeadm init` with the `--pod-network-cidr` flag. @@ -374,9 +367,6 @@ For information on using the `kubeadm` tool to set up a Kubernetes cluster with {{% /tab %}} {{% tab name="Weave Net" %}} -Set `/proc/sys/net/bridge/bridge-nf-call-iptables` to `1` by running `sysctl net.bridge.bridge-nf-call-iptables=1` -to pass bridged IPv4 traffic to iptables' chains. This is a requirement for some CNI plugins to work, for more information -please see [Network Plugin Requirements](/docs/concepts/cluster-administration/network-plugins/#network-plugin-requirements). For more information on setting up your Kubernetes cluster with Weave Net, please see [Integrating Kubernetes via the Addon]((https://www.weave.works/docs/net/latest/kube-addon/). diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 09aa01de0ce1b..455ee95c2c852 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -51,37 +51,22 @@ may [fail](https://github.com/kubernetes/kubeadm/issues/31). If you have more than one network adapter, and your Kubernetes components are not reachable on the default route, we recommend you add IP route(s) so Kubernetes cluster addresses go via the appropriate adapter. 
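As a minimal sketch of that recommendation (the interface name `eth1`, the gateway, and the CIDRs below are placeholders, not values prescribed by this guide), adding routes for the cluster ranges via the correct adapter might look like:

```bash
# Route the cluster's service and pod ranges via the adapter that can reach
# the other nodes (all values are placeholders; substitute your own).
sudo ip route add 10.96.0.0/12 via 192.168.50.1 dev eth1
sudo ip route add 10.244.0.0/16 via 192.168.50.1 dev eth1

# Confirm which interface traffic to the API server address will use.
ip route get 192.168.50.10
```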
-## Ensure iptables tooling does not use the nftables backend +## Letting iptables see bridged traffic -In Linux, nftables is available as a modern replacement for the kernel's iptables subsystem. The -`iptables` tooling can act as a compatibility layer, behaving like iptables but actually configuring -nftables. This nftables backend is not compatible with the current kubeadm packages: it causes duplicated -firewall rules and breaks `kube-proxy`. +As a requirement for your Linux Node's iptables to correctly see bridged traffic, you should ensure `net.bridge.bridge-nf-call-iptables` is set to 1 in your `sysctl` config, e.g. -If your system's `iptables` tooling uses the nftables backend, you will need to switch the `iptables` -tooling to 'legacy' mode to avoid these problems. This is the case on at least Debian 10 (Buster), -Ubuntu 19.04, Fedora 29 and newer releases of these distributions by default. RHEL 8 does not support -switching to legacy mode, and is therefore incompatible with current kubeadm packages. - -{{< tabs name="iptables_legacy" >}} -{{% tab name="Debian or Ubuntu" %}} ```bash -# ensure legacy binaries are installed -sudo apt-get install -y iptables arptables ebtables - -# switch to legacy versions -sudo update-alternatives --set iptables /usr/sbin/iptables-legacy -sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy -sudo update-alternatives --set arptables /usr/sbin/arptables-legacy -sudo update-alternatives --set ebtables /usr/sbin/ebtables-legacy -``` -{{% /tab %}} -{{% tab name="Fedora" %}} -```bash -update-alternatives --set iptables /usr/sbin/iptables-legacy +cat < /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 +EOF +sysctl --system ``` -{{% /tab %}} -{{< /tabs >}} + +Make sure that the `br_netfilter` module is loaded before this step. This can be done by running `lsmod | grep br_netfilter`. To load it explicitly call `modprobe br_netfilter`. + +For more details please see the [Network Plugin Requirements](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#network-plugin-requirements) page. + ## Check required ports @@ -209,7 +194,7 @@ sudo apt-mark hold kubelet kubeadm kubectl cat < /etc/yum.repos.d/kubernetes.repo [kubernetes] name=Kubernetes -baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch enabled=1 gpgcheck=1 repo_gpgcheck=1 @@ -230,17 +215,7 @@ systemctl enable --now kubelet - Setting SELinux in permissive mode by running `setenforce 0` and `sed ...` effectively disables it. This is required to allow containers to access the host filesystem, which is needed by pod networks for example. You have to do this until SELinux support is improved in the kubelet. - - Some users on RHEL/CentOS 7 have reported issues with traffic being routed incorrectly due to iptables being bypassed. You should ensure - `net.bridge.bridge-nf-call-iptables` is set to 1 in your `sysctl` config, e.g. - - ```bash - cat < /etc/sysctl.d/k8s.conf - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - EOF - sysctl --system - ``` - - Make sure that the `br_netfilter` module is loaded before this step. This can be done by running `lsmod | grep br_netfilter`. To load it explicitly call `modprobe br_netfilter`. 
+ {{% /tab %}} {{% tab name="Container Linux" %}} Install CNI plugins (required for most pod networks): @@ -254,7 +229,7 @@ curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_ Install crictl (required for kubeadm / Kubelet Container Runtime Interface (CRI)) ```bash -CRICTL_VERSION="v1.16.0" +CRICTL_VERSION="v1.17.0" mkdir -p /opt/bin curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-amd64.tar.gz" | tar -C /opt/bin -xz ``` @@ -269,9 +244,10 @@ cd /opt/bin curl -L --remote-name-all https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/amd64/{kubeadm,kubelet,kubectl} chmod +x {kubeadm,kubelet,kubectl} -curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service +RELEASE_VERSION="v0.2.7" +curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service mkdir -p /etc/systemd/system/kubelet.service.d -curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` Enable and start `kubelet`: diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index 31f94ef137e6c..a7ef2080522d7 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -22,6 +22,49 @@ If your problem is not listed below, please follow the following steps: {{% capture body %}} +## Not possible to join a v1.18 Node to a v1.17 cluster due to missing RBAC + +In v1.18, kubeadm added prevention for joining a Node in the cluster if a Node with the same name already exists. +This required adding RBAC for the bootstrap-token user to be able to GET a Node object. + +However, this causes an issue where `kubeadm join` from v1.18 cannot join a cluster created by kubeadm v1.17. + +To work around the issue, you have two options: + +Execute `kubeadm init phase bootstrap-token` on a control-plane node using kubeadm v1.18. +Note that this enables the rest of the bootstrap-token permissions as well.
+ +or + +Apply the following RBAC manually using `kubectl apply -f ...`: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kubeadm:get-nodes +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubeadm:get-nodes +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeadm:get-nodes +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:bootstrappers:kubeadm:default-node-token +``` + ## `ebtables` or some similar executable not found during installation If you see the following warnings while running `kubeadm init` @@ -307,17 +350,17 @@ The tracking issue for this problem is [here](https://github.com/kubernetes/kube *Note: This [issue](https://github.com/kubernetes/kubeadm/issues/1358) only applies to tools that marshal kubeadm types (e.g. to a YAML configuration file). It will be fixed in kubeadm API v1beta2.* -By default, kubeadm applies the `role.kubernetes.io/master:NoSchedule` taint to control-plane nodes. +By default, kubeadm applies the `node-role.kubernetes.io/master:NoSchedule` taint to control-plane nodes. If you prefer kubeadm to not taint the control-plane node, and set `InitConfiguration.NodeRegistration.Taints` to an empty slice, the field will be omitted when marshalling. When the field is omitted, kubeadm applies the default taint. There are at least two workarounds: -1. Use the `role.kubernetes.io/master:PreferNoSchedule` taint instead of an empty slice. [Pods will get scheduled on masters](/docs/concepts/configuration/taint-and-toleration/), unless other nodes have capacity. +1. Use the `node-role.kubernetes.io/master:PreferNoSchedule` taint instead of an empty slice. [Pods will get scheduled on masters](/docs/concepts/configuration/taint-and-toleration/), unless other nodes have capacity. 2. Remove the taint after kubeadm init exits: ```bash -kubectl taint nodes NODE_NAME role.kubernetes.io/master:NoSchedule- +kubectl taint nodes NODE_NAME node-role.kubernetes.io/master:NoSchedule- ``` ## `/usr` is mounted read-only on nodes {#usr-mounted-read-only} diff --git a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index 6d079d0274c41..e8e23b8574774 100644 --- a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -100,7 +100,26 @@ Pods, Controllers and Services are critical elements to managing Windows workloa #### Container Runtime -Docker EE-basic 18.09 is required on Windows Server 2019 / 1809 nodes for Kubernetes. This works with the dockershim code included in the kubelet. Additional runtimes such as CRI-ContainerD may be supported in later Kubernetes versions. +##### Docker EE + +{{< feature-state for_k8s_version="v1.14" state="stable" >}} + +Docker EE-basic 18.09+ is the recommended container runtime for Windows Server 2019 / 1809 nodes running Kubernetes. This works with the dockershim code included in the kubelet. + +##### CRI-ContainerD + +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +ContainerD is an OCI-compliant runtime that works with Kubernetes on Linux. Kubernetes v1.18 adds support for {{< glossary_tooltip term_id="containerd" text="ContainerD" >}} on Windows. 
Progress for ContainerD on Windows can be tracked at [enhancements#1001](https://github.com/kubernetes/enhancements/issues/1001). + +{{< caution >}} + +ContainerD on Windows in Kubernetes v1.18 has the following known shortcomings: + +* ContainerD does not have an official release with support for Windows; all development in Kubernetes has been performed against active ContainerD development branches. Production deployments should always use official releases that have been fully tested and are supported with security fixes. +* Group-Managed Service Accounts are not implemented when using ContainerD - see [containerd/cri#1276](https://github.com/containerd/cri/issues/1276). + +{{< /caution >}} #### Persistent Storage @@ -408,7 +427,6 @@ Your main source of help for troubleshooting your Kubernetes cluster should star # Register kubelet.exe # Microsoft releases the pause infrastructure container at mcr.microsoft.com/k8s/core/pause:1.2.0 - # For more info search for "pause" in the "Guide for adding Windows Nodes in Kubernetes" nssm install kubelet C:\k\kubelet.exe nssm set kubelet AppParameters --hostname-override= --v=6 --pod-infra-container-image=mcr.microsoft.com/k8s/core/pause:1.2.0 --resolv-conf="" --allow-privileged=true --enable-debugging-handlers --cluster-dns= --cluster-domain=cluster.local --kubeconfig=c:\k\config --hairpin-mode=promiscuous-bridge --image-pull-progress-deadline=20m --cgroups-per-qos=false --log-dir= --logtostderr=false --enforce-node-allocatable="" --network-plugin=cni --cni-bin-dir=c:\k\cni --cni-conf-dir=c:\k\cni\config nssm set kubelet AppDirectory C:\k @@ -520,7 +538,7 @@ Your main source of help for troubleshooting your Kubernetes cluster should star Check that your pause image is compatible with your OS version. The [instructions](https://docs.microsoft.com/en-us/virtualization/windowscontainers/kubernetes/deploying-resources) assume that both the OS and the containers are version 1803. If you have a later version of Windows, such as an Insider build, you need to adjust the images accordingly. Please refer to the Microsoft's [Docker repository](https://hub.docker.com/u/microsoft/) for images. Regardless, both the pause image Dockerfile and the sample service expect the image to be tagged as :latest. - Starting with Kubernetes v1.14, Microsoft releases the pause infrastructure container at `mcr.microsoft.com/k8s/core/pause:1.2.0`. For more information search for "pause" in the [Guide for adding Windows Nodes in Kubernetes](../user-guide-windows-nodes). + Starting with Kubernetes v1.14, Microsoft releases the pause infrastructure container at `mcr.microsoft.com/k8s/core/pause:1.2.0`. 1. DNS resolution is not properly working @@ -534,6 +552,7 @@ Your main source of help for troubleshooting your Kubernetes cluster should star 1. My Kubernetes installation is failing because my Windows Server node is behind a proxy If you are behind a proxy, the following PowerShell environment variables must be defined: + ```PowerShell [Environment]::SetEnvironmentVariable("HTTP_PROXY", "http://proxy.example.com:80/", [EnvironmentVariableTarget]::Machine) [Environment]::SetEnvironmentVariable("HTTPS_PROXY", "http://proxy.example.com:443/", [EnvironmentVariableTarget]::Machine) @@ -571,19 +590,15 @@ If filing a bug, please include detailed information about how to reproduce the We have a lot of features in our roadmap. 
An abbreviated high level list is included below, but we encourage you to view our [roadmap project](https://github.com/orgs/kubernetes/projects/8) and help us make Windows support better by [contributing](https://github.com/kubernetes/community/blob/master/sig-windows/). -### CRI-ContainerD - -{{< glossary_tooltip term_id="containerd" >}} is another OCI-compliant runtime that recently graduated as a {{< glossary_tooltip text="CNCF" term_id="cncf" >}} project. It's currently tested on Linux, but 1.3 will bring support for Windows and Hyper-V. [[reference](https://blog.docker.com/2019/02/containerd-graduates-within-the-cncf/)] +### Hyper-V isolation -The CRI-ContainerD interface will be able to manage sandboxes based on Hyper-V. This provides a foundation where RuntimeClass could be implemented for new use cases including: +Hyper-V isolation is required to enable the following use cases for Windows containers in Kubernetes: * Hypervisor-based isolation between pods for additional security * Backwards compatibility allowing a node to run a newer Windows Server version without requiring containers to be rebuilt * Specific CPU/NUMA settings for a pod * Memory isolation and reservations -### Hyper-V isolation - The existing Hyper-V isolation support, an experimental feature as of v1.10, will be deprecated in the future in favor of the CRI-ContainerD and RuntimeClass features mentioned above. To use the current features and create a Hyper-V isolated container, the kubelet should be started with feature gates `HyperVContainer=true` and the Pod should include the annotation `experimental.windows.kubernetes.io/isolation-type=hyperv`. In the experimental release, this feature is limited to 1 container per Pod. ```yaml @@ -612,7 +627,11 @@ spec: ### Deployment with kubeadm and cluster API -Kubeadm is becoming the de facto standard for users to deploy a Kubernetes cluster. Windows node support in kubeadm will come in a future release. We are also making investments in cluster API to ensure Windows nodes are properly provisioned. +Kubeadm is becoming the de facto standard for users to deploy a Kubernetes +cluster. Windows node support in kubeadm is currently a work-in-progress but a +guide is available [here](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes/). +We are also making investments in cluster API to ensure Windows nodes are +properly provisioned.
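Assuming a Windows node that has already been prepared by following that guide, the final step is expected to be a plain `kubeadm join` run from an elevated shell on the Windows machine; the endpoint, token, and hash below are placeholders you would normally obtain from `kubeadm token create --print-join-command` on the control-plane node:

```bash
# Placeholder values; run on the prepared Windows node.
kubeadm join 192.168.1.10:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>
```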
### A few other key features * Beta support for Group Managed Service Accounts diff --git a/content/en/docs/setup/production-environment/windows/kubecluster.ps1-install.gif b/content/en/docs/setup/production-environment/windows/kubecluster.ps1-install.gif deleted file mode 100644 index e3d94b9b54ac2..0000000000000 Binary files a/content/en/docs/setup/production-environment/windows/kubecluster.ps1-install.gif and /dev/null differ diff --git a/content/en/docs/setup/production-environment/windows/kubecluster.ps1-join.gif b/content/en/docs/setup/production-environment/windows/kubecluster.ps1-join.gif deleted file mode 100644 index 828417d685c69..0000000000000 Binary files a/content/en/docs/setup/production-environment/windows/kubecluster.ps1-join.gif and /dev/null differ diff --git a/content/en/docs/setup/production-environment/windows/kubecluster.ps1-reset.gif b/content/en/docs/setup/production-environment/windows/kubecluster.ps1-reset.gif deleted file mode 100644 index e71d40d6dfb09..0000000000000 Binary files a/content/en/docs/setup/production-environment/windows/kubecluster.ps1-reset.gif and /dev/null differ diff --git a/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md b/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md index 2366f61018dc7..a79cc80b59347 100644 --- a/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md +++ b/content/en/docs/setup/production-environment/windows/user-guide-windows-containers.md @@ -22,7 +22,7 @@ Windows applications constitute a large portion of the services and applications ## Before you begin -* Create a Kubernetes cluster that includes a [master and a worker node running Windows Server](../user-guide-windows-nodes) +* Create a Kubernetes cluster that includes a [master and a worker node running Windows Server](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes) * It is important to note that creating and deploying services and workloads on Kubernetes behaves in much the same way for Linux and Windows containers. [Kubectl commands](/docs/reference/kubectl/overview/) to interface with the cluster are identical. The example in the section below is provided simply to jumpstart your experience with Windows containers. ## Getting Started: Deploying a Windows container @@ -107,6 +107,14 @@ Port mapping is also supported, but for simplicity in this example the container Windows container hosts are not able to access the IP of services scheduled on them due to current platform limitations of the Windows networking stack. Only Windows pods are able to access service IPs. {{< /note >}} +## Observability + +### Capturing logs from workloads + +Logs are an important element of observability; they enable users to gain insights into the operational aspect of workloads and are a key ingredient to troubleshooting issues. Because Windows containers and workloads inside Windows containers behave differently from Linux containers, users had a hard time collecting logs, limiting operational visibility. Windows workloads for example are usually configured to log to ETW (Event Tracing for Windows) or push entries to the application event log. [LogMonitor](https://github.com/microsoft/windows-container-tools/tree/master/LogMonitor), an open source tool by Microsoft, is the recommended way to monitor configured log sources inside a Windows container. 
LogMonitor supports monitoring event logs, ETW providers, and custom application logs, piping them to STDOUT for consumption by `kubectl logs `. + +Follow the instructions in the LogMonitor GitHub page to copy its binaries and configuration files to all your containers and add the necessary entrypoints for LogMonitor to push your logs to STDOUT. + ## Using configurable Container usernames Starting with Kubernetes v1.16, Windows containers can be configured to run their entrypoints and processes with different usernames than the image defaults. The way this is achieved is a bit different from the way it is done for Linux containers. Learn more about it [here](/docs/tasks/configure-pod-container/configure-runasusername/). diff --git a/content/en/docs/setup/production-environment/windows/user-guide-windows-nodes.md b/content/en/docs/setup/production-environment/windows/user-guide-windows-nodes.md deleted file mode 100644 index 297ec97d79232..0000000000000 --- a/content/en/docs/setup/production-environment/windows/user-guide-windows-nodes.md +++ /dev/null @@ -1,356 +0,0 @@ ---- -reviewers: -- michmike -- patricklang -title: Guide for adding Windows Nodes in Kubernetes -min-kubernetes-server-version: v1.14 -content_template: templates/tutorial -weight: 70 ---- - -{{% capture overview %}} - -The Kubernetes platform can now be used to run both Linux and Windows containers. This page shows how one or more Windows nodes can be registered to a cluster. - -{{% /capture %}} - - -{{% capture prerequisites %}} - -* Obtain a [Windows Server 2019 license](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) (or higher) in order to configure the Windows node that hosts Windows containers. You can use your organization's licenses for the cluster, or acquire one from Microsoft, a reseller, or via the major cloud providers such as GCP, AWS, and Azure by provisioning a virtual machine running Windows Server through their marketplaces. A [time-limited trial](https://www.microsoft.com/en-us/cloud-platform/windows-server-trial) is also available. - -* Build a Linux-based Kubernetes cluster in which you have access to the control-plane (some examples include [Creating a single control-plane cluster with kubeadm](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/), [AKS Engine](/docs/setup/production-environment/turnkey/azure/), [GCE](/docs/setup/production-environment/turnkey/gce/), [AWS](/docs/setup/production-environment/turnkey/aws/). - -{{% /capture %}} - - -{{% capture objectives %}} - -* Register a Windows node to the cluster -* Configure networking so Pods and Services on Linux and Windows can communicate with each other - -{{% /capture %}} - - -{{% capture lessoncontent %}} - -## Getting Started: Adding a Windows Node to Your Cluster - -### Plan IP Addressing - -Kubernetes cluster management requires careful planning of your IP addresses so that you do not inadvertently cause network collision. This guide assumes that you are familiar with the [Kubernetes networking concepts](/docs/concepts/cluster-administration/networking/). - -In order to deploy your cluster you need the following address spaces: - -| Subnet / address range | Description | Default value | -| --- | --- | --- | -| Service Subnet | A non-routable, purely virtual subnet that is used by pods to uniformly access services without caring about the network topology. It is translated to/from routable address space by `kube-proxy` running on the nodes. 
| 10.96.0.0/12 | -| Cluster Subnet | This is a global subnet that is used by all pods in the cluster. Each node is assigned a smaller /24 subnet from this for their pods to use. It must be large enough to accommodate all pods used in your cluster. To calculate *minimumsubnet* size: `(number of nodes) + (number of nodes * maximum pods per node that you configure)`. Example: for a 5 node cluster for 100 pods per node: `(5) + (5 * 100) = 505.` | 10.244.0.0/16 | -| Kubernetes DNS Service IP | IP address of `kube-dns` service that is used for DNS resolution & cluster service discovery. | 10.96.0.10 | - -Review the networking options supported in 'Intro to Windows containers in Kubernetes: Supported Functionality: Networking' to determine how you need to allocate IP addresses for your cluster. - -### Components that run on Windows - -While the Kubernetes control-plane runs on your Linux node(s), the following components are configured and run on your Windows node(s). - -1. kubelet -2. kube-proxy -3. kubectl (optional) -4. Container runtime - -Get the latest binaries from [https://github.com/kubernetes/kubernetes/releases](https://github.com/kubernetes/kubernetes/releases), starting with v1.14 or later. The Windows-amd64 binaries for kubeadm, kubectl, kubelet, and kube-proxy can be found under the CHANGELOG link. - -### Networking Configuration - -Once you have a Linux-based Kubernetes control-plane ("Master") node you are ready to choose a networking solution. This guide illustrates using Flannel in VXLAN mode for simplicity. - -#### Configuring Flannel in VXLAN mode on the Linux control-plane - -1. Prepare Kubernetes master for Flannel - - Some minor preparation is recommended on the Kubernetes master in our cluster. It is recommended to enable bridged IPv4 traffic to iptables chains when using Flannel. This can be done using the following command: - - ```bash - sudo sysctl net.bridge.bridge-nf-call-iptables=1 - ``` - -1. Download & configure Flannel - - Download the most recent Flannel manifest: - - ```bash - wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml - ``` - - There are two sections you should modify to enable the vxlan networking backend: - - After applying the steps below, the `net-conf.json` section of `kube-flannel.yml` should look as follows: - - ```json - net-conf.json: | - { - "Network": "10.244.0.0/16", - "Backend": { - "Type": "vxlan", - "VNI" : 4096, - "Port": 4789 - } - } - ``` - - {{< note >}}The VNI must be set to 4096 and port 4789 for Flannel on Linux to interoperate with Flannel on Windows. Support for other VNIs is coming soon. See the [VXLAN documentation](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan) - for an explanation of these fields.{{< /note >}} - -1. In the `net-conf.json` section of your `kube-flannel.yml`, double-check: - 1. The cluster subnet (e.g. "10.244.0.0/16") is set as per your IP plan. - * VNI 4096 is set in the backend - * Port 4789 is set in the backend - 1. In the `cni-conf.json` section of your `kube-flannel.yml`, change the network name to `vxlan0`. - - Your `cni-conf.json` should look as follows: - - ```json - cni-conf.json: | - { - "name": "vxlan0", - "plugins": [ - { - "type": "flannel", - "delegate": { - "hairpinMode": true, - "isDefaultGateway": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } - ] - } - ``` - -1. 
Apply the Flannel manifest and validate - - Let's apply the Flannel configuration: - - ```bash - kubectl apply -f kube-flannel.yml - ``` - - After a few minutes, you should see all the pods as running if the Flannel pod network was deployed. - - ```bash - kubectl get pods --all-namespaces - ``` - - The output looks like as follows: - - ``` - NAMESPACE NAME READY STATUS RESTARTS AGE - kube-system etcd-flannel-master 1/1 Running 0 1m - kube-system kube-apiserver-flannel-master 1/1 Running 0 1m - kube-system kube-controller-manager-flannel-master 1/1 Running 0 1m - kube-system kube-dns-86f4d74b45-hcx8x 3/3 Running 0 12m - kube-system kube-flannel-ds-54954 1/1 Running 0 1m - kube-system kube-proxy-Zjlxz 1/1 Running 0 1m - kube-system kube-scheduler-flannel-master 1/1 Running 0 1m - ``` - - Verify that the Flannel DaemonSet has the NodeSelector applied. - - ```bash - kubectl get ds -n kube-system - ``` - - The output looks like as follows. The NodeSelector `beta.kubernetes.io/os=linux` is applied. - - ``` - NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE - kube-flannel-ds 2 2 2 2 2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux 21d - kube-proxy 2 2 2 2 2 beta.kubernetes.io/os=linux 26d - ``` - - - -### Join Windows Worker Node - -In this section we'll cover configuring a Windows node from scratch to join a cluster on-prem. If your cluster is on a cloud you'll likely want to follow the cloud specific guides in the [public cloud providers section](#public-cloud-providers). - -#### Preparing a Windows Node - -{{< note >}} -All code snippets in Windows sections are to be run in a PowerShell environment with elevated permissions (Administrator) on the Windows worker node. -{{< /note >}} - -1. Download the [SIG Windows tools](https://github.com/kubernetes-sigs/sig-windows-tools) repository containing install and join scripts - ```PowerShell - [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - Start-BitsTransfer https://github.com/kubernetes-sigs/sig-windows-tools/archive/master.zip - tar -xvf .\master.zip --strip-components 3 sig-windows-tools-master/kubeadm/v1.15.0/* - Remove-Item .\master.zip - ``` - -1. 
Customize the Kubernetes [configuration file](https://github.com/kubernetes-sigs/sig-windows-tools/blob/master/kubeadm/v1.15.0/Kubeclustervxlan.json) - - ``` - { - "Cri" : { // Contains values for container runtime and base container setup - "Name" : "dockerd", // Container runtime name - "Images" : { - "Pause" : "mcr.microsoft.com/k8s/core/pause:1.2.0", // Infrastructure container image - "Nanoserver" : "mcr.microsoft.com/windows/nanoserver:1809", // Base Nanoserver container image - "ServerCore" : "mcr.microsoft.com/windows/servercore:ltsc2019" // Base ServerCore container image - } - }, - "Cni" : { // Contains values for networking executables - "Name" : "flannel", // Name of network fabric - "Source" : [{ // Contains array of objects containing values for network daemon(s) - "Name" : "flanneld", // Name of network daemon - "Url" : "https://github.com/coreos/flannel/releases/download/v0.11.0/flanneld.exe" // Direct URL pointing to network daemon executable - } - ], - "Plugin" : { // Contains values for CNI network plugin - "Name": "vxlan" // Backend network mechanism to use: ["vxlan" | "bridge"] - }, - "InterfaceName" : "Ethernet" // Designated network interface name on Windows node to use as container network - }, - "Kubernetes" : { // Contains values for Kubernetes node binaries - "Source" : { // Contains values for Kubernetes node binaries - "Release" : "1.15.0", // Version of Kubernetes node binaries - "Url" : "https://dl.k8s.io/v1.15.0/kubernetes-node-windows-amd64.tar.gz" // Direct URL pointing to Kubernetes node binaries tarball - }, - "ControlPlane" : { // Contains values associated with Kubernetes control-plane ("Master") node - "IpAddress" : "kubemasterIP", // IP address of control-plane ("Master") node - "Username" : "localadmin", // Username on control-plane ("Master") node with remote SSH access - "KubeadmToken" : "token", // Kubeadm bootstrap token - "KubeadmCAHash" : "discovery-token-ca-cert-hash" // Kubeadm CA key hash - }, - "KubeProxy" : { // Contains values for Kubernetes network proxy configuration - "Gates" : "WinOverlay=true" // Comma-separated key-value pairs passed to kube-proxy feature gate flag - }, - "Network" : { // Contains values for IP ranges in CIDR notation for Kubernetes networking - "ServiceCidr" : "10.96.0.0/12", // Service IP subnet used by Services in CIDR notation - "ClusterCidr" : "10.244.0.0/16" // Cluster IP subnet used by Pods in CIDR notation - } - }, - "Install" : { // Contains values and configurations for Windows node installation - "Destination" : "C:\\ProgramData\\Kubernetes" // Absolute DOS path where Kubernetes will be installed on the Windows node - } -} - ``` - -{{< note >}} -Users can generate values for the `ControlPlane.KubeadmToken` and `ControlPlane.KubeadmCAHash` fields by running `kubeadm token create --print-join-command` on the Kubernetes control-plane ("Master") node. -{{< /note >}} - -1. Install containers and Kubernetes (requires a system reboot) - -Use the previously downloaded [KubeCluster.ps1](https://github.com/kubernetes-sigs/sig-windows-tools/blob/master/kubeadm/KubeCluster.ps1) script to install Kubernetes on the Windows Server container host: - - ```PowerShell - .\KubeCluster.ps1 -ConfigFile .\Kubeclustervxlan.json -install - ``` - where `-ConfigFile` points to the path of the Kubernetes configuration file. - -{{< note >}} -In the example below, we are using overlay networking mode. 
This requires Windows Server version 2019 with [KB4489899](https://support.microsoft.com/help/4489899) and at least Kubernetes v1.14 or above. Users that cannot meet this requirement must use `L2bridge` networking instead by selecting `bridge` as the [plugin](https://github.com/kubernetes-sigs/sig-windows-tools/blob/master/kubeadm/v1.15.0/Kubeclusterbridge.json#L18) in the configuration file. -{{< /note >}} - - ![alt_text](../kubecluster.ps1-install.gif "KubeCluster.ps1 install output") - - -On the Windows node you target, this step will: - -1. Enable Windows Server containers role (and reboot) -1. Download and install the chosen container runtime -1. Download all needed container images -1. Download Kubernetes binaries and add them to the `$PATH` environment variable -1. Download CNI plugins based on the selection made in the Kubernetes Configuration file -1. (Optionally) Generate a new SSH key which is required to connect to the control-plane ("Master") node during joining - - {{< note >}}For the SSH key generation step, you also need to add the generated public SSH key to the `authorized_keys` file on your (Linux) control-plane node. You only need to do this once. The script prints out the steps you can follow to do this, at the end of its output.{{< /note >}} - -Once installation is complete, any of the generated configuration files or binaries can be modified before joining the Windows node. - -#### Join the Windows Node to the Kubernetes cluster -This section covers how to join a [Windows node with Kubernetes installed](#preparing-a-windows-node) with an existing (Linux) control-plane, to form a cluster. - -Use the previously downloaded [KubeCluster.ps1](https://github.com/kubernetes-sigs/sig-windows-tools/blob/master/kubeadm/KubeCluster.ps1) script to join the Windows node to the cluster: - - ```PowerShell - .\KubeCluster.ps1 -ConfigFile .\Kubeclustervxlan.json -join - ``` - where `-ConfigFile` points to the path of the Kubernetes configuration file. - -![alt_text](../kubecluster.ps1-join.gif "KubeCluster.ps1 join output") - -{{< note >}} -Should the script fail during the bootstrap or joining procedure for whatever reason, start a new PowerShell session before starting each consecutive join attempt. -{{< /note >}} - -This step will perform the following actions: - -1. Connect to the control-plane ("Master") node via SSH, to retrieve the [Kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file. -1. Register kubelet as a Windows service -1. Configure CNI network plugins -1. Create an HNS network on top of the chosen network interface - {{< note >}} - This may cause a network blip for a few seconds while the vSwitch is being created. - {{< /note >}} -1. (If vxlan plugin is selected) Open up inbound firewall UDP port 4789 for overlay traffic -1. Register flanneld as a Windows service -1. Register kube-proxy as a Windows service - -Now you can view the Windows nodes in your cluster by running the following: - -```bash -kubectl get nodes -``` - -#### Remove the Windows Node from the Kubernetes cluster -In this section we'll cover how to remove a Windows node from a Kubernetes cluster. - -Use the previously downloaded [KubeCluster.ps1](https://github.com/kubernetes-sigs/sig-windows-tools/blob/master/kubeadm/KubeCluster.ps1) script to remove the Windows node from the cluster: - - ```PowerShell - .\KubeCluster.ps1 -ConfigFile .\Kubeclustervxlan.json -reset - ``` - where `-ConfigFile` points to the path of the Kubernetes configuration file. 
- -![alt_text](../kubecluster.ps1-reset.gif "KubeCluster.ps1 reset output") - -This step will perform the following actions on the targeted Windows node: - -1. Delete the Windows node from the Kubernetes cluster -1. Stop all running containers -1. Remove all container networking (HNS) resources -1. Unregister all Kubernetes services (flanneld, kubelet, kube-proxy) -1. Delete all Kubernetes binaries (kube-proxy.exe, kubelet.exe, flanneld.exe, kubeadm.exe) -1. Delete all CNI network plugins binaries -1. Delete [Kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) used to access the Kubernetes cluster - - -### Public Cloud Providers - -#### Azure - -AKS-Engine can deploy a complete, customizable Kubernetes cluster with both Linux & Windows nodes. There is a step-by-step walkthrough available in the [docs on GitHub](https://github.com/Azure/aks-engine/blob/master/docs/topics/windows.md). - -#### GCP - -Users can easily deploy a complete Kubernetes cluster on GCE following this step-by-step walkthrough on [GitHub](https://github.com/kubernetes/kubernetes/blob/master/cluster/gce/windows/README-GCE-Windows-kube-up.md) - -#### Deployment with kubeadm and cluster API - -Kubeadm is becoming the de facto standard for users to deploy a Kubernetes cluster. Windows node support in kubeadm is an alpha feature since Kubernetes release v1.16. We are also making investments in cluster API to ensure Windows nodes are properly provisioned. For more details, please consult the [kubeadm for Windows KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/20190424-kubeadm-for-windows.md). - - -### Next Steps - -Now that you've configured a Windows worker in your cluster to run Windows containers you may want to add one or more Linux nodes as well to run Linux containers. You are now ready to schedule Windows containers on your cluster. 
- -{{% /capture %}} - diff --git a/content/en/docs/setup/release/notes.md b/content/en/docs/setup/release/notes.md index c1ad709781b91..a344a11fc0648 100644 --- a/content/en/docs/setup/release/notes.md +++ b/content/en/docs/setup/release/notes.md @@ -1,5 +1,5 @@ --- -title: v1.17 Release Notes +title: v1.18 Release Notes weight: 10 card: name: download @@ -13,731 +13,1360 @@ card: -# v1.17.0 +# v1.18.0 [Documentation](https://docs.k8s.io) -## Downloads for v1.17.0 +## Downloads for v1.18.0 -| filename | sha512 hash | -| ------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------- | -| [kubernetes.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes.tar.gz) | `68d5af15901281954de01164426cfb5ca31c14341387fad34d0cb9aa5f40c932ad44f0de4f987caf2be6bdcea2051e589d25878cf4f9ac0ee73048029a11825f` | -| [kubernetes-src.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-src.tar.gz) | `5424576d7f7936df15243fee0036e7936d2d6224e98ac805ce96cdf7b83a7c5b66dfffc8823d7bc0c17c700fa3c01841208e8cf89be91d237d12e18f3d2f307c` | +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes.tar.gz) | `cd5b86a3947a4f2cea6d857743ab2009be127d782b6f2eb4d37d88918a5e433ad2c7ba34221c34089ba5ba13701f58b657f0711401e51c86f4007cb78744dee7` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-src.tar.gz) | `fb42cf133355ef18f67c8c4bb555aa1f284906c06e21fa41646e086d34ece774e9d547773f201799c0c703ce48d4d0e62c6ba5b2a4d081e12a339a423e111e52` ### Client Binaries -| filename | sha512 hash | -| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | -| [kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-darwin-386.tar.gz) | `4c9a06409561b8ecc8901d0b88bc955ab8b8c99256b3f6066811539211cff5ba7fb9e3802ac2d8b00a14ce619fa82aeebe83eae9f4b0774bedabd3da0235b78b` | -| [kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-darwin-amd64.tar.gz) | `78ce6875c5f5a03bc057e7194fd1966beb621f825ba786d35a9921ab1ae33ed781d0f93a473a6b985da1ba4fbe95c15b23cdca9e439dfd653dbcf5a2b23d1a73` | -| [kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-linux-386.tar.gz) | `7a4bcd7d06d0f4ba929451f652c92a3c4d428f9b38ed83093f076bb25699b9c4e82f8f851ab981e68becbf10b148ddab4f7dce3743e84d642baa24c00312a2aa` | -| [kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-linux-amd64.tar.gz) | `7f9fc9ac07e9acbf12b58ae9077a8ce1f7fb4b5ceccd3856b55d2beb5e435d4fd27884c10ffdf3e2e18cafd4acc001ed5cf2a0a9a5b0545d9be570f63012d9c0` | -| [kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-linux-arm.tar.gz) | `8f74fff80a000cfaefa2409bdce6fd0d546008c7942a7178a4fa88a9b3ca05d10f34352e2ea2aec5297aa5c630c2b9701b507273c0ed0ddc0c297e57b655d62e` | -| [kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-linux-arm64.tar.gz) | `18d92b320f138f5080f98f1ffee20e405187549ab3aad55b7f60f02e3b7f5a44eb9826098576b42937fd0aac01fe6bcae36b5a8ee52ddde3571a1281b279c114` | -| [kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-linux-ppc64le.tar.gz) | 
`fd9b15a88b3d5a506a84ebfb56de291b85978b14f61a2c05f4bdb6a7e45a36f92af5a024a6178dbebd82a92574ec6d8cf9d8ac912f868f757649a2a8434011fe` | -| [kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-linux-s390x.tar.gz) | `ae3b284a78975cbfccaac04ea802085c31fd75cccf4ece3a983f44faf755dd94c43833e60f52c5ea57bc462cb24268ef4b7246876189113f588a012dd58e9630` | -| [kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-windows-386.tar.gz) | `4ba83b068e7f4a203bcc5cc8bb2c456a6a9c468e695f86f69d8f2ac81be9a1ce156f9a2f28286cb7eb0480faac397d964821c009473bdb443d84a30b6d020551` | -| [kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-client-windows-amd64.tar.gz) | `fc79b0e926a823c7d8b9010dee0c559587b7f97c9290b2126d517c4272891ce36e310a64c85f3861a1c951da8dc21f46244a59ff9d52b7b7a3f84879f533e6aa` | +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-darwin-386.tar.gz) | `26df342ef65745df12fa52931358e7f744111b6fe1e0bddb8c3c6598faf73af997c00c8f9c509efcd7cd7e82a0341a718c08fbd96044bfb58e80d997a6ebd3c2` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-darwin-amd64.tar.gz) | `803a0fed122ef6b85f7a120b5485723eaade765b7bc8306d0c0da03bd3df15d800699d15ea2270bb7797fa9ce6a81da90e730dc793ea4ed8c0149b63d26eca30` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-386.tar.gz) | `110844511b70f9f3ebb92c15105e6680a05a562cd83f79ce2d2e25c2dd70f0dbd91cae34433f61364ae1ce4bd573b635f2f632d52de8f72b54acdbc95a15e3f0` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-amd64.tar.gz) | `594ca3eadc7974ec4d9e4168453e36ca434812167ef8359086cd64d048df525b7bd46424e7cc9c41e65c72bda3117326ba1662d1c9d739567f10f5684fd85bee` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-arm.tar.gz) | `d3627b763606557a6c9a5766c34198ec00b3a3cd72a55bc2cb47731060d31c4af93543fb53f53791062bb5ace2f15cbaa8592ac29009641e41bd656b0983a079` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-arm64.tar.gz) | `ba9056eff1452cbdaef699efbf88f74f5309b3f7808d372ebf6918442d0c9fea1653c00b9db3b7626399a460eef9b1fa9e29b827b7784f34561cbc380554e2ea` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-ppc64le.tar.gz) | `f80fb3769358cb20820ff1a1ce9994de5ed194aabe6c73fb8b8048bffc394d1b926de82c204f0e565d53ffe7562faa87778e97a3ccaaaf770034a992015e3a86` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-linux-s390x.tar.gz) | `a9b658108b6803d60fa3cd4e76d9e58bf75201017164fe54054b7ccadbb68c4ad7ba7800746940bc518d90475e6c0a96965a26fa50882f4f0e56df404f4ae586` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-windows-386.tar.gz) | `18adffab5d1be146906fd8531f4eae7153576aac235150ce2da05aee5ae161f6bd527e8dec34ae6131396cd4b3771e0d54ce770c065244ad3175a1afa63c89e1` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-client-windows-amd64.tar.gz) | `162396256429cef07154f817de2a6b67635c770311f414e38b1e2db25961443f05d7b8eb1f8da46dec8e31c5d1d2cd45f0c95dad1bc0e12a0a7278a62a0b9a6b` ### Server Binaries -| filename | sha512 hash | -| ---------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------- | -| [kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-server-linux-amd64.tar.gz) | `28b2703c95894ab0565e372517c4a4b2c33d1be3d778fae384a6ab52c06cea7dd7ec80060dbdba17c8ab23bbedcde751cccee7657eba254f7d322cf7c4afc701` | -| [kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-server-linux-arm.tar.gz) | `b36a9f602131dba23f267145399aad0b19e97ab7b5194b2e3c01c57f678d7b0ea30c1ea6b4c15fd87b1fd3bf06abd4ec443bef5a3792c0d813356cdeb3b6a935` | -| [kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-server-linux-arm64.tar.gz) | `42adae077603f25b194e893f15e7f415011f25e173507a190bafbee0d0e86cdd6ee8f11f1bcf0a5366e845bd968f92e5bf66785f20c1125c801cf3ec9850d0bd` | -| [kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-server-linux-ppc64le.tar.gz) | `7e72d4255e661e946203c1c0c684cd0923034eb112c35e3ba08fbf9d1ef5e8bb291840c6ff99aea6180083846f9a9ba88387e176ee7a5def49e1d19366e2789f` | -| [kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-server-linux-s390x.tar.gz) | `00bc634654ec7d1ec2eca7a3e943ac287395503a06c8da22b7efb3a35435ceb323618c6d9931d6693bfb19f2b8467ae8f05f98392df8ee4954556c438409c8d4` | +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-amd64.tar.gz) | `a92f8d201973d5dfa44a398e95fcf6a7b4feeb1ef879ab3fee1c54370e21f59f725f27a9c09ace8c42c96ac202e297fd458e486c489e05f127a5cade53b8d7c4` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-arm.tar.gz) | `62fbff3256bc0a83f70244b09149a8d7870d19c2c4b6dee8ca2714fc7388da340876a0f540d2ae9bbd8b81fdedaf4b692c72d2840674db632ba2431d1df1a37d` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-arm64.tar.gz) | `842910a7013f61a60d670079716b207705750d55a9e4f1f93696d19d39e191644488170ac94d8740f8e3aa3f7f28f61a4347f69d7e93d149c69ac0efcf3688fe` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-ppc64le.tar.gz) | `95c5b952ac1c4127a5c3b519b664972ee1fb5e8e902551ce71c04e26ad44b39da727909e025614ac1158c258dc60f504b9a354c5ab7583c2ad769717b30b3836` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-server-linux-s390x.tar.gz) | `a46522d2119a0fd58074564c1fa95dd8a929a79006b82ba3c4245611da8d2db9fd785c482e1b61a9aa361c5c9a6d73387b0e15e6a7a3d84fffb3f65db3b9deeb` ### Node Binaries -| filename | sha512 hash | -| ------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------- | -| [kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-node-linux-amd64.tar.gz) | `49ef6a41c65b3f26a4f3ffe63b92c8096c26aa27a89d227d935bc06a497c97505ad8bc215b4c5d5ad3af6489c1366cd26ecc8e2781a83f46a91503678abba71b` | -| [kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-node-linux-arm.tar.gz) | `21a213fd572200998bdd71f5ebbb96576fc7a7e7cfb1469f028cc1a310bc2b5c0ce32660629beb166b88f54e6ebecb2022b2ed1fdb902a9b9d5acb193d76fa0f` | -| [kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-node-linux-arm64.tar.gz) | 
`3642ee5e7476080a44005db8e7282fdbe4e4f220622761b95951c2c15b3e10d7b70566bfb7a9a58574f3fc385d5aae80738d88195fa308a07f199cee70f912f4` | -| [kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-node-linux-ppc64le.tar.gz) | `99687088be50a794894911d43827b7e1125fbc86bfba799f77c096ddaa5b2341b31d009b8063a177e503ce2ce0dafbda1115216f8a5777f34e0e2d81f0114104` | -| [kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-node-linux-s390x.tar.gz) | `73b9bc356de43fbed7d3294be747b83e0aac47051d09f1df7be52c33be670b63c2ea35856a483ebc2f57e30a295352b77f1b1a6728afa10ec1f3338cafbdb2bb` | -| [kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.17.0/kubernetes-node-windows-amd64.tar.gz) | `2fbc80f928231f60a5a7e4f427953ef17244b3a8f6fdeebcbfceb05b0587b84933fa723898c64488d94b9ce180357d6d4ca1505ca3c3c7fb11067b7b3bf6361b` | +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-amd64.tar.gz) | `f714f80feecb0756410f27efb4cf4a1b5232be0444fbecec9f25cb85a7ccccdcb5be588cddee935294f460046c0726b90f7acc52b20eeb0c46a7200cf10e351a` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-arm.tar.gz) | `806000b5f6d723e24e2f12d19d1b9b3d16c74b855f51c7063284adf1fcc57a96554a3384f8c05a952c6f6b929a05ed12b69151b1e620c958f74c9600f3db0fcb` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-arm64.tar.gz) | `c207e9ab60587d135897b5366af79efe9d2833f33401e469b2a4e0d74ecd2cf6bb7d1e5bc18d80737acbe37555707f63dd581ccc6304091c1d98dafdd30130b7` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-ppc64le.tar.gz) | `a542ed5ed02722af44ef12d1602f363fcd4e93cf704da2ea5d99446382485679626835a40ae2ba47a4a26dce87089516faa54479a1cfdee2229e8e35aa1c17d7` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-linux-s390x.tar.gz) | `651e0db73ee67869b2ae93cb0574168e4bd7918290fc5662a6b12b708fa628282e3f64be2b816690f5a2d0f4ff8078570f8187e65dee499a876580a7a63d1d19` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0/kubernetes-node-windows-amd64.tar.gz) | `d726ed904f9f7fe7e8831df621dc9094b87e767410a129aa675ee08417b662ddec314e165f29ecb777110fbfec0dc2893962b6c71950897ba72baaa7eb6371ed` -# Changes +## Changelog since v1.17.0 -A complete changelog for the release notes is now hosted in a customizable format at [relnotes.k8s.io](https://relnotes.k8s.io). Check it out and please give us your feedback! +A complete changelog for the release notes is now hosted in a customizable +format at [https://relnotes.k8s.io][1]. Check it out and please give us your +feedback! + +[1]: https://relnotes.k8s.io/?releaseVersions=1.18.0 ## What’s New (Major Themes) -### Cloud Provider Labels reach General Availability +### Kubernetes Topology Manager Moves to Beta - Align Up! + +A beta feature of Kubernetes in release 1.18, the [Topology Manager feature](https://github.com/nolancon/website/blob/f4200307260ea3234540ef13ed80de325e1a7267/content/en/docs/tasks/administer-cluster/topology-manager.md) enables NUMA alignment of CPU and devices (such as SR-IOV VFs) that will allow your workload to run in an environment optimized for low-latency. Prior to the introduction of the Topology Manager, the CPU and Device Manager would make resource allocation decisions independent of each other. This could result in undesirable allocations on multi-socket systems, causing degraded performance on latency critical applications. 
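+
+To make the alignment behavior concrete, here is a minimal sketch of a kubelet configuration that opts into the Topology Manager. The file path is illustrative only, and `single-numa-node` is one of the documented policy values (`none`, `best-effort`, `restricted`, `single-numa-node`):
+
+```yaml
+# /var/lib/kubelet/config.yaml (illustrative path)
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+# Request strict NUMA alignment of CPUs and devices; the TopologyManager
+# feature gate defaults to enabled now that the feature is beta.
+topologyManagerPolicy: single-numa-node
+```
+
+With the `single-numa-node` policy, the kubelet admits a pod that requests exclusive CPUs and devices only if all of them can be placed on a single NUMA node.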
+
+### Server-side Apply - Beta 2
+
+Server-side Apply was promoted to Beta in 1.16, but is now introducing a second Beta in 1.18. This new version will track and manage changes to fields of all new Kubernetes objects, allowing you to know what changed your resources and when.
+
+### Extending Ingress with IngressClass and replacing a deprecated annotation
+
+In Kubernetes 1.18, there are two significant additions to Ingress: a new `pathType` field and a new `IngressClass` resource. The `pathType` field allows specifying how paths should be matched. In addition to the default `ImplementationSpecific` type, there are new `Exact` and `Prefix` path types.
+
+The `IngressClass` resource is used to describe a type of Ingress within a Kubernetes cluster. Ingresses can specify the class they are associated with by using the new `ingressClassName` field. This new resource and field replace the deprecated `kubernetes.io/ingress.class` annotation (see the example manifest below).
+
+### SIG CLI introduces kubectl debug
+
+SIG CLI had been debating the need for a debug utility for quite some time. With the development of [ephemeral containers](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/), it became clearer how to support developers with tooling built on top of `kubectl exec`. The addition of the `kubectl debug` [command](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/20190805-kubectl-debug.md) (it is alpha, but your feedback is more than welcome) allows developers to easily debug their Pods inside the cluster. We think this addition is invaluable. The command creates a temporary container that runs next to the Pod you are trying to examine and attaches to the console for interactive troubleshooting.
-Added as a beta feature way back in v1.2, v1.17 sees the general availability of cloud provider labels.
+### Introducing Windows CSI support alpha for Kubernetes
-### Volume Snapshot Moves to Beta
+With the release of Kubernetes 1.18, an alpha version of CSI Proxy for Windows is being released. CSI Proxy enables non-privileged (pre-approved) containers to perform privileged storage operations on Windows; CSI drivers can now be supported on Windows by leveraging CSI Proxy.
+SIG Storage made a lot of progress in the 1.18 release.
+In particular, the following storage features are moving to GA in Kubernetes 1.18:
+- Raw Block Support: Allow volumes to be surfaced as block devices inside containers instead of just mounted filesystems.
+- Volume Cloning: Duplicate a PersistentVolumeClaim and underlying storage volume using the Kubernetes API via CSI.
+- CSIDriver Kubernetes API Object: Simplifies CSI driver discovery and allows CSI Drivers to customize Kubernetes behavior.
-The Kubernetes Volume Snapshot feature is now beta in Kubernetes v1.17. It was introduced as alpha in Kubernetes v1.12, with a second alpha with breaking changes in Kubernetes v1.13.
+SIG Storage is also introducing the following new storage features as alpha in Kubernetes 1.18:
+- Windows CSI Support: Enabling containerized CSI node plugins in Windows via the new [CSIProxy](https://github.com/kubernetes-csi/csi-proxy)
+- Recursive Volume Ownership OnRootMismatch Option: Adds a new “OnRootMismatch” policy that can help shorten the mount time for volumes that require ownership change and have many directories and files.
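+
+As a rough illustration of the Ingress changes described above, the following sketch pairs an IngressClass with an Ingress that selects it through `ingressClassName` and uses the new `pathType` field. The class name, controller string, and backing Service are hypothetical:
+
+```yaml
+apiVersion: networking.k8s.io/v1beta1
+kind: IngressClass
+metadata:
+  name: external-lb                           # hypothetical class name
+spec:
+  controller: example.com/ingress-controller  # hypothetical controller identifier
+---
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+  name: demo-ingress                          # hypothetical
+spec:
+  ingressClassName: external-lb               # replaces the kubernetes.io/ingress.class annotation
+  rules:
+  - http:
+      paths:
+      - path: /app
+        pathType: Prefix                      # Exact and ImplementationSpecific are also valid
+        backend:
+          serviceName: demo-service           # hypothetical pre-existing Service
+          servicePort: 80
+```
+
+Because `ingressClassName` is a typed field rather than an annotation, it can be validated and defaulted like any other part of the spec.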
-### CSI Migration Beta +### Other notable announcements -The Kubernetes in-tree storage plugin to Container Storage Interface (CSI) migration infrastructure is now beta in Kubernetes v1.17. CSI migration was introduced as alpha in Kubernetes v1.14. +SIG Network is moving IPv6 to Beta in Kubernetes 1.18, after incrementing significantly the test coverage with new CI jobs. + +NodeLocal DNSCache is an add-on that runs a dnsCache pod as a daemonset to improve clusterDNS performance and reliability. The feature has been in Alpha since 1.13 release. The SIG Network is announcing the GA graduation of Node Local DNSCache [#1351](https://github.com/kubernetes/enhancements/pull/1351) ## Known Issues -- volumeDevices mapping ignored when container is privileged -- The `Should recreate evicted statefulset` conformance [test](https://github.com/kubernetes/kubernetes/blob/master/test/e2e/apps/statefulset.go) fails because `Pod ss-0 expected to be re-created at least once`. This was caused by the `Predicate PodFitsHostPorts failed` scheduling error. The root cause was a host port conflict for port `21017`. This port was in-use as an ephemeral port by another application running on the node. This will be looked at for the 1.18 release. -- client-go discovery clients constructed using `NewDiscoveryClientForConfig` or `NewDiscoveryClientForConfigOrDie` default to rate limits that cause normal discovery request patterns to take several seconds. This is fixed in https://issue.k8s.io/86168 and will be resolved in v1.17.1. As a workaround, the `Burst` value can be adjusted higher in the rest.Config passed into `NewDiscoveryClientForConfig` or `NewDiscoveryClientForConfigOrDie`. -- The IP allocator in v1.17.0 can return errors such as `the cluster IP for service is not within the service CIDR ; please recreate` in the logs of the kube-apiserver. The cause is incorrect CIDR calculations if the service CIDR (`--service-cluster-ip-range`) is set to bits lower than `/16`. This is fixed in http://issue.k8s.io/86534 and will be resolved in v1.17.1. +No Known Issues Reported ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) -#### Cluster Lifecycle - -- Kubeadm: add a new `kubelet-finalize` phase as part of the `init` workflow and an experimental sub-phase to enable automatic kubelet client certificate rotation on primary control-plane nodes. - Prior to 1.17 and for existing nodes created by `kubeadm init` where kubelet client certificate rotation is desired, you must modify `/etc/kubernetes/kubelet.conf` to point to the PEM symlink for rotation: - `client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem` and `client-key: /var/lib/kubelet/pki/kubelet-client-current.pem`, replacing the embedded client certificate and key. ([#84118](https://github.com/kubernetes/kubernetes/pull/84118), [@neolit123](https://github.com/neolit123)) - -#### Network - -- EndpointSlices: If upgrading a cluster with EndpointSlices already enabled, any EndpointSlices that should be managed by the EndpointSlice controller should have a `http://endpointslice.kubernetes.io/managed-by` label set to `endpointslice-controller.k8s.io`. - -#### Scheduling - -- Kubeadm: when adding extra apiserver authorization-modes, the defaults `Node,RBAC` are no longer prepended in the resulting static Pod manifests and a full override is allowed. 
([#82616](https://github.com/kubernetes/kubernetes/pull/82616), [@ghouscht](https://github.com/ghouscht)) - -#### Storage - -- A node that uses a CSI raw block volume needs to be drained before kubelet can be upgraded to 1.17. ([#74026](https://github.com/kubernetes/kubernetes/pull/74026), [@mkimuram](https://github.com/mkimuram)) - -#### Windows - -- The Windows containers RunAsUsername feature is now beta. -- Windows worker nodes in a Kubernetes cluster now support Windows Server version 1903 in addition to the existing support for Windows Server 2019 -- The RuntimeClass scheduler can now simplify steering Linux or Windows pods to appropriate nodes -- All Windows nodes now get the new label `node.kubernetes.io/windows-build` that reflects the Windows major, minor, and build number that are needed to match compatibility between Windows containers and Windows worker nodes. - -## Deprecations and Removals - -- `kubeadm.k8s.io/v1beta1` has been deprecated, you should update your config to use newer non-deprecated API versions. ([#83276](https://github.com/kubernetes/kubernetes/pull/83276), [@Klaven](https://github.com/Klaven)) -- The deprecated feature gates GCERegionalPersistentDisk, EnableAggregatedDiscoveryTimeout and PersistentLocalVolumes are now unconditionally enabled and can no longer be specified in component invocations. ([#82472](https://github.com/kubernetes/kubernetes/pull/82472), [@draveness](https://github.com/draveness)) -- Deprecate the default service IP CIDR. The previous default was `10.0.0.0/24` which will be removed in 6 months/2 releases. Cluster admins must specify their own desired value, by using `--service-cluster-ip-range` on kube-apiserver. ([#81668](https://github.com/kubernetes/kubernetes/pull/81668), [@darshanime](https://github.com/darshanime)) -- Remove deprecated "include-uninitialized" flag. ([#80337](https://github.com/kubernetes/kubernetes/pull/80337), [@draveness](https://github.com/draveness)) -- All resources within the `rbac.authorization.k8s.io/v1alpha1` and `rbac.authorization.k8s.io/v1beta1` API groups are deprecated in favor of `rbac.authorization.k8s.io/v1`, and will no longer be served in v1.20. ([#84758](https://github.com/kubernetes/kubernetes/pull/84758), [@liggitt](https://github.com/liggitt)) -- The certificate signer no longer accepts ca.key passwords via the `CFSSL_CA_PK_PASSWORD` environment variable. This capability was not prompted by user request, never advertised, and recommended against in the security audit. ([#84677](https://github.com/kubernetes/kubernetes/pull/84677), [@mikedanese](https://github.com/mikedanese)) -- Deprecate the instance type beta label (`beta.kubernetes.io/instance-type`) in favor of its GA equivalent: `node.kubernetes.io/instance-type` ([#82049](https://github.com/kubernetes/kubernetes/pull/82049), [@andrewsykim](https://github.com/andrewsykim)) -- The built-in system:csi-external-provisioner and system:csi-external-attacher cluster roles are removed as of 1.17 release ([#84282](https://github.com/kubernetes/kubernetes/pull/84282), [@tedyu](https://github.com/tedyu)) -- The in-tree GCE PD plugin `kubernetes.io/gce-pd` is now deprecated and will be removed in 1.21. Users that self-deploy Kubernetes on GCP should enable CSIMigration + CSIMigrationGCE features and install the GCE PD CSI Driver (https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) to avoid disruption to existing Pod and PVC objects at that time. 
Users should start using the GCE PD CSI CSI Driver directly for any new volumes. ([#85231](https://github.com/kubernetes/kubernetes/pull/85231), [@davidz627](https://github.com/davidz627)) -- The in-tree AWS EBS plugin `kubernetes.io/aws-ebs` is now deprecated and will be removed in 1.21. Users that self-deploy Kubernetes on AWS should enable CSIMigration + CSIMigrationAWS features and install the AWS EBS CSI Driver (https://github.com/kubernetes-sigs/aws-ebs-csi-driver) to avoid disruption to existing Pod and PVC objects at that time. Users should start using the AWS EBS CSI CSI Driver directly for any new volumes. ([#85237](https://github.com/kubernetes/kubernetes/pull/85237), [@leakingtapan](https://github.com/leakingtapan)) -- The CSINodeInfo feature gate is deprecated and will be removed in a future release. The storage.k8s.io/v1beta1 CSINode object is deprecated and will be removed in a future release. ([#83474](https://github.com/kubernetes/kubernetes/pull/83474), [@msau42](https://github.com/msau42)) -- Removed Alpha feature `MountContainers` ([#84365](https://github.com/kubernetes/kubernetes/pull/84365), [@codenrhoden](https://github.com/codenrhoden)) -- Removed plugin watching of the deprecated directory `{kubelet_root_dir}/plugins` and CSI V0 support in accordance with deprecation announcement in https://v1-13.docs.kubernetes.io/docs/setup/release/notes ([#84533](https://github.com/kubernetes/kubernetes/pull/84533), [@davidz627](https://github.com/davidz627)) -- kubeadm deprecates the use of the hyperkube image ([#85094](https://github.com/kubernetes/kubernetes/pull/85094), [@rosti](https://github.com/rosti)) - -## Metrics Changes - -### Added metrics - -- Add `scheduler_goroutines` metric to track number of kube-scheduler binding and prioritizing goroutines ([#83535](https://github.com/kubernetes/kubernetes/pull/83535), [@wgliang](https://github.com/wgliang)) -- Adding initial EndpointSlice metrics. ([#83257](https://github.com/kubernetes/kubernetes/pull/83257), [@robscott](https://github.com/robscott)) -- Adds a metric `apiserver_request_error_total` to kube-apiserver. This metric tallies the number of `request_errors` encountered by verb, group, version, resource, subresource, scope, component, and code. ([#83427](https://github.com/kubernetes/kubernetes/pull/83427), [@logicalhan](https://github.com/logicalhan)) -- A new `kubelet_preemptions` metric is reported from Kubelets to track the number of preemptions occurring over time, and which resource is triggering those preemptions. ([#84120](https://github.com/kubernetes/kubernetes/pull/84120), [@smarterclayton](https://github.com/smarterclayton)) -- Kube-apiserver: Added metrics `authentication_latency_seconds` that can be used to understand the latency of authentication. ([#82409](https://github.com/kubernetes/kubernetes/pull/82409), [@RainbowMango](https://github.com/RainbowMango)) -- Add `plugin_execution_duration_seconds` metric for scheduler framework plugins. ([#84522](https://github.com/kubernetes/kubernetes/pull/84522), [@liu-cong](https://github.com/liu-cong)) -- Add `permit_wait_duration_seconds` metric to the scheduler. ([#84011](https://github.com/kubernetes/kubernetes/pull/84011), [@liu-cong](https://github.com/liu-cong)) - -### Deprecated/changed metrics - -- etcd version monitor metrics are now marked as with the ALPHA stability level. 
([#83283](https://github.com/kubernetes/kubernetes/pull/83283), [@RainbowMango](https://github.com/RainbowMango)) -- Change `pod_preemption_victims` metric from Gauge to Histogram. ([#83603](https://github.com/kubernetes/kubernetes/pull/83603), [@Tabrizian](https://github.com/Tabrizian)) -- Following metrics from kubelet are now marked as with the ALPHA stability level: - `kubelet_container_log_filesystem_used_bytes` - `kubelet_volume_stats_capacity_bytes` - `kubelet_volume_stats_available_bytes` - `kubelet_volume_stats_used_bytes` - `kubelet_volume_stats_inodes` - `kubelet_volume_stats_inodes_free` - `kubelet_volume_stats_inodes_used` - `plugin_manager_total_plugins` - `volume_manager_total_volumes` - ([#84907](https://github.com/kubernetes/kubernetes/pull/84907), [@RainbowMango](https://github.com/RainbowMango)) -- Deprecated metric `rest_client_request_latency_seconds` has been turned off. ([#83836](https://github.com/kubernetes/kubernetes/pull/83836), [@RainbowMango](https://github.com/RainbowMango)) -- Following metrics from kubelet are now marked as with the ALPHA stability level: - `node_cpu_usage_seconds_total` - `node_memory_working_set_bytes` - `container_cpu_usage_seconds_total` - `container_memory_working_set_bytes` - `scrape_error` - ([#84987](https://github.com/kubernetes/kubernetes/pull/84987), [@RainbowMango](https://github.com/RainbowMango)) -- Deprecated prometheus request meta-metrics have been removed - `http_request_duration_microseconds` `http_request_duration_microseconds_sum` `http_request_duration_microseconds_count` - `http_request_size_bytes` - `http_request_size_bytes_sum` - `http_request_size_bytes_count` - `http_requests_total, http_response_size_bytes` - `http_response_size_bytes_sum` - `http_response_size_bytes_count` - due to removal from the prometheus client library. Prometheus http request meta-metrics are now generated from [`promhttp.InstrumentMetricHandler`](https://godoc.org/github.com/prometheus/client_golang/prometheus/promhttp#InstrumentMetricHandler) instead. -- Following metrics from kube-controller-manager are now marked as with the ALPHA stability level: - `storage_count_attachable_volumes_in_use` - `attachdetach_controller_total_volumes` - `pv_collector_bound_pv_count` - `pv_collector_unbound_pv_count` - `pv_collector_bound_pvc_count` - `pv_collector_unbound_pvc_count` - ([#84896](https://github.com/kubernetes/kubernetes/pull/84896), [@RainbowMango](https://github.com/RainbowMango)) -- Following metrics have been turned off: - `apiserver_request_count` - `apiserver_request_latencies` - `apiserver_request_latencies_summary` - `apiserver_dropped_requests` - `etcd_request_latencies_summary` - `apiserver_storage_transformation_latencies_microseconds` - `apiserver_storage_data_key_generation_latencies_microseconds` - `apiserver_storage_transformation_failures_total` - ([#83837](https://github.com/kubernetes/kubernetes/pull/83837), [@RainbowMango](https://github.com/RainbowMango)) +#### kube-apiserver: +- in an `--encryption-provider-config` config file, an explicit `cacheSize: 0` parameter previously silently defaulted to caching 1000 keys. In Kubernetes 1.18, this now returns a config validation error. To disable caching, you can specify a negative cacheSize value in Kubernetes 1.18+. +- consumers of the 'certificatesigningrequests/approval' API must now have permission to 'approve' CSRs for the specific signer requested by the CSR. 
More information on the new signerName field and the required authorization can be found at https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests#authorization ([#88246](https://github.com/kubernetes/kubernetes/pull/88246), [@munnerz](https://github.com/munnerz)) [SIG API Machinery, Apps, Auth, CLI, Node and Testing] +- The following features are unconditionally enabled and the corresponding `--feature-gates` flags have been removed: `PodPriority`, `TaintNodesByCondition`, `ResourceQuotaScopeSelectors` and `ScheduleDaemonSetPods` ([#86210](https://github.com/kubernetes/kubernetes/pull/86210), [@draveness](https://github.com/draveness)) [SIG Apps and Scheduling] + +#### kubelet: +- `--enable-cadvisor-endpoints` is now disabled by default. If you need access to the cAdvisor v1 JSON API, please enable it explicitly on the kubelet command line. Please note that this flag was deprecated in 1.15 and will be removed in 1.19. ([#87440](https://github.com/kubernetes/kubernetes/pull/87440), [@dims](https://github.com/dims)) [SIG Instrumentation, Node and Testing] +- Promote CSIMigrationOpenStack to Beta (off by default since it requires installation of the OpenStack Cinder CSI Driver). The in-tree OpenStack Cinder driver "kubernetes.io/cinder" was deprecated in 1.16 and will be removed in 1.20. Users should enable CSIMigration + CSIMigrationOpenStack features and install the OpenStack Cinder CSI Driver (https://github.com/kubernetes-sigs/cloud-provider-openstack) to avoid disruption to existing Pod and PVC objects at that time. Users should start using the OpenStack Cinder CSI Driver directly for any new volumes. ([#85637](https://github.com/kubernetes/kubernetes/pull/85637), [@dims](https://github.com/dims)) [SIG Cloud Provider] + +#### kubectl: +- `kubectl` and k8s.io/client-go no longer default to a server address of `http://localhost:8080`. If you own one of these legacy clusters, you are *strongly* encouraged to secure your server. If you cannot secure your server, you can set the `$KUBERNETES_MASTER` environment variable to `http://localhost:8080` to continue defaulting the server address. `kubectl` users can also set the server address using the `--server` flag, or in a kubeconfig file specified via `--kubeconfig` or `$KUBECONFIG`. ([#86173](https://github.com/kubernetes/kubernetes/pull/86173), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, CLI and Testing] +- `kubectl run` has removed the previously deprecated generators, along with flags unrelated to creating pods. `kubectl run` now only creates pods. See specific `kubectl create` subcommands to create objects other than pods. +([#87077](https://github.com/kubernetes/kubernetes/pull/87077), [@soltysh](https://github.com/soltysh)) [SIG Architecture, CLI and Testing] +- The deprecated command `kubectl rolling-update` has been removed ([#88057](https://github.com/kubernetes/kubernetes/pull/88057), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG Architecture, CLI and Testing] + +#### client-go: +- Signatures on methods in generated clientsets, dynamic, metadata, and scale clients have been modified to accept `context.Context` as a first argument. Signatures of Create, Update, and Patch methods have been updated to accept CreateOptions, UpdateOptions and PatchOptions respectively. Signatures of Delete and DeleteCollection methods now accept DeleteOptions by value instead of by reference.
Generated clientsets with the previous interface have been added in new "deprecated" packages to allow incremental migration to the new APIs. The deprecated packages will be removed in the 1.21 release. A tool is available at http://sigs.k8s.io/clientgofix to rewrite method invocations to the new signatures. + +- The following deprecated metrics are removed, please convert to the corresponding metrics: + - The following replacement metrics are available from v1.14.0: + - `rest_client_request_latency_seconds` -> `rest_client_request_duration_seconds` + - `scheduler_scheduling_latency_seconds` -> `scheduler_scheduling_duration_seconds ` + - `docker_operations` -> `docker_operations_total` + - `docker_operations_latency_microseconds` -> `docker_operations_duration_seconds` + - `docker_operations_errors` -> `docker_operations_errors_total` + - `docker_operations_timeout` -> `docker_operations_timeout_total` + - `network_plugin_operations_latency_microseconds` -> `network_plugin_operations_duration_seconds` + - `kubelet_pod_worker_latency_microseconds` -> `kubelet_pod_worker_duration_seconds` + - `kubelet_pod_start_latency_microseconds` -> `kubelet_pod_start_duration_seconds` + - `kubelet_cgroup_manager_latency_microseconds` -> `kubelet_cgroup_manager_duration_seconds` + - `kubelet_pod_worker_start_latency_microseconds` -> `kubelet_pod_worker_start_duration_seconds` + - `kubelet_pleg_relist_latency_microseconds` -> `kubelet_pleg_relist_duration_seconds` + - `kubelet_pleg_relist_interval_microseconds` -> `kubelet_pleg_relist_interval_seconds` + - `kubelet_eviction_stats_age_microseconds` -> `kubelet_eviction_stats_age_seconds` + - `kubelet_runtime_operations` -> `kubelet_runtime_operations_total` + - `kubelet_runtime_operations_latency_microseconds` -> `kubelet_runtime_operations_duration_seconds` + - `kubelet_runtime_operations_errors` -> `kubelet_runtime_operations_errors_total` + - `kubelet_device_plugin_registration_count` -> `kubelet_device_plugin_registration_total` + - `kubelet_device_plugin_alloc_latency_microseconds` -> `kubelet_device_plugin_alloc_duration_seconds` + - `scheduler_e2e_scheduling_latency_microseconds` -> `scheduler_e2e_scheduling_duration_seconds` + - `scheduler_scheduling_algorithm_latency_microseconds` -> `scheduler_scheduling_algorithm_duration_seconds` + - `scheduler_scheduling_algorithm_predicate_evaluation` -> `scheduler_scheduling_algorithm_predicate_evaluation_seconds` + - `scheduler_scheduling_algorithm_priority_evaluation` -> `scheduler_scheduling_algorithm_priority_evaluation_seconds` + - `scheduler_scheduling_algorithm_preemption_evaluation` -> `scheduler_scheduling_algorithm_preemption_evaluation_seconds` + - `scheduler_binding_latency_microseconds` -> `scheduler_binding_duration_seconds` + - `kubeproxy_sync_proxy_rules_latency_microseconds` -> `kubeproxy_sync_proxy_rules_duration_seconds` + - `apiserver_request_latencies` -> `apiserver_request_duration_seconds` + - `apiserver_dropped_requests` -> `apiserver_dropped_requests_total` + - `etcd_request_latencies_summary` -> `etcd_request_duration_seconds` + - `apiserver_storage_transformation_latencies_microseconds ` -> `apiserver_storage_transformation_duration_seconds` + - `apiserver_storage_data_key_generation_latencies_microseconds` -> `apiserver_storage_data_key_generation_duration_seconds` + - `apiserver_request_count` -> `apiserver_request_total` + - `apiserver_request_latencies_summary` + - The following replacement metrics are available from v1.15.0: + - 
`apiserver_storage_transformation_failures_total` -> `apiserver_storage_transformation_operations_total` ([#76496](https://github.com/kubernetes/kubernetes/pull/76496), [@danielqsj](https://github.com/danielqsj)) [SIG API Machinery, Cluster Lifecycle, Instrumentation, Network, Node and Scheduling] + +## Changes by Kind + +### Deprecation + +#### kube-apiserver: +- the following deprecated APIs can no longer be served: + - All resources under `apps/v1beta1` and `apps/v1beta2` - use `apps/v1` instead + - `daemonsets`, `deployments`, `replicasets` resources under `extensions/v1beta1` - use `apps/v1` instead + - `networkpolicies` resources under `extensions/v1beta1` - use `networking.k8s.io/v1` instead + - `podsecuritypolicies` resources under `extensions/v1beta1` - use `policy/v1beta1` instead ([#85903](https://github.com/kubernetes/kubernetes/pull/85903), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Testing] + +#### kube-controller-manager: +- Azure service annotation service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset has been deprecated. Its support would be removed in a future release. ([#88462](https://github.com/kubernetes/kubernetes/pull/88462), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] + +#### kubelet: +- The StreamingProxyRedirects feature and `--redirect-container-streaming` flag are deprecated, and will be removed in a future release. The default behavior (proxy streaming requests through the kubelet) will be the only supported option. If you are setting `--redirect-container-streaming=true`, then you must migrate off this configuration. The flag will no longer be able to be enabled starting in v1.20. If you are not setting the flag, no action is necessary. ([#88290](https://github.com/kubernetes/kubernetes/pull/88290), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Node] +- resource metrics endpoint `/metrics/resource/v1alpha1` as well as all metrics under this endpoint have been deprecated. Please convert to the following metrics emitted by endpoint `/metrics/resource`: + - scrape_error --> scrape_error + - node_cpu_usage_seconds_total --> node_cpu_usage_seconds + - node_memory_working_set_bytes --> node_memory_working_set_bytes + - container_cpu_usage_seconds_total --> container_cpu_usage_seconds + - container_memory_working_set_bytes --> container_memory_working_set_bytes + - scrape_error --> scrape_error + ([#86282](https://github.com/kubernetes/kubernetes/pull/86282), [@RainbowMango](https://github.com/RainbowMango)) [SIG Node] +- In a future release, kubelet will no longer create the CSI NodePublishVolume target directory, in accordance with the CSI specification. CSI drivers may need to be updated accordingly to properly create and process the target path. ([#75535](https://github.com/kubernetes/kubernetes/issues/75535)) [SIG Storage] + +#### kube-proxy: +- `--healthz-port` and `--metrics-port` flags are deprecated, please use `--healthz-bind-address` and `--metrics-bind-address` instead ([#88512](https://github.com/kubernetes/kubernetes/pull/88512), [@SataQiu](https://github.com/SataQiu)) [SIG Network] +- a new `EndpointSliceProxying` feature gate has been added to control the use of EndpointSlices in kube-proxy. The EndpointSlice feature gate that used to control this behavior no longer affects kube-proxy. This feature has been disabled by default. 
([#86137](https://github.com/kubernetes/kubernetes/pull/86137), [@robscott](https://github.com/robscott)) + +#### kubeadm: +- command line option "kubelet-version" for `kubeadm upgrade node` has been deprecated and will be removed in a future release. ([#87942](https://github.com/kubernetes/kubernetes/pull/87942), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- deprecate the usage of the experimental flag '--use-api' under the 'kubeadm alpha certs renew' command. ([#88827](https://github.com/kubernetes/kubernetes/pull/88827), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- kube-dns is deprecated and will not be supported in a future version ([#86574](https://github.com/kubernetes/kubernetes/pull/86574), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- the `ClusterStatus` struct present in the kubeadm-config ConfigMap is deprecated and will be removed in a future version. It is going to be maintained by kubeadm until it gets removed. The same information can be found on `etcd` and `kube-apiserver` pod annotations, `kubeadm.kubernetes.io/etcd.advertise-client-urls` and `kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint` respectively. ([#87656](https://github.com/kubernetes/kubernetes/pull/87656), [@ereslibre](https://github.com/ereslibre)) [SIG Cluster Lifecycle] + +#### kubectl: +- the boolean and unset values for the --dry-run flag are deprecated and a value --dry-run=server|client|none will be required in a future version. ([#87580](https://github.com/kubernetes/kubernetes/pull/87580), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI] +- `kubectl apply --server-dry-run` is deprecated and replaced with --dry-run=server ([#87580](https://github.com/kubernetes/kubernetes/pull/87580), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI] + +#### add-ons: +- Remove cluster-monitoring addon ([#85512](https://github.com/kubernetes/kubernetes/pull/85512), [@serathius](https://github.com/serathius)) [SIG Cluster Lifecycle, Instrumentation, Scalability and Testing] + +#### kube-scheduler: +- The `scheduling_duration_seconds` summary metric is deprecated ([#86586](https://github.com/kubernetes/kubernetes/pull/86586), [@xiaoanyunfei](https://github.com/xiaoanyunfei)) [SIG Scheduling] +- The `scheduling_algorithm_predicate_evaluation_seconds` and + `scheduling_algorithm_priority_evaluation_seconds` metrics are deprecated, replaced by `framework_extension_point_duration_seconds[extension_point="Filter"]` and `framework_extension_point_duration_seconds[extension_point="Score"]`. ([#86584](https://github.com/kubernetes/kubernetes/pull/86584), [@xiaoanyunfei](https://github.com/xiaoanyunfei)) [SIG Scheduling] +- `AlwaysCheckAllPredicates` is deprecated in scheduler Policy API. ([#86369](https://github.com/kubernetes/kubernetes/pull/86369), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] + +#### Other deprecations: +- The k8s.io/node-api component is no longer updated. Instead, use the RuntimeClass types located within k8s.io/api, and the generated clients located within k8s.io/client-go ([#87503](https://github.com/kubernetes/kubernetes/pull/87503), [@liggitt](https://github.com/liggitt)) [SIG Node and Release] +- Removed the 'client' label from apiserver_request_total. 
([#87669](https://github.com/kubernetes/kubernetes/pull/87669), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery and Instrumentation] + +### API Change + +#### New API types/versions: +- A new IngressClass resource has been added to enable better Ingress configuration. ([#88509](https://github.com/kubernetes/kubernetes/pull/88509), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, CLI, Network, Node and Testing] +- The CSIDriver API has graduated to storage.k8s.io/v1, and is now available for use. ([#84814](https://github.com/kubernetes/kubernetes/pull/84814), [@huffmanca](https://github.com/huffmanca)) [SIG Storage] + +#### New API fields: +- autoscaling/v2beta2 HorizontalPodAutoscaler added a `spec.behavior` field that allows scale behavior to be configured. Behaviors are specified separately for scaling up and down. In each direction a stabilization window can be specified as well as a list of policies and how to select amongst them. Policies can limit the absolute number of pods added or removed, or the percentage of pods added or removed. ([#74525](https://github.com/kubernetes/kubernetes/pull/74525), [@gliush](https://github.com/gliush)) [SIG API Machinery, Apps, Autoscaling and CLI] +- Ingress: + - `spec.ingressClassName` replaces the deprecated `kubernetes.io/ingress.class` annotation, and allows associating an Ingress object with a particular controller. + - path definitions added a `pathType` field to allow indicating how the specified path should be matched against incoming requests. Valid values are `Exact`, `Prefix`, and `ImplementationSpecific` ([#88587](https://github.com/kubernetes/kubernetes/pull/88587), [@cmluciano](https://github.com/cmluciano)) [SIG Apps, Cluster Lifecycle and Network] +- The alpha feature `AnyVolumeDataSource` enables PersistentVolumeClaim objects to use the spec.dataSource field to reference a custom type as a data source ([#88636](https://github.com/kubernetes/kubernetes/pull/88636), [@bswartz](https://github.com/bswartz)) [SIG Apps and Storage] +- The alpha feature `ConfigurableFSGroupPolicy` enables v1 Pods to specify a spec.securityContext.fsGroupChangePolicy policy to control how file permissions are applied to volumes mounted into the pod. ([#88488](https://github.com/kubernetes/kubernetes/pull/88488), [@gnufied](https://github.com/gnufied)) [SIG Storage] +- The alpha feature `ServiceAppProtocol` enables setting an `appProtocol` field in ServicePort and EndpointPort definitions. ([#88503](https://github.com/kubernetes/kubernetes/pull/88503), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- The alpha feature `ImmutableEphemeralVolumes` enables an `immutable` field in both Secret and ConfigMap objects to mark their contents as immutable. ([#86377](https://github.com/kubernetes/kubernetes/pull/86377), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, CLI and Testing] + +#### Other API changes: +- The beta feature `ServerSideApply` enables tracking and managing changed fields for all new objects, which means there will be `managedFields` in `metadata` with the list of managers and their owned fields. +- The alpha feature `ServiceAccountIssuerDiscovery` enables publishing OIDC discovery information and service account token verification keys at `/.well-known/openid-configuration` and `/openid/v1/jwks` endpoints by API servers configured to issue service account tokens. 
([#80724](https://github.com/kubernetes/kubernetes/pull/80724), [@cceckman](https://github.com/cceckman)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing] +- CustomResourceDefinition schemas that use `x-kubernetes-list-map-keys` to specify properties that uniquely identify list items must make those properties required or have a default value, to ensure those properties are present for all list items. See https://kubernetes.io/docs/reference/using-api/api-concepts/#merge-strategy for details. ([#88076](https://github.com/kubernetes/kubernetes/pull/88076), [@eloyekunle](https://github.com/eloyekunle)) [SIG API Machinery and Testing] +- CustomResourceDefinition schemas that use `x-kubernetes-list-type: map` or `x-kubernetes-list-type: set` now enable validation that the list items in the corresponding custom resources are unique. ([#84920](https://github.com/kubernetes/kubernetes/pull/84920), [@sttts](https://github.com/sttts)) [SIG API Machinery] + +#### Configuration file changes: + +#### kube-apiserver: +- The `--egress-selector-config-file` configuration file now accepts an apiserver.k8s.io/v1beta1 EgressSelectorConfiguration configuration object, and has been updated to allow specifying HTTP or GRPC connections to the network proxy ([#87179](https://github.com/kubernetes/kubernetes/pull/87179), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Cloud Provider and Cluster Lifecycle] + +#### kube-scheduler: +- A kubescheduler.config.k8s.io/v1alpha2 configuration file version is now accepted, with support for multiple scheduling profiles ([#87628](https://github.com/kubernetes/kubernetes/pull/87628), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] + - HardPodAffinityWeight moved from a top level ComponentConfig parameter to a PluginConfig parameter of InterPodAffinity Plugin in `kubescheduler.config.k8s.io/v1alpha2` ([#88002](https://github.com/kubernetes/kubernetes/pull/88002), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] + - Kube-scheduler can run more than one scheduling profile. Given a pod, the profile is selected by using its `.spec.schedulerName`. ([#88285](https://github.com/kubernetes/kubernetes/pull/88285), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps, Scheduling and Testing] + - Scheduler Extenders can now be configured in the v1alpha2 component config ([#88768](https://github.com/kubernetes/kubernetes/pull/88768), [@damemi](https://github.com/damemi)) [SIG Release, Scheduling and Testing] + - The PostFilter of scheduler framework is renamed to PreScore in kubescheduler.config.k8s.io/v1alpha2. ([#87751](https://github.com/kubernetes/kubernetes/pull/87751), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling and Testing] + +#### kube-proxy: +- Added kube-proxy flags `--ipvs-tcp-timeout`, `--ipvs-tcpfin-timeout`, `--ipvs-udp-timeout` to configure IPVS connection timeouts. ([#85517](https://github.com/kubernetes/kubernetes/pull/85517), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cluster Lifecycle and Network] +- Added optional `--detect-local-mode` flag to kube-proxy. Valid values are "ClusterCIDR" (default matching previous behavior) and "NodeCIDR" ([#87748](https://github.com/kubernetes/kubernetes/pull/87748), [@satyasm](https://github.com/satyasm)) [SIG Cluster Lifecycle, Network and Scheduling] +- Kube-controller-manager and kube-scheduler expose profiling by default to match the kube-apiserver. Use `--enable-profiling=false` to disable. 
([#88663](https://github.com/kubernetes/kubernetes/pull/88663), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Cloud Provider and Scheduling] +- Kubelet pod resources API now provides the information about active pods only. ([#79409](https://github.com/kubernetes/kubernetes/pull/79409), [@takmatsu](https://github.com/takmatsu)) [SIG Node] +- New flag `--endpointslice-updates-batch-period` in kube-controller-manager can be used to reduce the number of endpointslice updates generated by pod changes. ([#88745](https://github.com/kubernetes/kubernetes/pull/88745), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Apps and Network] +- New flag `--show-hidden-metrics-for-version` in kube-proxy, kubelet, kube-controller-manager, and kube-scheduler can be used to show all hidden metrics that are deprecated in the previous minor release. ([#85279](https://github.com/kubernetes/kubernetes/pull/85279), [@RainbowMango](https://github.com/RainbowMango)) [SIG Cluster Lifecycle and Network] + +#### Features graduated to beta: + - StartupProbe ([#83437](https://github.com/kubernetes/kubernetes/pull/83437), [@matthyx](https://github.com/matthyx)) [SIG Node, Scalability and Testing] + +#### Features graduated to GA: + - VolumePVCDataSource ([#88686](https://github.com/kubernetes/kubernetes/pull/88686), [@j-griffith](https://github.com/j-griffith)) [SIG Storage] + - TaintBasedEvictions ([#87487](https://github.com/kubernetes/kubernetes/pull/87487), [@skilxn-go](https://github.com/skilxn-go)) [SIG API Machinery, Apps, Node, Scheduling and Testing] + - BlockVolume and CSIBlockVolume ([#88673](https://github.com/kubernetes/kubernetes/pull/88673), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] + - Windows RunAsUserName ([#87790](https://github.com/kubernetes/kubernetes/pull/87790), [@marosset](https://github.com/marosset)) [SIG Apps and Windows] +- The following feature gates are removed, because the associated features were unconditionally enabled in previous releases: CustomResourceValidation, CustomResourceSubresources, CustomResourceWebhookConversion, CustomResourcePublishOpenAPI, CustomResourceDefaulting ([#87475](https://github.com/kubernetes/kubernetes/pull/87475), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] + +### Feature + +- API request throttling (due to a high rate of requests) is now reported in client-go logs at log level 2. The messages are of the form:`Throttling request took 1.50705208s, request: GET:` The presence of these messages may indicate to the administrator the need to tune the cluster accordingly. ([#87740](https://github.com/kubernetes/kubernetes/pull/87740), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery] +- Add support for mount options to the FC volume plugin ([#87499](https://github.com/kubernetes/kubernetes/pull/87499), [@ejweber](https://github.com/ejweber)) [SIG Storage] +- Added a config-mode flag in azure auth module to enable getting AAD token without spn: prefix in audience claim. When it's not specified, the default behavior doesn't change. 
([#87630](https://github.com/kubernetes/kubernetes/pull/87630), [@weinong](https://github.com/weinong)) [SIG API Machinery, Auth, CLI and Cloud Provider] +- Allow for configuration of CoreDNS replica count ([#85837](https://github.com/kubernetes/kubernetes/pull/85837), [@pickledrick](https://github.com/pickledrick)) [SIG Cluster Lifecycle] +- Allow user to specify resource using --filename flag when invoking kubectl exec ([#88460](https://github.com/kubernetes/kubernetes/pull/88460), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] +- Apiserver added a new flag --goaway-chance which is the fraction of requests that will be closed gracefully(GOAWAY) to prevent HTTP/2 clients from getting stuck on a single apiserver. ([#88567](https://github.com/kubernetes/kubernetes/pull/88567), [@answer1991](https://github.com/answer1991)) [SIG API Machinery] +- Azure Cloud Provider now supports using Azure network resources (Virtual Network, Load Balancer, Public IP, Route Table, Network Security Group, etc.) in different AAD Tenant and Subscription than those for the Kubernetes cluster. To use the feature, please reference https://github.com/kubernetes-sigs/cloud-provider-azure/blob/master/docs/cloud-provider-config.md#host-network-resources-in-different-aad-tenant-and-subscription. ([#88384](https://github.com/kubernetes/kubernetes/pull/88384), [@bowen5](https://github.com/bowen5)) [SIG Cloud Provider] +- Azure VMSS/VMSSVM clients now suppress requests on throttling ([#86740](https://github.com/kubernetes/kubernetes/pull/86740), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Azure cloud provider cache TTL is configurable, list of the azure cloud provider is as following: + - "availabilitySetNodesCacheTTLInSeconds" + - "vmssCacheTTLInSeconds" + - "vmssVirtualMachinesCacheTTLInSeconds" + - "vmCacheTTLInSeconds" + - "loadBalancerCacheTTLInSeconds" + - "nsgCacheTTLInSeconds" + - "routeTableCacheTTLInSeconds" + ([#86266](https://github.com/kubernetes/kubernetes/pull/86266), [@zqingqing1](https://github.com/zqingqing1)) [SIG Cloud Provider] +- Azure global rate limit is switched to per-client. A set of new rate limit configure options are introduced, including routeRateLimit, SubnetsRateLimit, InterfaceRateLimit, RouteTableRateLimit, LoadBalancerRateLimit, PublicIPAddressRateLimit, SecurityGroupRateLimit, VirtualMachineRateLimit, StorageAccountRateLimit, DiskRateLimit, SnapshotRateLimit, VirtualMachineScaleSetRateLimit and VirtualMachineSizeRateLimit. The original rate limit options would be default values for those new client's rate limiter. 
([#86515](https://github.com/kubernetes/kubernetes/pull/86515), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Azure network and VM clients now suppress requests on throttling ([#87122](https://github.com/kubernetes/kubernetes/pull/87122), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Azure storage clients now suppress requests on throttling ([#87306](https://github.com/kubernetes/kubernetes/pull/87306), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Azure: add support for single stack IPv6 ([#88448](https://github.com/kubernetes/kubernetes/pull/88448), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] +- DefaultConstraints can be specified for PodTopologySpread Plugin in the scheduler’s ComponentConfig ([#88671](https://github.com/kubernetes/kubernetes/pull/88671), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- DisableAvailabilitySetNodes is added to avoid VM list for VMSS clusters. It should only be used when vmType is "vmss" and all the nodes (including control plane nodes) are VMSS virtual machines. ([#87685](https://github.com/kubernetes/kubernetes/pull/87685), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Elasticsearch supports automatically setting the advertise address ([#85944](https://github.com/kubernetes/kubernetes/pull/85944), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle and Instrumentation] +- EndpointSlices will now be enabled by default. A new `EndpointSliceProxying` feature gate determines if kube-proxy will use EndpointSlices, this is disabled by default. ([#86137](https://github.com/kubernetes/kubernetes/pull/86137), [@robscott](https://github.com/robscott)) [SIG Network] +- Kube-proxy: Added dual-stack IPv4/IPv6 support to the iptables proxier. ([#82462](https://github.com/kubernetes/kubernetes/pull/82462), [@vllry](https://github.com/vllry)) [SIG Network] +- Kubeadm now supports automatic calculations of dual-stack node cidr masks to kube-controller-manager. ([#85609](https://github.com/kubernetes/kubernetes/pull/85609), [@Arvinderpal](https://github.com/Arvinderpal)) [SIG Cluster Lifecycle] +- Kubeadm: add a upgrade health check that deploys a Job ([#81319](https://github.com/kubernetes/kubernetes/pull/81319), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: add the experimental feature gate PublicKeysECDSA that can be used to create a + cluster with ECDSA certificates from "kubeadm init". Renewal of existing ECDSA certificates is also supported using "kubeadm alpha certs renew", but not switching between the RSA and ECDSA algorithms on the fly or during upgrades. 
([#86953](https://github.com/kubernetes/kubernetes/pull/86953), [@rojkov](https://github.com/rojkov)) [SIG API Machinery, Auth and Cluster Lifecycle] +- Kubeadm: implemented structured output of 'kubeadm config images list' command in JSON, YAML, Go template and JsonPath formats ([#86810](https://github.com/kubernetes/kubernetes/pull/86810), [@bart0sh](https://github.com/bart0sh)) [SIG Cluster Lifecycle] +- Kubeadm: on kubeconfig certificate renewal, keep the embedded CA in sync with the one on disk ([#88052](https://github.com/kubernetes/kubernetes/pull/88052), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: reject a node joining the cluster if a node with the same name already exists ([#81056](https://github.com/kubernetes/kubernetes/pull/81056), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: support Windows specific kubelet flags in kubeadm-flags.env ([#88287](https://github.com/kubernetes/kubernetes/pull/88287), [@gab-satchi](https://github.com/gab-satchi)) [SIG Cluster Lifecycle and Windows] +- Kubeadm: support automatic retry after failing to pull image ([#86899](https://github.com/kubernetes/kubernetes/pull/86899), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: upgrade supports fallback to the nearest known etcd version if an unknown k8s version is passed ([#88373](https://github.com/kubernetes/kubernetes/pull/88373), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubectl/drain: add disable-eviction option.Force drain to use delete, even if eviction is supported. This will bypass checking PodDisruptionBudgets, and should be used with caution. ([#85571](https://github.com/kubernetes/kubernetes/pull/85571), [@michaelgugino](https://github.com/michaelgugino)) [SIG CLI] +- Kubectl/drain: add skip-wait-for-delete-timeout option. If a pod’s `DeletionTimestamp` is older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip. ([#85577](https://github.com/kubernetes/kubernetes/pull/85577), [@michaelgugino](https://github.com/michaelgugino)) [SIG CLI] +- Option `preConfiguredBackendPoolLoadBalancerTypes` is added to azure cloud provider for the pre-configured load balancers, possible values: `""`, `"internal"`, `"external"`,`"all"` ([#86338](https://github.com/kubernetes/kubernetes/pull/86338), [@gossion](https://github.com/gossion)) [SIG Cloud Provider] +- PodTopologySpread plugin now excludes terminatingPods when making scheduling decisions. ([#87845](https://github.com/kubernetes/kubernetes/pull/87845), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] +- Provider/azure: Network security groups can now be in a separate resource group. ([#87035](https://github.com/kubernetes/kubernetes/pull/87035), [@CecileRobertMichon](https://github.com/CecileRobertMichon)) [SIG Cloud Provider] +- SafeSysctlWhitelist: add net.ipv4.ping_group_range ([#85463](https://github.com/kubernetes/kubernetes/pull/85463), [@AkihiroSuda](https://github.com/AkihiroSuda)) [SIG Auth] +- Scheduler framework permit plugins now run at the end of the scheduling cycle, after reserve plugins. Waiting on permit will remain in the beginning of the binding cycle. 
([#88199](https://github.com/kubernetes/kubernetes/pull/88199), [@mateuszlitwin](https://github.com/mateuszlitwin)) [SIG Scheduling] +- Scheduler: Add DefaultBinder plugin ([#87430](https://github.com/kubernetes/kubernetes/pull/87430), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] +- Skip default spreading scoring plugin for pods that define TopologySpreadConstraints ([#87566](https://github.com/kubernetes/kubernetes/pull/87566), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling] +- The kubectl --dry-run flag now accepts the values 'client', 'server', and 'none', to support client-side and server-side dry-run strategies. The boolean and unset values for the --dry-run flag are deprecated and a value will be required in a future version. ([#87580](https://github.com/kubernetes/kubernetes/pull/87580), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI] +- Support server-side dry-run in kubectl with --dry-run=server for commands including apply, patch, create, run, annotate, label, set, autoscale, drain, rollout undo, and expose. ([#87714](https://github.com/kubernetes/kubernetes/pull/87714), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG API Machinery, CLI and Testing] +- Add --dry-run=server|client to kubectl delete, taint, replace ([#88292](https://github.com/kubernetes/kubernetes/pull/88292), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI and Testing] +- The feature PodTopologySpread (feature gate `EvenPodsSpread`) has been enabled by default in 1.18. ([#88105](https://github.com/kubernetes/kubernetes/pull/88105), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] +- The kubelet and the default docker runtime now support running ephemeral containers in the Linux process namespace of a target container. Other container runtimes must implement support for this feature before it will be available for that runtime. ([#84731](https://github.com/kubernetes/kubernetes/pull/84731), [@verb](https://github.com/verb)) [SIG Node] +- The underlying format of the `CPUManager` state file has changed. Upgrades should be seamless, but any third-party tools that rely on reading the previous format need to be updated. ([#84462](https://github.com/kubernetes/kubernetes/pull/84462), [@klueska](https://github.com/klueska)) [SIG Node and Testing] +- Update CNI version to v0.8.5 ([#78819](https://github.com/kubernetes/kubernetes/pull/78819), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Cluster Lifecycle, Network, Release and Testing] +- Webhooks have alpha support for network proxy ([#85870](https://github.com/kubernetes/kubernetes/pull/85870), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Auth and Testing] +- When client certificate files are provided, reload files for new connections, and close connections when a certificate changes. ([#79083](https://github.com/kubernetes/kubernetes/pull/79083), [@jackkleeman](https://github.com/jackkleeman)) [SIG API Machinery, Auth, Node and Testing] +- When deleting objects using kubectl with the --force flag, you are no longer required to also specify --grace-period=0. ([#87776](https://github.com/kubernetes/kubernetes/pull/87776), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Windows nodes on GCE can use virtual TPM-based authentication to the control plane. 
([#85466](https://github.com/kubernetes/kubernetes/pull/85466), [@pjh](https://github.com/pjh)) [SIG Cluster Lifecycle] +- You can now pass "--node-ip ::" to kubelet to indicate that it should autodetect an IPv6 address to use as the node's primary address. ([#85850](https://github.com/kubernetes/kubernetes/pull/85850), [@danwinship](https://github.com/danwinship)) [SIG Cloud Provider, Network and Node] +- `kubectl` now contains a `kubectl alpha debug` command. This command allows attaching an ephemeral container to a running pod for the purposes of debugging. ([#88004](https://github.com/kubernetes/kubernetes/pull/88004), [@verb](https://github.com/verb)) [SIG CLI] +- TLS Server Name overrides can now be specified in a kubeconfig file and via --tls-server-name in kubectl ([#88769](https://github.com/kubernetes/kubernetes/pull/88769), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and CLI] + +#### Metrics: +- Add `rest_client_rate_limiter_duration_seconds` metric to component-base to track client side rate limiter latency in seconds. Broken down by verb and URL. ([#88134](https://github.com/kubernetes/kubernetes/pull/88134), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- Added two client certificate metrics for exec auth: + - `rest_client_certificate_expiration_seconds` a gauge reporting the lifetime of the current client certificate. Reports the time of expiry in seconds since January 1, 1970 UTC. + - `rest_client_certificate_rotation_age` a histogram reporting the age of a just rotated client certificate in seconds. ([#84382](https://github.com/kubernetes/kubernetes/pull/84382), [@sambdavidson](https://github.com/sambdavidson)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation] +- Controller manager serve workqueue metrics ([#87967](https://github.com/kubernetes/kubernetes/pull/87967), [@zhan849](https://github.com/zhan849)) [SIG API Machinery] - Following metrics have been turned off: - `scheduler_scheduling_latency_seconds` - `scheduler_e2e_scheduling_latency_microseconds` - `scheduler_scheduling_algorithm_latency_microseconds` - `scheduler_scheduling_algorithm_predicate_evaluation` - `scheduler_scheduling_algorithm_priority_evaluation` - `scheduler_scheduling_algorithm_preemption_evaluation` - `scheduler_scheduling_binding_latency_microseconds ([#83838](https://github.com/kubernetes/kubernetes/pull/83838`), [@RainbowMango](https://github.com/RainbowMango)) -- Deprecated metric `kubeproxy_sync_proxy_rules_latency_microseconds` has been turned off. ([#83839](https://github.com/kubernetes/kubernetes/pull/83839), [@RainbowMango](https://github.com/RainbowMango)) - -## Notable Features - -### Stable - -- Graduate ScheduleDaemonSetPods to GA. (feature gate will be removed in 1.18) ([#82795](https://github.com/kubernetes/kubernetes/pull/82795), [@draveness](https://github.com/draveness)) -- Graduate TaintNodesByCondition to GA in 1.17. (feature gate will be removed in 1.18) ([#82703](https://github.com/kubernetes/kubernetes/pull/82703), [@draveness](https://github.com/draveness)) -- The WatchBookmark feature is promoted to GA. With WatchBookmark feature, clients are able to request watch events with BOOKMARK type. See https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks for more details. ([#83195](https://github.com/kubernetes/kubernetes/pull/83195), [@wojtek-t](https://github.com/wojtek-t)) -- Promote NodeLease feature to GA. 
-- CSI Topology feature is GA. ([#83474](https://github.com/kubernetes/kubernetes/pull/83474), [@msau42](https://github.com/msau42)) -- The VolumeSubpathEnvExpansion feature is graduating to GA. The `VolumeSubpathEnvExpansion` feature gate is unconditionally enabled, and will be removed in v1.19. ([#82578](https://github.com/kubernetes/kubernetes/pull/82578), [@kevtaylor](https://github.com/kevtaylor)) -- Node-specific volume limits have graduated to GA. ([#83568](https://github.com/kubernetes/kubernetes/pull/83568), [@bertinatto](https://github.com/bertinatto)) -- The ResourceQuotaScopeSelectors feature has graduated to GA. The `ResourceQuotaScopeSelectors` feature gate is now unconditionally enabled and will be removed in 1.18. ([#82690](https://github.com/kubernetes/kubernetes/pull/82690), [@draveness](https://github.com/draveness)) - -### Beta - -- The Kubernetes Volume Snapshot feature has been moved to beta. The VolumeSnapshotDataSource feature gate is on by default in this release. This feature enables you to take a snapshot of a volume (if supported by the CSI driver), and use the snapshot to provision a new volume, pre-populated with data from the snapshot. -- Promoted the CSIMigration feature gate to Beta (on by default) and CSIMigrationGCE to Beta (off by default since it requires installation of the GCE PD CSI Driver) ([#85231](https://github.com/kubernetes/kubernetes/pull/85231), [@davidz627](https://github.com/davidz627)) -- EndpointSlices are now beta but not yet enabled by default. Use the EndpointSlice feature gate to enable this feature. ([#85365](https://github.com/kubernetes/kubernetes/pull/85365), [@robscott](https://github.com/robscott)) -- Promote CSIMigrationAWS to Beta (off by default since it requires installation of the AWS EBS CSI Driver) ([#85237](https://github.com/kubernetes/kubernetes/pull/85237), [@leakingtapan](https://github.com/leakingtapan)) -- Moved the Windows RunAsUserName feature to beta ([#84882](https://github.com/kubernetes/kubernetes/pull/84882), [@marosset](https://github.com/marosset)) - -### CLI Improvements - -- The kubectl `api-resources` command now has a `--sort-by` flag to sort resources by name or kind. ([#81971](https://github.com/kubernetes/kubernetes/pull/81971), [@laddng](https://github.com/laddng)) -- A new `--prefix` flag has been added to `kubectl logs`, which prepends each log line with information about its source (pod name and container name) ([#76471](https://github.com/kubernetes/kubernetes/pull/76471), [@m1kola](https://github.com/m1kola)) - -## API Changes - -- CustomResourceDefinitions now validate documented API semantics of `x-kubernetes-list-type` and `x-kubernetes-map-type` atomic to reject non-atomic sub-types. ([#84722](https://github.com/kubernetes/kubernetes/pull/84722), [@sttts](https://github.com/sttts)) -- Kube-apiserver: The `AdmissionConfiguration` type accepted by `--admission-control-config-file` has been promoted to `apiserver.config.k8s.io/v1` with no schema changes.
([#85098](https://github.com/kubernetes/kubernetes/pull/85098), [@liggitt](https://github.com/liggitt)) -- Fixed EndpointSlice port name validation to match Endpoint port name validation (allowing port names longer than 15 characters) ([#84481](https://github.com/kubernetes/kubernetes/pull/84481), [@robscott](https://github.com/robscott)) -- CustomResourceDefinitions introduce `x-kubernetes-map-type` annotation as a CRD API extension. Enables this particular validation for server-side apply. ([#84113](https://github.com/kubernetes/kubernetes/pull/84113), [@enxebre](https://github.com/enxebre)) - -## Other notable changes - -### API Machinery - -- kube-apiserver: the `--runtime-config` flag now supports an `api/beta=false` value which disables all built-in REST API versions matching `v[0-9]+beta[0-9]+`. ([#84304](https://github.com/kubernetes/kubernetes/pull/84304), [@liggitt](https://github.com/liggitt)) - The `--feature-gates` flag now supports an `AllBeta=false` value which disables all beta feature gates. ([#84304](https://github.com/kubernetes/kubernetes/pull/84304), [@liggitt](https://github.com/liggitt)) -- New flag `--show-hidden-metrics-for-version` in kube-apiserver can be used to show all hidden metrics that deprecated in the previous minor release. ([#84292](https://github.com/kubernetes/kubernetes/pull/84292), [@RainbowMango](https://github.com/RainbowMango)) -- kube-apiserver: Authentication configuration for mutating and validating admission webhooks referenced from an `--admission-control-config-file` can now be specified with `apiVersion: apiserver.config.k8s.io/v1, kind: WebhookAdmissionConfiguration`. ([#85138](https://github.com/kubernetes/kubernetes/pull/85138), [@liggitt](https://github.com/liggitt)) -- kube-apiserver: The `ResourceQuota` admission plugin configuration referenced from `--admission-control-config-file` admission config has been promoted to `apiVersion: apiserver.config.k8s.io/v1`, `kind: ResourceQuotaConfiguration` with no schema changes. ([#85099](https://github.com/kubernetes/kubernetes/pull/85099), [@liggitt](https://github.com/liggitt)) -- kube-apiserver: fixed a bug that could cause a goroutine leak if the apiserver encountered an encoding error serving a watch to a websocket watcher ([#84693](https://github.com/kubernetes/kubernetes/pull/84693), [@tedyu](https://github.com/tedyu)) -- Fix the bug that EndpointSlice for masters wasn't created after enabling EndpointSlice feature on a pre-existing cluster. ([#84421](https://github.com/kubernetes/kubernetes/pull/84421), [@tnqn](https://github.com/tnqn)) -- Switched intstr.Type to sized integer to follow API guidelines and improve compatibility with proto libraries ([#83956](https://github.com/kubernetes/kubernetes/pull/83956), [@liggitt](https://github.com/liggitt)) -- Client-go: improved allocation behavior of the delaying workqueue when handling objects with far-future ready times. ([#83945](https://github.com/kubernetes/kubernetes/pull/83945), [@barkbay](https://github.com/barkbay)) -- Fixed an issue with informers missing an `Added` event if a recently deleted object was immediately recreated at the same time the informer dropped a watch and relisted. ([#83911](https://github.com/kubernetes/kubernetes/pull/83911), [@matte21](https://github.com/matte21)) -- Fixed panic when accessing CustomResources of a CRD with `x-kubernetes-int-or-string`. 
([#83787](https://github.com/kubernetes/kubernetes/pull/83787), [@sttts](https://github.com/sttts)) -- The resource version option, when passed to a list call, is now consistently interpreted as the minimum allowed resource version. Previously when listing resources that had the watch cache disabled clients could retrieve a snapshot at that exact resource version. If the client requests a resource version newer than the current state, a TimeoutError is returned suggesting the client retry in a few seconds. This behavior is now consistent for both single item retrieval and list calls, and for when the watch cache is enabled or disabled. ([#72170](https://github.com/kubernetes/kubernetes/pull/72170), [@jpbetz](https://github.com/jpbetz)) -- Fixes a goroutine leak in kube-apiserver when a request times out. ([#83333](https://github.com/kubernetes/kubernetes/pull/83333), [@lavalamp](https://github.com/lavalamp)) -- Fixes the bug in informer-gen that it produces incorrect code if a type has nonNamespaced tag set. ([#80458](https://github.com/kubernetes/kubernetes/pull/80458), [@tatsuhiro-t](https://github.com/tatsuhiro-t)) -- Resolves bottleneck in internal API server communication that can cause increased goroutines and degrade API Server performance ([#80465](https://github.com/kubernetes/kubernetes/pull/80465), [@answer1991](https://github.com/answer1991)) -- Resolves regression generating informers for packages whose names contain `.` characters ([#82410](https://github.com/kubernetes/kubernetes/pull/82410), [@nikhita](https://github.com/nikhita)) -- Resolves issue with `/readyz` and `/livez` not including etcd and kms health checks ([#82713](https://github.com/kubernetes/kubernetes/pull/82713), [@logicalhan](https://github.com/logicalhan)) -- Fixes regression in logging spurious stack traces when proxied connections are closed by the backend ([#82588](https://github.com/kubernetes/kubernetes/pull/82588), [@liggitt](https://github.com/liggitt)) -- Kube-apiserver now reloads serving certificates from disk every minute to allow rotation without restarting the server process ([#84200](https://github.com/kubernetes/kubernetes/pull/84200), [@jackkleeman](https://github.com/jackkleeman)) -- Client-ca bundles for the all generic-apiserver based servers will dynamically reload from disk on content changes ([#83579](https://github.com/kubernetes/kubernetes/pull/83579), [@deads2k](https://github.com/deads2k)) -- Client-go: Clients can request protobuf and json and correctly negotiate with the server for JSON for CRD objects, allowing all client libraries to request protobuf if it is available. If an error occurs negotiating a watch with the server, the error is immediately return by the client `Watch()` method instead of being sent as an `Error` event on the watch stream. ([#84692](https://github.com/kubernetes/kubernetes/pull/84692), [@smarterclayton](https://github.com/smarterclayton)) - Renamed FeatureGate RequestManagement to APIPriorityAndFairness. This feature gate is an alpha and has not yet been associated with any actual functionality. ([#85260](https://github.com/kubernetes/kubernetes/pull/85260), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) -- Filter published OpenAPI schema by making nullable, required fields non-required in order to avoid kubectl to wrongly reject null values. 
([#85722](https://github.com/kubernetes/kubernetes/pull/85722), [@sttts](https://github.com/sttts)) -- kube-apiserver: fixed a conflict error encountered attempting to delete a pod with `gracePeriodSeconds=0` and a resourceVersion precondition ([#85516](https://github.com/kubernetes/kubernetes/pull/85516), [@michaelgugino](https://github.com/michaelgugino)) -- Use context to check client closed instead of http.CloseNotifier in processing watch request which will reduce 1 goroutine for each request if proto is HTTP/2.x . ([#85408](https://github.com/kubernetes/kubernetes/pull/85408), [@answer1991](https://github.com/answer1991)) -- Reload apiserver SNI certificates from disk every minute ([#84303](https://github.com/kubernetes/kubernetes/pull/84303), [@jackkleeman](https://github.com/jackkleeman)) -- The mutating and validating admission webhook plugins now read configuration from the admissionregistration.k8s.io/v1 API. ([#80883](https://github.com/kubernetes/kubernetes/pull/80883), [@liggitt](https://github.com/liggitt)) -- kube-proxy: a configuration file specified via `--config` is now loaded with strict deserialization, which fails if the config file contains duplicate or unknown fields. This protects against accidentally running with config files that are malformed, mis-indented, or have typos in field names, and getting unexpected behavior. ([#82927](https://github.com/kubernetes/kubernetes/pull/82927), [@obitech](https://github.com/obitech)) -- When registering with a 1.17+ API server, MutatingWebhookConfiguration and ValidatingWebhookConfiguration objects can now request that only `v1` AdmissionReview requests be sent to them. Previously, webhooks were required to support receiving `v1beta1` AdmissionReview requests as well for compatibility with API servers <= 1.15. - - When registering with a 1.17+ API server, a CustomResourceDefinition conversion webhook can now request that only `v1` ConversionReview requests be sent to them. Previously, conversion webhooks were required to support receiving `v1beta1` ConversionReview requests as well for compatibility with API servers <= 1.15. ([#82707](https://github.com/kubernetes/kubernetes/pull/82707), [@liggitt](https://github.com/liggitt)) -- OpenAPI v3 format in CustomResourceDefinition schemas are now documented. ([#85381](https://github.com/kubernetes/kubernetes/pull/85381), [@sttts](https://github.com/sttts)) -- kube-apiserver: Fixed a regression accepting patch requests > 1MB ([#84963](https://github.com/kubernetes/kubernetes/pull/84963), [@liggitt](https://github.com/liggitt)) -- The example API server has renamed its `wardle.k8s.io` API group to `wardle.example.com` ([#81670](https://github.com/kubernetes/kubernetes/pull/81670), [@liggitt](https://github.com/liggitt)) -- CRDs defaulting is promoted to GA. Note: the feature gate CustomResourceDefaulting will be removed in 1.18. ([#84713](https://github.com/kubernetes/kubernetes/pull/84713), [@sttts](https://github.com/sttts)) -- Restores compatibility with <=1.15.x custom resources by not publishing OpenAPI for non-structural custom resource definitions ([#82653](https://github.com/kubernetes/kubernetes/pull/82653), [@liggitt](https://github.com/liggitt)) -- If given an IPv6 bind-address, kube-apiserver will now advertise an IPv6 endpoint for the kubernetes.default service. ([#84727](https://github.com/kubernetes/kubernetes/pull/84727), [@danwinship](https://github.com/danwinship)) -- Add table convertor to component status. 
([#85174](https://github.com/kubernetes/kubernetes/pull/85174), [@zhouya0](https://github.com/zhouya0)) -- Scale custom resource unconditionally if resourceVersion is not provided ([#80572](https://github.com/kubernetes/kubernetes/pull/80572), [@knight42](https://github.com/knight42)) -- When the go-client reflector relists, the ResourceVersion list option is set to the reflector's latest synced resource version to ensure the reflector does not "go back in time" and reprocess events older than it has already processed. If the server responds with an HTTP 410 (Gone) status code response, the relist falls back to using `resourceVersion=""`. ([#83520](https://github.com/kubernetes/kubernetes/pull/83520), [@jpbetz](https://github.com/jpbetz)) -- Fix unsafe JSON construction in a number of locations in the codebase ([#81158](https://github.com/kubernetes/kubernetes/pull/81158), [@zouyee](https://github.com/zouyee)) -- Fixes a flaw (CVE-2019-11253) in json/yaml decoding where large or malformed documents could consume excessive server resources. Request bodies for normal API requests (create/delete/update/patch operations of regular resources) are now limited to 3MB. ([#83261](https://github.com/kubernetes/kubernetes/pull/83261), [@liggitt](https://github.com/liggitt)) -- CRDs can have fields named `type` with value `array` and nested arrays with `items` fields, without the validation falling over on this. ([#85223](https://github.com/kubernetes/kubernetes/pull/85223), [@sttts](https://github.com/sttts)) - -### Apps - -- Support Service Topology ([#72046](https://github.com/kubernetes/kubernetes/pull/72046), [@m1093782566](https://github.com/m1093782566)) -- Finalizer Protection for Service LoadBalancers is now in GA (enabled by default). This feature ensures the Service resource is not fully deleted until the correlating load balancer resources are deleted. ([#85023](https://github.com/kubernetes/kubernetes/pull/85023), [@MrHohn](https://github.com/MrHohn)) -- Pod process namespace sharing is now Generally Available. The `PodShareProcessNamespace` feature gate is now deprecated and will be removed in Kubernetes 1.19. ([#84356](https://github.com/kubernetes/kubernetes/pull/84356), [@verb](https://github.com/verb)) -- Fix handling of tombstones in the pod-disruption-budget controller. ([#83951](https://github.com/kubernetes/kubernetes/pull/83951), [@zouyee](https://github.com/zouyee)) -- Fixed the bug that deleted services were processed by EndpointSliceController repeatedly even though their cleanup was successful. ([#82996](https://github.com/kubernetes/kubernetes/pull/82996), [@tnqn](https://github.com/tnqn)) -- Add `RequiresExactMatch` for `label.Selector` ([#85048](https://github.com/kubernetes/kubernetes/pull/85048), [@shaloulcy](https://github.com/shaloulcy)) -- Adds a new label to indicate what is managing an EndpointSlice. ([#83965](https://github.com/kubernetes/kubernetes/pull/83965), [@robscott](https://github.com/robscott)) -- An end-user may choose to request logs without confirming the identity of the backing kubelet. This feature can be disabled by setting the `AllowInsecureBackendProxy` feature-gate to false. ([#83419](https://github.com/kubernetes/kubernetes/pull/83419), [@deads2k](https://github.com/deads2k))
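  A minimal sketch of opting out of that behaviour (an illustrative flag, not part of the upstream note; the kube-apiserver manifest location and its other arguments vary per cluster and stay unchanged):

  ```shell
  # Disable the gate so that kubectl logs/exec requests proxied to the kubelet
  # always verify the kubelet's serving certificate.
  kube-apiserver --feature-gates=AllowInsecureBackendProxy=false
  ```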
-- When scaling down a ReplicaSet, delete doubled up replicas first, where a "doubled up replica" is defined as one that is on the same node as an active replica belonging to a related ReplicaSet. ReplicaSets are considered "related" if they have a common controller (typically a Deployment). ([#80004](https://github.com/kubernetes/kubernetes/pull/80004), [@Miciah](https://github.com/Miciah)) -- Kube-controller-manager: Fixes a bug setting headless service labels on endpoints ([#85361](https://github.com/kubernetes/kubernetes/pull/85361), [@liggitt](https://github.com/liggitt)) -- People can see the right log and note. ([#84637](https://github.com/kubernetes/kubernetes/pull/84637), [@zhipengzuo](https://github.com/zhipengzuo)) -- Cleaned up the duplicate GetPodServiceMemberships function ([#83902](https://github.com/kubernetes/kubernetes/pull/83902), [@gongguan](https://github.com/gongguan)) - -### Auth - -- K8s docker config json secrets are now compatible with docker config desktop authentication credentials files ([#82148](https://github.com/kubernetes/kubernetes/pull/82148), [@bbourbie](https://github.com/bbourbie)) -- Kubelet and aggregated API servers now use v1 TokenReview and SubjectAccessReview endpoints to check authentication/authorization. ([#84768](https://github.com/kubernetes/kubernetes/pull/84768), [@liggitt](https://github.com/liggitt)) -- Kube-apiserver can now specify `--authentication-token-webhook-version=v1` or `--authorization-webhook-version=v1` to use `v1` TokenReview and SubjectAccessReview API objects when communicating with authentication and authorization webhooks. ([#84768](https://github.com/kubernetes/kubernetes/pull/84768), [@liggitt](https://github.com/liggitt)) -- Authentication token cache size is increased (from 4k to 32k) to support clusters with many nodes or many namespaces with active service accounts. ([#83643](https://github.com/kubernetes/kubernetes/pull/83643), [@lavalamp](https://github.com/lavalamp)) -- Apiservers based on k8s.io/apiserver with delegated authn based on cluster authentication will automatically update to new authentication information when the authoritative configmap is updated. ([#85004](https://github.com/kubernetes/kubernetes/pull/85004), [@deads2k](https://github.com/deads2k)) -- The `extension-apiserver-authentication` ConfigMap in kube-system is continuously updated by kube-apiservers, instead of just at apiserver start ([#82705](https://github.com/kubernetes/kubernetes/pull/82705), [@deads2k](https://github.com/deads2k)) - -### CLI - -- Fixed kubectl endpointslice output for get requests ([#82603](https://github.com/kubernetes/kubernetes/pull/82603), [@robscott](https://github.com/robscott)) -- Gives the right error message when using `kubectl delete` with a wrong resource. ([#83825](https://github.com/kubernetes/kubernetes/pull/83825), [@zhouya0](https://github.com/zhouya0)) -- If a bad flag is supplied to a kubectl command, only a tip to run `--help` is printed, instead of the usage menu. The usage menu is printed upon running `kubectl command --help`. ([#82423](https://github.com/kubernetes/kubernetes/pull/82423), [@sallyom](https://github.com/sallyom)) -- Commands like `kubectl apply` now return errors if schema-invalid annotations are specified, rather than silently dropping the entire annotations section.
([#83552](https://github.com/kubernetes/kubernetes/pull/83552), [@liggitt](https://github.com/liggitt)) -- Fixes spurious 0 revisions listed when running `kubectl rollout history` for a StatefulSet ([#82643](https://github.com/kubernetes/kubernetes/pull/82643), [@ZP-AlwaysWin](https://github.com/ZP-AlwaysWin)) -- Correct a reference to a not/no longer used kustomize subcommand in the documentation ([#82535](https://github.com/kubernetes/kubernetes/pull/82535), [@demobox](https://github.com/demobox)) -- Kubectl set resources will no longer return an error if passed an empty change for a resource. kubectl set subject will no longer return an error if passed an empty change for a resource. ([#85490](https://github.com/kubernetes/kubernetes/pull/85490), [@sallyom](https://github.com/sallyom)) -- Kubectl: --resource-version now works properly in label/annotate/set selector commands when racing with other clients to update the target object ([#85285](https://github.com/kubernetes/kubernetes/pull/85285), [@liggitt](https://github.com/liggitt)) -- The `--certificate-authority` flag now correctly overrides existing skip-TLS or CA data settings in the kubeconfig file ([#83547](https://github.com/kubernetes/kubernetes/pull/83547), [@liggitt](https://github.com/liggitt)) - -### Cloud Provider - -- Azure: update disk lock logic per vm during attach/detach to allow concurrent updates for different nodes. ([#85115](https://github.com/kubernetes/kubernetes/pull/85115), [@aramase](https://github.com/aramase)) -- Fix vmss dirty cache issue in disk attach/detach on vmss node ([#85158](https://github.com/kubernetes/kubernetes/pull/85158), [@andyzhangx](https://github.com/andyzhangx)) -- Fix race condition when attach/delete azure disk in same time ([#84917](https://github.com/kubernetes/kubernetes/pull/84917), [@andyzhangx](https://github.com/andyzhangx)) -- Change GCP ILB firewall names to contain the `k8s-fw-` prefix like the rest of the firewall rules. This is needed for consistency and also for other components to identify the firewall rule as k8s/service-controller managed. ([#84622](https://github.com/kubernetes/kubernetes/pull/84622), [@prameshj](https://github.com/prameshj)) -- Ensure health probes are created for local traffic policy UDP services on Azure ([#84802](https://github.com/kubernetes/kubernetes/pull/84802), [@feiskyer](https://github.com/feiskyer)) -- Openstack: Do not delete managed LB in case of security group reconciliation errors ([#82264](https://github.com/kubernetes/kubernetes/pull/82264), [@multi-io](https://github.com/multi-io)) -- Fix aggressive VM calls for Azure VMSS ([#83102](https://github.com/kubernetes/kubernetes/pull/83102), [@feiskyer](https://github.com/feiskyer)) -- Fix: azure disk detach failure if node not exists ([#82640](https://github.com/kubernetes/kubernetes/pull/82640), [@andyzhangx](https://github.com/andyzhangx)) -- Add azure disk encryption(SSE+CMK) support ([#84605](https://github.com/kubernetes/kubernetes/pull/84605), [@andyzhangx](https://github.com/andyzhangx)) -- Update Azure SDK versions to v35.0.0 ([#84543](https://github.com/kubernetes/kubernetes/pull/84543), [@andyzhangx](https://github.com/andyzhangx)) -- Azure: Add allow unsafe read from cache ([#83685](https://github.com/kubernetes/kubernetes/pull/83685), [@aramase](https://github.com/aramase)) -- Reduces the number of calls made to the Azure API when requesting the instance view of a virtual machine scale set node. 
([#82496](https://github.com/kubernetes/kubernetes/pull/82496), [@hasheddan](https://github.com/hasheddan)) -- Added cloud operation count metrics to azure cloud controller manager. ([#82574](https://github.com/kubernetes/kubernetes/pull/82574), [@kkmsft](https://github.com/kkmsft)) -- On AWS nodes with multiple network interfaces, kubelet should now more reliably report the same primary node IP. ([#80747](https://github.com/kubernetes/kubernetes/pull/80747), [@danwinship](https://github.com/danwinship)) -- Update Azure load balancer to prevent orphaned public IP addresses ([#82890](https://github.com/kubernetes/kubernetes/pull/82890), [@chewong](https://github.com/chewong)) - -### Cluster Lifecycle - -- Kubeadm alpha certs command now skip missing files ([#85092](https://github.com/kubernetes/kubernetes/pull/85092), [@fabriziopandini](https://github.com/fabriziopandini)) -- Kubeadm: the command "kubeadm token create" now has a "--certificate-key" flag that can be used for the formation of join commands for control-planes with automatic copy of certificates ([#84591](https://github.com/kubernetes/kubernetes/pull/84591), [@TheLastProject](https://github.com/TheLastProject)) -- Kubeadm: Fix a bug where kubeadm cannot parse kubelet's version if the latter dumps logs on the standard error. ([#85351](https://github.com/kubernetes/kubernetes/pull/85351), [@rosti](https://github.com/rosti)) -- Kubeadm: added retry to all the calls to the etcd API so kubeadm will be more resilient to network glitches ([#85201](https://github.com/kubernetes/kubernetes/pull/85201), [@fabriziopandini](https://github.com/fabriziopandini)) -- Fixes a bug in kubeadm that caused init and join to hang indefinitely in specific conditions. ([#85156](https://github.com/kubernetes/kubernetes/pull/85156), [@chuckha](https://github.com/chuckha)) -- Kubeadm now includes CoreDNS version 1.6.5 - - `kubernetes` plugin adds metrics to measure kubernetes control plane latency. - - the `health` plugin now includes the `lameduck` option by default, which waits for a duration before shutting down. ([#85109](https://github.com/kubernetes/kubernetes/pull/85109), [@rajansandeep](https://github.com/rajansandeep)) -- Fixed bug when using kubeadm alpha certs commands with clusters using external etcd ([#85091](https://github.com/kubernetes/kubernetes/pull/85091), [@fabriziopandini](https://github.com/fabriziopandini)) -- Kubeadm no longer defaults or validates the component configs of the kubelet or kube-proxy ([#79223](https://github.com/kubernetes/kubernetes/pull/79223), [@rosti](https://github.com/rosti)) -- Kubeadm: remove the deprecated `--cri-socket` flag for `kubeadm upgrade apply`. The flag has been deprecated since v1.14. ([#85044](https://github.com/kubernetes/kubernetes/pull/85044), [@neolit123](https://github.com/neolit123)) -- Kubeadm: prevent potential hanging of commands such as "kubeadm reset" if the apiserver endpoint is not reachable. ([#84648](https://github.com/kubernetes/kubernetes/pull/84648), [@neolit123](https://github.com/neolit123)) -- Kubeadm: fix skipped etcd upgrade on secondary control-plane nodes when the command `kubeadm upgrade node` is used. ([#85024](https://github.com/kubernetes/kubernetes/pull/85024), [@neolit123](https://github.com/neolit123)) -- Kubeadm: fix an issue with the kube-proxy container env. 
variables ([#84888](https://github.com/kubernetes/kubernetes/pull/84888), [@neolit123](https://github.com/neolit123)) -- Utilize diagnostics tool to dump GKE windows test logs ([#83517](https://github.com/kubernetes/kubernetes/pull/83517), [@YangLu1031](https://github.com/YangLu1031)) -- Kubeadm: always mount the kube-controller-manager hostPath volume that is given by the `--flex-volume-plugin-dir` flag. ([#84468](https://github.com/kubernetes/kubernetes/pull/84468), [@neolit123](https://github.com/neolit123)) -- Update Cluster Autoscaler version to 1.16.2 (CA release docs: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.16.2) ([#84038](https://github.com/kubernetes/kubernetes/pull/84038), [@losipiuk](https://github.com/losipiuk)) -- Kubeadm no longer removes /etc/cni/net.d as it does not install it. Users should remove files from it manually or rely on the component that created them ([#83950](https://github.com/kubernetes/kubernetes/pull/83950), [@yastij](https://github.com/yastij)) -- Kubeadm: fix wrong default value for the `upgrade node --certificate-renewal` flag. ([#83528](https://github.com/kubernetes/kubernetes/pull/83528), [@neolit123](https://github.com/neolit123)) -- Bump metrics-server to v0.3.5 ([#83015](https://github.com/kubernetes/kubernetes/pull/83015), [@olagacek](https://github.com/olagacek)) -- Dashboard: disable the dashboard Deployment on non-Linux nodes. This step is required to support Windows worker nodes. ([#82975](https://github.com/kubernetes/kubernetes/pull/82975), [@wawa0210](https://github.com/wawa0210)) -- Fixes a panic in kube-controller-manager cleaning up bootstrap tokens ([#82887](https://github.com/kubernetes/kubernetes/pull/82887), [@tedyu](https://github.com/tedyu)) -- Kubeadm: add a new `kubelet-finalize` phase as part of the `init` workflow and an experimental sub-phase to enable automatic kubelet client certificate rotation on primary control-plane nodes. - - Prior to 1.17 and for existing nodes created by `kubeadm init` where kubelet client certificate rotation is desired, you must modify "/etc/kubernetes/kubelet.conf" to point to the PEM symlink for rotation: - - `client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem` and `client-key: /var/lib/kubelet/pki/kubelet-client-current.pem`, replacing the embedded client certificate and key. ([#84118](https://github.com/kubernetes/kubernetes/pull/84118), [@neolit123](https://github.com/neolit123)) - -- Kubeadm: add a upgrade health check that deploys a Job ([#81319](https://github.com/kubernetes/kubernetes/pull/81319), [@neolit123](https://github.com/neolit123)) -- Kubeadm now supports automatic calculations of dual-stack node cidr masks to kube-controller-manager. ([#85609](https://github.com/kubernetes/kubernetes/pull/85609), [@Arvinderpal](https://github.com/Arvinderpal)) -- Kubeadm: reset raises warnings if it cannot delete folders ([#85265](https://github.com/kubernetes/kubernetes/pull/85265), [@SataQiu](https://github.com/SataQiu)) -- Kubeadm: enable the usage of the secure kube-scheduler and kube-controller-manager ports for health checks. For kube-scheduler was 10251, becomes 10259. For kube-controller-manager was 10252, becomes 10257. ([#85043](https://github.com/kubernetes/kubernetes/pull/85043), [@neolit123](https://github.com/neolit123)) -- A new kubelet command line option, `--reserved-cpus`, is introduced to explicitly define the CPU list that will be reserved for system. 
For example, if `--reserved-cpus=0,1,2,3` is specified, then cpu 0,1,2,3 will be reserved for the system. On a system with 24 CPUs, the user may specify `isolcpus=4-23` for the kernel option and use CPU 4-23 for the user containers. ([#83592](https://github.com/kubernetes/kubernetes/pull/83592), [@jianzzha](https://github.com/jianzzha)) -- Kubelet: a configuration file specified via `--config` is now loaded with strict deserialization, which fails if the config file contains duplicate or unknown fields. This protects against accidentally running with config files that are malformed, mis-indented, or have typos in field names, and getting unexpected behavior. ([#83204](https://github.com/kubernetes/kubernetes/pull/83204), [@obitech](https://github.com/obitech)) -- Kubeadm now propagates proxy environment variables to kube-proxy ([#84559](https://github.com/kubernetes/kubernetes/pull/84559), [@yastij](https://github.com/yastij)) -- Update the latest validated version of Docker to 19.03 ([#84476](https://github.com/kubernetes/kubernetes/pull/84476), [@neolit123](https://github.com/neolit123)) -- Update to Ingress-GCE v1.6.1 ([#84018](https://github.com/kubernetes/kubernetes/pull/84018), [@rramkumar1](https://github.com/rramkumar1)) -- Kubeadm: enhance certs check-expiration to show the expiration info of related CAs ([#83932](https://github.com/kubernetes/kubernetes/pull/83932), [@SataQiu](https://github.com/SataQiu)) -- Kubeadm: implemented structured output of 'kubeadm token list' in JSON, YAML, Go template and JsonPath formats ([#78764](https://github.com/kubernetes/kubernetes/pull/78764), [@bart0sh](https://github.com/bart0sh)) -- Kubeadm: add support for `127.0.0.1` as advertise address. kubeadm will automatically replace this value with matching global unicast IP address on the loopback interface. ([#83475](https://github.com/kubernetes/kubernetes/pull/83475), [@fabriziopandini](https://github.com/fabriziopandini)) -- Kube-scheduler: a configuration file specified via `--config` is now loaded with strict deserialization, which fails if the config file contains duplicate or unknown fields. This protects against accidentally running with config files that are malformed, mis-indented, or have typos in field names, and getting unexpected behavior. ([#83030](https://github.com/kubernetes/kubernetes/pull/83030), [@obitech](https://github.com/obitech)) -- Kubeadm: use the `--service-cluster-ip-range` flag to init or use the ServiceSubnet field in the kubeadm config to pass a comma separated list of Service CIDRs. ([#82473](https://github.com/kubernetes/kubernetes/pull/82473), [@Arvinderpal](https://github.com/Arvinderpal)) -- Update crictl to v1.16.1. ([#82856](https://github.com/kubernetes/kubernetes/pull/82856), [@Random-Liu](https://github.com/Random-Liu)) -- Bump addon-resizer to 1.8.7 to fix issues with using deprecated extensions APIs ([#85864](https://github.com/kubernetes/kubernetes/pull/85864), [@liggitt](https://github.com/liggitt)) -- Simple script based hyperkube image that bundles all the necessary binaries. This is an equivalent replacement for the image based on the go based hyperkube command + image. 
([#84662](https://github.com/kubernetes/kubernetes/pull/84662), [@dims](https://github.com/dims)) -- Hyperkube will now be available in a new Github repository and will not be included in the kubernetes release from 1.17 onwards ([#83454](https://github.com/kubernetes/kubernetes/pull/83454), [@dims](https://github.com/dims)) -- Remove prometheus cluster monitoring addon from kube-up ([#83442](https://github.com/kubernetes/kubernetes/pull/83442), [@serathius](https://github.com/serathius)) -- SourcesReady provides the readiness of kubelet configuration sources such as apiserver update readiness. ([#81344](https://github.com/kubernetes/kubernetes/pull/81344), [@zouyee](https://github.com/zouyee)) -- This PR sets the --cluster-dns flag value to kube-dns service IP whether or not NodeLocal DNSCache is enabled. NodeLocal DNSCache will listen on both the link-local as well as the service IP. ([#84383](https://github.com/kubernetes/kubernetes/pull/84383), [@prameshj](https://github.com/prameshj)) -- kube-dns add-on: - - All containers are now being executed under more restrictive privileges. - - Most of the containers now run as non-root user and has the root filesystem set as read-only. - - The remaining container running as root only has the minimum Linux capabilities it requires to run. - - Privilege escalation has been disabled for all containers. ([#82347](https://github.com/kubernetes/kubernetes/pull/82347), [@pjbgf](https://github.com/pjbgf)) -- Kubernetes no longer monitors firewalld. On systems using firewalld for firewall - maintenance, kube-proxy will take slightly longer to recover from disruptive - firewalld operations that delete kube-proxy's iptables rules. - - As a side effect of these changes, kube-proxy's - `sync_proxy_rules_last_timestamp_seconds` metric no longer behaves the - way it used to; now it will only change when services or endpoints actually - change, rather than reliably updating every 60 seconds (or whatever). If you - are trying to monitor for whether iptables updates are failing, the - `sync_proxy_rules_iptables_restore_failures_total` metric may be more useful. ([#81517](https://github.com/kubernetes/kubernetes/pull/81517), [@danwinship](https://github.com/danwinship)) - -### Instrumentation - -- Bump version of event-exporter to 0.3.1, to switch it to protobuf. ([#83396](https://github.com/kubernetes/kubernetes/pull/83396), [@loburm](https://github.com/loburm)) -- Bumps metrics-server version to v0.3.6 with following bugfix: - - Don't break metric storage when duplicate pod metrics encountered causing hpa to fail ([#83907](https://github.com/kubernetes/kubernetes/pull/83907), [@olagacek](https://github.com/olagacek)) -- addons: elasticsearch discovery supports IPv6 ([#85543](https://github.com/kubernetes/kubernetes/pull/85543), [@SataQiu](https://github.com/SataQiu)) -- Update Cluster Autoscaler to 1.17.0; changelog: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.17.0 ([#85610](https://github.com/kubernetes/kubernetes/pull/85610), [@losipiuk](https://github.com/losipiuk)) - -### Network - -- The official kube-proxy image (used by kubeadm, among other things) is now compatible with systems running iptables 1.8 in "nft" mode, and will autodetect which mode it should use. ([#82966](https://github.com/kubernetes/kubernetes/pull/82966), [@danwinship](https://github.com/danwinship)) -- Kubenet: added HostPort IPv6 support. 
HostPortManager: operates only with one IP family, failing if receives port mapping entries with different IP families. HostPortSyncer: operates only with one IP family, skipping portmap entries with different IP families ([#80854](https://github.com/kubernetes/kubernetes/pull/80854), [@aojea](https://github.com/aojea)) -- Kube-proxy now supports DualStack feature with EndpointSlices and IPVS. ([#85246](https://github.com/kubernetes/kubernetes/pull/85246), [@robscott](https://github.com/robscott)) -- Remove redundant API validation when using Service Topology with externalTrafficPolicy=Local ([#85346](https://github.com/kubernetes/kubernetes/pull/85346), [@andrewsykim](https://github.com/andrewsykim)) -- Update github.com/vishvananda/netlink to v1.0.0 ([#83576](https://github.com/kubernetes/kubernetes/pull/83576), [@andrewsykim](https://github.com/andrewsykim)) -- `-- kube-controller-manager` - `--node-cidr-mask-size-ipv4 int32` Default: 24. Mask size for IPv4 node-cidr in dual-stack cluster. - `--node-cidr-mask-size-ipv6 int32` Default: 64. Mask size for IPv6 node-cidr in dual-stack cluster. - - These 2 flags can be used only for dual-stack clusters. For non dual-stack clusters, continue to use `--node-cidr-mask-size` flag to configure the mask size. - - The default node cidr mask size for IPv6 was 24 which is now changed to 64. ([#79993](https://github.com/kubernetes/kubernetes/pull/79993), [@aramase](https://github.com/aramase)) - -- deprecate cleanup-ipvs flag ([#83832](https://github.com/kubernetes/kubernetes/pull/83832), [@gongguan](https://github.com/gongguan)) -- Kube-proxy: emits a warning when a malformed component config file is used with v1alpha1. ([#84143](https://github.com/kubernetes/kubernetes/pull/84143), [@phenixblue](https://github.com/phenixblue)) -- Set config.BindAddress to IPv4 address `127.0.0.1` if not specified ([#83822](https://github.com/kubernetes/kubernetes/pull/83822), [@zouyee](https://github.com/zouyee)) -- Updated kube-proxy ipvs README with correct grep argument to list loaded ipvs modules ([#83677](https://github.com/kubernetes/kubernetes/pull/83677), [@pete911](https://github.com/pete911)) -- The userspace mode of kube-proxy no longer confusingly logs messages about deleting endpoints that it is actually adding. ([#83644](https://github.com/kubernetes/kubernetes/pull/83644), [@danwinship](https://github.com/danwinship)) -- Kube-proxy iptables probabilities are now more granular and will result in better distribution beyond 319 endpoints. ([#83599](https://github.com/kubernetes/kubernetes/pull/83599), [@robscott](https://github.com/robscott)) -- Significant kube-proxy performance improvements for non UDP ports. ([#83208](https://github.com/kubernetes/kubernetes/pull/83208), [@robscott](https://github.com/robscott)) -- Improved performance of kube-proxy with EndpointSlice enabled with more efficient sorting. ([#83035](https://github.com/kubernetes/kubernetes/pull/83035), [@robscott](https://github.com/robscott)) -- EndpointSlices are now beta for better Network Endpoint performance at scale. ([#84390](https://github.com/kubernetes/kubernetes/pull/84390), [@robscott](https://github.com/robscott)) -- Updated EndpointSlices to use PublishNotReadyAddresses from Services. ([#84573](https://github.com/kubernetes/kubernetes/pull/84573), [@robscott](https://github.com/robscott)) -- When upgrading to 1.17 with a cluster with EndpointSlices enabled, the `endpointslice.kubernetes.io/managed-by` label needs to be set on each EndpointSlice. 
([#85359](https://github.com/kubernetes/kubernetes/pull/85359), [@robscott](https://github.com/robscott)) -- Adds FQDN addressType support for EndpointSlice. ([#84091](https://github.com/kubernetes/kubernetes/pull/84091), [@robscott](https://github.com/robscott)) -- Fix incorrect network policy description suggesting that pods are isolated when a network policy has no rules of a given type ([#84194](https://github.com/kubernetes/kubernetes/pull/84194), [@jackkleeman](https://github.com/jackkleeman)) -- Fix bug where EndpointSlice controller would attempt to modify shared objects. ([#85368](https://github.com/kubernetes/kubernetes/pull/85368), [@robscott](https://github.com/robscott)) -- Splitting IP address type into IPv4 and IPv6 for EndpointSlices ([#84971](https://github.com/kubernetes/kubernetes/pull/84971), [@robscott](https://github.com/robscott)) -- Added appProtocol field to EndpointSlice Port ([#83815](https://github.com/kubernetes/kubernetes/pull/83815), [@howardjohn](https://github.com/howardjohn)) -- The docker container runtime now enforces a 220 second timeout on container network operations. ([#71653](https://github.com/kubernetes/kubernetes/pull/71653), [@liucimin](https://github.com/liucimin)) -- Fix panic in kubelet when running IPv4/IPv6 dual-stack mode with a CNI plugin ([#82508](https://github.com/kubernetes/kubernetes/pull/82508), [@aanm](https://github.com/aanm)) -- EndpointSlice hostname is now set in the same conditions Endpoints hostname is. ([#84207](https://github.com/kubernetes/kubernetes/pull/84207), [@robscott](https://github.com/robscott)) -- Improving the performance of Endpoint and EndpointSlice controllers by caching Service Selectors ([#84280](https://github.com/kubernetes/kubernetes/pull/84280), [@gongguan](https://github.com/gongguan)) -- Significant kube-proxy performance improvements when using Endpoint Slices at scale. ([#83206](https://github.com/kubernetes/kubernetes/pull/83206), [@robscott](https://github.com/robscott)) - -### Node - -- Mirror pods now include an ownerReference for the node that created them. ([#84485](https://github.com/kubernetes/kubernetes/pull/84485), [@tallclair](https://github.com/tallclair)) -- Fixed a bug in the single-numa-policy of the TopologyManager. Previously, best-effort pods would result in a terminated state with a TopologyAffinity error. Now they will run as expected. ([#83777](https://github.com/kubernetes/kubernetes/pull/83777), [@lmdaly](https://github.com/lmdaly)) -- Fixed a bug in the single-numa-node policy of the TopologyManager. Previously, pods that only requested CPU resources and did not request any third-party devices would fail to launch with a TopologyAffinity error. Now they will launch successfully. ([#83697](https://github.com/kubernetes/kubernetes/pull/83697), [@klueska](https://github.com/klueska)) -- Fix error where metrics related to dynamic kubelet config isn't registered ([#83184](https://github.com/kubernetes/kubernetes/pull/83184), [@odinuge](https://github.com/odinuge)) -- If container fails because ContainerCannotRun, do not utilize the FallbackToLogsOnError TerminationMessagePolicy, as it masks more useful logs. ([#81280](https://github.com/kubernetes/kubernetes/pull/81280), [@yqwang-ms](https://github.com/yqwang-ms)) -- Use online nodes instead of possible nodes when discovering available NUMA nodes ([#83196](https://github.com/kubernetes/kubernetes/pull/83196), [@zouyee](https://github.com/zouyee)) -- Use IPv4 in wincat port forward. 
([#83036](https://github.com/kubernetes/kubernetes/pull/83036), [@liyanhui1228](https://github.com/liyanhui1228)) -- Single static pod files and pod files from http endpoints cannot be larger than 10 MB. HTTP probe payloads are now truncated to 10KB. ([#82669](https://github.com/kubernetes/kubernetes/pull/82669), [@rphillips](https://github.com/rphillips)) -- Limit the body length of exec readiness/liveness probes. remote CRIs and Docker shim read a max of 16MB output of which the exec probe itself inspects 10kb. ([#82514](https://github.com/kubernetes/kubernetes/pull/82514), [@dims](https://github.com/dims)) -- Kubelet: Added kubelet serving certificate metric `server_rotation_seconds` which is a histogram reporting the age of a just rotated serving certificate in seconds. ([#84534](https://github.com/kubernetes/kubernetes/pull/84534), [@sambdavidson](https://github.com/sambdavidson)) -- Reduce default NodeStatusReportFrequency to 5 minutes. With this change, periodic node status updates will be send every 5m if node status doesn't change (otherwise they are still send with 10s). - - Bump NodeProblemDetector version to v0.8.0 to reduce forced NodeStatus updates frequency to 5 minutes. ([#84007](https://github.com/kubernetes/kubernetes/pull/84007), [@wojtek-t](https://github.com/wojtek-t)) - -- The topology manager aligns resources for pods of all QoS classes with respect to NUMA locality, not just Guaranteed QoS pods. ([#83492](https://github.com/kubernetes/kubernetes/pull/83492), [@ConnorDoyle](https://github.com/ConnorDoyle)) -- Fix a bug that a node Lease object may have been created without OwnerReference. ([#84998](https://github.com/kubernetes/kubernetes/pull/84998), [@wojtek-t](https://github.com/wojtek-t)) -- External facing APIs in plugin registration and device plugin packages are now available under k8s.io/kubelet/pkg/apis/ ([#83551](https://github.com/kubernetes/kubernetes/pull/83551), [@dims](https://github.com/dims)) - -### Release - -- Added the `crictl` Windows binaries as well as the Linux 32bit binary to the release archives ([#83944](https://github.com/kubernetes/kubernetes/pull/83944), [@saschagrunert](https://github.com/saschagrunert)) -- Bumps the minimum version of Go required for building Kubernetes to 1.12.4. ([#83596](https://github.com/kubernetes/kubernetes/pull/83596), [@jktomer](https://github.com/jktomer)) -- The deprecated mondo `kubernetes-test` tarball is no longer built. Users running Kubernetes e2e tests should use the `kubernetes-test-portable` and `kubernetes-test-{OS}-{ARCH}` tarballs instead. 
([#83093](https://github.com/kubernetes/kubernetes/pull/83093), [@ixdy](https://github.com/ixdy)) - -### Scheduling - -- Only validate duplication of the RequestedToCapacityRatio custom priority and allow other custom predicates/priorities ([#84646](https://github.com/kubernetes/kubernetes/pull/84646), [@liu-cong](https://github.com/liu-cong)) -- Scheduler policy configs can no longer be declared multiple times ([#83963](https://github.com/kubernetes/kubernetes/pull/83963), [@damemi](https://github.com/damemi)) -- TaintNodesByCondition was graduated to GA, CheckNodeMemoryPressure, CheckNodePIDPressure, CheckNodeDiskPressure, CheckNodeCondition were accidentally removed since 1.12, the replacement is to use CheckNodeUnschedulablePred ([#84152](https://github.com/kubernetes/kubernetes/pull/84152), [@draveness](https://github.com/draveness)) -- [migration phase 1] PodFitsHostPorts as filter plugin ([#83659](https://github.com/kubernetes/kubernetes/pull/83659), [@wgliang](https://github.com/wgliang)) -- [migration phase 1] PodFitsResources as framework plugin ([#83650](https://github.com/kubernetes/kubernetes/pull/83650), [@wgliang](https://github.com/wgliang)) -- [migration phase 1] PodMatchNodeSelector/NodAffinity as filter plugin ([#83660](https://github.com/kubernetes/kubernetes/pull/83660), [@wgliang](https://github.com/wgliang)) -- Add more tracing steps in generic_scheduler ([#83539](https://github.com/kubernetes/kubernetes/pull/83539), [@wgliang](https://github.com/wgliang)) -- [migration phase 1] PodFitsHost as filter plugin ([#83662](https://github.com/kubernetes/kubernetes/pull/83662), [@wgliang](https://github.com/wgliang)) -- Fixed a scheduler panic when using PodAffinity. ([#82841](https://github.com/kubernetes/kubernetes/pull/82841), [@Huang-Wei](https://github.com/Huang-Wei)) -- Take the context as the first argument of Schedule. ([#82119](https://github.com/kubernetes/kubernetes/pull/82119), [@wgliang](https://github.com/wgliang)) -- Fixed an issue that the correct PluginConfig.Args is not passed to the corresponding PluginFactory in kube-scheduler when multiple PluginConfig items are defined. ([#82483](https://github.com/kubernetes/kubernetes/pull/82483), [@everpeace](https://github.com/everpeace)) -- Profiling is enabled by default in the scheduler ([#84835](https://github.com/kubernetes/kubernetes/pull/84835), [@denkensk](https://github.com/denkensk)) -- Scheduler now reports metrics on cache size including nodes, pods, and assumed pods ([#83508](https://github.com/kubernetes/kubernetes/pull/83508), [@damemi](https://github.com/damemi)) -- User can now use component config to configure NodeLabel plugin for the scheduler framework. ([#84297](https://github.com/kubernetes/kubernetes/pull/84297), [@liu-cong](https://github.com/liu-cong)) -- Optimize inter-pod affinity preferredDuringSchedulingIgnoredDuringExecution type, up to 4x in some cases. ([#84264](https://github.com/kubernetes/kubernetes/pull/84264), [@ahg-g](https://github.com/ahg-g)) -- Filter plugin for cloud provider storage predicate ([#84148](https://github.com/kubernetes/kubernetes/pull/84148), [@gongguan](https://github.com/gongguan)) -- Refactor scheduler's framework permit API. ([#83756](https://github.com/kubernetes/kubernetes/pull/83756), [@hex108](https://github.com/hex108)) -- Add incoming pods metrics to scheduler queue. 
([#83577](https://github.com/kubernetes/kubernetes/pull/83577), [@liu-cong](https://github.com/liu-cong)) -- Allow dynamically set glog logging level of kube-scheduler ([#83910](https://github.com/kubernetes/kubernetes/pull/83910), [@mrkm4ntr](https://github.com/mrkm4ntr)) -- Add latency and request count metrics for scheduler framework. ([#83569](https://github.com/kubernetes/kubernetes/pull/83569), [@liu-cong](https://github.com/liu-cong)) -- Expose SharedInformerFactory in the framework handle ([#83663](https://github.com/kubernetes/kubernetes/pull/83663), [@draveness](https://github.com/draveness)) -- Add per-pod scheduling metrics across 1 or more schedule attempts. ([#83674](https://github.com/kubernetes/kubernetes/pull/83674), [@liu-cong](https://github.com/liu-cong)) -- Add `podInitialBackoffDurationSeconds` and `podMaxBackoffDurationSeconds` to the scheduler config API ([#81263](https://github.com/kubernetes/kubernetes/pull/81263), [@draveness](https://github.com/draveness)) -- Expose kubernetes client in the scheduling framework handle. ([#82432](https://github.com/kubernetes/kubernetes/pull/82432), [@draveness](https://github.com/draveness)) -- Remove MaxPriority in the scheduler API, please use MaxNodeScore or MaxExtenderPriority instead. ([#83386](https://github.com/kubernetes/kubernetes/pull/83386), [@draveness](https://github.com/draveness)) -- Consolidate ScoreWithNormalizePlugin into the ScorePlugin interface ([#83042](https://github.com/kubernetes/kubernetes/pull/83042), [@draveness](https://github.com/draveness)) -- New APIs to allow adding/removing pods from pre-calculated prefilter state in the scheduling framework ([#82912](https://github.com/kubernetes/kubernetes/pull/82912), [@ahg-g](https://github.com/ahg-g)) -- Added Clone method to the scheduling framework's PluginContext and ContextData. ([#82951](https://github.com/kubernetes/kubernetes/pull/82951), [@ahg-g](https://github.com/ahg-g)) -- Modified the scheduling framework's Filter API. ([#82842](https://github.com/kubernetes/kubernetes/pull/82842), [@ahg-g](https://github.com/ahg-g)) -- Critical pods can now be created in namespaces other than kube-system. To limit critical pods to the kube-system namespace, cluster admins should create an admission configuration file limiting critical pods by default, and a matching quota object in the `kube-system` namespace permitting critical pods in that namespace. See https://kubernetes.io/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default for details. ([#76310](https://github.com/kubernetes/kubernetes/pull/76310), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) -- Scheduler ComponentConfig fields are now pointers ([#83619](https://github.com/kubernetes/kubernetes/pull/83619), [@damemi](https://github.com/damemi)) -- Scheduler Policy API has a new recommended apiVersion `apiVersion: kubescheduler.config.k8s.io/v1` which is consistent with the scheduler API group `kubescheduler.config.k8s.io`. It holds the same API as the old apiVersion `apiVersion: v1`. ([#83578](https://github.com/kubernetes/kubernetes/pull/83578), [@Huang-Wei](https://github.com/Huang-Wei)) -- Rename PluginContext to CycleState in the scheduling framework ([#83430](https://github.com/kubernetes/kubernetes/pull/83430), [@draveness](https://github.com/draveness)) -- Some scheduler extender API fields are moved from `pkg/scheduler/api` to `pkg/scheduler/apis/extender/v1`. 
([#83262](https://github.com/kubernetes/kubernetes/pull/83262), [@Huang-Wei](https://github.com/Huang-Wei)) -- Kube-scheduler: emits a warning when a malformed component config file is used with v1alpha1. ([#84129](https://github.com/kubernetes/kubernetes/pull/84129), [@obitech](https://github.com/obitech)) -- Kube-scheduler now falls back to emitting events using core/v1 Events when events.k8s.io/v1beta1 is disabled. ([#83692](https://github.com/kubernetes/kubernetes/pull/83692), [@yastij](https://github.com/yastij)) -- Expand scheduler priority functions and scheduling framework plugins' node score range to [0, 100]. Note: this change is internal and does not affect extender and RequestedToCapacityRatio custom priority, which are still expected to provide a [0, 10] range. ([#83522](https://github.com/kubernetes/kubernetes/pull/83522), [@draveness](https://github.com/draveness)) - -### Storage - -- Bump CSI version to 1.2.0 ([#84832](https://github.com/kubernetes/kubernetes/pull/84832), [@gnufied](https://github.com/gnufied)) -- CSI Migration: Fixes issue where all volumes with the same inline volume inner spec name were staged in the same path. Migrated inline volumes are now staged at a unique path per unique volume. ([#84754](https://github.com/kubernetes/kubernetes/pull/84754), [@davidz627](https://github.com/davidz627)) -- CSI Migration: GCE PD access mode now reflects read only status of inline volumes - this allows multi-attach for read only many PDs ([#84809](https://github.com/kubernetes/kubernetes/pull/84809), [@davidz627](https://github.com/davidz627)) -- CSI detach timeout increased from 10 seconds to 2 minutes ([#84321](https://github.com/kubernetes/kubernetes/pull/84321), [@cduchesne](https://github.com/cduchesne)) -- Ceph RBD volume plugin now does not use any keyring (`/etc/ceph/ceph.client.lvs01cinder.keyring`, `/etc/ceph/ceph.keyring`, `/etc/ceph/keyring`, `/etc/ceph/keyring.bin`) for authentication. Ceph user credentials must be provided in PersistentVolume objects and referred Secrets. ([#75588](https://github.com/kubernetes/kubernetes/pull/75588), [@smileusd](https://github.com/smileusd)) -- Validate Gluster IP ([#83104](https://github.com/kubernetes/kubernetes/pull/83104), [@zouyee](https://github.com/zouyee)) -- PersistentVolumeLabel admission plugin, responsible for labeling `PersistentVolumes` with topology labels, now does not overwrite existing labels on PVs that were dynamically provisioned. It trusts the dynamic provisioning that it provided the correct labels to the `PersistentVolume`, saving one potentially expensive cloud API call. `PersistentVolumes` created manually by users are labelled by the admission plugin in the same way as before. ([#82830](https://github.com/kubernetes/kubernetes/pull/82830), [@jsafrane](https://github.com/jsafrane)) - -- Existing PVs are converted to use volume topology if migration is enabled. ([#83394](https://github.com/kubernetes/kubernetes/pull/83394), [@bertinatto](https://github.com/bertinatto)) -- local: support local filesystem volume with block resource reconstruction ([#84218](https://github.com/kubernetes/kubernetes/pull/84218), [@cofyc](https://github.com/cofyc)) -- Fixed binding of block PersistentVolumes / PersistentVolumeClaims when BlockVolume feature is off. ([#84049](https://github.com/kubernetes/kubernetes/pull/84049), [@jsafrane](https://github.com/jsafrane)) -- Report non-confusing error for negative storage size in PVC spec. 
([#82759](https://github.com/kubernetes/kubernetes/pull/82759), [@sttts](https://github.com/sttts)) -- Fixed "requested device X but found Y" attach error on AWS. ([#85675](https://github.com/kubernetes/kubernetes/pull/85675), [@jsafrane](https://github.com/jsafrane)) -- Reduced frequency of DescribeVolumes calls of AWS API when attaching/detaching a volume. ([#84181](https://github.com/kubernetes/kubernetes/pull/84181), [@jsafrane](https://github.com/jsafrane)) -- Fixed attachment of AWS volumes that have just been detached. ([#83567](https://github.com/kubernetes/kubernetes/pull/83567), [@jsafrane](https://github.com/jsafrane)) -- Fix possible fd leak and closing of dirs when using openstack ([#82873](https://github.com/kubernetes/kubernetes/pull/82873), [@odinuge](https://github.com/odinuge)) -- local: support local volume block mode reconstruction ([#84173](https://github.com/kubernetes/kubernetes/pull/84173), [@cofyc](https://github.com/cofyc)) -- Fixed cleanup of raw block devices after kubelet restart. ([#83451](https://github.com/kubernetes/kubernetes/pull/83451), [@jsafrane](https://github.com/jsafrane)) -- Add data cache flushing during unmount device for GCE-PD driver in Windows Server. ([#83591](https://github.com/kubernetes/kubernetes/pull/83591), [@jingxu97](https://github.com/jingxu97)) - -### Windows - -- Adds Windows Server build information as a label on the node. ([#84472](https://github.com/kubernetes/kubernetes/pull/84472), [@gab-satchi](https://github.com/gab-satchi)) -- Fixes kube-proxy bug accessing self nodeip:port on windows ([#83027](https://github.com/kubernetes/kubernetes/pull/83027), [@liggitt](https://github.com/liggitt)) -- When using Containerd on Windows, the `TerminationMessagePath` file will now be mounted in the Windows Pod. 
([#83057](https://github.com/kubernetes/kubernetes/pull/83057), [@bclau](https://github.com/bclau)) -- Fix kubelet metrics gathering on non-English Windows hosts ([#84156](https://github.com/kubernetes/kubernetes/pull/84156), [@wawa0210](https://github.com/wawa0210)) - -### Dependencies - -- Update etcd client side to v3.4.3 ([#83987](https://github.com/kubernetes/kubernetes/pull/83987), [@wenjiaswe](https://github.com/wenjiaswe)) -- Kubernetes now requires go1.13.4+ to build ([#82809](https://github.com/kubernetes/kubernetes/pull/82809), [@liggitt](https://github.com/liggitt)) -- Update to use go1.12.12 ([#84064](https://github.com/kubernetes/kubernetes/pull/84064), [@cblecker](https://github.com/cblecker)) -- Update to go 1.12.10 ([#83139](https://github.com/kubernetes/kubernetes/pull/83139), [@cblecker](https://github.com/cblecker)) -- Update default etcd server version to 3.4.3 ([#84329](https://github.com/kubernetes/kubernetes/pull/84329), [@jingyih](https://github.com/jingyih)) -- Upgrade default etcd server version to 3.3.17 ([#83804](https://github.com/kubernetes/kubernetes/pull/83804), [@jpbetz](https://github.com/jpbetz)) -- Upgrade to etcd client 3.3.17 to fix bug where etcd client does not parse IPv6 addresses correctly when members are joining, and to fix bug where failover on multi-member etcd cluster fails certificate check on DNS mismatch ([#83801](https://github.com/kubernetes/kubernetes/pull/83801), [@jpbetz](https://github.com/jpbetz)) - -### Detailed go Dependency Changes - -#### Added - -- github.com/OpenPeeDeeP/depguard: v1.0.1 -- github.com/StackExchange/wmi: 5d04971 -- github.com/agnivade/levenshtein: v1.0.1 -- github.com/alecthomas/template: a0175ee -- github.com/alecthomas/units: 2efee85 -- github.com/andreyvit/diff: c7f18ee -- github.com/anmitsu/go-shlex: 648efa6 -- github.com/bazelbuild/rules_go: 6dae44d -- github.com/bgentry/speakeasy: v0.1.0 -- github.com/bradfitz/go-smtpd: deb6d62 -- github.com/cockroachdb/datadriven: 80d97fb -- github.com/creack/pty: v1.1.7 -- github.com/gliderlabs/ssh: v0.1.1 -- github.com/go-critic/go-critic: 1df3008 -- github.com/go-kit/kit: v0.8.0 -- github.com/go-lintpack/lintpack: v0.5.2 -- github.com/go-logfmt/logfmt: v0.3.0 -- github.com/go-ole/go-ole: v1.2.1 -- github.com/go-stack/stack: v1.8.0 -- github.com/go-toolsmith/astcast: v1.0.0 -- github.com/go-toolsmith/astcopy: v1.0.0 -- github.com/go-toolsmith/astequal: v1.0.0 -- github.com/go-toolsmith/astfmt: v1.0.0 -- github.com/go-toolsmith/astinfo: 9809ff7 -- github.com/go-toolsmith/astp: v1.0.0 -- github.com/go-toolsmith/pkgload: v1.0.0 -- github.com/go-toolsmith/strparse: v1.0.0 -- github.com/go-toolsmith/typep: v1.0.0 -- github.com/gobwas/glob: v0.2.3 -- github.com/golangci/check: cfe4005 -- github.com/golangci/dupl: 3e9179a -- github.com/golangci/errcheck: ef45e06 -- github.com/golangci/go-misc: 927a3d8 -- github.com/golangci/go-tools: e32c541 -- github.com/golangci/goconst: 041c5f2 -- github.com/golangci/gocyclo: 2becd97 -- github.com/golangci/gofmt: 0b8337e -- github.com/golangci/golangci-lint: v1.18.0 -- github.com/golangci/gosec: 66fb7fc -- github.com/golangci/ineffassign: 42439a7 -- github.com/golangci/lint-1: ee948d0 -- github.com/golangci/maligned: b1d8939 -- github.com/golangci/misspell: 950f5d1 -- github.com/golangci/prealloc: 215b22d -- github.com/golangci/revgrep: d9c87f5 -- github.com/golangci/unconvert: 28b1c44 -- github.com/google/go-github: v17.0.0+incompatible -- github.com/google/go-querystring: v1.0.0 -- github.com/gostaticanalysis/analysisutil: v0.0.3 -- 
github.com/jellevandenhooff/dkim: f50fe3d -- github.com/julienschmidt/httprouter: v1.2.0 -- github.com/klauspost/compress: v1.4.1 -- github.com/kr/logfmt: b84e30a -- github.com/logrusorgru/aurora: a7b3b31 -- github.com/mattn/go-runewidth: v0.0.2 -- github.com/mattn/goveralls: v0.0.2 -- github.com/mitchellh/go-ps: 4fdf99a -- github.com/mozilla/tls-observatory: 8791a20 -- github.com/mwitkow/go-conntrack: cc309e4 -- github.com/nbutton23/zxcvbn-go: eafdab6 -- github.com/olekukonko/tablewriter: a0225b3 -- github.com/quasilyte/go-consistent: c6f3937 -- github.com/rogpeppe/fastuuid: 6724a57 -- github.com/ryanuber/go-glob: 256dc44 -- github.com/sergi/go-diff: v1.0.0 -- github.com/shirou/gopsutil: c95755e -- github.com/shirou/w32: bb4de01 -- github.com/shurcooL/go-goon: 37c2f52 -- github.com/shurcooL/go: 9e1955d -- github.com/sourcegraph/go-diff: v0.5.1 -- github.com/tarm/serial: 98f6abe -- github.com/tidwall/pretty: v1.0.0 -- github.com/timakin/bodyclose: 87058b9 -- github.com/ultraware/funlen: v0.0.2 -- github.com/urfave/cli: v1.20.0 -- github.com/valyala/bytebufferpool: v1.0.0 -- github.com/valyala/fasthttp: v1.2.0 -- github.com/valyala/quicktemplate: v1.1.1 -- github.com/valyala/tcplisten: ceec8f9 -- github.com/vektah/gqlparser: v1.1.2 -- go.etcd.io/etcd: 3cf2f69 -- go.mongodb.org/mongo-driver: v1.1.2 -- go4.org: 417644f -- golang.org/x/build: 2835ba2 -- golang.org/x/perf: 6e6d33e -- golang.org/x/xerrors: a985d34 -- gopkg.in/alecthomas/kingpin.v2: v2.2.6 -- gopkg.in/cheggaaa/pb.v1: v1.0.25 -- gopkg.in/resty.v1: v1.12.0 -- grpc.go4.org: 11d0a25 -- k8s.io/system-validators: v1.0.4 -- mvdan.cc/interfacer: c200402 -- mvdan.cc/lint: adc824a -- mvdan.cc/unparam: fbb5962 -- sourcegraph.com/sqs/pbtypes: d3ebe8f - -#### Changed - -- github.com/Azure/azure-sdk-for-go: v32.5.0+incompatible → v35.0.0+incompatible -- github.com/Microsoft/go-winio: v0.4.11 → v0.4.14 -- github.com/bazelbuild/bazel-gazelle: c728ce9 → 70208cb -- github.com/bazelbuild/buildtools: 80c7f0d → 69366ca -- github.com/beorn7/perks: 3a771d9 → v1.0.0 -- github.com/container-storage-interface/spec: v1.1.0 → v1.2.0 -- github.com/coredns/corefile-migration: v1.0.2 → v1.0.4 -- github.com/coreos/etcd: v3.3.17+incompatible → v3.3.10+incompatible -- github.com/coreos/go-systemd: 39ca1b0 → 95778df -- github.com/docker/go-units: v0.3.3 → v0.4.0 -- github.com/docker/libnetwork: a9cd636 → f0e46a7 -- github.com/fatih/color: v1.6.0 → v1.7.0 -- github.com/ghodss/yaml: c7ce166 → v1.0.0 -- github.com/go-openapi/analysis: v0.19.2 → v0.19.5 -- github.com/go-openapi/jsonpointer: v0.19.2 → v0.19.3 -- github.com/go-openapi/jsonreference: v0.19.2 → v0.19.3 -- github.com/go-openapi/loads: v0.19.2 → v0.19.4 -- github.com/go-openapi/runtime: v0.19.0 → v0.19.4 -- github.com/go-openapi/spec: v0.19.2 → v0.19.3 -- github.com/go-openapi/strfmt: v0.19.0 → v0.19.3 -- github.com/go-openapi/swag: v0.19.2 → v0.19.5 -- github.com/go-openapi/validate: v0.19.2 → v0.19.5 -- github.com/godbus/dbus: v4.1.0+incompatible → 2ff6f7f -- github.com/golang/protobuf: v1.3.1 → v1.3.2 -- github.com/google/btree: 4030bb1 → v1.0.0 -- github.com/google/cadvisor: v0.34.0 → v0.35.0 -- github.com/gregjones/httpcache: 787624d → 9cad4c3 -- github.com/grpc-ecosystem/go-grpc-middleware: cfaf568 → f849b54 -- github.com/grpc-ecosystem/grpc-gateway: v1.3.0 → v1.9.5 -- github.com/heketi/heketi: v9.0.0+incompatible → c2e2a4a -- github.com/json-iterator/go: v1.1.7 → v1.1.8 -- github.com/mailru/easyjson: 94de47d → v0.7.0 -- github.com/mattn/go-isatty: v0.0.3 → v0.0.9 -- github.com/mindprince/gonvml: 
fee913c → 9ebdce4 -- github.com/mrunalp/fileutils: 4ee1cc9 → 7d4729f -- github.com/munnerz/goautoneg: a547fc6 → a7dc8b6 -- github.com/onsi/ginkgo: v1.8.0 → v1.10.1 -- github.com/onsi/gomega: v1.5.0 → v1.7.0 -- github.com/opencontainers/runc: 6cc5158 → v1.0.0-rc9 -- github.com/opencontainers/selinux: v1.2.2 → 5215b18 -- github.com/pkg/errors: v0.8.0 → v0.8.1 -- github.com/prometheus/client_golang: v0.9.2 → v1.0.0 -- github.com/prometheus/client_model: 5c3871d → fd36f42 -- github.com/prometheus/common: 4724e92 → v0.4.1 -- github.com/prometheus/procfs: 1dc9a6c → v0.0.2 -- github.com/soheilhy/cmux: v0.1.3 → v0.1.4 -- github.com/spf13/pflag: v1.0.3 → v1.0.5 -- github.com/stretchr/testify: v1.3.0 → v1.4.0 -- github.com/syndtr/gocapability: e7cb7fa → d983527 -- github.com/vishvananda/netlink: b2de5d1 → v1.0.0 -- github.com/vmware/govmomi: v0.20.1 → v0.20.3 -- github.com/xiang90/probing: 07dd2e8 → 43a291a -- go.uber.org/atomic: 8dc6146 → v1.3.2 -- go.uber.org/multierr: ddea229 → v1.1.0 -- go.uber.org/zap: 67bc79d → v1.10.0 -- golang.org/x/crypto: e84da03 → 60c769a -- golang.org/x/lint: 8f45f77 → 959b441 -- golang.org/x/net: cdfb69a → 13f9640 -- golang.org/x/oauth2: 9f33145 → 0f29369 -- golang.org/x/sync: 42b3178 → cd5d95a -- golang.org/x/sys: 3b52091 → fde4db3 -- golang.org/x/text: e6919f6 → v0.3.2 -- golang.org/x/time: f51c127 → 9d24e82 -- golang.org/x/tools: 6e04913 → 65e3620 -- google.golang.org/grpc: v1.23.0 → v1.23.1 -- gopkg.in/inf.v0: v0.9.0 → v0.9.1 -- k8s.io/klog: v0.4.0 → v1.0.0 -- k8s.io/kube-openapi: 743ec37 → 30be4d1 -- k8s.io/repo-infra: 00fe14e → v0.0.1-alpha.1 -- k8s.io/utils: 581e001 → e782cd3 -- sigs.k8s.io/structured-merge-diff: 6149e45 → b1b620d - -#### Removed - -- github.com/cloudflare/cfssl: 56268a6 -- github.com/coreos/bbolt: v1.3.3 -- github.com/coreos/rkt: v1.30.0 -- github.com/globalsign/mgo: eeefdec -- github.com/google/certificate-transparency-go: v1.0.21 -- github.com/heketi/rest: aa6a652 -- github.com/heketi/utils: 435bc5b -- github.com/pborman/uuid: v1.2.0 + - kubelet_pod_worker_latency_microseconds + - kubelet_pod_start_latency_microseconds + - kubelet_cgroup_manager_latency_microseconds + - kubelet_pod_worker_start_latency_microseconds + - kubelet_pleg_relist_latency_microseconds + - kubelet_pleg_relist_interval_microseconds + - kubelet_eviction_stats_age_microseconds + - kubelet_runtime_operations + - kubelet_runtime_operations_latency_microseconds + - kubelet_runtime_operations_errors + - kubelet_device_plugin_registration_count + - kubelet_device_plugin_alloc_latency_microseconds + - kubelet_docker_operations + - kubelet_docker_operations_latency_microseconds + - kubelet_docker_operations_errors + - kubelet_docker_operations_timeout + - network_plugin_operations_latency_microseconds ([#83841](https://github.com/kubernetes/kubernetes/pull/83841), [@RainbowMango](https://github.com/RainbowMango)) [SIG Network and Node] +- Kube-apiserver metrics will now include request counts, latencies, and response sizes for /healthz, /livez, and /readyz requests. ([#83598](https://github.com/kubernetes/kubernetes/pull/83598), [@jktomer](https://github.com/jktomer)) [SIG API Machinery] +- Kubelet now exports a `server_expiration_renew_failure` and `client_expiration_renew_failure` metric counter if the certificate rotations cannot be performed. 
([#84614](https://github.com/kubernetes/kubernetes/pull/84614), [@rphillips](https://github.com/rphillips)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node and Release]
+- Kubelet: the metric process_start_time_seconds is now marked with the ALPHA stability level. ([#85446](https://github.com/kubernetes/kubernetes/pull/85446), [@RainbowMango](https://github.com/RainbowMango)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Node]
+- New metric `kubelet_pleg_last_seen_seconds` to aid diagnosis of PLEG not healthy issues. ([#86251](https://github.com/kubernetes/kubernetes/pull/86251), [@bboreham](https://github.com/bboreham)) [SIG Node]
+
+### Other (Bug, Cleanup or Flake)
+
+- Fixed a regression with clients prior to 1.15 not being able to update podIP in pod status, or podCIDR in node spec, against >= 1.16 API servers ([#88505](https://github.com/kubernetes/kubernetes/pull/88505), [@liggitt](https://github.com/liggitt)) [SIG Apps and Network]
+- Fixed "kubectl describe statefulsets.apps" printing garbage for rolling update partition ([#85846](https://github.com/kubernetes/kubernetes/pull/85846), [@phil9909](https://github.com/phil9909)) [SIG CLI]
+- Add an event to the PV when the filesystem on the PV does not match the actual filesystem on disk ([#86982](https://github.com/kubernetes/kubernetes/pull/86982), [@gnufied](https://github.com/gnufied)) [SIG Storage]
+- Add azure disk WriteAccelerator support ([#87945](https://github.com/kubernetes/kubernetes/pull/87945), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Add delays between goroutines for vm instance update ([#88094](https://github.com/kubernetes/kubernetes/pull/88094), [@aramase](https://github.com/aramase)) [SIG Cloud Provider]
+- Add init containers log to cluster dump info. ([#88324](https://github.com/kubernetes/kubernetes/pull/88324), [@zhouya0](https://github.com/zhouya0)) [SIG CLI]
+- Addons: elasticsearch discovery supports IPv6 ([#85543](https://github.com/kubernetes/kubernetes/pull/85543), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle and Instrumentation]
+- Adds the "volume.beta.kubernetes.io/migrated-to" annotation to PVs and PVCs when they are migrated, to signal external provisioners to pick up those objects for Provisioning and Deleting. ([#87098](https://github.com/kubernetes/kubernetes/pull/87098), [@davidz627](https://github.com/davidz627)) [SIG Storage]
+- All api-servers now log request lines in a more greppable format. ([#87203](https://github.com/kubernetes/kubernetes/pull/87203), [@lavalamp](https://github.com/lavalamp)) [SIG API Machinery]
+- Azure VMSS LoadBalancerBackendAddressPools updating has been improved with sequential-sync + concurrent-async requests. ([#88699](https://github.com/kubernetes/kubernetes/pull/88699), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Azure cloud provider now obtains an AAD token whose audience claim will not have the spn: prefix ([#87590](https://github.com/kubernetes/kubernetes/pull/87590), [@weinong](https://github.com/weinong)) [SIG Cloud Provider]
+- AzureFile and CephFS use the new Mount library that prevents logging of sensitive mount options.
([#88684](https://github.com/kubernetes/kubernetes/pull/88684), [@saad-ali](https://github.com/saad-ali)) [SIG Storage]
+- Bind dns-horizontal containers to linux nodes to avoid Windows scheduling on kubernetes clusters that include linux nodes and windows nodes ([#83364](https://github.com/kubernetes/kubernetes/pull/83364), [@wawa0210](https://github.com/wawa0210)) [SIG Cluster Lifecycle and Windows]
+- Bind kube-dns containers to linux nodes to avoid Windows scheduling ([#83358](https://github.com/kubernetes/kubernetes/pull/83358), [@wawa0210](https://github.com/wawa0210)) [SIG Cluster Lifecycle and Windows]
+- Bind metadata-agent containers to linux nodes to avoid Windows scheduling on kubernetes clusters that include linux nodes and windows nodes ([#83363](https://github.com/kubernetes/kubernetes/pull/83363), [@wawa0210](https://github.com/wawa0210)) [SIG Cluster Lifecycle, Instrumentation and Windows]
+- Bind metrics-server containers to linux nodes to avoid Windows scheduling on kubernetes clusters that include linux nodes and windows nodes ([#83362](https://github.com/kubernetes/kubernetes/pull/83362), [@wawa0210](https://github.com/wawa0210)) [SIG Cluster Lifecycle, Instrumentation and Windows]
+- Bug fixes: Make sure we include latest packages node #351 (@caseydavenport) ([#84163](https://github.com/kubernetes/kubernetes/pull/84163), [@david-tigera](https://github.com/david-tigera)) [SIG Cluster Lifecycle]
+- CPU limits are now respected for Windows containers. If a node is over-provisioned, no weighting is used, only limits are respected. ([#86101](https://github.com/kubernetes/kubernetes/pull/86101), [@PatrickLang](https://github.com/PatrickLang)) [SIG Node, Testing and Windows]
+- Changed core_pattern on COS nodes to be an absolute path. ([#86329](https://github.com/kubernetes/kubernetes/pull/86329), [@mml](https://github.com/mml)) [SIG Cluster Lifecycle and Node]
+- Client-go certificate manager rotation gained the ability to preserve optional intermediate chains accompanying issued certificates ([#88744](https://github.com/kubernetes/kubernetes/pull/88744), [@jackkleeman](https://github.com/jackkleeman)) [SIG API Machinery and Auth]
+- Cloud provider config CloudProviderBackoffMode has been removed since it won't be used anymore. ([#88463](https://github.com/kubernetes/kubernetes/pull/88463), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Conformance image now depends on stretch-slim instead of debian-hyperkube-base as that image is being deprecated and removed. ([#88702](https://github.com/kubernetes/kubernetes/pull/88702), [@dims](https://github.com/dims)) [SIG Cluster Lifecycle, Release and Testing]
+- Deprecate the --generator flag from kubectl create commands ([#88655](https://github.com/kubernetes/kubernetes/pull/88655), [@soltysh](https://github.com/soltysh)) [SIG CLI]
+- During the initialization phase (preflight), kubeadm now verifies the presence of the conntrack executable ([#85857](https://github.com/kubernetes/kubernetes/pull/85857), [@hnanni](https://github.com/hnanni)) [SIG Cluster Lifecycle]
+- EndpointSlice should not contain endpoints for terminating pods ([#89056](https://github.com/kubernetes/kubernetes/pull/89056), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network]
+- Evictions due to pods breaching their ephemeral storage limits are now recorded by the `kubelet_evictions` metric and can be alerted on.
([#87906](https://github.com/kubernetes/kubernetes/pull/87906), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node]
+- Filter published OpenAPI schema by making nullable, required fields non-required in order to prevent kubectl from wrongly rejecting null values. ([#85722](https://github.com/kubernetes/kubernetes/pull/85722), [@sttts](https://github.com/sttts)) [SIG API Machinery]
+- Fix /readyz to return an error immediately after a shutdown is initiated, before the --shutdown-delay-duration has elapsed. ([#88911](https://github.com/kubernetes/kubernetes/pull/88911), [@tkashem](https://github.com/tkashem)) [SIG API Machinery]
+- Fix a potential API Server memory leak when processing watch requests. ([#85410](https://github.com/kubernetes/kubernetes/pull/85410), [@answer1991](https://github.com/answer1991)) [SIG API Machinery]
+- Fix EndpointSlice controller race condition and ensure that it handles external changes to EndpointSlices. ([#85703](https://github.com/kubernetes/kubernetes/pull/85703), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- Fix an issue where IPv6 addresses were lost in a pure IPv6 vSphere environment ([#86001](https://github.com/kubernetes/kubernetes/pull/86001), [@hubv](https://github.com/hubv)) [SIG Cloud Provider]
+- Fix LoadBalancer rule checking so that no unexpected LoadBalancer updates are made ([#85990](https://github.com/kubernetes/kubernetes/pull/85990), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Fix a bug in kube-proxy that caused it to crash when using load balancers with a different IP family ([#87117](https://github.com/kubernetes/kubernetes/pull/87117), [@aojea](https://github.com/aojea)) [SIG Network]
+- Fix a bug in port-forward: named port not working with service ([#85511](https://github.com/kubernetes/kubernetes/pull/85511), [@oke-py](https://github.com/oke-py)) [SIG CLI]
+- Fix a bug in the dual-stack IPVS proxier where stale IPv6 endpoints were not being cleaned up ([#87695](https://github.com/kubernetes/kubernetes/pull/87695), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network]
+- Fix a bug where an orphan revision could not be adopted and the statefulset could not be synced ([#86801](https://github.com/kubernetes/kubernetes/pull/86801), [@likakuli](https://github.com/likakuli)) [SIG Apps]
+- Fix a bug where ExternalTrafficPolicy is not applied to service ExternalIPs. ([#88786](https://github.com/kubernetes/kubernetes/pull/88786), [@freehan](https://github.com/freehan)) [SIG Network]
+- Fix a bug where kubenet fails to parse the tc output. ([#83572](https://github.com/kubernetes/kubernetes/pull/83572), [@chendotjs](https://github.com/chendotjs)) [SIG Network]
+- Fix a regression in kubenet that prevented pods from obtaining IP addresses ([#85993](https://github.com/kubernetes/kubernetes/pull/85993), [@chendotjs](https://github.com/chendotjs)) [SIG Network and Node]
+- Fix azure file AuthorizationFailure ([#85475](https://github.com/kubernetes/kubernetes/pull/85475), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fix a bug where the EndpointSlice controller would attempt to modify shared objects. ([#85368](https://github.com/kubernetes/kubernetes/pull/85368), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network]
+- Fix handling of the aws-load-balancer-security-groups annotation. Security-Groups assigned with this annotation are no longer modified by kubernetes, which is the behaviour expected by most users.
Also, no unnecessary Security-Groups are created anymore if this annotation is used. ([#83446](https://github.com/kubernetes/kubernetes/pull/83446), [@Elias481](https://github.com/Elias481)) [SIG Cloud Provider]
+- Fix invalid VMSS updates due to an incorrect cache ([#89002](https://github.com/kubernetes/kubernetes/pull/89002), [@ArchangelSDY](https://github.com/ArchangelSDY)) [SIG Cloud Provider]
+- Fix isCurrentInstance for Windows by removing the dependency on the hostname. ([#89138](https://github.com/kubernetes/kubernetes/pull/89138), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Fix issue #85805: a resource was not found in the azure cloud provider when the LoadBalancer was specified in another resource group. ([#86502](https://github.com/kubernetes/kubernetes/pull/86502), [@levimm](https://github.com/levimm)) [SIG Cloud Provider]
+- Fix kubectl annotate error when local=true is set ([#86952](https://github.com/kubernetes/kubernetes/pull/86952), [@zhouya0](https://github.com/zhouya0)) [SIG CLI]
+- Fix kubectl create deployment image name ([#86636](https://github.com/kubernetes/kubernetes/pull/86636), [@zhouya0](https://github.com/zhouya0)) [SIG CLI]
+- Fix `kubectl drain ignore` daemonsets and others. ([#87361](https://github.com/kubernetes/kubernetes/pull/87361), [@zhouya0](https://github.com/zhouya0)) [SIG CLI]
+- Fix missing "apiVersion" for "involvedObject" in Events for Nodes. ([#87537](https://github.com/kubernetes/kubernetes/pull/87537), [@uthark](https://github.com/uthark)) [SIG Apps and Node]
+- Fix a nil pointer dereference in the azure cloud provider ([#85975](https://github.com/kubernetes/kubernetes/pull/85975), [@ldx](https://github.com/ldx)) [SIG Cloud Provider]
+- Fix a regression in statefulset conversion which prevented applying a statefulset multiple times. ([#87706](https://github.com/kubernetes/kubernetes/pull/87706), [@liggitt](https://github.com/liggitt)) [SIG Apps and Testing]
+- Fix conflicting route operations when updating multiple routes together ([#88209](https://github.com/kubernetes/kubernetes/pull/88209), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Fix to prevent repeated fetching of PVC/PV objects by the kubelet when processing of pod volumes fails. While this prevents hammering the API server in these error scenarios, it means that some errors in processing volume(s) for a pod could now take up to 2-3 minutes before a retry. ([#88141](https://github.com/kubernetes/kubernetes/pull/88141), [@tedyu](https://github.com/tedyu)) [SIG Node and Storage]
+- Fix a bug where the PIP's DNS was deleted if the DNS label service annotation was not set. ([#87246](https://github.com/kubernetes/kubernetes/pull/87246), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Fix control plane hosts rolling upgrade causing a thundering herd of LISTs on etcd, leading to control plane unavailability.
([#86430](https://github.com/kubernetes/kubernetes/pull/86430), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Node and Testing]
+- Fix: add azure disk migration support for CSINode ([#88014](https://github.com/kubernetes/kubernetes/pull/88014), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fix: add non-retriable errors in azure clients ([#87941](https://github.com/kubernetes/kubernetes/pull/87941), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
+- Fix: add remediation in azure disk attach/detach ([#88444](https://github.com/kubernetes/kubernetes/pull/88444), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
+- Fix: azure data disk should use same key as os disk by default ([#86351](https://github.com/kubernetes/kubernetes/pull/86351), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
+- Fix: azure disk could not be mounted on Standard_DC4s/DC2s instances ([#86612](https://github.com/kubernetes/kubernetes/pull/86612), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fix: azure file mount timeout issue ([#88610](https://github.com/kubernetes/kubernetes/pull/88610), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fix: check disk status before deleting azure disk ([#88360](https://github.com/kubernetes/kubernetes/pull/88360), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
+- Fix: corrupted mount point in csi driver ([#88569](https://github.com/kubernetes/kubernetes/pull/88569), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage]
+- Fix: get azure disk lun timeout issue ([#88158](https://github.com/kubernetes/kubernetes/pull/88158), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fix: update azure disk max count ([#88201](https://github.com/kubernetes/kubernetes/pull/88201), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fixed "requested device X but found Y" attach error on AWS. ([#85675](https://github.com/kubernetes/kubernetes/pull/85675), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage]
+- Fixed NetworkPolicy validation, which accepted `Except` values even when they were outside the CIDR range. ([#86578](https://github.com/kubernetes/kubernetes/pull/86578), [@tnqn](https://github.com/tnqn)) [SIG Network]
+- Fixed a bug in the TopologyManager. Previously, the TopologyManager would only guarantee alignment if container creation was serialized in some way. Alignment is now guaranteed under all scenarios of container creation. ([#87759](https://github.com/kubernetes/kubernetes/pull/87759), [@klueska](https://github.com/klueska)) [SIG Node]
+- Fixed a bug which could prevent a provider ID from ever being set for a node if an error occurred determining the provider ID when the node was added. ([#87043](https://github.com/kubernetes/kubernetes/pull/87043), [@zjs](https://github.com/zjs)) [SIG Apps and Cloud Provider]
+- Fixed a data race in the kubelet image manager that can cause static pod workers to silently stop working. ([#88915](https://github.com/kubernetes/kubernetes/pull/88915), [@roycaihw](https://github.com/roycaihw)) [SIG Node]
+- Fixed a panic in the kubelet when cleaning up pod volumes ([#86277](https://github.com/kubernetes/kubernetes/pull/86277), [@tedyu](https://github.com/tedyu)) [SIG Storage]
+- Fixed a regression where the kubelet would fail to update the ready status of pods.
([#84951](https://github.com/kubernetes/kubernetes/pull/84951), [@tedyu](https://github.com/tedyu)) [SIG Node] +- Fixed an issue that could cause the kubelet to incorrectly run concurrent pod reconciliation loops and crash. ([#89055](https://github.com/kubernetes/kubernetes/pull/89055), [@tedyu](https://github.com/tedyu)) [SIG Node] +- Fixed block CSI volume cleanup after timeouts. ([#88660](https://github.com/kubernetes/kubernetes/pull/88660), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixed cleaning of CSI raw block volumes. ([#87978](https://github.com/kubernetes/kubernetes/pull/87978), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixed AWS Cloud Provider attempting to delete LoadBalancer security group it didn’t provision, and fixed AWS Cloud Provider creating a default LoadBalancer security group even if annotation `service.beta.kubernetes.io/aws-load-balancer-security-groups` is present because the intended behavior of aws-load-balancer-security-groups is to replace all security groups assigned to the load balancer. ([#84265](https://github.com/kubernetes/kubernetes/pull/84265), [@bhagwat070919](https://github.com/bhagwat070919)) [SIG Cloud Provider] +- Fixed two scheduler metrics (pending_pods and schedule_attempts_total) not being recorded ([#87692](https://github.com/kubernetes/kubernetes/pull/87692), [@everpeace](https://github.com/everpeace)) [SIG Scheduling] +- Fixes an issue with kubelet-reported pod status on deleted/recreated pods. ([#86320](https://github.com/kubernetes/kubernetes/pull/86320), [@liggitt](https://github.com/liggitt)) [SIG Node] +- Fixes conversion error in multi-version custom resources that could cause metadata.generation to increment on no-op patches or updates of a custom resource. ([#88995](https://github.com/kubernetes/kubernetes/pull/88995), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] +- Fixes issue where AAD token obtained by kubectl is incompatible with on-behalf-of flow and oidc. The audience claim before this fix has "spn:" prefix. After this fix, "spn:" prefix is omitted. ([#86412](https://github.com/kubernetes/kubernetes/pull/86412), [@weinong](https://github.com/weinong)) [SIG API Machinery, Auth and Cloud Provider] +- Fixes an issue where you can't attach more than 15 GCE Persistent Disks to c2, n2, m1, m2 machine types. ([#88602](https://github.com/kubernetes/kubernetes/pull/88602), [@yuga711](https://github.com/yuga711)) [SIG Storage] +- Fixes kube-proxy when EndpointSlice feature gate is enabled on Windows. 
([#86016](https://github.com/kubernetes/kubernetes/pull/86016), [@robscott](https://github.com/robscott)) [SIG Auth and Network] +- Fixes kubelet crash in client certificate rotation cases ([#88079](https://github.com/kubernetes/kubernetes/pull/88079), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Auth and Node] +- Fixes service account token admission error in clusters that do not run the service account token controller ([#87029](https://github.com/kubernetes/kubernetes/pull/87029), [@liggitt](https://github.com/liggitt)) [SIG Auth] +- Fixes v1.17.0 regression in --service-cluster-ip-range handling with IPv4 ranges larger than 65536 IP addresses ([#86534](https://github.com/kubernetes/kubernetes/pull/86534), [@liggitt](https://github.com/liggitt)) [SIG Network] +- Fixes wrong validation result of NetworkPolicy PolicyTypes ([#85747](https://github.com/kubernetes/kubernetes/pull/85747), [@tnqn](https://github.com/tnqn)) [SIG Network] +- For subprotocol negotiation, both client and server protocol is required now. ([#86646](https://github.com/kubernetes/kubernetes/pull/86646), [@tedyu](https://github.com/tedyu)) [SIG API Machinery and Node] +- For volumes that allow attaches across multiple nodes, attach and detach operations across different nodes are now executed in parallel. ([#88678](https://github.com/kubernetes/kubernetes/pull/88678), [@verult](https://github.com/verult)) [SIG Storage] +- Garbage collector now can correctly orphan ControllerRevisions when StatefulSets are deleted with orphan propagation policy. ([#84984](https://github.com/kubernetes/kubernetes/pull/84984), [@cofyc](https://github.com/cofyc)) [SIG Apps] +- `Get-kube.sh` uses the gcloud's current local GCP service account for auth when the provider is GCE or GKE instead of the metadata server default ([#88383](https://github.com/kubernetes/kubernetes/pull/88383), [@BenTheElder](https://github.com/BenTheElder)) [SIG Cluster Lifecycle] +- Golang/x/net has been updated to bring in fixes for CVE-2020-9283 ([#88381](https://github.com/kubernetes/kubernetes/pull/88381), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] +- If a serving certificate’s param specifies a name that is an IP for an SNI certificate, it will have priority for replying to server connections. ([#85308](https://github.com/kubernetes/kubernetes/pull/85308), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] +- Improved yaml parsing performance ([#85458](https://github.com/kubernetes/kubernetes/pull/85458), [@cjcullen](https://github.com/cjcullen)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] +- Improves performance of the node authorizer ([#87696](https://github.com/kubernetes/kubernetes/pull/87696), [@liggitt](https://github.com/liggitt)) [SIG Auth] +- In GKE alpha clusters it will be possible to use the service annotation `cloud.google.com/network-tier: Standard` ([#88487](https://github.com/kubernetes/kubernetes/pull/88487), [@zioproto](https://github.com/zioproto)) [SIG Cloud Provider] +- Includes FSType when describing CSI persistent volumes. 
([#85293](https://github.com/kubernetes/kubernetes/pull/85293), [@huffmanca](https://github.com/huffmanca)) [SIG CLI and Storage] +- Iptables/userspace proxy: improve performance by getting local addresses only once per sync loop, instead of for every external IP ([#85617](https://github.com/kubernetes/kubernetes/pull/85617), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Network] +- Kube-aggregator: always sets unavailableGauge metric to reflect the current state of a service. ([#87778](https://github.com/kubernetes/kubernetes/pull/87778), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] +- Kube-apiserver: fixed a conflict error encountered attempting to delete a pod with gracePeriodSeconds=0 and a resourceVersion precondition ([#85516](https://github.com/kubernetes/kubernetes/pull/85516), [@michaelgugino](https://github.com/michaelgugino)) [SIG API Machinery] +- Kube-proxy no longer modifies shared EndpointSlices. ([#86092](https://github.com/kubernetes/kubernetes/pull/86092), [@robscott](https://github.com/robscott)) [SIG Network] +- Kube-proxy: on dual-stack mode, if it is not able to get the IP Family of an endpoint, logs it with level InfoV(4) instead of Warning, avoiding flooding the logs for endpoints without addresses ([#88934](https://github.com/kubernetes/kubernetes/pull/88934), [@aojea](https://github.com/aojea)) [SIG Network] +- Kubeadm allows to configure single-stack clusters if dual-stack is enabled ([#87453](https://github.com/kubernetes/kubernetes/pull/87453), [@aojea](https://github.com/aojea)) [SIG API Machinery, Cluster Lifecycle and Network] +- Kubeadm now includes CoreDNS version 1.6.7 ([#86260](https://github.com/kubernetes/kubernetes/pull/86260), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm upgrades always persist the etcd backup for stacked ([#86861](https://github.com/kubernetes/kubernetes/pull/86861), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: 'kubeadm alpha kubelet config download' has been removed, please use 'kubeadm upgrade node phase kubelet-config' instead ([#87944](https://github.com/kubernetes/kubernetes/pull/87944), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: Forward cluster name to the controller-manager arguments ([#85817](https://github.com/kubernetes/kubernetes/pull/85817), [@ereslibre](https://github.com/ereslibre)) [SIG Cluster Lifecycle] +- Kubeadm: add support for the "ci/k8s-master" version label as a replacement for "ci-cross/*", which no longer exists. ([#86609](https://github.com/kubernetes/kubernetes/pull/86609), [@Pensu](https://github.com/Pensu)) [SIG Cluster Lifecycle] +- Kubeadm: apply further improvements to the tentative support for concurrent etcd member join. Fixes a bug where multiple members can receive the same hostname. Increase the etcd client dial timeout and retry timeout for add/remove/... operations. 
([#87505](https://github.com/kubernetes/kubernetes/pull/87505), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: don't write the kubelet environment file on "upgrade apply" ([#85412](https://github.com/kubernetes/kubernetes/pull/85412), [@boluisa](https://github.com/boluisa)) [SIG Cluster Lifecycle] +- Kubeadm: fix potential panic when executing "kubeadm reset" with a corrupted kubelet.conf file ([#86216](https://github.com/kubernetes/kubernetes/pull/86216), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: fix the bug that 'kubeadm upgrade' hangs in single node cluster ([#88434](https://github.com/kubernetes/kubernetes/pull/88434), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: make sure images are pre-pulled even if a tag did not change but their contents changed ([#85603](https://github.com/kubernetes/kubernetes/pull/85603), [@bart0sh](https://github.com/bart0sh)) [SIG Cluster Lifecycle] +- Kubeadm: remove 'kubeadm upgrade node config' command since it was deprecated in v1.15, please use 'kubeadm upgrade node phase kubelet-config' instead ([#87975](https://github.com/kubernetes/kubernetes/pull/87975), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: remove the deprecated CoreDNS feature-gate. It was set to "true" since v1.11 when the feature went GA. In v1.13 it was marked as deprecated and hidden from the CLI. ([#87400](https://github.com/kubernetes/kubernetes/pull/87400), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: retry `kubeadm-config` ConfigMap creation or mutation if the apiserver is not responding. This will improve resiliency when joining new control plane nodes. ([#85763](https://github.com/kubernetes/kubernetes/pull/85763), [@ereslibre](https://github.com/ereslibre)) [SIG Cluster Lifecycle] +- Kubeadm: tolerate whitespace when validating certificate authority PEM data in kubeconfig files ([#86705](https://github.com/kubernetes/kubernetes/pull/86705), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: use bind-address option to configure the kube-controller-manager and kube-scheduler http probes ([#86493](https://github.com/kubernetes/kubernetes/pull/86493), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] +- Kubeadm: uses the api-server AdvertiseAddress IP family to choose the etcd endpoint IP family for non external etcd clusters ([#85745](https://github.com/kubernetes/kubernetes/pull/85745), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] +- Kubectl cluster-info dump --output-directory=xxx now generates files with an extension depending on the output format. ([#82070](https://github.com/kubernetes/kubernetes/pull/82070), [@olivierlemasle](https://github.com/olivierlemasle)) [SIG CLI] +- `Kubectl describe ` and `kubectl top pod` will return a message saying `"No resources found"` or `"No resources found in namespace"` if there are no results to display. ([#87527](https://github.com/kubernetes/kubernetes/pull/87527), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- `Kubectl drain node --dry-run` will list pods that would be evicted or deleted ([#82660](https://github.com/kubernetes/kubernetes/pull/82660), [@sallyom](https://github.com/sallyom)) [SIG CLI] +- `Kubectl set resources` will no longer return an error if passed an empty change for a resource. `kubectl set subject` will no longer return an error if passed an empty change for a resource. 
([#85490](https://github.com/kubernetes/kubernetes/pull/85490), [@sallyom](https://github.com/sallyom)) [SIG CLI] +- Kubelet metrics gathered through metrics-server or prometheus should no longer timeout for Windows nodes running more than 3 pods. ([#87730](https://github.com/kubernetes/kubernetes/pull/87730), [@marosset](https://github.com/marosset)) [SIG Node, Testing and Windows] +- Kubelet metrics have been changed to buckets. For example the `exec/{podNamespace}/{podID}/{containerName}` is now just exec. ([#87913](https://github.com/kubernetes/kubernetes/pull/87913), [@cheftako](https://github.com/cheftako)) [SIG Node] +- Kubelets perform fewer unnecessary pod status update operations on the API server. ([#88591](https://github.com/kubernetes/kubernetes/pull/88591), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Scalability] +- Kubernetes will try to acquire the iptables lock every 100 msec during 5 seconds instead of every second. This is especially useful for environments using kube-proxy in iptables mode with a high churn rate of services. ([#85771](https://github.com/kubernetes/kubernetes/pull/85771), [@aojea](https://github.com/aojea)) [SIG Network] +- Limit number of instances in a single update to GCE target pool to 1000. ([#87881](https://github.com/kubernetes/kubernetes/pull/87881), [@wojtek-t](https://github.com/wojtek-t)) [SIG Cloud Provider, Network and Scalability] +- Make Azure clients only retry on specified HTTP status codes ([#88017](https://github.com/kubernetes/kubernetes/pull/88017), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Make error message and service event message more clear ([#86078](https://github.com/kubernetes/kubernetes/pull/86078), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Minimize AWS NLB health check timeout when externalTrafficPolicy set to Local ([#73363](https://github.com/kubernetes/kubernetes/pull/73363), [@kellycampbell](https://github.com/kellycampbell)) [SIG Cloud Provider] +- Pause image contains "Architecture" in non-amd64 images ([#87954](https://github.com/kubernetes/kubernetes/pull/87954), [@BenTheElder](https://github.com/BenTheElder)) [SIG Release] +- Pause image upgraded to 3.2 in kubelet and kubeadm. ([#88173](https://github.com/kubernetes/kubernetes/pull/88173), [@BenTheElder](https://github.com/BenTheElder)) [SIG CLI, Cluster Lifecycle, Node and Testing] +- Plugin/PluginConfig and Policy APIs are mutually exclusive when running the scheduler ([#88864](https://github.com/kubernetes/kubernetes/pull/88864), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- Remove `FilteredNodesStatuses` argument from `PreScore`'s interface. ([#88189](https://github.com/kubernetes/kubernetes/pull/88189), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling and Testing] +- Resolved a performance issue in the node authorizer index maintenance. ([#87693](https://github.com/kubernetes/kubernetes/pull/87693), [@liggitt](https://github.com/liggitt)) [SIG Auth] +- Resolved regression in admission, authentication, and authorization webhook performance in v1.17.0-rc.1 ([#85810](https://github.com/kubernetes/kubernetes/pull/85810), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] +- Resolves performance regression in `kubectl get all` and in client-go discovery clients constructed using `NewDiscoveryClientForConfig` or `NewDiscoveryClientForConfigOrDie`. 
([#86168](https://github.com/kubernetes/kubernetes/pull/86168), [@liggitt](https://github.com/liggitt)) [SIG API Machinery]
+- Reverted a kubectl azure auth module change where the oidc claim spn: prefix was omitted, resulting in a breaking behavior with an existing Azure AD OIDC-enabled api-server ([#87507](https://github.com/kubernetes/kubernetes/pull/87507), [@weinong](https://github.com/weinong)) [SIG API Machinery, Auth and Cloud Provider]
+- Shared informers are now more reliable in the face of network disruption. ([#86015](https://github.com/kubernetes/kubernetes/pull/86015), [@squeed](https://github.com/squeed)) [SIG API Machinery]
+- Specifying PluginConfig for the same plugin more than once fails scheduler startup.
+  Specifying extenders and configuring .ignoredResources for the NodeResourcesFit plugin fails ([#88870](https://github.com/kubernetes/kubernetes/pull/88870), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling]
+- Terminating a restartPolicy=Never pod no longer has a chance to report the pod succeeded when it actually failed. ([#88440](https://github.com/kubernetes/kubernetes/pull/88440), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Testing]
+- The CSR signing cert/key pairs will be reloaded from disk like the kube-apiserver cert/key pairs ([#86816](https://github.com/kubernetes/kubernetes/pull/86816), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps and Auth]
+- The EventRecorder from k8s.io/client-go/tools/events will now create events in the default namespace (instead of kube-system) when the related object does not have one set. ([#88815](https://github.com/kubernetes/kubernetes/pull/88815), [@enj](https://github.com/enj)) [SIG API Machinery]
+- The audit event sourceIPs list will now always end with the IP that sent the request directly to the API server. ([#87167](https://github.com/kubernetes/kubernetes/pull/87167), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Auth]
+- The sample-apiserver aggregated conformance test has been updated to use the Kubernetes v1.17.0 sample apiserver ([#84735](https://github.com/kubernetes/kubernetes/pull/84735), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Architecture, CLI and Testing]
+- To reduce the chance of throttling, the VM cache is set to nil when the Azure node provisioning state is deleting ([#87635](https://github.com/kubernetes/kubernetes/pull/87635), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- A VMSS cache is added to reduce the chance of VMSS GET throttling ([#85885](https://github.com/kubernetes/kubernetes/pull/85885), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Wait for kubelet & kube-proxy to be ready on a Windows node within 10s ([#85228](https://github.com/kubernetes/kubernetes/pull/85228), [@YangLu1031](https://github.com/YangLu1031)) [SIG Cluster Lifecycle]
+- `kubectl apply -f --prune -n ` should prune all resources not defined in the file in the cli specified namespace. ([#85613](https://github.com/kubernetes/kubernetes/pull/85613), [@MartinKaburu](https://github.com/MartinKaburu)) [SIG CLI]
+- `kubectl create clusterrolebinding` creates an rbac.authorization.k8s.io/v1 object ([#85889](https://github.com/kubernetes/kubernetes/pull/85889), [@oke-py](https://github.com/oke-py)) [SIG CLI]
+- `kubectl diff` now returns 1 only on diff finding changes, and >1 on kubectl errors. The "exit status code 1" message has also been muted.
([#87437](https://github.com/kubernetes/kubernetes/pull/87437), [@apelisse](https://github.com/apelisse)) [SIG CLI and Testing] + +## Dependencies + +- Update Calico to v3.8.4 ([#84163](https://github.com/kubernetes/kubernetes/pull/84163), [@david-tigera](https://github.com/david-tigera))[SIG Cluster Lifecycle] +- Update aws-sdk-go dependency to v1.28.2 ([#87253](https://github.com/kubernetes/kubernetes/pull/87253), [@SaranBalaji90](https://github.com/SaranBalaji90))[SIG API Machinery and Cloud Provider] +- Update CNI version to v0.8.5 ([#78819](https://github.com/kubernetes/kubernetes/pull/78819), [@justaugustus](https://github.com/justaugustus))[SIG Release, Testing, Network, Cluster Lifecycle and API Machinery] +- Update cri-tools to v1.17.0 ([#86305](https://github.com/kubernetes/kubernetes/pull/86305), [@saschagrunert](https://github.com/saschagrunert))[SIG Release and Cluster Lifecycle] +- Pause image upgraded to 3.2 in kubelet and kubeadm ([#88173](https://github.com/kubernetes/kubernetes/pull/88173), [@BenTheElder](https://github.com/BenTheElder))[SIG CLI, Node, Testing and Cluster Lifecycle] +- Update CoreDNS version to 1.6.7 in kubeadm ([#86260](https://github.com/kubernetes/kubernetes/pull/86260), [@rajansandeep](https://github.com/rajansandeep))[SIG Cluster Lifecycle] +- Update golang.org/x/crypto to fix CVE-2020-9283 ([#8838](https://github.com/kubernetes/kubernetes/pull/88381), [@BenTheElder](https://github.com/BenTheElder))[SIG CLI, Instrumentation, API Machinery, CLuster Lifecycle and Cloud Provider] +- Update Go to 1.13.8 ([#87648](https://github.com/kubernetes/kubernetes/pull/87648), [@ialidzhikov](https://github.com/ialidzhikov))[SIG Release and Testing] +- Update Cluster-Autoscaler to 1.18.0 ([#89095](https://github.com/kubernetes/kubernetes/pull/89095), [@losipiuk](https://github.com/losipiuk))[SIG Autoscaling and Cluster Lifecycle] + + + +# v1.18.0-rc.1 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.18.0-rc.1 + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes.tar.gz) | `c17231d5de2e0677e8af8259baa11a388625821c79b86362049f2edb366404d6f4b4587b8f13ccbceeb2f32c6a9fe98607f779c0f3e1caec438f002e3a2c8c21` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-src.tar.gz) | `e84ffad57c301f5d6e90f916b996d5abb0c987928c3ca6b1565f7b042588f839b994ca12c43fc36f0ffb63f9fabc15110eb08be253b8939f49cd951e956da618` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-darwin-386.tar.gz) | `1aea99923d492436b3eb91aaecffac94e5d0aa2b38a0930d266fda85c665bbc4569745c409aa302247df3b578ce60324e7a489eb26240e97d4e65a67428ea3d1` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-darwin-amd64.tar.gz) | `07fa7340a959740bd52b83ff44438bbd988e235277dad1e43f125f08ac85230a24a3b755f4e4c8645743444fa2b66a3602fc445d7da6d2fc3770e8c21ba24b33` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-386.tar.gz) | `48cebd26448fdd47aa36257baa4c716a98fda055bbf6a05230f2a3fe3c1b99b4e483668661415392190f3eebb9cb6e15c784626b48bb2541d93a37902f0e3974` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-amd64.tar.gz) | `c3a5fedf263f07a07f59c01fea6c63c1e0b76ee8dc67c45b6c134255c28ed69171ccc2f91b6a45d6a8ec5570a0a7562e24c33b9d7b0d1a864f4dc04b178b3c04` 
+[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-arm.tar.gz) | `a6b11a55bd38583bbaac14931a6862f8ce6493afe30947ba29e5556654a571593358278df59412bbeb6888fa127e9ae4c0047a9d46cb59394995010796df6b14` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-arm64.tar.gz) | `9e15331ac8010154a9b64f5488969fc8ee2f21059639896cb84c5cf4f05f4c9d1d8970cb6f9831de6b34013848227c1972c12a698d07aac1ecc056e972fe6f79` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-ppc64le.tar.gz) | `f828fe6252678de9d4822e482f5873309ae9139b2db87298ab3273ce45d38aa07b6b9b42b76c140705f27ba71e101d58b43e59ac7259d7c08dc647ea809e207c` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-linux-s390x.tar.gz) | `19da4b45f0666c063934af616f3e7ed3caa99d4ee1e46d53efadc7a8a4d38e43a36ced7249acd7ad3dcc4b4f60d8451b4f7ec7727e478ee2fadd14d353228bce` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-windows-386.tar.gz) | `775c9afb6cb3e7c4ba53e9f48a5df2cf207234a33059bd74448bc9f177dd120fb3f9c58ab45048a566326acc43bc8a67e886e10ef99f20780c8f63bb17426ebd` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-client-windows-amd64.tar.gz) | `208d2595a5b57ac97aac75b4a2a6130f0c937f781a030bde1a432daf4bc51f2fa523fca2eb84c38798489c4b536ee90aad22f7be8477985d9691d51ad8e1c4dc` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-amd64.tar.gz) | `dcf832eae04f9f52ff473754ef5cfe697b35f4dc1a282622c94fa10943c8c35f4a8777a0c58c7de871c3c428c8973bf72d6bcd8751416d4c682125268b8fcefe` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-arm.tar.gz) | `a04e34bea28eb1c8b492e8b1dd3c0dd87ebee71a7dbbef72be10a335e553361af7e48296e504f9844496b04e66350871114d20cfac3f3b49550d8be60f324ba3` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-arm64.tar.gz) | `a6af086b07a8c2e498f32b43e6511bf6a5e6baf358c572c6910c8df17cd6cae94f562f459714fcead1595767cb14c7f639c5735f1411173bbd38d5604c082a77` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-ppc64le.tar.gz) | `5a960ef5ba0c255f587f2ac0b028cd03136dc91e4efc5d1becab46417852e5524d18572b6f66259531ec6fea997da3c4d162ac153a9439672154375053fec6c7` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-server-linux-s390x.tar.gz) | `0f32c7d9b14bc238b9a5764d8f00edc4d3bf36bcf06b340b81061424e6070768962425194a8c2025c3a7ffb97b1de551d3ad23d1591ae34dd4e3ba25ab364c33` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-amd64.tar.gz) | `27d8955d535d14f3f4dca501fd27e4f06fad84c6da878ea5332a5c83b6955667f6f731bfacaf5a3a23c09f14caa400f9bee927a0f269f5374de7f79cd1919b3b` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-arm.tar.gz) | `0d56eccad63ba608335988e90b377fe8ae978b177dc836cdb803a5c99d99e8f3399a666d9477ca9cfe5964944993e85c416aec10a99323e3246141efc0b1cc9e` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-arm64.tar.gz) | `79bb9be66f9e892d866b28e5cc838245818edb9706981fab6ccbff493181b341c1fcf6fe5d2342120a112eb93af413f5ba191cfba1ab4c4a8b0546a5ad8ec220` 
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-ppc64le.tar.gz) | `3e9e2c6f9a2747d828069511dce8b4034c773c2d122f005f4508e22518055c1e055268d9d86773bbd26fbd2d887d783f408142c6c2f56ab2f2365236fd4d2635` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-linux-s390x.tar.gz) | `4f96e018c336fa13bb6df6f7217fe46a2b5c47f806f786499c429604ccba2ebe558503ab2c72f63250aa25b61dae2d166e4b80ae10f6ab37d714f87c1dcf6691` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-rc.1/kubernetes-node-windows-amd64.tar.gz) | `ab110d76d506746af345e5897ef4f6993d5f53ac818ba69a334f3641047351aa63bfb3582841a9afca51dd0baff8b9010077d9c8ec85d2d69e4172b8d4b338b0` + +## Changelog since v1.18.0-beta.2 + +## Changes by Kind + +### API Change + +- Removes ConfigMap as suggestion for IngressClass parameters ([#89093](https://github.com/kubernetes/kubernetes/pull/89093), [@robscott](https://github.com/robscott)) [SIG Network] + +### Other (Bug, Cleanup or Flake) + +- EndpointSlice should not contain endpoints for terminating pods ([#89056](https://github.com/kubernetes/kubernetes/pull/89056), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] +- Fix a bug where ExternalTrafficPolicy is not applied to service ExternalIPs. ([#88786](https://github.com/kubernetes/kubernetes/pull/88786), [@freehan](https://github.com/freehan)) [SIG Network] +- Fix invalid VMSS updates due to incorrect cache ([#89002](https://github.com/kubernetes/kubernetes/pull/89002), [@ArchangelSDY](https://github.com/ArchangelSDY)) [SIG Cloud Provider] +- Fix isCurrentInstance for Windows by removing the dependency of hostname. ([#89138](https://github.com/kubernetes/kubernetes/pull/89138), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Fixed a data race in kubelet image manager that can cause static pod workers to silently stop working. ([#88915](https://github.com/kubernetes/kubernetes/pull/88915), [@roycaihw](https://github.com/roycaihw)) [SIG Node] +- Fixed an issue that could cause the kubelet to incorrectly run concurrent pod reconciliation loops and crash. 
([#89055](https://github.com/kubernetes/kubernetes/pull/89055), [@tedyu](https://github.com/tedyu)) [SIG Node] +- Kube-proxy: on dual-stack mode, if it is not able to get the IP Family of an endpoint, logs it with level InfoV(4) instead of Warning, avoiding flooding the logs for endpoints without addresses ([#88934](https://github.com/kubernetes/kubernetes/pull/88934), [@aojea](https://github.com/aojea)) [SIG Network] +- Update Cluster Autoscaler to 1.18.0; changelog: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.18.0 ([#89095](https://github.com/kubernetes/kubernetes/pull/89095), [@losipiuk](https://github.com/losipiuk)) [SIG Autoscaling and Cluster Lifecycle] + + +# v1.18.0-beta.2 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.18.0-beta.2 + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes.tar.gz) | `3017430ca17f8a3523669b4a02c39cedfc6c48b07281bc0a67a9fbe9d76547b76f09529172cc01984765353a6134a43733b7315e0dff370bba2635dd2a6289af` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-src.tar.gz) | `c5fd60601380a99efff4458b1c9cf4dc02195f6f756b36e590e54dff68f7064daf32cf63980dddee13ef9dec7a60ad4eeb47a288083fdbbeeef4bc038384e9ea` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-darwin-386.tar.gz) | `7e49ede167b9271d4171e477fa21d267b2fb35f80869337d5b323198dc12f71b61441975bf925ad6e6cd7b61cbf6372d386417dc1e5c9b3c87ae651021c37237` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-darwin-amd64.tar.gz) | `3f5cdf0e85eee7d0773e0ae2df1c61329dea90e0da92b02dae1ffd101008dc4bade1c4951fc09f0cad306f0bcb7d16da8654334ddee43d5015913cc4ac8f3eda` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-386.tar.gz) | `b67b41c11bfecb88017c33feee21735c56f24cf6f7851b63c752495fc0fb563cd417a67a81f46bca091f74dc00fca1f296e483d2e3dfe2004ea4b42e252d30b9` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-amd64.tar.gz) | `1fef2197cb80003e3a5c26f05e889af9d85fbbc23e27747944d2997ace4bfa28f3670b13c08f5e26b7e274176b4e2df89c1162aebd8b9506e63b39b311b2d405` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-arm.tar.gz) | `84e5f4d9776490219ee94a84adccd5dfc7c0362eb330709771afcde95ec83f03d96fe7399eec218e47af0a1e6445e24d95e6f9c66c0882ef8233a09ff2022420` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-arm64.tar.gz) | `ba613b114e0cca32fa21a3d10f845aa2f215d3af54e775f917ff93919f7dd7075efe254e4047a85a1f4b817fc2bd78006c2e8873885f1208cbc02db99e2e2e25` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-ppc64le.tar.gz) | `502a6938d8c4bbe04abbd19b59919d86765058ff72334848be4012cec493e0e7027c6cd950cf501367ac2026eea9f518110cb72d1c792322b396fc2f73d23217` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-linux-s390x.tar.gz) | `c24700e0ed2ef5c1d2dd282d638c88d90392ae90ea420837b39fd8e1cfc19525017325ccda71d8472fdaea174762208c09e1bba9bbc77c89deef6fac5e847ba2` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-windows-386.tar.gz) | `0d4c5a741b052f790c8b0923c9586ee9906225e51cf4dc8a56fc303d4d61bb5bf77fba9e65151dec7be854ff31da8fc2dcd3214563e1b4b9951e6af4aa643da4` 
+[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-client-windows-amd64.tar.gz) | `841ef2e306c0c9593f04d9528ee019bf3b667761227d9afc1d6ca8bf1aa5631dc25f5fe13ff329c4bf0c816b971fd0dec808f879721e0f3bf51ce49772b38010` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-amd64.tar.gz) | `b373df2e6ef55215e712315a5508e85a39126bd81b7b93c6b6305238919a88c740077828a6f19bcd97141951048ef7a19806ef6b1c3e1772dbc45715c5fcb3af` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-arm.tar.gz) | `b8103cb743c23076ce8dd7c2da01c8dd5a542fbac8480e82dc673139c8ee5ec4495ca33695e7a18dd36412cf1e18ed84c8de05042525ddd8e869fbdfa2766569` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-arm64.tar.gz) | `8f8f05cf64fb9c8d80cdcb4935b2d3e3edc48bdd303231ae12f93e3f4d979237490744a11e24ba7f52dbb017ca321a8e31624dcffa391b8afda3d02078767fa0` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-ppc64le.tar.gz) | `b313b911c46f2ec129537407af3f165f238e48caeb4b9e530783ffa3659304a544ed02bef8ece715c279373b9fb2c781bd4475560e02c4b98a6d79837bc81938` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-server-linux-s390x.tar.gz) | `a1b6b06571141f507b12e5ef98efb88f4b6b9aba924722b2a74f11278d29a2972ab8290608360151d124608e6e24da0eb3516d484cb5fa12ff2987562f15964a` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-amd64.tar.gz) | `20e02ca327543cddb2568ead3d5de164cbfb2914ab6416106d906bf12fcfbc4e55b13bea4d6a515e8feab038e2c929d72c4d6909dfd7881ba69fd1e8c772ab99` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-arm.tar.gz) | `ecd817ef05d6284f9c6592b84b0a48ea31cf4487030c9fb36518474b2a33dad11b9c852774682e60e4e8b074e6bea7016584ca281dddbe2994da5eaf909025c0` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-arm64.tar.gz) | `0020d32b7908ffd5055c8b26a8b3033e4702f89efcfffe3f6fcdb8a9921fa8eaaed4193c85597c24afd8c523662454f233521bb7055841a54c182521217ccc9d` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-ppc64le.tar.gz) | `e065411d66d486e7793449c1b2f5a412510b913bf7f4e728c0a20e275642b7668957050dc266952cdff09acc391369ae6ac5230184db89af6823ba400745f2fc` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-linux-s390x.tar.gz) | `082ee90413beaaea41d6cbe9a18f7d783a95852607f3b94190e0ca12aacdd97d87e233b87117871bfb7d0a4b6302fbc7688549492a9bc50a2f43a5452504d3ce` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.2/kubernetes-node-windows-amd64.tar.gz) | `fb5aca0cc36be703f9d4033eababd581bac5de8399c50594db087a99ed4cb56e4920e960eb81d0132d696d094729254eeda2a5c0cb6e65e3abca6c8d61da579e` + +## Changelog since v1.18.0-beta.1 + +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + +- `kubectl` no longer defaults to `http://localhost:8080`. If you own one of these legacy clusters, you are *strongly- encouraged to secure your server. If you cannot secure your server, you can set `KUBERNETES_MASTER` if you were relying on that behavior and you're a client-go user. Set `--server`, `--kubeconfig` or `KUBECONFIG` to make it work in `kubectl`. 
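+
+A minimal sketch of the migration, assuming a hypothetical cluster at `https://my-apiserver.example.com:6443`; the server address and kubeconfig path are placeholders, not recommendations:
+
+```sh
+# Point kubectl at the API server explicitly instead of relying on the old default.
+kubectl --server=https://my-apiserver.example.com:6443 get nodes
+
+# Or select a kubeconfig for the whole shell session.
+export KUBECONFIG=$HOME/.kube/config
+kubectl get nodes
+
+# client-go based tools that relied on the old default can fall back to KUBERNETES_MASTER.
+export KUBERNETES_MASTER=https://my-apiserver.example.com:6443
+```
+
+The change is tracked in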
([#86173](https://github.com/kubernetes/kubernetes/pull/86173), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, CLI and Testing] + +## Changes by Kind + +### Deprecation + +- AlgorithmSource is removed from v1alpha2 Scheduler ComponentConfig ([#87999](https://github.com/kubernetes/kubernetes/pull/87999), [@damemi](https://github.com/damemi)) [SIG Scheduling] +- Kube-proxy: deprecate `--healthz-port` and `--metrics-port` flag, please use `--healthz-bind-address` and `--metrics-bind-address` instead ([#88512](https://github.com/kubernetes/kubernetes/pull/88512), [@SataQiu](https://github.com/SataQiu)) [SIG Network] +- Kubeadm: deprecate the usage of the experimental flag '--use-api' under the 'kubeadm alpha certs renew' command. ([#88827](https://github.com/kubernetes/kubernetes/pull/88827), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + +### API Change + +- A new IngressClass resource has been added to enable better Ingress configuration. ([#88509](https://github.com/kubernetes/kubernetes/pull/88509), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, CLI, Network, Node and Testing] +- Added GenericPVCDataSource feature gate to enable using arbitrary custom resources as the data source for a PVC. ([#88636](https://github.com/kubernetes/kubernetes/pull/88636), [@bswartz](https://github.com/bswartz)) [SIG Apps and Storage] +- Allow user to specify fsgroup permission change policy for pods ([#88488](https://github.com/kubernetes/kubernetes/pull/88488), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] +- BlockVolume and CSIBlockVolume features are now GA. ([#88673](https://github.com/kubernetes/kubernetes/pull/88673), [@jsafrane](https://github.com/jsafrane)) [SIG Apps, Node and Storage] +- CustomResourceDefinition schemas that use `x-kubernetes-list-map-keys` to specify properties that uniquely identify list items must make those properties required or have a default value, to ensure those properties are present for all list items. See https://kubernetes.io/docs/reference/using-api/api-concepts/#merge-strategy for details. ([#88076](https://github.com/kubernetes/kubernetes/pull/88076), [@eloyekunle](https://github.com/eloyekunle)) [SIG API Machinery and Testing] +- Fixes a regression with clients prior to 1.15 not being able to update podIP in pod status, or podCIDR in node spec, against >= 1.16 API servers ([#88505](https://github.com/kubernetes/kubernetes/pull/88505), [@liggitt](https://github.com/liggitt)) [SIG Apps and Network] +- Ingress: Add Exact and Prefix maching to Ingress PathTypes ([#88587](https://github.com/kubernetes/kubernetes/pull/88587), [@cmluciano](https://github.com/cmluciano)) [SIG Apps, Cluster Lifecycle and Network] +- Ingress: Add alternate backends via TypedLocalObjectReference ([#88775](https://github.com/kubernetes/kubernetes/pull/88775), [@cmluciano](https://github.com/cmluciano)) [SIG Apps and Network] +- Ingress: allow wildcard hosts in IngressRule ([#88858](https://github.com/kubernetes/kubernetes/pull/88858), [@cmluciano](https://github.com/cmluciano)) [SIG Network] +- Kube-controller-manager and kube-scheduler expose profiling by default to match the kube-apiserver. Use `--enable-profiling=false` to disable. 
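+
+A hedged sketch of opting back out, assuming a kubeadm-style control plane with static Pod manifests under `/etc/kubernetes/manifests` (an assumption; adjust for how your control plane is deployed):
+
+```sh
+# Check whether the flag is already set (paths are typical kubeadm defaults
+# and may differ in your environment).
+grep -n 'enable-profiling' \
+  /etc/kubernetes/manifests/kube-scheduler.yaml \
+  /etc/kubernetes/manifests/kube-controller-manager.yaml || true
+
+# To opt out, add the following entry under each component's `command:` list
+# and let the kubelet restart the static Pods:
+#   - --enable-profiling=false
+```
+
+The new default is tracked in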
([#88663](https://github.com/kubernetes/kubernetes/pull/88663), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Cloud Provider and Scheduling] +- Move TaintBasedEvictions feature gates to GA ([#87487](https://github.com/kubernetes/kubernetes/pull/87487), [@skilxn-go](https://github.com/skilxn-go)) [SIG API Machinery, Apps, Node, Scheduling and Testing] +- New flag --endpointslice-updates-batch-period in kube-controller-manager can be used to reduce number of endpointslice updates generated by pod changes. ([#88745](https://github.com/kubernetes/kubernetes/pull/88745), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Apps and Network] +- Scheduler Extenders can now be configured in the v1alpha2 component config ([#88768](https://github.com/kubernetes/kubernetes/pull/88768), [@damemi](https://github.com/damemi)) [SIG Release, Scheduling and Testing] +- The apiserver/v1alph1#EgressSelectorConfiguration API is now beta. ([#88502](https://github.com/kubernetes/kubernetes/pull/88502), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery] +- The storage.k8s.io/CSIDriver has moved to GA, and is now available for use. ([#84814](https://github.com/kubernetes/kubernetes/pull/84814), [@huffmanca](https://github.com/huffmanca)) [SIG API Machinery, Apps, Auth, Node, Scheduling, Storage and Testing] +- VolumePVCDataSource moves to GA in 1.18 release ([#88686](https://github.com/kubernetes/kubernetes/pull/88686), [@j-griffith](https://github.com/j-griffith)) [SIG Apps, CLI and Cluster Lifecycle] + +### Feature + +- Add `rest_client_rate_limiter_duration_seconds` metric to component-base to track client side rate limiter latency in seconds. Broken down by verb and URL. ([#88134](https://github.com/kubernetes/kubernetes/pull/88134), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- Allow user to specify resource using --filename flag when invoking kubectl exec ([#88460](https://github.com/kubernetes/kubernetes/pull/88460), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] +- Apiserver add a new flag --goaway-chance which is the fraction of requests that will be closed gracefully(GOAWAY) to prevent HTTP/2 clients from getting stuck on a single apiserver. + After the connection closed(received GOAWAY), the client's other in-flight requests won't be affected, and the client will reconnect. + The flag min value is 0 (off), max is .02 (1/50 requests); .001 (1/1000) is a recommended starting point. + Clusters with single apiservers, or which don't use a load balancer, should NOT enable this. ([#88567](https://github.com/kubernetes/kubernetes/pull/88567), [@answer1991](https://github.com/answer1991)) [SIG API Machinery] +- Azure: add support for single stack IPv6 ([#88448](https://github.com/kubernetes/kubernetes/pull/88448), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] +- DefaultConstraints can be specified for the PodTopologySpread plugin in the component config ([#88671](https://github.com/kubernetes/kubernetes/pull/88671), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- Kubeadm: support Windows specific kubelet flags in kubeadm-flags.env ([#88287](https://github.com/kubernetes/kubernetes/pull/88287), [@gab-satchi](https://github.com/gab-satchi)) [SIG Cluster Lifecycle and Windows] +- Kubectl cluster-info dump changed to only display a message telling you the location where the output was written when the output is not standard output. 
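+
+For illustration (the directory name is arbitrary):
+
+```sh
+# Output to stdout: no location message is printed.
+kubectl cluster-info dump > cluster-dump.json
+
+# Output to a directory: kubectl now prints only where the files were written.
+kubectl cluster-info dump --output-directory=/tmp/cluster-state
+```
+
+The output change is tracked in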
([#88765](https://github.com/kubernetes/kubernetes/pull/88765), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Print NotReady when pod is not ready based on its conditions. ([#88240](https://github.com/kubernetes/kubernetes/pull/88240), [@soltysh](https://github.com/soltysh)) [SIG CLI] +- Scheduler Extender API is now located under k8s.io/kube-scheduler/extender ([#88540](https://github.com/kubernetes/kubernetes/pull/88540), [@damemi](https://github.com/damemi)) [SIG Release, Scheduling and Testing] +- Signatures on scale client methods have been modified to accept `context.Context` as a first argument. Signatures of Get, Update, and Patch methods have been updated to accept GetOptions, UpdateOptions and PatchOptions respectively. ([#88599](https://github.com/kubernetes/kubernetes/pull/88599), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG API Machinery, Apps, Autoscaling and CLI] +- Signatures on the dynamic client methods have been modified to accept `context.Context` as a first argument. Signatures of Delete and DeleteCollection methods now accept DeleteOptions by value instead of by reference. ([#88906](https://github.com/kubernetes/kubernetes/pull/88906), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps, CLI, Cluster Lifecycle, Storage and Testing] +- Signatures on the metadata client methods have been modified to accept `context.Context` as a first argument. Signatures of Delete and DeleteCollection methods now accept DeleteOptions by value instead of by reference. ([#88910](https://github.com/kubernetes/kubernetes/pull/88910), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] +- Webhooks will have alpha support for network proxy ([#85870](https://github.com/kubernetes/kubernetes/pull/85870), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Auth and Testing] +- When client certificate files are provided, reload files for new connections, and close connections when a certificate changes. ([#79083](https://github.com/kubernetes/kubernetes/pull/79083), [@jackkleeman](https://github.com/jackkleeman)) [SIG API Machinery, Auth, Node and Testing] +- When deleting objects using kubectl with the --force flag, you are no longer required to also specify --grace-period=0. ([#87776](https://github.com/kubernetes/kubernetes/pull/87776), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- `kubectl` now contains a `kubectl alpha debug` command. This command allows attaching an ephemeral container to a running pod for the purposes of debugging. ([#88004](https://github.com/kubernetes/kubernetes/pull/88004), [@verb](https://github.com/verb)) [SIG CLI] + +### Documentation + +- Update Japanese translation for kubectl help ([#86837](https://github.com/kubernetes/kubernetes/pull/86837), [@inductor](https://github.com/inductor)) [SIG CLI and Docs] +- `kubectl plugin` now prints a note how to install krew ([#88577](https://github.com/kubernetes/kubernetes/pull/88577), [@corneliusweig](https://github.com/corneliusweig)) [SIG CLI] + +### Other (Bug, Cleanup or Flake) + +- Azure VMSS LoadBalancerBackendAddressPools updating has been improved with squential-sync + concurrent-async requests. ([#88699](https://github.com/kubernetes/kubernetes/pull/88699), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- AzureFile and CephFS use new Mount library that prevents logging of sensitive mount options. 
([#88684](https://github.com/kubernetes/kubernetes/pull/88684), [@saad-ali](https://github.com/saad-ali)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Storage] +- Build: Enable kube-cross image-building on K8s Infra ([#88562](https://github.com/kubernetes/kubernetes/pull/88562), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] +- Client-go certificate manager rotation gained the ability to preserve optional intermediate chains accompanying issued certificates ([#88744](https://github.com/kubernetes/kubernetes/pull/88744), [@jackkleeman](https://github.com/jackkleeman)) [SIG API Machinery and Auth] +- Conformance image now depends on stretch-slim instead of debian-hyperkube-base as that image is being deprecated and removed. ([#88702](https://github.com/kubernetes/kubernetes/pull/88702), [@dims](https://github.com/dims)) [SIG Cluster Lifecycle, Release and Testing] +- Deprecate --generator flag from kubectl create commands ([#88655](https://github.com/kubernetes/kubernetes/pull/88655), [@soltysh](https://github.com/soltysh)) [SIG CLI] +- FIX: prevent apiserver from panicking when failing to load audit webhook config file ([#88879](https://github.com/kubernetes/kubernetes/pull/88879), [@JoshVanL](https://github.com/JoshVanL)) [SIG API Machinery and Auth] +- Fix /readyz to return error immediately after a shutdown is initiated, before the --shutdown-delay-duration has elapsed. ([#88911](https://github.com/kubernetes/kubernetes/pull/88911), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] +- Fix a bug where kubenet fails to parse the tc output. ([#83572](https://github.com/kubernetes/kubernetes/pull/83572), [@chendotjs](https://github.com/chendotjs)) [SIG Network] +- Fix describe ingress annotations not sorted. ([#88394](https://github.com/kubernetes/kubernetes/pull/88394), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] +- Fix handling of aws-load-balancer-security-groups annotation. Security-Groups assigned with this annotation are no longer modified by kubernetes which is the expected behaviour of most users. Also no unnecessary Security-Groups are created anymore if this annotation is used. ([#83446](https://github.com/kubernetes/kubernetes/pull/83446), [@Elias481](https://github.com/Elias481)) [SIG Cloud Provider] +- Fix kubectl create deployment image name ([#86636](https://github.com/kubernetes/kubernetes/pull/86636), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] +- Fix missing "apiVersion" for "involvedObject" in Events for Nodes. ([#87537](https://github.com/kubernetes/kubernetes/pull/87537), [@uthark](https://github.com/uthark)) [SIG Apps and Node] +- Fix that prevents repeated fetching of PVC/PV objects by kubelet when processing of pod volumes fails. While this prevents hammering API server in these error scenarios, it means that some errors in processing volume(s) for a pod could now take up to 2-3 minutes before retry. ([#88141](https://github.com/kubernetes/kubernetes/pull/88141), [@tedyu](https://github.com/tedyu)) [SIG Node and Storage] +- Fix: azure file mount timeout issue ([#88610](https://github.com/kubernetes/kubernetes/pull/88610), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fix: corrupted mount point in csi driver ([#88569](https://github.com/kubernetes/kubernetes/pull/88569), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] +- Fixed a bug in the TopologyManager. 
Previously, the TopologyManager would only guarantee alignment if container creation was serialized in some way. Alignment is now guaranteed under all scenarios of container creation. ([#87759](https://github.com/kubernetes/kubernetes/pull/87759), [@klueska](https://github.com/klueska)) [SIG Node] +- Fixed block CSI volume cleanup after timeouts. ([#88660](https://github.com/kubernetes/kubernetes/pull/88660), [@jsafrane](https://github.com/jsafrane)) [SIG Node and Storage] +- Fixes issue where you can't attach more than 15 GCE Persistent Disks to c2, n2, m1, m2 machine types. ([#88602](https://github.com/kubernetes/kubernetes/pull/88602), [@yuga711](https://github.com/yuga711)) [SIG Storage] +- For volumes that allow attaches across multiple nodes, attach and detach operations across different nodes are now executed in parallel. ([#88678](https://github.com/kubernetes/kubernetes/pull/88678), [@verult](https://github.com/verult)) [SIG Apps, Node and Storage] +- Hide kubectl.kubernetes.io/last-applied-configuration in describe command ([#88758](https://github.com/kubernetes/kubernetes/pull/88758), [@soltysh](https://github.com/soltysh)) [SIG Auth and CLI] +- In GKE alpha clusters it will be possible to use the service annotation `cloud.google.com/network-tier: Standard` ([#88487](https://github.com/kubernetes/kubernetes/pull/88487), [@zioproto](https://github.com/zioproto)) [SIG Cloud Provider] +- Kubelets perform fewer unnecessary pod status update operations on the API server. ([#88591](https://github.com/kubernetes/kubernetes/pull/88591), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Scalability] +- Plugin/PluginConfig and Policy APIs are mutually exclusive when running the scheduler ([#88864](https://github.com/kubernetes/kubernetes/pull/88864), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- Specifying PluginConfig for the same plugin more than once fails scheduler startup. + + Specifying extenders and configuring .ignoredResources for the NodeResourcesFit plugin fails ([#88870](https://github.com/kubernetes/kubernetes/pull/88870), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- Support TLS Server Name overrides in kubeconfig file and via --tls-server-name in kubectl ([#88769](https://github.com/kubernetes/kubernetes/pull/88769), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and CLI] +- Terminating a restartPolicy=Never pod no longer has a chance to report the pod succeeded when it actually failed. ([#88440](https://github.com/kubernetes/kubernetes/pull/88440), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node and Testing] +- The EventRecorder from k8s.io/client-go/tools/events will now create events in the default namespace (instead of kube-system) when the related object does not have it set. ([#88815](https://github.com/kubernetes/kubernetes/pull/88815), [@enj](https://github.com/enj)) [SIG API Machinery] +- The audit event sourceIPs list will now always end with the IP that sent the request directly to the API server. 
([#87167](https://github.com/kubernetes/kubernetes/pull/87167), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Auth] +- Update to use golang 1.13.8 ([#87648](https://github.com/kubernetes/kubernetes/pull/87648), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Release and Testing] +- Validate kube-proxy flags --ipvs-tcp-timeout, --ipvs-tcpfin-timeout, --ipvs-udp-timeout ([#88657](https://github.com/kubernetes/kubernetes/pull/88657), [@chendotjs](https://github.com/chendotjs)) [SIG Network] + + +# v1.18.0-beta.1 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.18.0-beta.1 + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes.tar.gz) | `7c182ca905b3a31871c01ab5fdaf46f074547536c7975e069ff230af0d402dfc0346958b1d084bd2c108582ffc407484e6a15a1cd93e9affbe34b6e99409ef1f` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-src.tar.gz) | `d104b8c792b1517bd730787678c71c8ee3b259de81449192a49a1c6e37a6576d28f69b05c2019cc4a4c40ddeb4d60b80138323df3f85db8682caabf28e67c2de` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-darwin-386.tar.gz) | `bc337bb8f200a789be4b97ce99b9d7be78d35ebd64746307c28339dc4628f56d9903e0818c0888aaa9364357a528d1ac6fd34f74377000f292ec502fbea3837e` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | `38dfa5e0b0cfff39942c913a6bcb2ad8868ec43457d35cffba08217bb6e7531720e0731f8588505f4c81193ce5ec0e5fe6870031cf1403fbbde193acf7e53540` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-386.tar.gz) | `8e63ec7ce29c69241120c037372c6c779e3f16253eabd612c7cbe6aa89326f5160eb5798004d723c5cd72d458811e98dac3574842eb6a57b2798ecd2bbe5bcf9` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | `c1be9f184a7c3f896a785c41cd6ece9d90d8cb9b1f6088bdfb5557d8856c55e455f6688f5f54c2114396d5ae7adc0361e34ebf8e9c498d0187bd785646ccc1d0` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-arm.tar.gz) | `8eab02453cfd9e847632a774a0e0cf3a33c7619fb4ced7f1840e1f71444e8719b1c8e8cbfdd1f20bb909f3abe39cdcac74f14cb9c878c656d35871b7c37c7cbe` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | `f7df0ec02d2e7e63278d5386e8153cfe2b691b864f17b6452cc824a5f328d688976c975b076e60f1c6b3c859e93e477134fbccc53bb49d9e846fb038b34eee48` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | `36dd5b10addca678a518e6d052c9d6edf473e3f87388a2f03f714c93c5fbfe99ace16cf3b382a531be20a8fe6f4160f8d891800dd2cff5f23c9ca12c2f4a151b` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | `5bdbb44b996ab4ccf3a383780270f5cfdbf174982c300723c8bddf0a48ae5e459476031c1d51b9d30ffd621d0a126c18a5de132ef1d92fca2f3e477665ea10cc` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-windows-386.tar.gz) | `5dea3d4c4e91ef889850143b361974250e99a3c526f5efee23ff9ccdcd2ceca4a2247e7c4f236bdfa77d2150157da5d676ac9c3ba26cf3a2f1e06d8827556f77` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | 
`db298e698391368703e6aea7f4345aec5a4b8c69f9d8ff6c99fb5804a6cea16d295fb01e70fe943ade3d4ce9200a081ad40da21bd331317ec9213f69b4d6c48f` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | `c6284929dd5940e750b48db72ffbc09f73c5ec31ab3db283babb8e4e07cd8cbb27642f592009caae4717981c0db82c16312849ef4cbafe76acc4264c7d5864ac` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-arm.tar.gz) | `6fc9552cf082c54cc0833b19876117c87ba7feb5a12c7e57f71b52208daf03eaef3ca56bd22b7bce2d6e81b5a23537cf6f5497a6eaa356c0aab1d3de26c309f9` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | `b794b9c399e548949b5bfb2fe71123e86c2034847b2c99aca34b6de718a35355bbecdae9dc2a81c49e3c82fb4b5862526a3f63c2862b438895e12c5ea884f22e` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | `fddaed7a54f97046a91c29534645811c6346e973e22950b2607b8c119c2377e9ec2d32144f81626078cdaeca673129cc4016c1a3dbd3d43674aa777089fb56ac` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | `65951a534bb55069c7419f41cbcdfe2fae31541d8a3f9eca11fc2489addf281c5ad2d13719212657da0be5b898f22b57ac39446d99072872fbacb0a7d59a4f74` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | `992059efb5cae7ed0ef55820368d854bad1c6d13a70366162cd3b5111ce24c371c7c87ded2012f055e08b2ff1b4ef506e1f4e065daa3ac474fef50b5efa4fb07` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-arm.tar.gz) | `c63ae0f8add5821ad267774314b8c8c1ffe3b785872bf278e721fd5dfdad1a5db1d4db3720bea0a36bf10d9c6dd93e247560162c0eac6e1b743246f587d3b27a` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | `47adb9ddf6eaf8f475b89f59ee16fbd5df183149a11ad1574eaa645b47a6d58aec2ca70ba857ce9f1a5793d44cf7a61ebc6874793bb685edaf19410f4f76fd13` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | `a3bc4a165567c7b76a3e45ab7b102d6eb3ecf373eb048173f921a4964cf9be8891d0d5b8dafbd88c3af7b0e21ef3d41c1e540c3347ddd84b929b3a3d02ceb7b2` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | `109ddf37c748f69584c829db57107c3518defe005c11fcd2a1471845c15aae0a3c89aafdd734229f4069ed18856cc650c80436684e1bdc43cfee3149b0324746` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | `a3a75d2696ad3136476ad7d811e8eabaff5111b90e592695e651d6111f819ebf0165b8b7f5adc05afb5f7f01d1e5fb64876cb696e492feb20a477a5800382b7a` + +## Changelog since v1.18.0-beta.0 + +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + +- The StreamingProxyRedirects feature and `--redirect-container-streaming` flag are deprecated, and will be removed in a future release. The default behavior (proxy streaming requests through the kubelet) will be the only supported option. + If you are setting `--redirect-container-streaming=true`, then you must migrate off this configuration. The flag will no longer be able to be enabled starting in v1.20. If you are not setting the flag, no action is necessary. 
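+
+A minimal way to check whether a node still sets the deprecated flag, assuming a Linux node with a kubeadm-provisioned kubelet; the file path is an assumption, not a requirement:
+
+```sh
+# Check the running kubelet invocation for the deprecated flag.
+ps -o args= -C kubelet | tr ' ' '\n' | grep -- '--redirect-container-streaming' || echo "flag not set"
+
+# kubeadm-managed nodes typically keep extra kubelet flags here.
+grep -- '--redirect-container-streaming' /var/lib/kubelet/kubeadm-flags.env 2>/dev/null || true
+
+# If the flag is present, remove it (falling back to proxied streaming) and restart the kubelet.
+```
+
+The deprecation is tracked in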
([#88290](https://github.com/kubernetes/kubernetes/pull/88290), [@tallclair](https://github.com/tallclair)) [SIG API Machinery and Node]
+
+- Support using network resources (VNet, LB, IP, etc.) in a different AAD Tenant and Subscription than the one used for the cluster. The change:
+
+  1. Adds the properties `networkResourceTenantID` and `networkResourceSubscriptionID` to the cloud provider auth config section, which indicate the location of the network resources.
+  2. Adds the function `GetMultiTenantServicePrincipalToken` to fetch a multi-tenant service principal token, used by the Azure VM/VMSS clients in this feature.
+  3. Adds the function `GetNetworkResourceServicePrincipalToken` to fetch a network resource service principal token, used by the Azure network resource (Load Balancer, Public IP, Route Table, Network Security Group and their sub-level resources) clients in this feature.
+  4. Adds related unit tests.
+
+  User documentation: see https://github.com/kubernetes-sigs/cloud-provider-azure/pull/301 ([#88384](https://github.com/kubernetes/kubernetes/pull/88384), [@bowen5](https://github.com/bowen5)) [SIG Cloud Provider]
+
+## Changes by Kind
+
+### Deprecation
+
+- Azure service annotation service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset has been deprecated. Its support will be removed in a future release. ([#88462](https://github.com/kubernetes/kubernetes/pull/88462), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+
+### API Change
+
+- API additions to apiserver types ([#87179](https://github.com/kubernetes/kubernetes/pull/87179), [@Jefftree](https://github.com/Jefftree)) [SIG API Machinery, Cloud Provider and Cluster Lifecycle]
+- Add Scheduling Profiles to kubescheduler.config.k8s.io/v1alpha2 ([#88087](https://github.com/kubernetes/kubernetes/pull/88087), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing]
+- Added support for multiple sizes huge pages on a container level ([#84051](https://github.com/kubernetes/kubernetes/pull/84051), [@bart0sh](https://github.com/bart0sh)) [SIG Apps, Node and Storage]
+- AppProtocol is a new field on Service and Endpoints resources, enabled with the ServiceAppProtocol feature gate. ([#88503](https://github.com/kubernetes/kubernetes/pull/88503), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- Fixed missing validation of uniqueness of list items in lists with `x-kubernetes-list-type: map` or `x-kubernetes-list-type: set` in CustomResources. ([#84920](https://github.com/kubernetes/kubernetes/pull/84920), [@sttts](https://github.com/sttts)) [SIG API Machinery]
+- Introduces optional --detect-local flag to kube-proxy.
+  Currently the only supported value is "cluster-cidr",
+  which is the default if not specified. ([#87748](https://github.com/kubernetes/kubernetes/pull/87748), [@satyasm](https://github.com/satyasm)) [SIG Cluster Lifecycle, Network and Scheduling]
+- Kube-scheduler can run more than one scheduling profile. Given a pod, the profile is selected by using its `.spec.schedulerName`.
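+
+  A minimal sketch of a two-profile setup; the second profile name and the Pod below are illustrative only, not part of this change:
+
+```sh
+# scheduler-config.yaml: two profiles served by a single kube-scheduler.
+cat > scheduler-config.yaml <<'EOF'
+apiVersion: kubescheduler.config.k8s.io/v1alpha2
+kind: KubeSchedulerConfiguration
+profiles:
+  - schedulerName: default-scheduler
+  - schedulerName: low-priority-batch
+EOF
+# Run the scheduler with: kube-scheduler --config=scheduler-config.yaml
+
+# A Pod selects the second profile via spec.schedulerName.
+cat <<'EOF' | kubectl apply -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: batch-worker
+spec:
+  schedulerName: low-priority-batch
+  containers:
+    - name: worker
+      image: busybox
+      command: ["sleep", "3600"]
+EOF
+```
+
+Multi-profile support is tracked in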
([#88285](https://github.com/kubernetes/kubernetes/pull/88285), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps, Scheduling and Testing] +- Moving Windows RunAsUserName feature to GA ([#87790](https://github.com/kubernetes/kubernetes/pull/87790), [@marosset](https://github.com/marosset)) [SIG Apps and Windows] + +### Feature + +- Add --dry-run to kubectl delete, taint, replace ([#88292](https://github.com/kubernetes/kubernetes/pull/88292), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI and Testing] +- Add huge page stats to Allocated resources in "kubectl describe node" ([#80605](https://github.com/kubernetes/kubernetes/pull/80605), [@odinuge](https://github.com/odinuge)) [SIG CLI] +- Kubeadm: The ClusterStatus struct present in the kubeadm-config ConfigMap is deprecated and will be removed on a future version. It is going to be maintained by kubeadm until it gets removed. The same information can be found on `etcd` and `kube-apiserver` pod annotations, `kubeadm.kubernetes.io/etcd.advertise-client-urls` and `kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint` respectively. ([#87656](https://github.com/kubernetes/kubernetes/pull/87656), [@ereslibre](https://github.com/ereslibre)) [SIG Cluster Lifecycle] +- Kubeadm: add the experimental feature gate PublicKeysECDSA that can be used to create a + cluster with ECDSA certificates from "kubeadm init". Renewal of existing ECDSA certificates is + also supported using "kubeadm alpha certs renew", but not switching between the RSA and + ECDSA algorithms on the fly or during upgrades. ([#86953](https://github.com/kubernetes/kubernetes/pull/86953), [@rojkov](https://github.com/rojkov)) [SIG API Machinery, Auth and Cluster Lifecycle] +- Kubeadm: on kubeconfig certificate renewal, keep the embedded CA in sync with the one on disk ([#88052](https://github.com/kubernetes/kubernetes/pull/88052), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: upgrade supports fallback to the nearest known etcd version if an unknown k8s version is passed ([#88373](https://github.com/kubernetes/kubernetes/pull/88373), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- New flag `--show-hidden-metrics-for-version` in kube-scheduler can be used to show all hidden metrics that deprecated in the previous minor release. ([#84913](https://github.com/kubernetes/kubernetes/pull/84913), [@serathius](https://github.com/serathius)) [SIG Instrumentation and Scheduling] +- Scheduler framework permit plugins now run at the end of the scheduling cycle, after reserve plugins. Waiting on permit will remain in the beginning of the binding cycle. ([#88199](https://github.com/kubernetes/kubernetes/pull/88199), [@mateuszlitwin](https://github.com/mateuszlitwin)) [SIG Scheduling] +- The kubelet and the default docker runtime now support running ephemeral containers in the Linux process namespace of a target container. Other container runtimes must implement this feature before it will be available in that runtime. ([#84731](https://github.com/kubernetes/kubernetes/pull/84731), [@verb](https://github.com/verb)) [SIG Node] + +### Other (Bug, Cleanup or Flake) + +- Add delays between goroutines for vm instance update ([#88094](https://github.com/kubernetes/kubernetes/pull/88094), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] +- Add init containers log to cluster dump info. 
([#88324](https://github.com/kubernetes/kubernetes/pull/88324), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] +- CPU limits are now respected for Windows containers. If a node is over-provisioned, no weighting is used - only limits are respected. ([#86101](https://github.com/kubernetes/kubernetes/pull/86101), [@PatrickLang](https://github.com/PatrickLang)) [SIG Node, Testing and Windows] +- Cloud provider config CloudProviderBackoffMode has been removed since it won't be used anymore. ([#88463](https://github.com/kubernetes/kubernetes/pull/88463), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Evictions due to pods breaching their ephemeral storage limits are now recorded by the `kubelet_evictions` metric and can be alerted on. ([#87906](https://github.com/kubernetes/kubernetes/pull/87906), [@smarterclayton](https://github.com/smarterclayton)) [SIG Node] +- Fix: add remediation in azure disk attach/detach ([#88444](https://github.com/kubernetes/kubernetes/pull/88444), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] +- Fix: check disk status before disk azure disk ([#88360](https://github.com/kubernetes/kubernetes/pull/88360), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] +- Fixed cleaning of CSI raw block volumes. ([#87978](https://github.com/kubernetes/kubernetes/pull/87978), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Get-kube.sh uses the gcloud's current local GCP service account for auth when the provider is GCE or GKE instead of the metadata server default ([#88383](https://github.com/kubernetes/kubernetes/pull/88383), [@BenTheElder](https://github.com/BenTheElder)) [SIG Cluster Lifecycle] +- Golang/x/net has been updated to bring in fixes for CVE-2020-9283 ([#88381](https://github.com/kubernetes/kubernetes/pull/88381), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] +- Kubeadm now includes CoreDNS version 1.6.7 ([#86260](https://github.com/kubernetes/kubernetes/pull/86260), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm: fix the bug that 'kubeadm upgrade' hangs in single node cluster ([#88434](https://github.com/kubernetes/kubernetes/pull/88434), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Optimize kubectl version help info ([#88313](https://github.com/kubernetes/kubernetes/pull/88313), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] +- Removes the deprecated command `kubectl rolling-update` ([#88057](https://github.com/kubernetes/kubernetes/pull/88057), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG Architecture, CLI and Testing] + + +# v1.18.0-alpha.5 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.18.0-alpha.5 + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes.tar.gz) | `6452cac2b80721e9f577cb117c29b9ac6858812b4275c2becbf74312566f7d016e8b34019bd1bf7615131b191613bf9b973e40ad9ac8f6de9007d41ef2d7fd70` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-src.tar.gz) | `e41d9d4dd6910a42990051fcdca4bf5d3999df46375abd27ffc56aae9b455ae984872302d590da6aa85bba6079334fb5fe511596b415ee79843dee1c61c137da` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-darwin-386.tar.gz) | 
`5c95935863492b31d4aaa6be93260088dafea27663eb91edca980ca3a8485310e60441bc9050d4d577e9c3f7ffd96db516db8d64321124cec1b712e957c9fe1c` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-darwin-amd64.tar.gz) | `868faa578b3738604d8be62fae599ccc556799f1ce54807f1fe72599f20f8a1f98ad8152fac14a08a463322530b696d375253ba3653325e74b587df6e0510da3` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-386.tar.gz) | `76a89d1d30b476b47f8fb808e342f89608e5c1c1787c4c06f2d7e763f9482e2ae8b31e6ad26541972e2b9a3a7c28327e3150cdd355e8b8d8b050a801bbf08d49` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-amd64.tar.gz) | `07ad96a09b44d1c707d7c68312c5d69b101a3424bf1e6e9400b2e7a3fba78df04302985d473ddd640d8f3f0257be34110dbe1304b9565dd9d7a4639b7b7b85fd` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-arm.tar.gz) | `c04fed9fa370a75c1b8e18b2be0821943bb9befcc784d14762ea3278e73600332a9b324d5eeaa1801d20ad6be07a553c41dcf4fa7ab3eadd0730ab043d687c8c` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-arm64.tar.gz) | `4199147dea9954333df26d34248a1cb7b02ebbd6380ffcd42d9f9ed5fdabae45a59215474dab3c11436c82e60bd27cbd03b3dde288bf611cd3e78b87c783c6a9` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-ppc64le.tar.gz) | `4f6d4d61d1c52d3253ca19031ebcd4bad06d19b68bbaaab5c8e8c590774faea4a5ceab1f05f2706b61780927e1467815b3479342c84d45df965aba78414727c4` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-linux-s390x.tar.gz) | `e2a454151ae5dd891230fb516a3f73f73ab97832db66fd3d12e7f1657a569f58a9fe2654d50ddd7d8ec88a5ff5094199323a4c6d7d44dcf7edb06cca11dd4de1` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-windows-386.tar.gz) | `14b262ba3b71c41f545db2a017cf1746075ada5745a858d2a62bc9df7c5dc10607220375db85e2c4cb85307b09709e58bc66a407488e0961191e3249dc7742b0` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-client-windows-amd64.tar.gz) | `26353c294755a917216664364b524982b7f5fc6aa832ce90134bb178df8a78604963c68873f121ea5f2626ff615bdbf2ffe54e00578739cde6df42ffae034732` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-amd64.tar.gz) | `ba77e0e7c610f59647c1b2601f82752964a0f54b7ad609a89b00fcfd553d0f0249f6662becbabaa755bb769b36a2000779f08022c40fb8cc61440337481317a1` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-arm.tar.gz) | `45e87b3e844ea26958b0b489e8c9b90900a3253000850f5ff9e87ffdcafba72ab8fd17b5ba092051a58a4bc277912c047a85940ec7f093dff6f9e8bf6fed3b42` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-arm64.tar.gz) | `155e136e3124ead69c594eead3398d6cfdbb8f823c324880e8a7bbd1b570b05d13a77a69abd0a6758cfcc7923971cc6da4d3e0c1680fd519b632803ece00d5ce` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-ppc64le.tar.gz) | `3fa0fb8221da19ad9d03278961172b7fa29a618b30abfa55e7243bb937dede8df56658acf02e6b61e7274fbc9395e237f49c62f2a83017eca2a69f67af31c01c` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-server-linux-s390x.tar.gz) | 
`db3199c3d7ba0b326d71dc8b80f50b195e79e662f71386a3b2976d47d13d7b0136887cc21df6f53e70a3d733da6eac7bbbf3bab2df8a1909a3cee4b44c32dd0b` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-amd64.tar.gz) | `addcdfbad7f12647e6babb8eadf853a374605c8f18bf63f416fa4d3bf1b903aa206679d840433206423a984bb925e7983366edcdf777cf5daef6ef88e53d6dfa` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-arm.tar.gz) | `b2ac54e0396e153523d116a2aaa32c919d6243931e0104cd47a23f546d710e7abdaa9eae92d978ce63c92041e63a9b56f5dd8fd06c812a7018a10ecac440f768` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-arm64.tar.gz) | `7aab36f2735cba805e4fd109831a1af0f586a88db3f07581b6dc2a2aab90076b22c96b490b4f6461a8fb690bf78948b6d514274f0d6fb0664081de2d44dc48e1` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-ppc64le.tar.gz) | `a579936f07ebf86f69f297ac50ba4c34caf2c0b903f73190eb581c78382b05ef36d41ade5bfd25d7b1b658cfcbee3d7125702a18e7480f9b09a62733a512a18a` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-linux-s390x.tar.gz) | `58fa0359ddd48835192fab1136a2b9b45d1927b04411502c269cda07cb8a8106536973fb4c7fedf1d41893a524c9fe2e21078fdf27bfbeed778273d024f14449` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.5/kubernetes-node-windows-amd64.tar.gz) | `9086c03cd92b440686cea6d8c4e48045cc46a43ab92ae0e70350b3f51804b9e2aaae7178142306768bae00d9ef6dd938167972bfa90b12223540093f735a45db` + +## Changelog since v1.18.0-alpha.3 + +### Deprecation + +- Kubeadm: command line option "kubelet-version" for `kubeadm upgrade node` has been deprecated and will be removed in a future release. ([#87942](https://github.com/kubernetes/kubernetes/pull/87942), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] + +### API Change + +- Kubelet podresources API now provides the information about active pods only. ([#79409](https://github.com/kubernetes/kubernetes/pull/79409), [@takmatsu](https://github.com/takmatsu)) [SIG Node] +- Remove deprecated fields from .leaderElection in kubescheduler.config.k8s.io/v1alpha2 ([#87904](https://github.com/kubernetes/kubernetes/pull/87904), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- Signatures on generated clientset methods have been modified to accept `context.Context` as a first argument. Signatures of generated Create, Update, and Patch methods have been updated to accept CreateOptions, UpdateOptions and PatchOptions respectively. Clientsets that with the previous interface have been added in new "deprecated" packages to allow incremental migration to the new APIs. The deprecated packages will be removed in the 1.21 release. ([#87299](https://github.com/kubernetes/kubernetes/pull/87299), [@mikedanese](https://github.com/mikedanese)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Scheduling, Storage, Testing and Windows] +- The k8s.io/node-api component is no longer updated. 
Instead, use the RuntimeClass types located within k8s.io/api, and the generated clients located within k8s.io/client-go ([#87503](https://github.com/kubernetes/kubernetes/pull/87503), [@liggitt](https://github.com/liggitt)) [SIG Node and Release] + +### Feature + +- Add indexer for storage cacher ([#85445](https://github.com/kubernetes/kubernetes/pull/85445), [@shaloulcy](https://github.com/shaloulcy)) [SIG API Machinery] +- Add support for mount options to the FC volume plugin ([#87499](https://github.com/kubernetes/kubernetes/pull/87499), [@ejweber](https://github.com/ejweber)) [SIG Storage] +- Added a config-mode flag in azure auth module to enable getting AAD token without spn: prefix in audience claim. When it's not specified, the default behavior doesn't change. ([#87630](https://github.com/kubernetes/kubernetes/pull/87630), [@weinong](https://github.com/weinong)) [SIG API Machinery, Auth, CLI and Cloud Provider] +- Introduced BackoffManager interface for backoff management ([#87829](https://github.com/kubernetes/kubernetes/pull/87829), [@zhan849](https://github.com/zhan849)) [SIG API Machinery] +- PodTopologySpread plugin now excludes terminatingPods when making scheduling decisions. ([#87845](https://github.com/kubernetes/kubernetes/pull/87845), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] +- Promote CSIMigrationOpenStack to Beta (off by default since it requires installation of the OpenStack Cinder CSI Driver) + The in-tree AWS OpenStack Cinder "kubernetes.io/cinder" was already deprecated a while ago and will be removed in 1.20. Users should enable CSIMigration + CSIMigrationOpenStack features and install the OpenStack Cinder CSI Driver (https://github.com/kubernetes-sigs/cloud-provider-openstack) to avoid disruption to existing Pod and PVC objects at that time. + Users should start using the OpenStack Cinder CSI Driver directly for any new volumes. ([#85637](https://github.com/kubernetes/kubernetes/pull/85637), [@dims](https://github.com/dims)) [SIG Cloud Provider] + +### Design + +- The scheduler Permit extension point doesn't return a boolean value in its Allow() and Reject() functions. ([#87936](https://github.com/kubernetes/kubernetes/pull/87936), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] + +### Other (Bug, Cleanup or Flake) + +- Adds "volume.beta.kubernetes.io/migrated-to" annotation to PV's and PVC's when they are migrated to signal external provisioners to pick up those objects for Provisioning and Deleting. ([#87098](https://github.com/kubernetes/kubernetes/pull/87098), [@davidz627](https://github.com/davidz627)) [SIG Apps and Storage] +- Fix a bug in the dual-stack IPVS proxier where stale IPv6 endpoints were not being cleaned up ([#87695](https://github.com/kubernetes/kubernetes/pull/87695), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network] +- Fix kubectl drain ignore daemonsets and others. ([#87361](https://github.com/kubernetes/kubernetes/pull/87361), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] +- Fix: add azure disk migration support for CSINode ([#88014](https://github.com/kubernetes/kubernetes/pull/88014), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fix: add non-retriable errors in azure clients ([#87941](https://github.com/kubernetes/kubernetes/pull/87941), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] +- Fixed NetworkPolicy validation that Except values are accepted when they are outside the CIDR range. 
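+
+In other words, `except` values outside the `cidr` are now rejected at validation time. For illustration, a policy whose `except` entry sits inside the `cidr` and therefore still validates; all CIDRs below are placeholders:
+
+```sh
+# ipBlock.except entries must now fall within ipBlock.cidr to pass validation.
+cat <<'EOF' | kubectl apply -f -
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: allow-from-subnet
+spec:
+  podSelector: {}
+  policyTypes: ["Ingress"]
+  ingress:
+    - from:
+        - ipBlock:
+            cidr: 10.0.0.0/16
+            except:
+              - 10.0.1.0/24        # inside the cidr: accepted
+              # - 192.168.1.0/24   # outside the cidr: now rejected by validation
+EOF
+```
+
+The validation fix is tracked in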
([#86578](https://github.com/kubernetes/kubernetes/pull/86578), [@tnqn](https://github.com/tnqn)) [SIG Network] +- Improves performance of the node authorizer ([#87696](https://github.com/kubernetes/kubernetes/pull/87696), [@liggitt](https://github.com/liggitt)) [SIG Auth] +- Iptables/userspace proxy: improve performance by getting local addresses only once per sync loop, instead of for every external IP ([#85617](https://github.com/kubernetes/kubernetes/pull/85617), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Network] +- Kube-aggregator: always sets unavailableGauge metric to reflect the current state of a service. ([#87778](https://github.com/kubernetes/kubernetes/pull/87778), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] +- Kubeadm allows to configure single-stack clusters if dual-stack is enabled ([#87453](https://github.com/kubernetes/kubernetes/pull/87453), [@aojea](https://github.com/aojea)) [SIG API Machinery, Cluster Lifecycle and Network] +- Kubeadm: 'kubeadm alpha kubelet config download' has been removed, please use 'kubeadm upgrade node phase kubelet-config' instead ([#87944](https://github.com/kubernetes/kubernetes/pull/87944), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: remove 'kubeadm upgrade node config' command since it was deprecated in v1.15, please use 'kubeadm upgrade node phase kubelet-config' instead ([#87975](https://github.com/kubernetes/kubernetes/pull/87975), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubectl describe and kubectl top pod will return a message saying "No resources found" or "No resources found in namespace" if there are no results to display. ([#87527](https://github.com/kubernetes/kubernetes/pull/87527), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Kubelet metrics gathered through metrics-server or prometheus should no longer timeout for Windows nodes running more than 3 pods. ([#87730](https://github.com/kubernetes/kubernetes/pull/87730), [@marosset](https://github.com/marosset)) [SIG Node, Testing and Windows] +- Kubelet metrics have been changed to buckets. + For example the exec/{podNamespace}/{podID}/{containerName} is now just exec. ([#87913](https://github.com/kubernetes/kubernetes/pull/87913), [@cheftako](https://github.com/cheftako)) [SIG Node] +- Limit number of instances in a single update to GCE target pool to 1000. ([#87881](https://github.com/kubernetes/kubernetes/pull/87881), [@wojtek-t](https://github.com/wojtek-t)) [SIG Cloud Provider, Network and Scalability] +- Make Azure clients only retry on specified HTTP status codes ([#88017](https://github.com/kubernetes/kubernetes/pull/88017), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Pause image contains "Architecture" in non-amd64 images ([#87954](https://github.com/kubernetes/kubernetes/pull/87954), [@BenTheElder](https://github.com/BenTheElder)) [SIG Release] +- Pods that are considered for preemption and haven't started don't produce an error log. 
([#87900](https://github.com/kubernetes/kubernetes/pull/87900), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- Prevent error message from being displayed when running kubectl plugin list and your path includes an empty string ([#87633](https://github.com/kubernetes/kubernetes/pull/87633), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- `kubectl create clusterrolebinding` creates rbac.authorization.k8s.io/v1 object ([#85889](https://github.com/kubernetes/kubernetes/pull/85889), [@oke-py](https://github.com/oke-py)) [SIG CLI] + +# v1.18.0-alpha.4 + +[Documentation](https://docs.k8s.io) + +## Important note about manual tag + +Due to a [tagging bug in our Release Engineering tooling](https://github.com/kubernetes/release/issues/1080) during `v1.18.0-alpha.3`, we needed to push a manual tag (`v1.18.0-alpha.4`). + +**No binaries have been produced or will be provided for `v1.18.0-alpha.4`.** + +The changelog for `v1.18.0-alpha.4` is included as part of the [changelog since v1.18.0-alpha.3][#changelog-since-v1180-alpha3] section. + +# v1.18.0-alpha.3 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.18.0-alpha.3 + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes.tar.gz) | `60bf3bfc23b428f53fd853bac18a4a905b980fcc0bacd35ccd6357a89cfc26e47de60975ea6b712e65980e6b9df82a22331152d9f08ed4dba44558ba23a422d4` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-src.tar.gz) | `8adf1016565a7c93713ab6fa4293c2d13b4f6e4e1ec4dcba60bd71e218b4dbe9ef5eb7dbb469006743f498fc7ddeb21865cd12bec041af60b1c0edce8b7aecd5` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-darwin-386.tar.gz) | `abb32e894e8280c772e96227b574da81cd1eac374b8d29158b7f222ed550087c65482eef4a9817dfb5f2baf0d9b85fcdfa8feced0fbc1aacced7296853b57e1f` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | `5e4b1a993264e256ec1656305de7c306094cae9781af8f1382df4ce4eed48ce030827fde1a5e757d4ad57233d52075c9e4e93a69efbdc1102e4ba810705ccddc` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-386.tar.gz) | `68da39c2ae101d2b38f6137ceda07eb0c2124794982a62ef483245dbffb0611c1441ca085fa3127e7a9977f45646788832a783544ff06954114548ea0e526e46` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | `dc236ffa8ad426620e50181419e9bebe3c161e953dbfb8a019f61b11286e1eb950b40d7cc03423bdf3e6974973bcded51300f98b55570c29732fa492dcde761d` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | `ab0a8bd6dc31ea160b731593cdc490b3cc03668b1141cf95310bd7060dcaf55c7ee9842e0acae81063fdacb043c3552ccdd12a94afd71d5310b3ce056fdaa06c` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | `159ea083c601710d0d6aea423eeb346c99ffaf2abd137d35a53e87a07f5caf12fca8790925f3196f67b768fa92a024f83b50325dbca9ccd4dde6c59acdce3509` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | `16b0459adfa26575d13be49ab53ac7f0ffd05e184e4e13d2dfbfe725d46bb8ac891e1fd8aebe36ecd419781d4cc5cf3bd2aaaf5263cf283724618c4012408f40` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | 
`d5aa1f5d89168995d2797eb839a04ce32560f405b38c1c0baaa0e313e4771ae7bb3b28e22433ad5897d36aadf95f73eb69d8d411d31c4115b6b0adf5fe041f85` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-windows-386.tar.gz) | `374e16a1e52009be88c94786f80174d82dff66399bf294c9bee18a2159c42251c5debef1109a92570799148b08024960c6c50b8299a93fd66ebef94f198f34e9` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | `5a94c1068c19271f810b994adad8e62fae03b3d4473c7c9e6d056995ff7757ea61dd8d140c9267dd41e48808876673ce117826d35a3c1bb5652752f11a044d57` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | `a677bec81f0eba75114b92ff955bac74512b47e53959d56a685dae5edd527283d91485b1e86ad74ef389c5405863badf7eb22e2f0c9a568a4d0cb495c6a5c32f` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | `2fb696f86ff13ebeb5f3cf2b254bf41303644c5ea84a292782eac6123550702655284d957676d382698c091358e5c7fe73f32803699c19be7138d6530fe413b6` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | `738e95da9cfb8f1309479078098de1c38cef5e1dd5ee1129b77651a936a412b7cd0cf15e652afc7421219646a98846ab31694970432e48dea9c9cafa03aa59cf` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | `7a85bfcbb2aa636df60c41879e96e788742ecd72040cb0db2a93418439c125218c58a4cfa96d01b0296c295793e94c544e87c2d98d50b49bc4cb06b41f874376` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | `1f1cdb2efa3e7cac857203d8845df2fdaa5cf1f20df764efffff29371945ec58f6deeba06f8fbf70b96faf81b0c955bf4cb84e30f9516cb2cc1ed27c2d2185a6` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | `4ccfced3f5ba4adfa58f4a9d1b2c5bdb3e89f9203ab0e27d11eb1c325ac323ebe63c015d2c9d070b233f5d1da76cab5349da3528511c1cd243e66edc9af381c4` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | `d695a69d18449062e4c129e54ec8384c573955f8108f4b78adc2ec929719f2196b995469c728dd6656c63c44cda24315543939f85131ebc773cfe0de689df55b` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | `21df1da88c89000abc22f97e482c3aaa5ce53ec9628d83dda2e04a1d86c4d53be46c03ed6f1f211df3ee5071bce39d944ff7716b5b6ada3b9c4821d368b0a898` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | `ff77e3aacb6ed9d89baed92ef542c8b5cec83151b6421948583cf608bca3b779dce41fc6852961e00225d5e1502f6a634bfa61a36efa90e1aee90dedb787c2d2` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | `57d75b7977ec1a0f6e7ed96a304dbb3b8664910f42ca19aab319a9ec33535ff5901dfca4abcb33bf5741cde6d152acd89a5f8178f0efe1dc24430e0c1af5b98f` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | `63fdbb71773cfd73a914c498e69bb9eea3fc314366c99ffb8bd42ec5b4dae807682c83c1eb5cfb1e2feb4d11d9e49cc85ba644e954241320a835798be7653d61` + +## Changelog since v1.18.0-alpha.2 + +### Deprecation + +- Remove all the generators from kubectl run. 
It will now only create pods. Additionally, it deprecates all the flags that are no longer relevant. ([#87077](https://github.com/kubernetes/kubernetes/pull/87077), [@soltysh](https://github.com/soltysh)) [SIG Architecture, SIG CLI, and SIG Testing] +- kubeadm: kube-dns is deprecated and will not be supported in a future version ([#86574](https://github.com/kubernetes/kubernetes/pull/86574), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] + +### API Change + +- Add kubescheduler.config.k8s.io/v1alpha2 ([#87628](https://github.com/kubernetes/kubernetes/pull/87628), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- --enable-cadvisor-endpoints is now disabled by default. If you need access to the cAdvisor v1 Json API please enable it explicitly in the kubelet command line. Please note that this flag was deprecated in 1.15 and will be removed in 1.19. ([#87440](https://github.com/kubernetes/kubernetes/pull/87440), [@dims](https://github.com/dims)) [SIG Instrumentation, SIG Node, and SIG Testing] +- The following feature gates are removed, because the associated features were unconditionally enabled in previous releases: CustomResourceValidation, CustomResourceSubresources, CustomResourceWebhookConversion, CustomResourcePublishOpenAPI, CustomResourceDefaulting ([#87475](https://github.com/kubernetes/kubernetes/pull/87475), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] + +### Feature + +- Aggregation API will have alpha support for network proxy ([#87515](https://github.com/kubernetes/kubernetes/pull/87515), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] +- API request throttling (due to a high rate of requests) is now reported in client-go logs at log level 2. The messages are of the form + + Throttling request took 1.50705208s, request: GET: + + The presence of these messages may indicate to the administrator the need to tune the cluster accordingly. ([#87740](https://github.com/kubernetes/kubernetes/pull/87740), [@jennybuckley](https://github.com/jennybuckley)) [SIG API Machinery] +- kubeadm: reject a node joining the cluster if a node with the same name already exists ([#81056](https://github.com/kubernetes/kubernetes/pull/81056), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- disableAvailabilitySetNodes is added to avoid VM list for VMSS clusters. It should only be used when vmType is "vmss" and all the nodes (including masters) are VMSS virtual machines. ([#87685](https://github.com/kubernetes/kubernetes/pull/87685), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- The kubectl --dry-run flag now accepts the values 'client', 'server', and 'none', to support client-side and server-side dry-run strategies. The boolean and unset values for the --dry-run flag are deprecated and a value will be required in a future version. 
([#87580](https://github.com/kubernetes/kubernetes/pull/87580), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG CLI] +- Add support for pre-allocated hugepages for more than one page size ([#82820](https://github.com/kubernetes/kubernetes/pull/82820), [@odinuge](https://github.com/odinuge)) [SIG Apps] +- Update CNI version to v0.8.5 ([#78819](https://github.com/kubernetes/kubernetes/pull/78819), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, SIG Cluster Lifecycle, SIG Network, SIG Release, and SIG Testing] +- Skip default spreading scoring plugin for pods that define TopologySpreadConstraints ([#87566](https://github.com/kubernetes/kubernetes/pull/87566), [@skilxn-go](https://github.com/skilxn-go)) [SIG Scheduling] +- Added more details to taint toleration errors ([#87250](https://github.com/kubernetes/kubernetes/pull/87250), [@starizard](https://github.com/starizard)) [SIG Apps, and SIG Scheduling] +- Scheduler: Add DefaultBinder plugin ([#87430](https://github.com/kubernetes/kubernetes/pull/87430), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling, and SIG Testing] +- Kube-apiserver metrics will now include request counts, latencies, and response sizes for /healthz, /livez, and /readyz requests. ([#83598](https://github.com/kubernetes/kubernetes/pull/83598), [@jktomer](https://github.com/jktomer)) [SIG API Machinery] + +### Other (Bug, Cleanup or Flake) + +- Fix the masters rolling upgrade causing thundering herd of LISTs on etcd leading to control plane unavailability. ([#86430](https://github.com/kubernetes/kubernetes/pull/86430), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, SIG Node, and SIG Testing] +- `kubectl diff` now returns 1 only on diff finding changes, and >1 on kubectl errors. The "exit status code 1" message as also been muted. ([#87437](https://github.com/kubernetes/kubernetes/pull/87437), [@apelisse](https://github.com/apelisse)) [SIG CLI, and SIG Testing] +- To reduce chances of throttling, VM cache is set to nil when Azure node provisioning state is deleting ([#87635](https://github.com/kubernetes/kubernetes/pull/87635), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Fix regression in statefulset conversion which prevented applying a statefulset multiple times. ([#87706](https://github.com/kubernetes/kubernetes/pull/87706), [@liggitt](https://github.com/liggitt)) [SIG Apps, and SIG Testing] +- fixed two scheduler metrics (pending_pods and schedule_attempts_total) not being recorded ([#87692](https://github.com/kubernetes/kubernetes/pull/87692), [@everpeace](https://github.com/everpeace)) [SIG Scheduling] +- Resolved a performance issue in the node authorizer index maintenance. ([#87693](https://github.com/kubernetes/kubernetes/pull/87693), [@liggitt](https://github.com/liggitt)) [SIG Auth] +- Removed the 'client' label from apiserver_request_total. ([#87669](https://github.com/kubernetes/kubernetes/pull/87669), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, and SIG Instrumentation] +- `(*"k8s.io/client-go/rest".Request).{Do,DoRaw,Stream,Watch}` now require callers to pass a `context.Context` as an argument. The context is used for timeout and cancellation signaling and to pass supplementary information to round trippers in the wrapped transport chain. If you don't need any of this functionality, it is sufficient to pass a context created with `context.Background()` to these functions. 
The `(*"k8s.io/client-go/rest".Request).Context` method is removed now that all methods that execute a request accept a context directly. ([#87597](https://github.com/kubernetes/kubernetes/pull/87597), [@mikedanese](https://github.com/mikedanese)) [SIG API Machinery, SIG Apps, SIG Auth, SIG Autoscaling, SIG CLI, SIG Cloud Provider, SIG Cluster Lifecycle, SIG Instrumentation, SIG Network, SIG Node, SIG Scheduling, SIG Storage, and SIG Testing] +- For volumes that allow attaches across multiple nodes, attach and detach operations across different nodes are now executed in parallel. ([#87258](https://github.com/kubernetes/kubernetes/pull/87258), [@verult](https://github.com/verult)) [SIG Apps, SIG Node, and SIG Storage] +- kubeadm: apply further improvements to the tentative support for concurrent etcd member join. Fixes a bug where multiple members can receive the same hostname. Increase the etcd client dial timeout and retry timeout for add/remove/... operations. ([#87505](https://github.com/kubernetes/kubernetes/pull/87505), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Reverted a kubectl azure auth module change where oidc claim spn: prefix was omitted resulting a breaking behavior with existing Azure AD OIDC enabled api-server ([#87507](https://github.com/kubernetes/kubernetes/pull/87507), [@weinong](https://github.com/weinong)) [SIG API Machinery, SIG Auth, and SIG Cloud Provider] +- Update cri-tools to v1.17.0 ([#86305](https://github.com/kubernetes/kubernetes/pull/86305), [@saschagrunert](https://github.com/saschagrunert)) [SIG Cluster Lifecycle, and SIG Release] +- kubeadm: remove the deprecated CoreDNS feature-gate. It was set to "true" since v1.11 when the feature went GA. In v1.13 it was marked as deprecated and hidden from the CLI. ([#87400](https://github.com/kubernetes/kubernetes/pull/87400), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Shared informers are now more reliable in the face of network disruption. 
([#86015](https://github.com/kubernetes/kubernetes/pull/86015), [@squeed](https://github.com/squeed)) [SIG API Machinery] +- the CSR signing cert/key pairs will be reloaded from disk like the kube-apiserver cert/key pairs ([#86816](https://github.com/kubernetes/kubernetes/pull/86816), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, SIG Apps, and SIG Auth] +- "kubectl describe statefulsets.apps" prints garbage for rolling update partition ([#85846](https://github.com/kubernetes/kubernetes/pull/85846), [@phil9909](https://github.com/phil9909)) [SIG CLI] + + + + + +# v1.18.0-alpha.2 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.18.0-alpha.2 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes.tar.gz) | `7af83386b4b35353f0aa1bdaf73599eb08b1d1ca11ecc2c606854aff754db69f3cd3dc761b6d7fc86f01052f615ca53185f33dbf9e53b2f926b0f02fc103fbd3` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-src.tar.gz) | `a14b02a0a0bde97795a836a8f5897b0ee6b43e010e13e43dd4cca80a5b962a1ef3704eedc7916fed1c38ec663a71db48c228c91e5daacba7d9370df98c7ddfb6` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-darwin-386.tar.gz) | `427f214d47ded44519007de2ae87160c56c2920358130e474b768299751a9affcbc1b1f0f936c39c6138837bca2a97792a6700896976e98c4beee8a1944cfde1` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | `861fd81ac3bd45765575bedf5e002a2294aba48ef9e15980fc7d6783985f7d7fcde990ea0aef34690977a88df758722ec0a2e170d5dcc3eb01372e64e5439192` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-386.tar.gz) | `7d59b05d6247e2606a8321c72cd239713373d876dbb43b0fb7f1cb857fa6c998038b41eeed78d9eb67ce77b0b71776ceed428cce0f8d2203c5181b473e0bd86c` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | `7cdefb4e32bad9d2df5bb8e7e0a6f4dab2ae6b7afef5d801ac5c342d4effdeacd799081fa2dec699ecf549200786c7623c3176252010f12494a95240dd63311d` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | `6212bbf0fa1d01ced77dcca2c4b76b73956cd3c6b70e0701c1fe0df5ff37160835f6b84fa2481e0e6979516551b14d8232d1c72764a559a3652bfe2a1e7488ff` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | `1f0d9990700510165ee471acb2f88222f1b80e8f6deb351ce14cf50a70a9840fb99606781e416a13231c74b2bd7576981b5348171aa33b628d2666e366cd4629` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | `77e00ba12a32db81e96f8de84609de93f32c61bb3f53875a57496d213aa6d1b92c09ad5a6de240a78e1a5bf77fac587ff92874f34a10f8909ae08ca32fda45d2` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | `a39ec2044bed5a4570e9c83068e0fc0ce923ccffa44380f8bbc3247426beaff79c8a84613bcb58b05f0eb3afbc34c79fe3309aa2e0b81abcfd0aa04770e62e05` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-windows-386.tar.gz) | `1a0ab88f9b7e34b60ab31d5538e97202a256ad8b7b7ed5070cae5f2f12d5d4edeae615db7a34ebbe254004b6393c6b2480100b09e30e59c9139492a3019a596a` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | 
`1966eb5dfb78c1bc33aaa6389f32512e3aa92584250a0164182f3566c81d901b59ec78ee4e25df658bc1dd221b5a9527d6ce3b6c487ca3e3c0b319a077caa735` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | `f814d6a3872e4572aa4da297c29def4c1fad8eba0903946780b6bf9788c72b99d71085c5aef9e12c01133b26fa4563c1766ba724ad2a8af2670a24397951a94d` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | `56aa08225e546c92c2ff88ac57d3db7dd5e63640772ea72a429f080f7069827138cbc206f6f5fe3a0c01bfca043a9eda305ecdc1dcb864649114893e46b6dc84` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | `fb87128d905211ba097aa860244a376575ae2edbaca6e51402a24bc2964854b9b273e09df3d31a2bcffc91509f7eecb2118b183fb0e0eb544f33403fa235c274` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | `6d21fbf39b9d3a0df9642407d6f698fabdc809aca83af197bceb58a81b25846072f407f8fb7caae2e02dc90912e3e0f5894f062f91bcb69f8c2329625d3dfeb7` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | `ddcda4dc360ca97705f71bf2a18ddacd7b7ddf77535b62e699e97a1b2dd24843751313351d0112e238afe69558e8271eba4d27ab77bb67b4b9e3fbde6eec85c9` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | `78915a9bde35c70c67014f0cea8754849db4f6a84491a3ad9678fd3bc0203e43af5a63cfafe104ae1d56b05ce74893a87a6dcd008d7859e1af6b3bce65425b5d` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | `3218e811abcb0cb09d80742def339be3916db5e9bbc62c0dc8e6d87085f7e3d9eeed79dea081906f1de78ddd07b7e3acdbd7765fdb838d262bb35602fd1df106` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | `fa22de9c4440b8fb27f4e77a5a63c5e1c8aa8aa30bb79eda843b0f40498c21b8c0ad79fff1d841bb9fef53fe20da272506de9a86f81a0b36d028dbeab2e482ce` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | `bbda9b5cc66e8f13d235703b2a85e2c4f02fa16af047be4d27a3e198e11eb11706e4a0fbb6c20978c770b069cd4cd9894b661f09937df9d507411548c36576e0` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | `b2ed1eda013069adce2aac00b86d75b84e006cfce9bafac0b5a2bafcb60f8f2cb346b5ea44eafa72d777871abef1ea890eb3a2a05de28968f9316fa88886a8ed` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | `bd8eb23dba711f31b5148257076b1bbe9629f2a75de213b2c779bd5b29279e9bf22f8bde32f4bc814f4c0cc49e19671eb8b24f4105f0fe2c1490c4b78ec3c704` + +## Changelog since v1.18.0-alpha.1 + +### Other notable changes + +* Bump golang/mock version to v1.3.1 ([#87326](https://github.com/kubernetes/kubernetes/pull/87326), [@wawa0210](https://github.com/wawa0210)) +* fix a bug that orphan revision cannot be adopted and statefulset cannot be synced ([#86801](https://github.com/kubernetes/kubernetes/pull/86801), [@likakuli](https://github.com/likakuli)) +* Azure storage clients now suppress requests on throttling ([#87306](https://github.com/kubernetes/kubernetes/pull/87306), [@feiskyer](https://github.com/feiskyer)) +* Introduce Alpha field 
`Immutable` in both Secret and ConfigMap objects to mark their contents as immutable. The implementation is hidden behind feature gate `ImmutableEphemeralVolumes` (currently in Alpha stage). ([#86377](https://github.com/kubernetes/kubernetes/pull/86377), [@wojtek-t](https://github.com/wojtek-t)) +* EndpointSlices will now be enabled by default. A new `EndpointSliceProxying` feature gate determines if kube-proxy will use EndpointSlices; this is disabled by default. ([#86137](https://github.com/kubernetes/kubernetes/pull/86137), [@robscott](https://github.com/robscott)) +* kubeadm upgrades always persist the etcd backup for stacked etcd ([#86861](https://github.com/kubernetes/kubernetes/pull/86861), [@SataQiu](https://github.com/SataQiu)) +* Fix the bug where the PIP's DNS is deleted if the DNS label service annotation isn't set. ([#87246](https://github.com/kubernetes/kubernetes/pull/87246), [@nilo19](https://github.com/nilo19)) +* New flag `--show-hidden-metrics-for-version` in kube-controller-manager can be used to show all hidden metrics that were deprecated in the previous minor release. ([#85281](https://github.com/kubernetes/kubernetes/pull/85281), [@RainbowMango](https://github.com/RainbowMango)) +* Azure network and VM clients now suppress requests on throttling ([#87122](https://github.com/kubernetes/kubernetes/pull/87122), [@feiskyer](https://github.com/feiskyer)) +* `kubectl apply -f <directory> --prune -n <namespace>` should prune all resources not defined in the file in the CLI-specified namespace. ([#85613](https://github.com/kubernetes/kubernetes/pull/85613), [@MartinKaburu](https://github.com/MartinKaburu)) +* Fixes service account token admission error in clusters that do not run the service account token controller ([#87029](https://github.com/kubernetes/kubernetes/pull/87029), [@liggitt](https://github.com/liggitt)) +* CustomResourceDefinition status fields are no longer required for client validation when submitting manifests. ([#87213](https://github.com/kubernetes/kubernetes/pull/87213), [@hasheddan](https://github.com/hasheddan)) +* All apiservers log request lines in a more greppable format. ([#87203](https://github.com/kubernetes/kubernetes/pull/87203), [@lavalamp](https://github.com/lavalamp)) +* provider/azure: Network security groups can now be in a separate resource group. ([#87035](https://github.com/kubernetes/kubernetes/pull/87035), [@CecileRobertMichon](https://github.com/CecileRobertMichon)) +* Cleaned up the output from `kubectl describe CSINode <name>`. ([#85283](https://github.com/kubernetes/kubernetes/pull/85283), [@huffmanca](https://github.com/huffmanca)) +* Fixed the following ([#84265](https://github.com/kubernetes/kubernetes/pull/84265), [@bhagwat070919](https://github.com/bhagwat070919)) + * - AWS Cloud Provider attempts to delete LoadBalancer security group it didn’t provision + * - AWS Cloud Provider creates default LoadBalancer security group even if annotation [service.beta.kubernetes.io/aws-load-balancer-security-groups] is present +* kubelet: resource metrics endpoint `/metrics/resource/v1alpha1` as well as all metrics under this endpoint have been deprecated. 
([#86282](https://github.com/kubernetes/kubernetes/pull/86282), [@RainbowMango](https://github.com/RainbowMango)) + * Please convert to the following metrics emitted by endpoint `/metrics/resource`: + * - scrape_error --> scrape_error + * - node_cpu_usage_seconds_total --> node_cpu_usage_seconds + * - node_memory_working_set_bytes --> node_memory_working_set_bytes + * - container_cpu_usage_seconds_total --> container_cpu_usage_seconds + * - container_memory_working_set_bytes --> container_memory_working_set_bytes + * - scrape_error --> scrape_error +* You can now pass "--node-ip ::" to kubelet to indicate that it should autodetect an IPv6 address to use as the node's primary address. ([#85850](https://github.com/kubernetes/kubernetes/pull/85850), [@danwinship](https://github.com/danwinship)) +* kubeadm: support automatic retry after failing to pull image ([#86899](https://github.com/kubernetes/kubernetes/pull/86899), [@SataQiu](https://github.com/SataQiu)) +* TODO ([#87044](https://github.com/kubernetes/kubernetes/pull/87044), [@jennybuckley](https://github.com/jennybuckley)) +* Improved yaml parsing performance ([#85458](https://github.com/kubernetes/kubernetes/pull/85458), [@cjcullen](https://github.com/cjcullen)) +* Fixed a bug which could prevent a provider ID from ever being set for node if an error occurred determining the provider ID when the node was added. ([#87043](https://github.com/kubernetes/kubernetes/pull/87043), [@zjs](https://github.com/zjs)) +* fix a regression in kubenet that prevent pods to obtain ip addresses ([#85993](https://github.com/kubernetes/kubernetes/pull/85993), [@chendotjs](https://github.com/chendotjs)) +* Bind kube-dns containers to linux nodes to avoid Windows scheduling ([#83358](https://github.com/kubernetes/kubernetes/pull/83358), [@wawa0210](https://github.com/wawa0210)) +* The following features are unconditionally enabled and the corresponding `--feature-gates` flags have been removed: `PodPriority`, `TaintNodesByCondition`, `ResourceQuotaScopeSelectors` and `ScheduleDaemonSetPods` ([#86210](https://github.com/kubernetes/kubernetes/pull/86210), [@draveness](https://github.com/draveness)) +* Bind dns-horizontal containers to linux nodes to avoid Windows scheduling on kubernetes cluster includes linux nodes and windows nodes ([#83364](https://github.com/kubernetes/kubernetes/pull/83364), [@wawa0210](https://github.com/wawa0210)) +* fix kubectl annotate error when local=true is set ([#86952](https://github.com/kubernetes/kubernetes/pull/86952), [@zhouya0](https://github.com/zhouya0)) +* Bug fixes: ([#84163](https://github.com/kubernetes/kubernetes/pull/84163), [@david-tigera](https://github.com/david-tigera)) + * Make sure we include latest packages node #351 ([@caseydavenport](https://github.com/caseydavenport)) +* fix kuebctl apply set-last-applied namespaces error ([#86474](https://github.com/kubernetes/kubernetes/pull/86474), [@zhouya0](https://github.com/zhouya0)) +* Add VolumeBinder method to FrameworkHandle interface, which allows user to get the volume binder when implementing scheduler framework plugins. ([#86940](https://github.com/kubernetes/kubernetes/pull/86940), [@skilxn-go](https://github.com/skilxn-go)) +* elasticsearch supports automatically setting the advertise address ([#85944](https://github.com/kubernetes/kubernetes/pull/85944), [@SataQiu](https://github.com/SataQiu)) +* If a serving certificates param specifies a name that is an IP for an SNI certificate, it will have priority for replying to server connections. 
([#85308](https://github.com/kubernetes/kubernetes/pull/85308), [@deads2k](https://github.com/deads2k)) +* kube-proxy: Added dual-stack IPv4/IPv6 support to the iptables proxier. ([#82462](https://github.com/kubernetes/kubernetes/pull/82462), [@vllry](https://github.com/vllry)) +* Azure VMSS/VMSSVM clients now suppress requests on throttling ([#86740](https://github.com/kubernetes/kubernetes/pull/86740), [@feiskyer](https://github.com/feiskyer)) +* New metric kubelet_pleg_last_seen_seconds to aid diagnosis of PLEG not healthy issues. ([#86251](https://github.com/kubernetes/kubernetes/pull/86251), [@bboreham](https://github.com/bboreham)) +* For subprotocol negotiation, both client and server protocol is required now. ([#86646](https://github.com/kubernetes/kubernetes/pull/86646), [@tedyu](https://github.com/tedyu)) +* kubeadm: use bind-address option to configure the kube-controller-manager and kube-scheduler http probes ([#86493](https://github.com/kubernetes/kubernetes/pull/86493), [@aojea](https://github.com/aojea)) +* Marked scheduler's metrics scheduling_algorithm_predicate_evaluation_seconds and ([#86584](https://github.com/kubernetes/kubernetes/pull/86584), [@xiaoanyunfei](https://github.com/xiaoanyunfei)) + * scheduling_algorithm_priority_evaluation_seconds as deprecated. Those are replaced by framework_extension_point_duration_seconds[extenstion_point="Filter"] and framework_extension_point_duration_seconds[extenstion_point="Score"] respectively. +* Marked scheduler's scheduling_duration_seconds Summary metric as deprecated ([#86586](https://github.com/kubernetes/kubernetes/pull/86586), [@xiaoanyunfei](https://github.com/xiaoanyunfei)) +* Add instructions about how to bring up e2e test cluster ([#85836](https://github.com/kubernetes/kubernetes/pull/85836), [@YangLu1031](https://github.com/YangLu1031)) +* If a required flag is not provided to a command, the user will only see the required flag error message, instead of the entire usage menu. ([#86693](https://github.com/kubernetes/kubernetes/pull/86693), [@sallyom](https://github.com/sallyom)) +* kubeadm: tolerate whitespace when validating certificate authority PEM data in kubeconfig files ([#86705](https://github.com/kubernetes/kubernetes/pull/86705), [@neolit123](https://github.com/neolit123)) +* kubeadm: add support for the "ci/k8s-master" version label as a replacement for "ci-cross/*", which no longer exists. ([#86609](https://github.com/kubernetes/kubernetes/pull/86609), [@Pensu](https://github.com/Pensu)) +* Fix EndpointSlice controller race condition and ensure that it handles external changes to EndpointSlices. ([#85703](https://github.com/kubernetes/kubernetes/pull/85703), [@robscott](https://github.com/robscott)) +* Fix nil pointer dereference in azure cloud provider ([#85975](https://github.com/kubernetes/kubernetes/pull/85975), [@ldx](https://github.com/ldx)) +* fix: azure disk could not mounted on Standard_DC4s/DC2s instances ([#86612](https://github.com/kubernetes/kubernetes/pull/86612), [@andyzhangx](https://github.com/andyzhangx)) +* Fixes v1.17.0 regression in --service-cluster-ip-range handling with IPv4 ranges larger than 65536 IP addresses ([#86534](https://github.com/kubernetes/kubernetes/pull/86534), [@liggitt](https://github.com/liggitt)) +* Adds back support for AlwaysCheckAllPredicates flag. ([#86496](https://github.com/kubernetes/kubernetes/pull/86496), [@ahg-g](https://github.com/ahg-g)) +* Azure global rate limit is switched to per-client. 
A set of new rate limit configuration options are introduced, including routeRateLimit, SubnetsRateLimit, InterfaceRateLimit, RouteTableRateLimit, LoadBalancerRateLimit, PublicIPAddressRateLimit, SecurityGroupRateLimit, VirtualMachineRateLimit, StorageAccountRateLimit, DiskRateLimit, SnapshotRateLimit, VirtualMachineScaleSetRateLimit and VirtualMachineSizeRateLimit. ([#86515](https://github.com/kubernetes/kubernetes/pull/86515), [@feiskyer](https://github.com/feiskyer)) + * The original rate limit options are used as the default values for these new clients' rate limiters. +* Fix issue [#85805](https://github.com/kubernetes/kubernetes/pull/85805) where a resource was not found in the Azure cloud provider when the load balancer is specified in another resource group. ([#86502](https://github.com/kubernetes/kubernetes/pull/86502), [@levimm](https://github.com/levimm)) +* `AlwaysCheckAllPredicates` is deprecated in scheduler Policy API. ([#86369](https://github.com/kubernetes/kubernetes/pull/86369), [@Huang-Wei](https://github.com/Huang-Wei)) +* Kubernetes KMS provider for data encryption now supports disabling the in-memory data encryption key (DEK) cache by setting cachesize to a negative value. ([#86294](https://github.com/kubernetes/kubernetes/pull/86294), [@enj](https://github.com/enj)) +* option `preConfiguredBackendPoolLoadBalancerTypes` is added to azure cloud provider for the pre-configured load balancers, possible values: `""`, `"internal"`, `"external"`, `"all"` ([#86338](https://github.com/kubernetes/kubernetes/pull/86338), [@gossion](https://github.com/gossion)) +* Promote StartupProbe to beta for 1.18 release ([#83437](https://github.com/kubernetes/kubernetes/pull/83437), [@matthyx](https://github.com/matthyx)) +* Fixes issue where AAD token obtained by kubectl is incompatible with on-behalf-of flow and oidc. ([#86412](https://github.com/kubernetes/kubernetes/pull/86412), [@weinong](https://github.com/weinong)) + * The audience claim before this fix has "spn:" prefix. After this fix, "spn:" prefix is omitted. 
+* change CounterVec to Counter about PLEGDiscardEvent ([#86167](https://github.com/kubernetes/kubernetes/pull/86167), [@yiyang5055](https://github.com/yiyang5055)) +* hollow-node do not use remote CRI anymore ([#86425](https://github.com/kubernetes/kubernetes/pull/86425), [@jkaniuk](https://github.com/jkaniuk)) +* hollow-node use fake CRI ([#85879](https://github.com/kubernetes/kubernetes/pull/85879), [@gongguan](https://github.com/gongguan)) + + + +# v1.18.0-alpha.1 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.18.0-alpha.1 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes.tar.gz) | `0c4904efc7f4f1436119c91dc1b6c93b3bd9c7490362a394bff10099c18e1e7600c4f6e2fcbaeb2d342a36c4b20692715cf7aa8ada6dfac369f44cc9292529d7` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-src.tar.gz) | `0a50fc6816c730ca5ae4c4f26d5ad7b049607d29f6a782a4e5b4b05ac50e016486e269dafcc6a163bd15e1a192780a9a987f1bb959696993641c603ed1e841c8` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-darwin-386.tar.gz) | `c6d75f7f3f20bef17fc7564a619b54e6f4a673d041b7c9ec93663763a1cc8dd16aecd7a2af70e8d54825a0eecb9762cf2edfdade840604c9a32ecd9cc2d5ac3c` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | `ca1f19db289933beace6daee6fc30af19b0e260634ef6e89f773464a05e24551c791be58b67da7a7e2a863e28b7cbcc7b24b6b9bf467113c26da76ac8f54fdb6` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-386.tar.gz) | `af2e673653eb39c3f24a54efc68e1055f9258bdf6cf8fea42faf42c05abefc2da853f42faac3b166c37e2a7533020b8993b98c0d6d80a5b66f39e91d8ae0a3fb` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | `9009032c3f94ac8a78c1322a28e16644ce3b20989eb762685a1819148aed6e883ca8e1200e5ec37ec0853f115c67e09b5d697d6cf5d4c45f653788a2d3a2f84f` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | `afba9595b37a3f2eead6e3418573f7ce093b55467dce4da0b8de860028576b96b837a2fd942f9c276e965da694e31fbd523eeb39aefb902d7e7a2f169344d271` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | `04fc3b2fe3f271807f0bc6c61be52456f26a1af904964400be819b7914519edc72cbab9afab2bb2e2ba1a108963079367cedfb253c9364c0175d1fcc64d52f5c` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | `04c7edab874b33175ff7bebfff5b3a032bc6eb088fcd7387ffcd5b3fa71395ca8c5f9427b7ddb496e92087dfdb09eaf14a46e9513071d3bd73df76c182922d38` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | `499287dbbc33399a37b9f3b35e0124ff20b17b6619f25a207ee9c606ef261af61fa0c328dde18c7ce2d3dfb2eea2376623bc3425d16bc8515932a68b44f8bede` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-windows-386.tar.gz) | `cf84aeddf00f126fb13c0436b116dd0464a625659e44c84bf863517db0406afb4eefd86807e7543c4f96006d275772fbf66214ae7d582db5865c84ac3545b3e6` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | `69f20558ccd5cd6dbaccf29307210db4e687af21f6d71f68c69d3a39766862686ac1333ab8a5012010ca5c5e3c11676b45e498e3d4c38773da7d24bcefc46d95` + +### 
Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | `3f29df2ce904a0f10db4c1d7a425a36f420867b595da3fa158ae430bfead90def2f2139f51425b349faa8a9303dcf20ea01657cb6ea28eb6ad64f5bb32ce2ed1` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | `4a21073b2273d721fbf062c254840be5c8471a010bcc0c731b101729e36e61f637cb7fcb521a22e8d24808510242f4fff8a6ca40f10e9acd849c2a47bf135f27` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | `7f1cb6d721bedc90e28b16f99bea7e59f5ad6267c31ef39c14d34db6ad6aad87ee51d2acdd01b6903307c1c00b58ff6b785a03d5a491cc3f8a4df9a1d76d406c` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | `8f2b552030b5274b1c2c7c166eacd5a14b0c6ca0f23042f4c52efe87e22a167ba4460dcd66615a5ecd26d9e88336be1fb555548392e70efe59070dd2c314da98` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | `8d9f2c96f66edafb7c8b3aa90960d29b41471743842aede6b47b3b2e61f4306fb6fc60b9ebc18820c547ee200bfedfe254c1cde962d447c791097dd30e79abdb` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | `84194cb081d1502f8ca68143569f9707d96f1a28fcf0c574ebd203321463a8b605f67bb2a365eaffb14fbeb8d55c8d3fa17431780b242fb9cba3a14426a0cd4a` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | `0091e108ab94fd8683b89c597c4fdc2fbf4920b007cfcd5297072c44bc3a230dfe5ceed16473e15c3e6cf5edab866d7004b53edab95be0400cc60e009eee0d9d` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | `b7e85682cc2848a35d52fd6f01c247f039ee1b5dd03345713821ea10a7fa9939b944f91087baae95eaa0665d11857c1b81c454f720add077287b091f9f19e5d3` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | `cd1f0849e9c62b5d2c93ff0cebf58843e178d8a88317f45f76de0db5ae020b8027e9503a5fccc96445184e0d77ecdf6f57787176ac31dbcbd01323cd0a190cbb` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | `e1e697a34424c75d75415b613b81c8af5f64384226c5152d869f12fd7db1a3e25724975b73fa3d89e56e4bf78d5fd07e68a709ba8566f53691ba6a88addc79ea` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.18.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | `c725a19a4013c74e22383ad3fb4cb799b3e161c4318fdad066daf806730a89bc3be3ff0f75678d02b3cbe52b2ef0c411c0639968e200b9df470be40bb2c015cc` + +## Changelog since v1.17.0 + +### Action Required + +* action required ([#85363](https://github.com/kubernetes/kubernetes/pull/85363), [@immutableT](https://github.com/immutableT)) + * 1. Currently, if users were to explicitly specify CacheSize of 0 for KMS provider, they would end-up with a provider that caches up to 1000 keys. This PR changes this behavior. + * Post this PR, when users supply 0 for CacheSize this will result in a validation error. + * 2. CacheSize type was changed from int32 to *int32. This allows defaulting logic to differentiate between cases where users explicitly supplied 0 vs. not supplied any value. + * 3. 
KMS Provider's endpoint (path to Unix socket) is now validated when the EncryptionConfiguration files is loaded. This used to be handled by the GRPCService. + +### Other notable changes + +* fix: azure data disk should use same key as os disk by default ([#86351](https://github.com/kubernetes/kubernetes/pull/86351), [@andyzhangx](https://github.com/andyzhangx)) +* New flag `--show-hidden-metrics-for-version` in kube-proxy can be used to show all hidden metrics that deprecated in the previous minor release. ([#85279](https://github.com/kubernetes/kubernetes/pull/85279), [@RainbowMango](https://github.com/RainbowMango)) +* Remove cluster-monitoring addon ([#85512](https://github.com/kubernetes/kubernetes/pull/85512), [@serathius](https://github.com/serathius)) +* Changed core_pattern on COS nodes to be an absolute path. ([#86329](https://github.com/kubernetes/kubernetes/pull/86329), [@mml](https://github.com/mml)) +* Track mount operations as uncertain if operation fails with non-final error ([#82492](https://github.com/kubernetes/kubernetes/pull/82492), [@gnufied](https://github.com/gnufied)) +* add kube-proxy flags --ipvs-tcp-timeout, --ipvs-tcpfin-timeout, --ipvs-udp-timeout to configure IPVS connection timeouts. ([#85517](https://github.com/kubernetes/kubernetes/pull/85517), [@andrewsykim](https://github.com/andrewsykim)) +* The sample-apiserver aggregated conformance test has updated to use the Kubernetes v1.17.0 sample apiserver ([#84735](https://github.com/kubernetes/kubernetes/pull/84735), [@liggitt](https://github.com/liggitt)) +* The underlying format of the `CPUManager` state file has changed. Upgrades should be seamless, but any third-party tools that rely on reading the previous format need to be updated. ([#84462](https://github.com/kubernetes/kubernetes/pull/84462), [@klueska](https://github.com/klueska)) +* kubernetes will try to acquire the iptables lock every 100 msec during 5 seconds instead of every second. This specially useful for environments using kube-proxy in iptables mode with a high churn rate of services. ([#85771](https://github.com/kubernetes/kubernetes/pull/85771), [@aojea](https://github.com/aojea)) +* Fixed a panic in the kubelet cleaning up pod volumes ([#86277](https://github.com/kubernetes/kubernetes/pull/86277), [@tedyu](https://github.com/tedyu)) +* azure cloud provider cache TTL is configurable, list of the azure cloud provider is as following: ([#86266](https://github.com/kubernetes/kubernetes/pull/86266), [@zqingqing1](https://github.com/zqingqing1)) + * - "availabilitySetNodesCacheTTLInSeconds" + * - "vmssCacheTTLInSeconds" + * - "vmssVirtualMachinesCacheTTLInSeconds" + * - "vmCacheTTLInSeconds" + * - "loadBalancerCacheTTLInSeconds" + * - "nsgCacheTTLInSeconds" + * - "routeTableCacheTTLInSeconds" +* Fixes kube-proxy when EndpointSlice feature gate is enabled on Windows. ([#86016](https://github.com/kubernetes/kubernetes/pull/86016), [@robscott](https://github.com/robscott)) +* Fixes wrong validation result of NetworkPolicy PolicyTypes ([#85747](https://github.com/kubernetes/kubernetes/pull/85747), [@tnqn](https://github.com/tnqn)) +* Fixes an issue with kubelet-reported pod status on deleted/recreated pods. 
([#86320](https://github.com/kubernetes/kubernetes/pull/86320), [@liggitt](https://github.com/liggitt)) +* kube-apiserver no longer serves the following deprecated APIs: ([#85903](https://github.com/kubernetes/kubernetes/pull/85903), [@liggitt](https://github.com/liggitt)) + * All resources under `apps/v1beta1` and `apps/v1beta2` - use `apps/v1` instead + * `daemonsets`, `deployments`, `replicasets` resources under `extensions/v1beta1` - use `apps/v1` instead + * `networkpolicies` resources under `extensions/v1beta1` - use `networking.k8s.io/v1` instead + * `podsecuritypolicies` resources under `extensions/v1beta1` - use `policy/v1beta1` instead +* kubeadm: fix potential panic when executing "kubeadm reset" with a corrupted kubelet.conf file ([#86216](https://github.com/kubernetes/kubernetes/pull/86216), [@neolit123](https://github.com/neolit123)) +* Fix a bug in port-forward: named port not working with service ([#85511](https://github.com/kubernetes/kubernetes/pull/85511), [@oke-py](https://github.com/oke-py)) +* kube-proxy no longer modifies shared EndpointSlices. ([#86092](https://github.com/kubernetes/kubernetes/pull/86092), [@robscott](https://github.com/robscott)) +* allow for configuration of CoreDNS replica count ([#85837](https://github.com/kubernetes/kubernetes/pull/85837), [@pickledrick](https://github.com/pickledrick)) +* Fixed a regression where the kubelet would fail to update the ready status of pods. ([#84951](https://github.com/kubernetes/kubernetes/pull/84951), [@tedyu](https://github.com/tedyu)) +* Resolves performance regression in client-go discovery clients constructed using `NewDiscoveryClientForConfig` or `NewDiscoveryClientForConfigOrDie`. ([#86168](https://github.com/kubernetes/kubernetes/pull/86168), [@liggitt](https://github.com/liggitt)) +* Make error message and service event message more clear ([#86078](https://github.com/kubernetes/kubernetes/pull/86078), [@feiskyer](https://github.com/feiskyer)) +* e2e-test-framework: add e2e test namespace dump if all tests succeed but the cleanup fails. ([#85542](https://github.com/kubernetes/kubernetes/pull/85542), [@schrodit](https://github.com/schrodit)) +* SafeSysctlWhitelist: add net.ipv4.ping_group_range ([#85463](https://github.com/kubernetes/kubernetes/pull/85463), [@AkihiroSuda](https://github.com/AkihiroSuda)) +* kubelet: the metric process_start_time_seconds be marked as with the ALPHA stability level. ([#85446](https://github.com/kubernetes/kubernetes/pull/85446), [@RainbowMango](https://github.com/RainbowMango)) +* API request throttling (due to a high rate of requests) is now reported in the kubelet (and other component) logs by default. The messages are of the form ([#80649](https://github.com/kubernetes/kubernetes/pull/80649), [@RobertKrawitz](https://github.com/RobertKrawitz)) + * Throttling request took 1.50705208s, request: GET: + * The presence of large numbers of these messages, particularly with long delay times, may indicate to the administrator the need to tune the cluster accordingly. +* Fix API Server potential memory leak issue in processing watch request. ([#85410](https://github.com/kubernetes/kubernetes/pull/85410), [@answer1991](https://github.com/answer1991)) +* Verify kubelet & kube-proxy can recover after being killed on Windows nodes ([#84886](https://github.com/kubernetes/kubernetes/pull/84886), [@YangLu1031](https://github.com/YangLu1031)) +* Fixed an issue that the scheduler only returns the first failure reason. 
([#86022](https://github.com/kubernetes/kubernetes/pull/86022), [@Huang-Wei](https://github.com/Huang-Wei)) +* kubectl/drain: add skip-wait-for-delete-timeout option. ([#85577](https://github.com/kubernetes/kubernetes/pull/85577), [@michaelgugino](https://github.com/michaelgugino)) + * If pod DeletionTimestamp older than N seconds, skip waiting for the pod. Seconds must be greater than 0 to skip. +* Following metrics have been turned off: ([#83841](https://github.com/kubernetes/kubernetes/pull/83841), [@RainbowMango](https://github.com/RainbowMango)) + * - kubelet_pod_worker_latency_microseconds + * - kubelet_pod_start_latency_microseconds + * - kubelet_cgroup_manager_latency_microseconds + * - kubelet_pod_worker_start_latency_microseconds + * - kubelet_pleg_relist_latency_microseconds + * - kubelet_pleg_relist_interval_microseconds + * - kubelet_eviction_stats_age_microseconds + * - kubelet_runtime_operations + * - kubelet_runtime_operations_latency_microseconds + * - kubelet_runtime_operations_errors + * - kubelet_device_plugin_registration_count + * - kubelet_device_plugin_alloc_latency_microseconds + * - kubelet_docker_operations + * - kubelet_docker_operations_latency_microseconds + * - kubelet_docker_operations_errors + * - kubelet_docker_operations_timeout + * - network_plugin_operations_latency_microseconds +* - Renamed Kubelet metric certificate_manager_server_expiration_seconds to certificate_manager_server_ttl_seconds and changed to report the second until expiration at read time rather than absolute time of expiry. ([#85874](https://github.com/kubernetes/kubernetes/pull/85874), [@sambdavidson](https://github.com/sambdavidson)) + * - Improved accuracy of Kubelet metric rest_client_exec_plugin_ttl_seconds. +* Bind metadata-agent containers to linux nodes to avoid Windows scheduling on kubernetes cluster includes linux nodes and windows nodes ([#83363](https://github.com/kubernetes/kubernetes/pull/83363), [@wawa0210](https://github.com/wawa0210)) +* Bind metrics-server containers to linux nodes to avoid Windows scheduling on kubernetes cluster includes linux nodes and windows nodes ([#83362](https://github.com/kubernetes/kubernetes/pull/83362), [@wawa0210](https://github.com/wawa0210)) +* During initialization phase (preflight), kubeadm now verifies the presence of the conntrack executable ([#85857](https://github.com/kubernetes/kubernetes/pull/85857), [@hnanni](https://github.com/hnanni)) +* VMSS cache is added so that less chances of VMSS GET throttling ([#85885](https://github.com/kubernetes/kubernetes/pull/85885), [@nilo19](https://github.com/nilo19)) +* Update go-winio module version from 0.4.11 to 0.4.14 ([#85739](https://github.com/kubernetes/kubernetes/pull/85739), [@wawa0210](https://github.com/wawa0210)) +* Fix LoadBalancer rule checking so that no unexpected LoadBalancer updates are made ([#85990](https://github.com/kubernetes/kubernetes/pull/85990), [@feiskyer](https://github.com/feiskyer)) +* kubectl drain node --dry-run will list pods that would be evicted or deleted ([#82660](https://github.com/kubernetes/kubernetes/pull/82660), [@sallyom](https://github.com/sallyom)) +* Windows nodes on GCE can use TPM-based authentication to the master. ([#85466](https://github.com/kubernetes/kubernetes/pull/85466), [@pjh](https://github.com/pjh)) +* kubectl/drain: add disable-eviction option. ([#85571](https://github.com/kubernetes/kubernetes/pull/85571), [@michaelgugino](https://github.com/michaelgugino)) + * Force drain to use delete, even if eviction is supported. 
This will bypass checking PodDisruptionBudgets, and should be used with caution. +* kubeadm now errors out whenever a not supported component config version is supplied for the kubelet and kube-proxy ([#85639](https://github.com/kubernetes/kubernetes/pull/85639), [@rosti](https://github.com/rosti)) +* Fixed issue with addon-resizer using deprecated extensions APIs ([#85793](https://github.com/kubernetes/kubernetes/pull/85793), [@bskiba](https://github.com/bskiba)) +* Includes FSType when describing CSI persistent volumes. ([#85293](https://github.com/kubernetes/kubernetes/pull/85293), [@huffmanca](https://github.com/huffmanca)) +* kubelet now exports a "server_expiration_renew_failure" and "client_expiration_renew_failure" metric counter if the certificate rotations cannot be performed. ([#84614](https://github.com/kubernetes/kubernetes/pull/84614), [@rphillips](https://github.com/rphillips)) +* kubeadm: don't write the kubelet environment file on "upgrade apply" ([#85412](https://github.com/kubernetes/kubernetes/pull/85412), [@boluisa](https://github.com/boluisa)) +* fix azure file AuthorizationFailure ([#85475](https://github.com/kubernetes/kubernetes/pull/85475), [@andyzhangx](https://github.com/andyzhangx)) +* Resolved regression in admission, authentication, and authorization webhook performance in v1.17.0-rc.1 ([#85810](https://github.com/kubernetes/kubernetes/pull/85810), [@liggitt](https://github.com/liggitt)) +* kubeadm: uses the apiserver AdvertiseAddress IP family to choose the etcd endpoint IP family for non external etcd clusters ([#85745](https://github.com/kubernetes/kubernetes/pull/85745), [@aojea](https://github.com/aojea)) +* kubeadm: Forward cluster name to the controller-manager arguments ([#85817](https://github.com/kubernetes/kubernetes/pull/85817), [@ereslibre](https://github.com/ereslibre)) +* Fixed "requested device X but found Y" attach error on AWS. ([#85675](https://github.com/kubernetes/kubernetes/pull/85675), [@jsafrane](https://github.com/jsafrane)) +* addons: elasticsearch discovery supports IPv6 ([#85543](https://github.com/kubernetes/kubernetes/pull/85543), [@SataQiu](https://github.com/SataQiu)) +* kubeadm: retry `kubeadm-config` ConfigMap creation or mutation if the apiserver is not responding. This will improve resiliency when joining new control plane nodes. ([#85763](https://github.com/kubernetes/kubernetes/pull/85763), [@ereslibre](https://github.com/ereslibre)) +* Update Cluster Autoscaler to 1.17.0; changelog: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.17.0 ([#85610](https://github.com/kubernetes/kubernetes/pull/85610), [@losipiuk](https://github.com/losipiuk)) +* Filter published OpenAPI schema by making nullable, required fields non-required in order to avoid kubectl to wrongly reject null values. ([#85722](https://github.com/kubernetes/kubernetes/pull/85722), [@sttts](https://github.com/sttts)) +* kubectl set resources will no longer return an error if passed an empty change for a resource. ([#85490](https://github.com/kubernetes/kubernetes/pull/85490), [@sallyom](https://github.com/sallyom)) + * kubectl set subject will no longer return an error if passed an empty change for a resource. 
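The `kubectl drain` notes above (`--dry-run` listing, `skip-wait-for-delete-timeout`, and `disable-eviction`) can be combined on the command line. The following is a minimal sketch, not taken from the release notes; it assumes a hypothetical node named `node-1` and a kubectl build that carries these flags:

```shell
# Hypothetical node name; adjust to your cluster.
NODE=node-1

# List the pods that would be evicted or deleted, without draining anything.
# (--dry-run=client is used here because the boolean form is deprecated, per the note above.)
kubectl drain "$NODE" --ignore-daemonsets --dry-run=client

# Skip waiting for pods whose DeletionTimestamp is already older than 60 seconds.
kubectl drain "$NODE" --ignore-daemonsets --skip-wait-for-delete-timeout=60

# Bypass the eviction API (and PodDisruptionBudget checks) and delete pods directly.
# The note above warns that this should be used with caution.
kubectl drain "$NODE" --ignore-daemonsets --disable-eviction
```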
+* kube-apiserver: fixed a conflict error encountered attempting to delete a pod with gracePeriodSeconds=0 and a resourceVersion precondition ([#85516](https://github.com/kubernetes/kubernetes/pull/85516), [@michaelgugino](https://github.com/michaelgugino)) +* kubeadm: add a upgrade health check that deploys a Job ([#81319](https://github.com/kubernetes/kubernetes/pull/81319), [@neolit123](https://github.com/neolit123)) +* kubeadm: make sure images are pre-pulled even if a tag did not change but their contents changed ([#85603](https://github.com/kubernetes/kubernetes/pull/85603), [@bart0sh](https://github.com/bart0sh)) +* kube-apiserver: Fixes a bug that hidden metrics can not be enabled by the command-line option `--show-hidden-metrics-for-version`. ([#85444](https://github.com/kubernetes/kubernetes/pull/85444), [@RainbowMango](https://github.com/RainbowMango)) +* kubeadm now supports automatic calculations of dual-stack node cidr masks to kube-controller-manager. ([#85609](https://github.com/kubernetes/kubernetes/pull/85609), [@Arvinderpal](https://github.com/Arvinderpal)) +* Fix bug where EndpointSlice controller would attempt to modify shared objects. ([#85368](https://github.com/kubernetes/kubernetes/pull/85368), [@robscott](https://github.com/robscott)) +* Use context to check client closed instead of http.CloseNotifier in processing watch request which will reduce 1 goroutine for each request if proto is HTTP/2.x . ([#85408](https://github.com/kubernetes/kubernetes/pull/85408), [@answer1991](https://github.com/answer1991)) +* kubeadm: reset raises warnings if it cannot delete folders ([#85265](https://github.com/kubernetes/kubernetes/pull/85265), [@SataQiu](https://github.com/SataQiu)) +* Wait for kubelet & kube-proxy to be ready on Windows node within 10s ([#85228](https://github.com/kubernetes/kubernetes/pull/85228), [@YangLu1031](https://github.com/YangLu1031)) diff --git a/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md b/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md index 3cb90383c7abb..123271be44e80 100644 --- a/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md +++ b/content/en/docs/tasks/access-application-cluster/connecting-frontend-backend.md @@ -39,7 +39,7 @@ frontend and backend are connected using a Kubernetes {{% capture lessoncontent %}} -### Creating the backend using a Deployment +## Creating the backend using a Deployment The backend is a simple hello greeter microservice. Here is the configuration file for the backend Deployment: @@ -95,7 +95,7 @@ Events: ... ``` -### Creating the backend Service object +## Creating the backend Service object The key to connecting a frontend to a backend is the backend Service. A Service creates a persistent IP address and DNS name entry @@ -119,7 +119,7 @@ kubectl apply -f https://k8s.io/examples/service/access/hello-service.yaml At this point, you have a backend Deployment running, and you have a Service that can route traffic to it. -### Creating the frontend +## Creating the frontend Now that you have your backend, you can create a frontend that connects to the backend. The frontend connects to the backend worker Pods by using the DNS name @@ -158,7 +158,7 @@ be to use a so that you can change the configuration more easily. 
{{< /note >}} -### Interact with the frontend Service +## Interact with the frontend Service Once you’ve created a Service of type LoadBalancer, you can use this command to find the external IP: @@ -186,7 +186,7 @@ frontend LoadBalancer 10.51.252.116 XXX.XXX.XXX.XXX 80/TCP 1m That IP can now be used to interact with the `frontend` service from outside the cluster. -### Send traffic through the frontend +## Send traffic through the frontend The frontend and backends are now connected. You can hit the endpoint by using the curl command on the external IP of your frontend Service. diff --git a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md index 8ea504bd4aac7..77903196162df 100644 --- a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md +++ b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md @@ -7,11 +7,7 @@ weight: 100 {{% capture overview %}} An [Ingress](/docs/concepts/services-networking/ingress/) is an API object that defines rules which allow external access -to services in a cluster. An [Ingress controller](/docs/concepts/services-networking/ingress-controllers/) fulfills the rules set in the Ingress. - -{{< caution >}} -For the Ingress resource to work, the cluster **must** also have an Ingress controller running. -{{< /caution >}} +to services in a cluster. An [Ingress controller](/docs/concepts/services-networking/ingress-controllers/) fulfills the rules set in the Ingress. This page shows you how to set up a simple Ingress which routes requests to Service web or web2 depending on the HTTP URI. diff --git a/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md index 8b4480de2f0fa..b3fb886d1143a 100644 --- a/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md +++ b/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -23,7 +23,7 @@ In this exercise you will use kubectl to fetch all of the Pods running in a cluster, and format the output to pull out the list of Containers for each. -## List all Containers in all namespaces +## List all Container images in all namespaces - Fetch all Pods in all namespaces using `kubectl get pods --all-namespaces` - Format the output to include only the list of Container image names @@ -68,7 +68,7 @@ the `.items[*]` portion of the path should be omitted because a single Pod is returned instead of a list of items. {{< /note >}} -## List Containers by Pod +## List Container images by Pod The formatting can be controlled further by using the `range` operation to iterate over elements individually. @@ -78,7 +78,7 @@ kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\n"}{.metadata sort ``` -## List Containers filtering by Pod label +## List Container images filtering by Pod label To target only Pods matching a specific label, use the -l flag. The following matches only Pods with labels matching `app=nginx`. @@ -87,7 +87,7 @@ following matches only Pods with labels matching `app=nginx`. kubectl get pods --all-namespaces -o=jsonpath="{..image}" -l app=nginx ``` -## List Containers filtering by Pod namespace +## List Container images filtering by Pod namespace To target only pods in a specific namespace, use the namespace flag. The following matches only Pods in the `kube-system` namespace. 
@@ -96,7 +96,7 @@ following matches only Pods in the `kube-system` namespace. kubectl get pods --namespace kube-system -o jsonpath="{..image}" ``` -## List Containers using a go-template instead of jsonpath +## List Container images using a go-template instead of jsonpath As an alternative to jsonpath, Kubectl supports using [go-templates](https://golang.org/pkg/text/template/) for formatting the output: diff --git a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index 4a3b201ff37b4..fc24022d0c6aa 100644 --- a/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -2,6 +2,7 @@ title: Use Port Forwarding to Access Applications in a Cluster content_template: templates/task weight: 40 +min-kubernetes-server-version: v1.10 --- {{% capture overview %}} @@ -26,104 +27,157 @@ for database debugging. ## Creating Redis deployment and service -1. Create a Redis deployment: +1. Create a Deployment that runs Redis: - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-deployment.yaml + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-deployment.yaml + ``` The output of a successful command verifies that the deployment was created: - deployment.apps/redis-master created + ``` + deployment.apps/redis-master created + ``` View the pod status to check that it is ready: - kubectl get pods + ```shell + kubectl get pods + ``` The output displays the pod created: - NAME READY STATUS RESTARTS AGE - redis-master-765d459796-258hz 1/1 Running 0 50s + ``` + NAME READY STATUS RESTARTS AGE + redis-master-765d459796-258hz 1/1 Running 0 50s + ``` - View the deployment status: + View the Deployment's status: - kubectl get deployment + ```shell + kubectl get deployment + ``` - The output displays that the deployment was created: + The output displays that the Deployment was created: - NAME READY UP-TO-DATE AVAILABLE AGE - redis-master 1/1 1 1 55s + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + redis-master 1/1 1 1 55s + ``` - View the replicaset status using: + The Deployment automatically manages a ReplicaSet. + View the ReplicaSet status using: - kubectl get rs + ```shell + kubectl get replicaset + ``` - The output displays that the replicaset was created: + The output displays that the ReplicaSet was created: - NAME DESIRED CURRENT READY AGE - redis-master-765d459796 1 1 1 1m + ``` + NAME DESIRED CURRENT READY AGE + redis-master-765d459796 1 1 1 1m + ``` -2. Create a Redis service: +2. 
Create a Service to expose Redis on the network: - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-service.yaml + ```shell + kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-service.yaml + ``` - The output of a successful command verifies that the service was created: + The output of a successful command verifies that the Service was created: - service/redis-master created + ``` + service/redis-master created + ``` - Check the service created: + Check the Service created: - kubectl get svc | grep redis + ```shell + kubectl get service redis-master + ``` The output displays the service created: - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - redis-master ClusterIP 10.0.0.213 6379/TCP 27s + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + redis-master ClusterIP 10.0.0.213 6379/TCP 27s + ``` -3. Verify that the Redis server is running in the pod and listening on port 6379: +3. Verify that the Redis server is running in the Pod, and listening on port 6379: - kubectl get pods redis-master-765d459796-258hz --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}' + ```shell + # Change redis-master-765d459796-258hz to the name of the Pod + kubectl get pod redis-master-765d459796-258hz --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}' + ``` - The output displays the port: + The output displays the port for Redis in that Pod: - 6379 + ``` + 6379 + ``` + (this is the TCP port allocated to Redis on the internet). -## Forward a local port to a port on the pod +## Forward a local port to a port on the Pod -1. `kubectl port-forward` allows using resource name, such as a pod name, to select a matching pod to port forward to since Kubernetes v1.10. +1. `kubectl port-forward` allows using resource name, such as a pod name, to select a matching pod to port forward to. - kubectl port-forward redis-master-765d459796-258hz 7000:6379 + + ```shell + # Change redis-master-765d459796-258hz to the name of the Pod + kubectl port-forward redis-master-765d459796-258hz 7000:6379 + ``` which is the same as - kubectl port-forward pods/redis-master-765d459796-258hz 7000:6379 + ```shell + kubectl port-forward pods/redis-master-765d459796-258hz 7000:6379 + ``` or - kubectl port-forward deployment/redis-master 7000:6379 + ```shell + kubectl port-forward deployment/redis-master 7000:6379 + ``` or - kubectl port-forward rs/redis-master 7000:6379 + ```shell + kubectl port-forward replicaset/redis-master 7000:6379 + ``` or - kubectl port-forward svc/redis-master 7000:6379 + ```shell + kubectl port-forward service/redis-master 7000:6379 + ``` Any of the above commands works. The output is similar to this: - I0710 14:43:38.274550 3655 portforward.go:225] Forwarding from 127.0.0.1:7000 -> 6379 - I0710 14:43:38.274797 3655 portforward.go:225] Forwarding from [::1]:7000 -> 6379 + ``` + I0710 14:43:38.274550 3655 portforward.go:225] Forwarding from 127.0.0.1:7000 -> 6379 + I0710 14:43:38.274797 3655 portforward.go:225] Forwarding from [::1]:7000 -> 6379 + ``` 2. Start the Redis command line interface: - redis-cli -p 7000 + ```shell + redis-cli -p 7000 + ``` 3. At the Redis command line prompt, enter the `ping` command: - 127.0.0.1:7000>ping + ``` + ping + ``` + + A successful ping request returns: - A successful ping request returns PONG. + ``` + PONG + ``` {{% /capture %}} @@ -132,15 +186,15 @@ for database debugging. 
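Beyond `ping`, a simple way to confirm that traffic really reaches Redis through the tunnel is to write and read a key over the forwarded local port. This is only a sketch and assumes the port-forward from the previous steps is still running against local port 7000:

```shell
# Write a key through the forwarded port (local 7000 -> container 6379)
redis-cli -p 7000 set healthcheck ok

# Read it back; a reply of "ok" confirms the round trip through kubectl port-forward
redis-cli -p 7000 get healthcheck
```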
## Discussion -Connections made to local port 7000 are forwarded to port 6379 of the pod that -is running the Redis server. With this connection in place you can use your -local workstation to debug the database that is running in the pod. +Connections made to local port 7000 are forwarded to port 6379 of the Pod that +is running the Redis server. With this connection in place, you can use your +local workstation to debug the database that is running in the Pod. -{{< warning >}} -Due to known limitations, port forward today only works for TCP protocol. -The support to UDP protocol is being tracked in +{{< note >}} +`kubectl port-forward` is implemented for TCP ports only. +The support for UDP protocol is tracked in [issue 47862](https://github.com/kubernetes/kubernetes/issues/47862). -{{< /warning >}} +{{< /note >}} {{% /capture %}} @@ -148,6 +202,3 @@ The support to UDP protocol is being tracked in {{% capture whatsnext %}} Learn more about [kubectl port-forward](/docs/reference/generated/kubectl/kubectl-commands/#port-forward). {{% /capture %}} - - - diff --git a/content/en/docs/tasks/administer-cluster/change-default-storage-class.md b/content/en/docs/tasks/administer-cluster/change-default-storage-class.md index 4c3351956d325..a2070bcfe3fc0 100644 --- a/content/en/docs/tasks/administer-cluster/change-default-storage-class.md +++ b/content/en/docs/tasks/administer-cluster/change-default-storage-class.md @@ -62,10 +62,10 @@ for details about addon manager and how to disable individual addons. To mark a StorageClass as non-default, you need to change its value to `false`: ```bash - kubectl patch storageclass -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' + kubectl patch storageclass standard -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' ``` - where `` is the name of your chosen StorageClass. + where `standard` is the name of your chosen StorageClass. 1. Mark a StorageClass as default: @@ -73,7 +73,7 @@ for details about addon manager and how to disable individual addons. `storageclass.kubernetes.io/is-default-class=true`. ```bash - kubectl patch storageclass -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' + kubectl patch storageclass gold -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' ``` Please note that at most one StorageClass can be marked as default. If two diff --git a/content/en/docs/tasks/administer-cluster/cluster-management.md b/content/en/docs/tasks/administer-cluster/cluster-management.md index bceb4fef96897..65728ec4ee9b5 100644 --- a/content/en/docs/tasks/administer-cluster/cluster-management.md +++ b/content/en/docs/tasks/administer-cluster/cluster-management.md @@ -24,7 +24,7 @@ To install Kubernetes on a set of machines, consult one of the existing [Getting ## Upgrading a cluster -The current state of cluster upgrades is provider dependent, and some releases may require special care when upgrading. It is recommended that administrators consult both the [release notes](https://git.k8s.io/kubernetes/CHANGELOG.md), as well as the version specific upgrade notes prior to upgrading their clusters. +The current state of cluster upgrades is provider dependent, and some releases may require special care when upgrading. 
It is recommended that administrators consult both the [release notes](https://git.k8s.io/kubernetes/CHANGELOG/README.md), as well as the version specific upgrade notes prior to upgrading their clusters. ### Upgrading an Azure Kubernetes Service (AKS) cluster diff --git a/content/en/docs/tasks/administer-cluster/coredns.md b/content/en/docs/tasks/administer-cluster/coredns.md index 657459b14586c..2e50d54f06a2c 100644 --- a/content/en/docs/tasks/administer-cluster/coredns.md +++ b/content/en/docs/tasks/administer-cluster/coredns.md @@ -63,6 +63,10 @@ In Kubernetes 1.11, CoreDNS has graduated to General Availability (GA) and is installed by default. {{< /note >}} +{{< warning >}} +In Kubernetes 1.18, kube-dns usage with kubeadm has been deprecated and will be removed in a future version. +{{< /warning >}} + To install kube-dns on versions prior to 1.13, set the `CoreDNS` feature gate value to `false`: @@ -72,9 +76,9 @@ kubeadm init --feature-gates=CoreDNS=false For versions 1.13 and later, follow the guide outlined [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase#cmd-phase-addon). -## Upgrading CoreDNS +## Upgrading CoreDNS -CoreDNS is available in Kubernetes since v1.9. +CoreDNS is available in Kubernetes since v1.9. You can check the version of CoreDNS shipped with Kubernetes and the changes made to CoreDNS [here](https://github.com/coredns/deployment/blob/master/kubernetes/CoreDNS-k8s_version.md). CoreDNS can be upgraded manually in case you want to only upgrade CoreDNS or use your own custom image. diff --git a/content/en/docs/tasks/administer-cluster/declare-network-policy.md b/content/en/docs/tasks/administer-cluster/declare-network-policy.md index edb389c46f3ac..0fdbff57b8758 100644 --- a/content/en/docs/tasks/administer-cluster/declare-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/declare-network-policy.md @@ -90,10 +90,11 @@ To limit the access to the `nginx` service so that only Pods with the label `acc {{< codenew file="service/networking/nginx-policy.yaml" >}} -{{< note >}} +The name of a NetworkPolicy object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). +{{< note >}} NetworkPolicy includes a `podSelector` which selects the grouping of Pods to which the policy applies. You can see this policy selects Pods with the label `app=nginx`. The label was automatically added to the Pod in the `nginx` Deployment. An empty `podSelector` selects all pods in the namespace. - {{< /note >}} ## Assign the policy to the service diff --git a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md index 352a7093865aa..0203cfa469f01 100644 --- a/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md +++ b/content/en/docs/tasks/administer-cluster/dns-debugging-resolution.md @@ -248,7 +248,7 @@ linux/amd64, go1.10.3, 2e322f6 ## Known issues -Some Linux distributions (e.g. Ubuntu), use a local DNS resolver by default (systemd-resolved). +Some Linux distributions (e.g. Ubuntu) use a local DNS resolver by default (systemd-resolved). Systemd-resolved moves and replaces `/etc/resolv.conf` with a stub file that can cause a fatal forwarding loop when resolving names in upstream servers. This can be fixed manually by using kubelet's `--resolv-conf` flag to point to the correct `resolv.conf` (With `systemd-resolved`, this is `/run/systemd/resolve/resolv.conf`). 
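To check whether a node is affected, inspect what `/etc/resolv.conf` really points at. The commands below are a sketch for a host running `systemd-resolved` and assume shell access to the node:

```shell
# If this resolves to systemd-resolved's stub file, the node is affected
readlink -f /etc/resolv.conf

# These are the real upstream resolvers maintained by systemd-resolved;
# point kubelet's --resolv-conf flag (or the resolvConf field of the kubelet
# configuration file) at this path instead
cat /run/systemd/resolve/resolv.conf
```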
@@ -258,22 +258,13 @@ Kubernetes installs do not configure the nodes' `resolv.conf` files to use the cluster DNS by default, because that process is inherently distribution-specific. This should probably be implemented eventually. -Linux's libc (a.k.a. glibc) has a limit for the DNS `nameserver` records to 3 by default. What's more, for the glibc versions which are older than glic-2.17-222 ([the new versions update see this issue](https://access.redhat.com/solutions/58028)), the DNS `search` records has been limited to 6 ([see this bug from 2005](https://bugzilla.redhat.com/show_bug.cgi?id=168253)). Kubernetes needs to consume 1 `nameserver` record and 3 `search` records. This means that if a local installation already uses 3 `nameserver`s or uses more than 3 `search`es while your glibc versions in the affected list, some of those settings will be lost. For the workaround of the DNS `nameserver` records limit, the node can run `dnsmasq` which will provide more `nameserver` entries, you can also use kubelet's `--resolv-conf` flag. For fixing the DNS `search` records limit, consider upgrading your linux distribution or glibc version. +Linux's libc (a.k.a. glibc) has a limit for the DNS `nameserver` records to 3 by default. What's more, for the glibc versions which are older than glibc-2.17-222 ([the new versions update see this issue](https://access.redhat.com/solutions/58028)), the allowed number of DNS `search` records has been limited to 6 ([see this bug from 2005](https://bugzilla.redhat.com/show_bug.cgi?id=168253)). Kubernetes needs to consume 1 `nameserver` record and 3 `search` records. This means that if a local installation already uses 3 `nameserver`s or uses more than 3 `search`es while your glibc version is in the affected list, some of those settings will be lost. To work around the DNS `nameserver` records limit, the node can run `dnsmasq`, which will provide more `nameserver` entries. You can also use kubelet's `--resolv-conf` flag. To fix the DNS `search` records limit, consider upgrading your linux distribution or upgrading to an unaffected version of glibc. If you are using Alpine version 3.3 or earlier as your base image, DNS may not -work properly owing to a known issue with Alpine. +work properly due to a known issue with Alpine. Check [here](https://github.com/kubernetes/kubernetes/issues/30215) for more information. -## Kubernetes Federation (Multiple Zone support) - -Release 1.3 introduced Cluster Federation support for multi-site Kubernetes -installations. This required some minor (backward-compatible) changes to the -way the Kubernetes cluster DNS server processes DNS queries, to facilitate -the lookup of federated services (which span multiple Kubernetes clusters). -See the [Cluster Federation Administrators' Guide](/docs/concepts/cluster-administration/federation/) -for more details on Cluster Federation and multi-site support. - ## References - [DNS for Services and Pods](/docs/concepts/services-networking/dns-pod-service/) diff --git a/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md b/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md index 99c575dbfb925..b8e4cf900da31 100644 --- a/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md +++ b/content/en/docs/tasks/administer-cluster/enabling-endpointslices.md @@ -35,28 +35,25 @@ components still rely on Endpoints. For now, enabling EndpointSlices should be seen as an addition to Endpoints in a cluster, not a replacement for them. 
{{< /note >}} -EndpointSlices are considered a beta feature, but only the API is enabled by -default. Both the EndpointSlice controller and the usage of EndpointSlices by -kube-proxy are not enabled by default. - -The EndpointSlice controller creates and manages EndpointSlices in a cluster. -You can enable it with the `EndpointSlice` [feature -gate](/docs/reference/command-line-tools-reference/feature-gates/) on the {{< -glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}} and {{< -glossary_tooltip text="kube-controller-manager" -term_id="kube-controller-manager" >}} (`--feature-gates=EndpointSlice=true`). - -For better scalability, you can also enable this feature gate on {{< -glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}} so EndpointSlices -will be used as the data source instead of Endpoints. +EndpointSlices are a beta feature. Both the API and the EndpointSlice +{{< glossary_tooltip term_id="controller" >}} are enabled by default. +{{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}} +uses Endpoints by default, not EndpointSlices. + +For better scalability and performance, you can enable the +`EndpointSliceProxying` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +on kube-proxy. That change +switches the data source to be EndpointSlices, which reduces the amount of +Kubernetes API traffic to and from kube-proxy. ## Using EndpointSlices With EndpointSlices fully enabled in your cluster, you should see corresponding EndpointSlice resources for each Endpoints resource. In addition to supporting -existing Endpoints functionality, EndpointSlices should include new bits of -information such as topology. They will allow for greater scalability and -extensibility of network endpoints in your cluster. +existing Endpoints functionality, EndpointSlices include new bits of information +such as topology. They will allow for greater scalability and extensibility of +network endpoints in your cluster. {{% capture whatsnext %}} diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md new file mode 100644 index 0000000000000..54978dc55dfb4 --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes.md @@ -0,0 +1,169 @@ +--- +reviewers: +- michmike +- patricklang +title: Adding Windows nodes +min-kubernetes-server-version: 1.17 +content_template: templates/tutorial +weight: 30 +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="v1.18" state="beta" >}} + +You can use Kubernetes to run a mixture of Linux and Windows nodes, so you can mix Pods that run on Linux on with Pods that run on Windows. This page shows how to register Windows nodes to your cluster. + +{{% /capture %}} + + +{{% capture prerequisites %}} {{< version-check >}} + +* Obtain a [Windows Server 2019 license](https://www.microsoft.com/en-us/cloud-platform/windows-server-pricing) +(or higher) in order to configure the Windows node that hosts Windows containers. +If you are using VXLAN/Overlay networking you must have also have [KB4489899](https://support.microsoft.com/help/4489899) installed. + +* A Linux-based Kubernetes kubeadm cluster in which you have access to the control plane (see [Creating a single control-plane cluster with kubeadm](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/)). 
+ +{{% /capture %}} + + +{{% capture objectives %}} + +* Register a Windows node to the cluster +* Configure networking so Pods and Services on Linux and Windows can communicate with each other + +{{% /capture %}} + + +{{% capture lessoncontent %}} + +## Getting Started: Adding a Windows Node to Your Cluster + +### Networking Configuration + +Once you have a Linux-based Kubernetes control-plane node you are ready to choose a networking solution. This guide illustrates using Flannel in VXLAN mode for simplicity. + +#### Configuring Flannel + +1. Prepare Kubernetes control plane for Flannel + + Some minor preparation is recommended on the Kubernetes control plane in our cluster. It is recommended to enable bridged IPv4 traffic to iptables chains when using Flannel. This can be done using the following command: + + ```bash + sudo sysctl net.bridge.bridge-nf-call-iptables=1 + ``` + +1. Download & configure Flannel for Linux + + Download the most recent Flannel manifest: + + ```bash + wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + ``` + + Modify the `net-conf.json` section of the flannel manifest in order to set the VNI to 4096 and the Port to 4789. It should look as follows: + + ```json + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan", + "VNI" : 4096, + "Port": 4789 + } + } + ``` + + {{< note >}}The VNI must be set to 4096 and port 4789 for Flannel on Linux to interoperate with Flannel on Windows. See the [VXLAN documentation](https://github.com/coreos/flannel/blob/master/Documentation/backends.md#vxlan). + for an explanation of these fields.{{< /note >}} + + {{< note >}}To use L2Bridge/Host-gateway mode instead change the value of `Type` to `"host-gw"` and omit `VNI` and `Port`.{{< /note >}} + +1. Apply the Flannel manifest and validate + + Let's apply the Flannel configuration: + + ```bash + kubectl apply -f kube-flannel.yml + ``` + + After a few minutes, you should see all the pods as running if the Flannel pod network was deployed. + + ```bash + kubectl get pods -n kube-system + ``` + + The output should include the Linux flannel DaemonSet as running: + + ``` + NAMESPACE NAME READY STATUS RESTARTS AGE + ... + kube-system kube-flannel-ds-54954 1/1 Running 0 1m + ``` + +1. Add Windows Flannel and kube-proxy DaemonSets + + Now you can add Windows-compatible versions of Flannel and kube-proxy. In order + to ensure that you get a compatible version of kube-proxy, you'll need to substitute + the tag of the image. The following example shows usage for Kubernetes {{< param "fullversion" >}}, + but you should adjust the version for your own deployment. + + ```bash + curl -L https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/kube-proxy.yml | sed 's/VERSION/{{< param "fullversion" >}}/g' | kubectl apply -f - + kubectl apply -f https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/flannel-overlay.yml + ``` + + {{< note >}} + If you're using host-gateway use https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/flannel-host-gw.yml instead + {{< /note >}} + +### Joining a Windows worker node +{{< note >}} +You must install the `Containers` feature and install Docker. Instructions +to do so are available at [Install Docker Engine - Enterprise on Windows Servers](https://docs.docker.com/ee/docker-ee/windows/docker-ee/#install-docker-engine---enterprise). 
+{{< /note >}} + +{{< note >}} +All code snippets in Windows sections are to be run in a PowerShell environment +with elevated permissions (Administrator) on the Windows worker node. +{{< /note >}} + +1. Install wins, kubelet, and kubeadm. + + ```PowerShell + curl.exe -LO https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/PrepareNode.ps1 + .\PrepareNode.ps1 -KubernetesVersion {{< param "fullversion" >}} + ``` + +1. Run `kubeadm` to join the node + + Use the command that was given to you when you ran `kubeadm init` on a control plane host. + If you no longer have this command, or the token has expired, you can run `kubeadm token create --print-join-command` + (on a control plane host) to generate a new token and join command. + + +#### Verifying your installation +You should now be able to view the Windows node in your cluster by running: + +```bash +kubectl get nodes -o wide +``` + +If your new node is in the `NotReady` state it is likely because the flannel image is still downloading. +You can check the progress as before by checking on the flannel pods in the `kube-system` namespace: + +```shell +kubectl -n kube-system get pods -l app=flannel +``` + +Once the flannel Pod is running, your node should enter the `Ready` state and then be available to handle workloads. + +{{% /capture %}} + +{{% capture whatsnext %}} + +- [Upgrading Windows kubeadm nodes](/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes) + +{{% /capture %}} diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index c3ef0caa10ecc..6329c4a3959b4 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -3,6 +3,7 @@ reviewers: - sig-cluster-lifecycle title: Certificate Management with kubeadm content_template: templates/task +weight: 10 --- {{% capture overview %}} diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index ce898371fbd82..9fc79c1e12b1f 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -3,16 +3,19 @@ reviewers: - sig-cluster-lifecycle title: Upgrading kubeadm clusters content_template: templates/task +weight: 20 +min-kubernetes-server-version: 1.18 --- {{% capture overview %}} This page explains how to upgrade a Kubernetes cluster created with kubeadm from version -1.16.x to version 1.17.x, and from version 1.17.x to 1.17.y (where `y > x`). +1.17.x to version 1.18.x, and from version 1.18.x to 1.18.y (where `y > x`). 
To see information about upgrading clusters created using older versions of kubeadm, please refer to following pages instead: +- [Upgrading kubeadm cluster from 1.16 to 1.17](https://v1-17.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) - [Upgrading kubeadm cluster from 1.15 to 1.16](https://v1-16.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) - [Upgrading kubeadm cluster from 1.14 to 1.15](https://v1-15.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15/) - [Upgrading kubeadm cluster from 1.13 to 1.14](https://v1-15.docs.kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-14/) @@ -27,7 +30,7 @@ The upgrade workflow at high level is the following: {{% capture prerequisites %}} -- You need to have a kubeadm Kubernetes cluster running version 1.16.0 or later. +- You need to have a kubeadm Kubernetes cluster running version 1.17.0 or later. - [Swap must be disabled](https://serverfault.com/questions/684771/best-way-to-disable-swap-in-linux). - The cluster should use a static control plane and etcd pods or external etcd. - Make sure you read the [release notes]({{< latest-release-notes >}}) carefully. @@ -54,12 +57,12 @@ The upgrade workflow at high level is the following: apt update apt-cache madison kubeadm # find the latest 1.17 version in the list - # it should look like 1.17.x-00, where x is the latest patch + # it should look like 1.18.x-00, where x is the latest patch {{% /tab %}} {{% tab name="CentOS, RHEL or Fedora" %}} yum list --showduplicates kubeadm --disableexcludes=kubernetes # find the latest 1.17 version in the list - # it should look like 1.17.x-0, where x is the latest patch + # it should look like 1.18.x-0, where x is the latest patch {{% /tab %}} {{< /tabs >}} @@ -71,18 +74,18 @@ The upgrade workflow at high level is the following: {{< tabs name="k8s_install_kubeadm_first_cp" >}} {{% tab name="Ubuntu, Debian or HypriotOS" %}} - # replace x in 1.17.x-00 with the latest patch version + # replace x in 1.18.x-00 with the latest patch version apt-mark unhold kubeadm && \ - apt-get update && apt-get install -y kubeadm=1.17.x-00 && \ + apt-get update && apt-get install -y kubeadm=1.18.x-00 && \ apt-mark hold kubeadm # since apt-get version 1.1 you can also use the following method apt-get update && \ - apt-get install -y --allow-change-held-packages kubeadm=1.17.x-00 + apt-get install -y --allow-change-held-packages kubeadm=1.18.x-00 {{% /tab %}} {{% tab name="CentOS, RHEL or Fedora" %}} - # replace x in 1.17.x-0 with the latest patch version - yum install -y kubeadm-1.17.x-0 --disableexcludes=kubernetes + # replace x in 1.18.x-0 with the latest patch version + yum install -y kubeadm-1.18.x-0 --disableexcludes=kubernetes {{% /tab %}} {{< /tabs >}} @@ -112,28 +115,30 @@ The upgrade workflow at high level is the following: [upgrade/config] Reading configuration from the cluster... [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' [preflight] Running pre-flight checks. 
- [upgrade] Making sure the cluster is healthy: + [upgrade] Running cluster health checks [upgrade] Fetching available versions to upgrade to - [upgrade/versions] Cluster version: v1.16.0 - [upgrade/versions] kubeadm version: v1.17.0 + [upgrade/versions] Cluster version: v1.17.3 + [upgrade/versions] kubeadm version: v1.18.0 + [upgrade/versions] Latest stable version: v1.18.0 + [upgrade/versions] Latest version in the v1.17 series: v1.18.0 Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply': - COMPONENT CURRENT AVAILABLE - Kubelet 1 x v1.16.0 v1.17.0 + COMPONENT CURRENT AVAILABLE + Kubelet 1 x v1.17.3 v1.18.0 - Upgrade to the latest version in the v1.16 series: + Upgrade to the latest version in the v1.17 series: COMPONENT CURRENT AVAILABLE - API Server v1.16.0 v1.17.0 - Controller Manager v1.16.0 v1.17.0 - Scheduler v1.16.0 v1.17.0 - Kube Proxy v1.16.0 v1.17.0 - CoreDNS 1.6.2 1.6.5 - Etcd 3.3.15 3.4.3-0 + API Server v1.17.3 v1.18.0 + Controller Manager v1.17.3 v1.18.0 + Scheduler v1.17.3 v1.18.0 + Kube Proxy v1.17.3 v1.18.0 + CoreDNS 1.6.5 1.6.7 + Etcd 3.4.3 3.4.3-0 You can now apply the upgrade by executing the following command: - kubeadm upgrade apply v1.17.0 + kubeadm upgrade apply v1.18.0 _____________________________________________________________________ ``` @@ -150,78 +155,79 @@ The upgrade workflow at high level is the following: ```shell # replace x with the patch version you picked for this upgrade - sudo kubeadm upgrade apply v1.17.x + sudo kubeadm upgrade apply v1.18.x ``` You should see output similar to this: ``` - [preflight] Running pre-flight checks. - [upgrade] Making sure the cluster is healthy: [upgrade/config] Making sure the configuration is correct: [upgrade/config] Reading configuration from the cluster... [upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' - [upgrade/version] You have chosen to change the cluster version to "v1.17.0" - [upgrade/versions] Cluster version: v1.16.0 - [upgrade/versions] kubeadm version: v1.17.0 + [preflight] Running pre-flight checks. + [upgrade] Running cluster health checks + [upgrade/version] You have chosen to change the cluster version to "v1.18.0" + [upgrade/versions] Cluster version: v1.17.3 + [upgrade/versions] kubeadm version: v1.18.0 [upgrade/confirm] Are you sure you want to proceed with the upgrade? [y/N]: y [upgrade/prepull] Will prepull images for components [kube-apiserver kube-controller-manager kube-scheduler etcd] [upgrade/prepull] Prepulling image for component etcd. [upgrade/prepull] Prepulling image for component kube-apiserver. [upgrade/prepull] Prepulling image for component kube-controller-manager. [upgrade/prepull] Prepulling image for component kube-scheduler. 
- [apiclient] Found 0 Pods for label selector k8s-app=upgrade-prepull-kube-scheduler - [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-apiserver [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-controller-manager [apiclient] Found 0 Pods for label selector k8s-app=upgrade-prepull-etcd - [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-scheduler + [apiclient] Found 0 Pods for label selector k8s-app=upgrade-prepull-kube-scheduler + [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-apiserver [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-etcd + [apiclient] Found 1 Pods for label selector k8s-app=upgrade-prepull-kube-scheduler [upgrade/prepull] Prepulled image for component etcd. - [upgrade/prepull] Prepulled image for component kube-controller-manager. [upgrade/prepull] Prepulled image for component kube-apiserver. + [upgrade/prepull] Prepulled image for component kube-controller-manager. [upgrade/prepull] Prepulled image for component kube-scheduler. [upgrade/prepull] Successfully prepulled the images for all the control plane components - [upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.17.0"... - Static pod: kube-apiserver-luboitvbox hash: 8d931c2296a38951e95684cbcbe3b923 - Static pod: kube-controller-manager-luboitvbox hash: 2480bf6982ad2103c05f6764e20f2787 - Static pod: kube-scheduler-luboitvbox hash: 9b290132363a92652555896288ca3f88 + [upgrade/apply] Upgrading your Static Pod-hosted control plane to version "v1.18.0"... + Static pod: kube-apiserver-myhost hash: 2cc222e1a577b40a8c2832320db54b46 + Static pod: kube-controller-manager-myhost hash: f7ce4bc35cb6e646161578ac69910f18 + Static pod: kube-scheduler-myhost hash: e3025acd90e7465e66fa19c71b916366 [upgrade/etcd] Upgrading to TLS for etcd - [upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests446257614" + [upgrade/etcd] Non fatal issue encountered during upgrade: the desired etcd version for this Kubernetes version "v1.18.0" is "3.4.3-0", but the current etcd version is "3.4.3". 
Won't downgrade etcd, instead just continue + [upgrade/staticpods] Writing new Static Pod manifests to "/etc/kubernetes/tmp/kubeadm-upgraded-manifests308527012" + W0308 18:48:14.535122 3082 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC" [upgrade/staticpods] Preparing for "kube-apiserver" upgrade - [upgrade/staticpods] Renewing "apiserver-etcd-client" certificate - [upgrade/staticpods] Renewing "apiserver" certificate - [upgrade/staticpods] Renewing "apiserver-kubelet-client" certificate - [upgrade/staticpods] Renewing "front-proxy-client" certificate - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-06-05-23-38-03/kube-apiserver.yaml" + [upgrade/staticpods] Renewing apiserver certificate + [upgrade/staticpods] Renewing apiserver-kubelet-client certificate + [upgrade/staticpods] Renewing front-proxy-client certificate + [upgrade/staticpods] Renewing apiserver-etcd-client certificate + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-apiserver.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2020-03-08-18-48-14/kube-apiserver.yaml" [upgrade/staticpods] Waiting for the kubelet to restart the component [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) - Static pod: kube-apiserver-luboitvbox hash: 8d931c2296a38951e95684cbcbe3b923 - Static pod: kube-apiserver-luboitvbox hash: 1b4e2b09a408c844f9d7b535e593ead9 + Static pod: kube-apiserver-myhost hash: 2cc222e1a577b40a8c2832320db54b46 + Static pod: kube-apiserver-myhost hash: 609429acb0d71dce6725836dd97d8bf4 [apiclient] Found 1 Pods for label selector component=kube-apiserver [upgrade/staticpods] Component "kube-apiserver" upgraded successfully! [upgrade/staticpods] Preparing for "kube-controller-manager" upgrade - [upgrade/staticpods] Renewing certificate embedded in "controller-manager.conf" - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-06-05-23-38-03/kube-controller-manager.yaml" + [upgrade/staticpods] Renewing controller-manager.conf certificate + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-controller-manager.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2020-03-08-18-48-14/kube-controller-manager.yaml" [upgrade/staticpods] Waiting for the kubelet to restart the component [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) - Static pod: kube-controller-manager-luboitvbox hash: 2480bf6982ad2103c05f6764e20f2787 - Static pod: kube-controller-manager-luboitvbox hash: 6617d53423348aa619f1d6e568bb894a + Static pod: kube-controller-manager-myhost hash: f7ce4bc35cb6e646161578ac69910f18 + Static pod: kube-controller-manager-myhost hash: c7a1232ba2c5dc15641c392662fe5156 [apiclient] Found 1 Pods for label selector component=kube-controller-manager [upgrade/staticpods] Component "kube-controller-manager" upgraded successfully! 
[upgrade/staticpods] Preparing for "kube-scheduler" upgrade - [upgrade/staticpods] Renewing certificate embedded in "scheduler.conf" - [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2019-06-05-23-38-03/kube-scheduler.yaml" + [upgrade/staticpods] Renewing scheduler.conf certificate + [upgrade/staticpods] Moved new manifest to "/etc/kubernetes/manifests/kube-scheduler.yaml" and backed up old manifest to "/etc/kubernetes/tmp/kubeadm-backup-manifests-2020-03-08-18-48-14/kube-scheduler.yaml" [upgrade/staticpods] Waiting for the kubelet to restart the component [upgrade/staticpods] This might take a minute or longer depending on the component/version gap (timeout 5m0s) - Static pod: kube-scheduler-luboitvbox hash: 9b290132363a92652555896288ca3f88 - Static pod: kube-scheduler-luboitvbox hash: edf58ab819741a5d1eb9c33de756e3ca + Static pod: kube-scheduler-myhost hash: e3025acd90e7465e66fa19c71b916366 + Static pod: kube-scheduler-myhost hash: b1b721486ae0ac504c160dcdc457ab0d [apiclient] Found 1 Pods for label selector component=kube-scheduler [upgrade/staticpods] Component "kube-scheduler" upgraded successfully! - [upgrade/staticpods] Renewing certificate embedded in "admin.conf" [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace - [kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster - [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace + [kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster + [kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.18" ConfigMap in the kube-system namespace [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token @@ -229,7 +235,7 @@ The upgrade workflow at high level is the following: [addons] Applied essential addon: CoreDNS [addons] Applied essential addon: kube-proxy - [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.17.0". Enjoy! + [upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.18.0". Enjoy! [upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so. ``` @@ -271,18 +277,18 @@ Also `sudo kubeadm upgrade plan` is not needed. 
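After `kubeadm upgrade apply` reports success, it can be worth a quick sanity check before moving on to the kubelets. The commands below are only a sketch; the versions and Pod names you see will depend on your cluster:

```shell
# The reported Server Version should match the upgrade target
kubectl version --short

# The static control plane Pods (kube-apiserver, kube-controller-manager,
# kube-scheduler, etcd) should be Running again with the new image tags
kubectl -n kube-system get pods -o wide
```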
{{< tabs name="k8s_install_kubelet" >}} {{% tab name="Ubuntu, Debian or HypriotOS" %}} - # replace x in 1.17.x-00 with the latest patch version + # replace x in 1.18.x-00 with the latest patch version apt-mark unhold kubelet kubectl && \ - apt-get update && apt-get install -y kubelet=1.17.x-00 kubectl=1.17.x-00 && \ + apt-get update && apt-get install -y kubelet=1.18.x-00 kubectl=1.18.x-00 && \ apt-mark hold kubelet kubectl # since apt-get version 1.1 you can also use the following method apt-get update && \ - apt-get install -y --allow-change-held-packages kubelet=1.17.x-00 kubectl=1.17.x-00 + apt-get install -y --allow-change-held-packages kubelet=1.18.x-00 kubectl=1.18.x-00 {{% /tab %}} {{% tab name="CentOS, RHEL or Fedora" %}} - # replace x in 1.17.x-0 with the latest patch version - yum install -y kubelet-1.17.x-0 kubectl-1.17.x-0 --disableexcludes=kubernetes + # replace x in 1.18.x-0 with the latest patch version + yum install -y kubelet-1.18.x-0 kubectl-1.18.x-0 --disableexcludes=kubernetes {{% /tab %}} {{< /tabs >}} @@ -303,18 +309,18 @@ without compromising the minimum required capacity for running your workloads. {{< tabs name="k8s_install_kubeadm_worker_nodes" >}} {{% tab name="Ubuntu, Debian or HypriotOS" %}} - # replace x in 1.17.x-00 with the latest patch version + # replace x in 1.18.x-00 with the latest patch version apt-mark unhold kubeadm && \ - apt-get update && apt-get install -y kubeadm=1.17.x-00 && \ + apt-get update && apt-get install -y kubeadm=1.18.x-00 && \ apt-mark hold kubeadm # since apt-get version 1.1 you can also use the following method apt-get update && \ - apt-get install -y --allow-change-held-packages kubeadm=1.17.x-00 + apt-get install -y --allow-change-held-packages kubeadm=1.18.x-00 {{% /tab %}} {{% tab name="CentOS, RHEL or Fedora" %}} - # replace x in 1.17.x-0 with the latest patch version - yum install -y kubeadm-1.17.x-0 --disableexcludes=kubernetes + # replace x in 1.18.x-0 with the latest patch version + yum install -y kubeadm-1.18.x-0 --disableexcludes=kubernetes {{% /tab %}} {{< /tabs >}} @@ -349,18 +355,18 @@ without compromising the minimum required capacity for running your workloads. {{< tabs name="k8s_kubelet_and_kubectl" >}} {{% tab name="Ubuntu, Debian or HypriotOS" %}} - # replace x in 1.17.x-00 with the latest patch version + # replace x in 1.18.x-00 with the latest patch version apt-mark unhold kubelet kubectl && \ - apt-get update && apt-get install -y kubelet=1.17.x-00 kubectl=1.17.x-00 && \ + apt-get update && apt-get install -y kubelet=1.18.x-00 kubectl=1.18.x-00 && \ apt-mark hold kubelet kubectl # since apt-get version 1.1 you can also use the following method apt-get update && \ - apt-get install -y --allow-change-held-packages kubelet=1.17.x-00 kubectl=1.17.x-00 + apt-get install -y --allow-change-held-packages kubelet=1.18.x-00 kubectl=1.18.x-00 {{% /tab %}} {{% tab name="CentOS, RHEL or Fedora" %}} - # replace x in 1.17.x-0 with the latest patch version - yum install -y kubelet-1.17.x-0 kubectl-1.17.x-0 --disableexcludes=kubernetes + # replace x in 1.18.x-0 with the latest patch version + yum install -y kubelet-1.18.x-0 kubectl-1.18.x-0 --disableexcludes=kubernetes {{% /tab %}} {{< /tabs >}} @@ -375,7 +381,7 @@ without compromising the minimum required capacity for running your workloads. 1. 
Bring the node back online by marking it schedulable: ```shell - # replace with the name of your node + # replace with the name of your node kubectl uncordon ``` diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md new file mode 100644 index 0000000000000..a6c626a627799 --- /dev/null +++ b/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-windows-nodes.md @@ -0,0 +1,93 @@ +--- +title: Upgrading Windows nodes +min-kubernetes-server-version: 1.17 +content_template: templates/task +weight: 40 +--- + +{{% capture overview %}} + +{{< feature-state for_k8s_version="v1.18" state="beta" >}} + +This page explains how to upgrade a Windows node [created with kubeadm](/docs/tasks/administer-cluster/kubeadm/adding-windows-nodes). + +{{% /capture %}} + + +{{% capture prerequisites %}} +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} +* Familiarize yourself with [the process for upgrading the rest of your kubeadm +cluster](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade). You will want to +upgrade the control plane nodes before upgrading your Windows nodes. + +{{% /capture %}} + + +{{% capture steps %}} + +## Upgrading worker nodes + +### Upgrade kubeadm + +1. From the Windows node, upgrade kubeadm: + + ```powershell + # replace {{< param "fullversion" >}} with your desired version + curl.exe -Lo C:\k\kubeadm.exe https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubeadm.exe + ``` + +### Drain the node + +1. From a machine with access to the Kubernetes API, + prepare the node for maintenance by marking it unschedulable and evicting the workloads: + + ```shell + # replace with the name of your node you are draining + kubectl drain --ignore-daemonsets + ``` + + You should see output similar to this: + + ``` + node/ip-172-31-85-18 cordoned + node/ip-172-31-85-18 drained + ``` + +### Upgrade the kubelet configuration + +1. From the Windows node, call the following command to sync new kubelet configuration: + + ```powershell + kubeadm upgrade node + ``` + +### Upgrade kubelet + +1. From the Windows node, upgrade and restart the kubelet: + + ```powershell + stop-service kubelet + curl.exe -Lo C:\k\kubelet.exe https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubelet.exe + restart-service kubelet + ``` + +### Uncordon the node + +1. From a machine with access to the Kubernetes API, +bring the node back online by marking it schedulable: + + ```shell + # replace with the name of your node + kubectl uncordon + ``` +### Upgrade kube-proxy + +1. From a machine with access to the Kubernetes API, run the following, +again replacing {{< param "fullversion" >}} with your desired version: + + ```shell + curl -L https://github.com/kubernetes-sigs/sig-windows-tools/releases/latest/download/kube-proxy.yml | sed 's/VERSION/{{< param "fullversion" >}}/g' | kubectl apply -f - + ``` + + +{{% /capture %}} diff --git a/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md b/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md index bd136e3ae2b48..9a69058ceb795 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md +++ b/content/en/docs/tasks/administer-cluster/namespaces-walkthrough.md @@ -224,12 +224,14 @@ At this point, all requests we make to the Kubernetes cluster from the command l Let's create some contents. 
+{{< codenew file="admin/snowflake-deployment.yaml" >}} + +Apply the manifest to create a Deployment + ```shell -kubectl run snowflake --image=k8s.gcr.io/serve_hostname --replicas=2 +kubectl apply -f https://k8s.io/examples/admin/snowflake-deployment.yaml ``` We have just created a deployment whose replica size is 2 that is running the pod called `snowflake` with a basic container that just serves the hostname. -Note that `kubectl run` creates deployments only on Kubernetes cluster >= v1.2. If you are running older versions, it creates replication controllers instead. -If you want to obtain the old behavior, use `--generator=run/v1` to create replication controllers. See [`kubectl run`](/docs/reference/generated/kubectl/kubectl-commands/#run) for more details. ```shell kubectl get deployment diff --git a/content/en/docs/tasks/administer-cluster/namespaces.md b/content/en/docs/tasks/administer-cluster/namespaces.md index ef12b24c98963..de2230a6a4864 100644 --- a/content/en/docs/tasks/administer-cluster/namespaces.md +++ b/content/en/docs/tasks/administer-cluster/namespaces.md @@ -101,7 +101,8 @@ See the [design doc](https://git.k8s.io/community/contributors/design-proposals/ kubectl create namespace ``` -Note that the name of your namespace must be a DNS compatible label. +The name of your namespace must be a valid +[DNS label](/docs/concepts/overview/working-with-objects/names#dns-label-names). There's an optional field `finalizers`, which allows observables to purge resources whenever the namespace is deleted. Keep in mind that if you specify a nonexistent finalizer, the namespace will be created but will get stuck in the `Terminating` state if the user tries to delete it. @@ -187,88 +188,22 @@ This delete is asynchronous, so for a time you will see the namespace in the `Te To demonstrate this, let's spin up a simple Deployment and Pods in the `development` namespace. - We first check what is the current context: - - ```shell - kubectl config view - ``` - ```yaml - apiVersion: v1 - clusters: - cluster: - certificate-authority-data: REDACTED - server: https://130.211.122.180 - name: lithe-cocoa-92103_kubernetes - contexts: - context: - cluster: lithe-cocoa-92103_kubernetes - user: lithe-cocoa-92103_kubernetes - name: lithe-cocoa-92103_kubernetes - current-context: lithe-cocoa-92103_kubernetes - kind: Config - preferences: {} - users: - name: lithe-cocoa-92103_kubernetes - user: - client-certificate-data: REDACTED - client-key-data: REDACTED - token: 65rZW78y8HbwXXtSXuUw9DbP4FLjHi4b - name: lithe-cocoa-92103_kubernetes-basic-auth - user: - password: h5M0FtUUIflBSdI7 - username: admin - ``` - - ```shell - kubectl config current-context - ``` - ``` - lithe-cocoa-92103_kubernetes - ``` - - The next step is to define a context for the kubectl client to work in each namespace. The values of "cluster" and "user" fields are copied from the current context. - - ```shell - kubectl config set-context dev --namespace=development --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes - kubectl config set-context prod --namespace=production --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes - ``` - - The above commands provided two request contexts you can alternate against depending on what namespace you - wish to work against. - - Let's switch to operate in the `development` namespace. 
- - ```shell - kubectl config use-context dev - ``` - - You can verify your current context by doing the following: - ```shell - kubectl config current-context - dev - ``` - - At this point, all requests we make to the Kubernetes cluster from the command line are scoped to the `development` namespace. - - Let's create some contents. - - ```shell - kubectl run snowflake --image=k8s.gcr.io/serve_hostname --replicas=2 + kubectl run snowflake --image=k8s.gcr.io/serve_hostname --replicas=2 -n=development ``` We have just created a deployment whose replica size is 2 that is running the pod called `snowflake` with a basic container that just serves the hostname. Note that `kubectl run` creates deployments only on Kubernetes cluster >= v1.2. If you are running older versions, it creates replication controllers instead. If you want to obtain the old behavior, use `--generator=run/v1` to create replication controllers. See [`kubectl run`](/docs/reference/generated/kubectl/kubectl-commands/#run) for more details. ```shell - kubectl get deployment + kubectl get deployment -n=development ``` ``` NAME READY UP-TO-DATE AVAILABLE AGE snowflake 2/2 2 2 2m ``` ```shell - kubectl get pods -l run=snowflake + kubectl get pods -l run=snowflake -n=development ``` ``` NAME READY STATUS RESTARTS AGE @@ -280,23 +215,19 @@ This delete is asynchronous, so for a time you will see the namespace in the `Te Let's switch to the `production` namespace and show how resources in one namespace are hidden from the other. - ```shell - kubectl config use-context prod - ``` - The `production` namespace should be empty, and the following commands should return nothing. ```shell - kubectl get deployment - kubectl get pods + kubectl get deployment -n=production + kubectl get pods -n=production ``` Production likes to run cattle, so let's create some cattle pods. ```shell - kubectl run cattle --image=k8s.gcr.io/serve_hostname --replicas=5 + kubectl run cattle --image=k8s.gcr.io/serve_hostname --replicas=5 -n=production - kubectl get deployment + kubectl get deployment -n=production ``` ``` NAME READY UP-TO-DATE AVAILABLE AGE @@ -304,7 +235,7 @@ This delete is asynchronous, so for a time you will see the namespace in the `Te ``` ```shell - kubectl get pods -l run=cattle + kubectl get pods -l run=cattle -n=production ``` ``` NAME READY STATUS RESTARTS AGE diff --git a/content/en/docs/tasks/administer-cluster/nodelocaldns.md b/content/en/docs/tasks/administer-cluster/nodelocaldns.md index 7d15596112976..6502ce14728b0 100644 --- a/content/en/docs/tasks/administer-cluster/nodelocaldns.md +++ b/content/en/docs/tasks/administer-cluster/nodelocaldns.md @@ -2,12 +2,13 @@ reviewers: - bowei - zihongz +- sftim title: Using NodeLocal DNSCache in Kubernetes clusters content_template: templates/task --- {{% capture overview %}} -{{< feature-state for_k8s_version="v1.15" state="beta" >}} +{{< feature-state for_k8s_version="v1.18" state="stable" >}} This page provides an overview of NodeLocal DNSCache feature in Kubernetes. {{% /capture %}} @@ -47,18 +48,44 @@ This is the path followed by DNS Queries after NodeLocal DNSCache is enabled: {{< figure src="/images/docs/nodelocaldns.jpg" alt="NodeLocal DNSCache flow" title="Nodelocal DNSCache flow" caption="This image shows how NodeLocal DNSCache handles DNS queries." >}} ## Configuration - -This feature can be enabled using the command: - -`KUBE_ENABLE_NODELOCAL_DNS=true kubetest --up` - -This works for e2e clusters created on GCE. 
On all other environments, the following steps will setup NodeLocal DNSCache: - -* A yaml similar to [this](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml) can be applied using `kubectl create -f` command. -* No need to modify the --cluster-dns flag since NodeLocal DNSCache listens on both the kube-dns service IP as well as a link-local IP (169.254.20.10 by default) +{{< note >}} The local listen IP address for NodeLocal DNSCache can be any IP in the 169.254.20.0/16 space or any other IP address that can be guaranteed to not collide with any existing IP. This document uses 169.254.20.10 as an example. +{{< /note >}} + +This feature can be enabled using the following steps: + +* Prepare a manifest similar to the sample [`nodelocaldns.yaml`](https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml) and save it as `nodelocaldns.yaml.` +* Substitute the variables in the manifest with the right values: + + * kubedns=`kubectl get svc kube-dns -n kube-system -o jsonpath={.spec.clusterIP}` + + * domain=`` + + * localdns=`` + + `` is "cluster.local" by default. `` is the local listen IP address chosen for NodeLocal DNSCache. + + * If kube-proxy is running in IPTABLES mode: + + ``` bash + sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/__PILLAR__DNS__SERVER__/$kubedns/g" nodelocaldns.yaml + ``` + + `__PILLAR__CLUSTER__DNS__` and `__PILLAR__UPSTREAM__SERVERS__` will be populated by the node-local-dns pods. + In this mode, node-local-dns pods listen on both the kube-dns service IP as well as ``, so pods can lookup DNS records using either IP address. + + * If kube-proxy is running in IPVS mode: + + ``` bash + sed -i "s/__PILLAR__LOCAL__DNS__/$localdns/g; s/__PILLAR__DNS__DOMAIN__/$domain/g; s/__PILLAR__DNS__SERVER__//g; s/__PILLAR__CLUSTER__DNS__/$kubedns/g" nodelocaldns.yaml + ``` + In this mode, node-local-dns pods listen only on ``. The node-local-dns interface cannot bind the kube-dns cluster IP since the interface used for IPVS loadbalancing already uses this address. + `__PILLAR__UPSTREAM__SERVERS__` will be populated by the node-local-dns pods. + +* Run `kubectl create -f nodelocaldns.yaml` +* If using kube-proxy in IPVS mode, `--cluster-dns` flag to kubelet needs to be modified to use `` that NodeLocal DNSCache is listening on. + Otherwise, there is no need to modify the value of the `--cluster-dns` flag, since NodeLocal DNSCache listens on both the kube-dns service IP as well as ``. Once enabled, node-local-dns Pods will run in the kube-system namespace on each of the cluster nodes. This Pod runs [CoreDNS](https://github.com/coredns/coredns) in cache mode, so all CoreDNS metrics exposed by the different plugins will be available on a per-node basis. -The feature can be disabled by removing the daemonset, using `kubectl delete -f` command. On e2e clusters created on GCE, the daemonset can be removed by deleting the node-local-dns yaml from `/etc/kubernetes/addons/0-dns/nodelocaldns.yaml` - +You can disable this feature by removing the DaemonSet, using `kubectl delete -f ` . You should also revert any changes you made to the kubelet configuration. 
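To confirm that the cache is running on every node, list the Pods managed by the DaemonSet. The label selector below matches the sample `nodelocaldns.yaml` manifest; adjust it if you customized the labels:

```shell
# Expect one node-local-dns Pod in Running state per node
kubectl get pods -n kube-system -l k8s-app=node-local-dns -o wide
```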
{{% /capture %}} diff --git a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md index e4fe4a5ac9815..e82b55583ba1d 100644 --- a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md +++ b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md @@ -5,6 +5,7 @@ reviewers: - dashpole title: Reserve Compute Resources for System Daemons content_template: templates/task +min-kubernetes-server-version: 1.8 --- {{% capture overview %}} @@ -27,6 +28,9 @@ on each node. {{% capture prerequisites %}} {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} +Your Kubernetes server must be at or later than version 1.17 to use +the kubelet command line option `--reserved-cpus` to set an +[explicitly reserved CPU list](#explicitly-reserved-cpu-list). {{% /capture %}} @@ -94,13 +98,7 @@ be configured to use the `systemd` cgroup driver. `kube-reserved` is meant to capture resource reservation for kubernetes system daemons like the `kubelet`, `container runtime`, `node problem detector`, etc. It is not meant to reserve resources for system daemons that are run as pods. -`kube-reserved` is typically a function of `pod density` on the nodes. [This -performance dashboard](http://node-perf-dash.k8s.io/#/builds) exposes `cpu` and -`memory` usage profiles of `kubelet` and `docker engine` at multiple levels of -pod density. [This blog -post](https://kubernetes.io/blog/2016/11/visualize-kubelet-performance-with-node-dashboard) -explains how the dashboard can be interpreted to come up with a suitable -`kube-reserved` reservation. +`kube-reserved` is typically a function of `pod density` on the nodes. In addition to `cpu`, `memory`, and `ephemeral-storage`, `pid` may be specified to reserve the specified number of process IDs for @@ -152,9 +150,9 @@ exist. Kubelet will fail if an invalid cgroup is specified. - **Kubelet Flag**: `--reserved-cpus=0-3` `reserved-cpus` is meant to define an explicit CPU set for OS system daemons and -kubernetes system daemons. This option is added in 1.17 release. `reserved-cpus` -is for systems that do not intent to define separate top level cgroups for -OS system daemons and kubernetes system daemons with regard to cpuset resource. +kubernetes system daemons. `reserved-cpus` is for systems that do not intend to +define separate top level cgroups for OS system daemons and kubernetes system daemons +with regard to cpuset resource. If the Kubelet **does not** have `--system-reserved-cgroup` and `--kube-reserved-cgroup`, the explicit cpuset provided by `reserved-cpus` will take precedence over the CPUs defined by `--kube-reserved` and `--system-reserved` options. @@ -253,36 +251,4 @@ If `kube-reserved` and/or `system-reserved` is not enforced and system daemons exceed their reservation, `kubelet` evicts pods whenever the overall node memory usage is higher than `31.5Gi` or `storage` is greater than `90Gi` -## Feature Availability - -As of Kubernetes version 1.2, it has been possible to **optionally** specify -`kube-reserved` and `system-reserved` reservations. The scheduler switched to -using `Allocatable` instead of `Capacity` when available in the same release. - -As of Kubernetes version 1.6, `eviction-thresholds` are being considered by -computing `Allocatable`. To revert to the old behavior set -`--experimental-allocatable-ignore-eviction` kubelet flag to `true`. - -As of Kubernetes version 1.6, `kubelet` enforces `Allocatable` on pods using -control groups. 
To revert to the old behavior unset `--enforce-node-allocatable` -kubelet flag. Note that unless `--kube-reserved`, or `--system-reserved` or -`--eviction-hard` flags have non-default values, `Allocatable` enforcement does -not affect existing deployments. - -As of Kubernetes version 1.6, `kubelet` launches pods in their own cgroup -sandbox in a dedicated part of the cgroup hierarchy it manages. Operators are -required to drain their nodes prior to upgrade of the `kubelet` from prior -versions in order to ensure pods and their associated containers are launched in -the proper part of the cgroup hierarchy. - -As of Kubernetes version 1.7, `kubelet` supports specifying `storage` as a resource -for `kube-reserved` and `system-reserved`. - -As of Kubernetes version 1.8, the `storage` key name was changed to `ephemeral-storage` -for the alpha release. - -As of Kubernetes version 1.17, you can optionally specify -explicit cpuset by `reserved-cpus` as CPUs reserved for OS system -daemons/interrupts/timers and Kubernetes daemons. - {{% /capture %}} diff --git a/content/en/docs/tasks/administer-cluster/safely-drain-node.md b/content/en/docs/tasks/administer-cluster/safely-drain-node.md index e7a4a093ac6ef..29006ff754318 100644 --- a/content/en/docs/tasks/administer-cluster/safely-drain-node.md +++ b/content/en/docs/tasks/administer-cluster/safely-drain-node.md @@ -9,7 +9,7 @@ content_template: templates/task --- {{% capture overview %}} -This page shows how to safely drain a machine, respecting the PodDisruptionBudget you have defined. +This page shows how to safely drain a node, respecting the PodDisruptionBudget you have defined. {{% /capture %}} {{% capture prerequisites %}} @@ -156,6 +156,7 @@ application owners and cluster owners to establish an agreement on behavior in t {{% capture whatsnext %}} * Follow steps to protect your application by [configuring a Pod Disruption Budget](/docs/tasks/run-application/configure-pdb/). +* Learn more about [maintenance on a node](/docs/tasks/administer-cluster/cluster-management/#maintenance-on-a-node). {{% /capture %}} diff --git a/content/en/docs/tasks/administer-cluster/topology-manager.md b/content/en/docs/tasks/administer-cluster/topology-manager.md index 382038a3a54d7..2e37830e411b8 100644 --- a/content/en/docs/tasks/administer-cluster/topology-manager.md +++ b/content/en/docs/tasks/administer-cluster/topology-manager.md @@ -8,11 +8,12 @@ reviewers: - nolancon content_template: templates/task +min-kubernetes-server-version: v1.18 --- {{% capture overview %}} -{{< feature-state state="alpha" >}} +{{< feature-state state="beta" >}} An increasing number of systems leverage a combination of CPUs and hardware accelerators to support latency-critical execution and high-throughput parallel computation. These include workloads in fields such as telecommunications, scientific computing, machine learning, financial services and data analytics. Such hybrid systems comprise a high performance environment. @@ -44,6 +45,10 @@ The Topology manager receives Topology information from the *Hint Providers* as The selected hint is stored as part of the Topology Manager. Depending on the policy configured the pod can be accepted or rejected from the node based on the selected hint. The hint is then stored in the Topology Manager for use by the *Hint Providers* when making the resource allocation decisions. 
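
To make this concrete, here is a minimal `KubeletConfiguration` sketch that enables the feature gate and selects a policy; the policies themselves are described in the sections that follow, and how you pass this file to the kubelet depends on how your nodes are provisioned:

```yaml
# Illustrative KubeletConfiguration fragment; pass it to the kubelet with
# --config or through your node provisioning tooling.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  TopologyManager: true        # enabled by default from Kubernetes 1.18
# one of: none, best-effort, restricted, single-numa-node
topologyManagerPolicy: single-numa-node
```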
+### Enable the Topology Manager feature + +Support for the Topology Manager requires `TopologyManager` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled. It is enabled by default starting with Kubernetes 1.18. + ### Topology Manager Policies The Topology Manager currently: @@ -176,12 +181,10 @@ In the case of the `BestEffort` pod the CPU Manager would send back the default Using this information the Topology Manager calculates the optimal hint for the pod and stores this information, which will be used by the Hint Providers when they are making their resource assignments. ### Known Limitations -1. As of K8s 1.16 the Topology Manager is currently only guaranteed to work if a *single* container in the pod spec requires aligned resources. This is due to the hint generation being based on current resource allocations, and all containers in a pod generate hints before any resource allocation has been made. This results in unreliable hints for all but the first container in a pod. -*Due to this limitation if multiple pods/containers are considered by Kubelet in quick succession they may not respect the Topology Manager policy. - -2. The maximum number of NUMA nodes that Topology Manager will allow is 8, past this there will be a state explosion when trying to enumerate the possible NUMA affinities and generating their hints. +1. The maximum number of NUMA nodes that Topology Manager allows is 8. With more than 8 NUMA nodes there will be a state explosion when trying to enumerate the possible NUMA affinities and generating their hints. -3. The scheduler is not topology-aware, so it is possible to be scheduled on a node and then fail on the node due to the Topology Manager. +2. The scheduler is not topology-aware, so it is possible to be scheduled on a node and then fail on the node due to the Topology Manager. +3. The Device Manager and the CPU Manager are the only components to adopt the Topology Manager's HintProvider interface. This means that NUMA alignment can only be achieved for resources managed by the CPU Manager and the Device Manager. Memory or Hugepages are not considered by the Topology Manager for NUMA alignment. {{% /capture %}} diff --git a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md new file mode 100644 index 0000000000000..ded131d6103dd --- /dev/null +++ b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md @@ -0,0 +1,120 @@ +--- +title: Assign Pods to Nodes using Node Affinity +min-kubernetes-server-version: v1.10 +content_template: templates/task +weight: 120 +--- + +{{% capture overview %}} +This page shows how to assign a Kubernetes Pod to a particular node using Node Affinity in a +Kubernetes cluster. +{{% /capture %}} + +{{% capture prerequisites %}} + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + +{{% /capture %}} + +{{% capture steps %}} + +## Add a label to a node + +1. List the nodes in your cluster, along with their labels: + + ```shell + kubectl get nodes --show-labels + ``` + The output is similar to this: + + ```shell + NAME STATUS ROLES AGE VERSION LABELS + worker0 Ready 1d v1.13.0 ...,kubernetes.io/hostname=worker0 + worker1 Ready 1d v1.13.0 ...,kubernetes.io/hostname=worker1 + worker2 Ready 1d v1.13.0 ...,kubernetes.io/hostname=worker2 + ``` +1. 
Chose one of your nodes, and add a label to it: + + ```shell + kubectl label nodes disktype=ssd + ``` + where `` is the name of your chosen node. + +1. Verify that your chosen node has a `disktype=ssd` label: + + ```shell + kubectl get nodes --show-labels + ``` + + The output is similar to this: + + ``` + NAME STATUS ROLES AGE VERSION LABELS + worker0 Ready 1d v1.13.0 ...,disktype=ssd,kubernetes.io/hostname=worker0 + worker1 Ready 1d v1.13.0 ...,kubernetes.io/hostname=worker1 + worker2 Ready 1d v1.13.0 ...,kubernetes.io/hostname=worker2 + ``` + + In the preceding output, you can see that the `worker0` node has a + `disktype=ssd` label. + +## Schedule a Pod using required node affinity + +This manifest describes a Pod that has a `requiredDuringSchedulingIgnoredDuringExecution` node affinity,`disktype: ssd`. +This means that the pod will get scheduled only on a node that has a `disktype=ssd` label. + +{{< codenew file="pods/pod-nginx-required-affinity.yaml" >}} + +1. Apply the manifest to create a Pod that is scheduled onto your + chosen node: + + ```shell + kubectl apply -f https://k8s.io/examples/pods/pod-nginx-required-affinity.yaml + ``` + +1. Verify that the pod is running on your chosen node: + + ```shell + kubectl get pods --output=wide + ``` + + The output is similar to this: + + ``` + NAME READY STATUS RESTARTS AGE IP NODE + nginx 1/1 Running 0 13s 10.200.0.4 worker0 + ``` + +## Schedule a Pod using preferred node affinity + +This manifest describes a Pod that has a `preferredDuringSchedulingIgnoredDuringExecution` node affinity,`disktype: ssd`. +This means that the pod will prefer a node that has a `disktype=ssd` label. + +{{< codenew file="pods/pod-nginx-preferred-affinity.yaml" >}} + +1. Apply the manifest to create a Pod that is scheduled onto your + chosen node: + + ```shell + kubectl apply -f https://k8s.io/examples/pods/pod-nginx-preferred-affinity.yaml + ``` + +1. Verify that the pod is running on your chosen node: + + ```shell + kubectl get pods --output=wide + ``` + + The output is similar to this: + + ``` + NAME READY STATUS RESTARTS AGE IP NODE + nginx 1/1 Running 0 13s 10.200.0.4 worker0 + ``` + +{{% /capture %}} + +{{% capture whatsnext %}} +Learn more about +[Node Affinity](/docs/concepts/configuration/assign-pod-node/#node-affinity). +{{% /capture %}} diff --git a/content/en/docs/tasks/configure-pod-container/configure-gmsa.md b/content/en/docs/tasks/configure-pod-container/configure-gmsa.md index ff47a7dd8c7ef..83d9dee596a02 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-gmsa.md +++ b/content/en/docs/tasks/configure-pod-container/configure-gmsa.md @@ -6,7 +6,7 @@ weight: 20 {{% capture overview %}} -{{< feature-state for_k8s_version="v1.16" state="beta" >}} +{{< feature-state for_k8s_version="v1.18" state="stable" >}} This page shows how to configure [Group Managed Service Accounts](https://docs.microsoft.com/en-us/windows-server/security/group-managed-service-accounts/group-managed-service-accounts-overview) (GMSA) for Pods and containers that will run on Windows nodes. Group Managed Service Accounts are a specific type of Active Directory account that provides automatic password management, simplified service principal name (SPN) management, and the ability to delegate the management to other administrators across multiple servers. 
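
As a rough preview of where this configuration eventually lands, a Pod (or an individual container) references a GMSA credential spec by name through `securityContext.windowsOptions`. The resource name `gmsa-webapp1` and the container image below are placeholders, not values from this guide:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gmsa-demo
spec:
  securityContext:
    windowsOptions:
      # Name of a GMSACredentialSpec resource created on the cluster (placeholder)
      gmsaCredentialSpecName: gmsa-webapp1
  containers:
  - name: iis
    image: mcr.microsoft.com/windows/servercore/iis
  nodeSelector:
    kubernetes.io/os: windows
```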
@@ -18,9 +18,6 @@ In Kubernetes, GMSA credential specs are configured at a Kubernetes cluster-wide You need to have a Kubernetes cluster and the `kubectl` command-line tool must be configured to communicate with your cluster. The cluster is expected to have Windows worker nodes. This section covers a set of initial steps required once for each cluster: -### WindowsGMSA feature gate -The `WindowsGMSA` feature gate (required to pass down GMSA credential specs from the pod specs to the container runtime) is enabled by default on the API server and the kubelet. See [Feature Gates](/docs/reference/command-line-tools-reference/feature-gates/) for an explanation of enabling or disabling feature gates. - ### Install the GMSACredentialSpec CRD A [CustomResourceDefinition](/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/)(CRD) for GMSA credential spec resources needs to be configured on the cluster to define the custom resource type `GMSACredentialSpec`. Download the GMSA CRD [YAML](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/gmsa-crd.yml) and save it as gmsa-crd.yaml. Next, install the CRD with `kubectl apply -f gmsa-crd.yaml` @@ -42,7 +39,7 @@ Installing the above webhooks and associated objects require the steps below: 1. Create the validating and mutating webhook configurations referring to the deployment. -A [script](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/deploy-gmsa-webhook.sh) can be used to deploy and configure the GMSA webhooks and associated objects mentioned above. The script can be run with a ```--dry-run``` option to allow you to review the changes that would be made to your cluster. +A [script](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/deploy-gmsa-webhook.sh) can be used to deploy and configure the GMSA webhooks and associated objects mentioned above. The script can be run with a ```--dry-run=server``` option to allow you to review the changes that would be made to your cluster. The [YAML template](https://github.com/kubernetes-sigs/windows-gmsa/blob/master/admission-webhook/deploy/gmsa-webhook.yml.tpl) used by the script may also be used to deploy the webhooks and associated objects manually (with appropriate substitutions for the parameters) diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md index dfa0a1b41467c..d4b306f02bbfc 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md +++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md @@ -6,20 +6,20 @@ weight: 110 {{% capture overview %}} -This page shows how to configure liveness, readiness and startup probes for Containers. +This page shows how to configure liveness, readiness and startup probes for containers. The [kubelet](/docs/admin/kubelet/) uses liveness probes to know when to -restart a Container. For example, liveness probes could catch a deadlock, +restart a container. For example, liveness probes could catch a deadlock, where an application is running, but unable to make progress. Restarting a -Container in such a state can help to make the application more available +container in such a state can help to make the application more available despite bugs. 
-The kubelet uses readiness probes to know when a Container is ready to start -accepting traffic. A Pod is considered ready when all of its Containers are ready. +The kubelet uses readiness probes to know when a container is ready to start +accepting traffic. A Pod is considered ready when all of its containers are ready. One use of this signal is to control which Pods are used as backends for Services. When a Pod is not ready, it is removed from Service load balancers. -The kubelet uses startup probes to know when a Container application has started. +The kubelet uses startup probes to know when a container application has started. If such a probe is configured, it disables liveness and readiness checks until it succeeds, making sure those probes don't interfere with the application startup. This can be used to adopt liveness checks on slow starting containers, avoiding them @@ -41,27 +41,27 @@ Many applications running for long periods of time eventually transition to broken states, and cannot recover except by being restarted. Kubernetes provides liveness probes to detect and remedy such situations. -In this exercise, you create a Pod that runs a Container based on the +In this exercise, you create a Pod that runs a container based on the `k8s.gcr.io/busybox` image. Here is the configuration file for the Pod: {{< codenew file="pods/probe/exec-liveness.yaml" >}} -In the configuration file, you can see that the Pod has a single Container. +In the configuration file, you can see that the Pod has a single `Container`. The `periodSeconds` field specifies that the kubelet should perform a liveness probe every 5 seconds. The `initialDelaySeconds` field tells the kubelet that it should wait 5 second before performing the first probe. To perform a probe, the -kubelet executes the command `cat /tmp/healthy` in the Container. If the -command succeeds, it returns 0, and the kubelet considers the Container to be alive and -healthy. If the command returns a non-zero value, the kubelet kills the Container +kubelet executes the command `cat /tmp/healthy` in the target container. If the +command succeeds, it returns 0, and the kubelet considers the container to be alive and +healthy. If the command returns a non-zero value, the kubelet kills the container and restarts it. -When the Container starts, it executes this command: +When the container starts, it executes this command: ```shell /bin/sh -c "touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600" ``` -For the first 30 seconds of the Container's life, there is a `/tmp/healthy` file. +For the first 30 seconds of the container's life, there is a `/tmp/healthy` file. So during the first 30 seconds, the command `cat /tmp/healthy` returns a success code. After 30 seconds, `cat /tmp/healthy` returns a failure code. @@ -79,7 +79,7 @@ kubectl describe pod liveness-exec The output indicates that no liveness probes have failed yet: -```shell +``` FirstSeen LastSeen Count From SubobjectPath Type Reason Message --------- -------- ----- ---- ------------- -------- ------ ------- 24s 24s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0 @@ -98,7 +98,7 @@ kubectl describe pod liveness-exec At the bottom of the output, there are messages indicating that the liveness probes have failed, and the containers have been killed and recreated. 
-```shell +``` FirstSeen LastSeen Count From SubobjectPath Type Reason Message --------- -------- ----- ---- ------------- -------- ------ ------- 37s 37s 1 {default-scheduler } Normal Scheduled Successfully assigned liveness-exec to worker0 @@ -109,7 +109,7 @@ FirstSeen LastSeen Count From SubobjectPath Type 2s 2s 1 {kubelet worker0} spec.containers{liveness} Warning Unhealthy Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory ``` -Wait another 30 seconds, and verify that the Container has been restarted: +Wait another 30 seconds, and verify that the container has been restarted: ```shell kubectl get pod liveness-exec @@ -117,7 +117,7 @@ kubectl get pod liveness-exec The output shows that `RESTARTS` has been incremented: -```shell +``` NAME READY STATUS RESTARTS AGE liveness-exec 1/1 Running 1 1m ``` @@ -130,23 +130,23 @@ image. {{< codenew file="pods/probe/http-liveness.yaml" >}} -In the configuration file, you can see that the Pod has a single Container. +In the configuration file, you can see that the Pod has a single container. The `periodSeconds` field specifies that the kubelet should perform a liveness probe every 3 seconds. The `initialDelaySeconds` field tells the kubelet that it should wait 3 seconds before performing the first probe. To perform a probe, the -kubelet sends an HTTP GET request to the server that is running in the Container +kubelet sends an HTTP GET request to the server that is running in the container and listening on port 8080. If the handler for the server's `/healthz` path -returns a success code, the kubelet considers the Container to be alive and -healthy. If the handler returns a failure code, the kubelet kills the Container +returns a success code, the kubelet considers the container to be alive and +healthy. If the handler returns a failure code, the kubelet kills the container and restarts it. Any code greater than or equal to 200 and less than 400 indicates success. Any other code indicates failure. You can see the source code for the server in -[server.go](https://github.com/kubernetes/kubernetes/blob/master/test/images/agnhost/liveness/server.go). +[server.go](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/test/images/agnhost/liveness/server.go). -For the first 10 seconds that the Container is alive, the `/healthz` handler +For the first 10 seconds that the container is alive, the `/healthz` handler returns a status of 200. After that, the handler returns a status of 500. ```go @@ -162,9 +162,9 @@ http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { }) ``` -The kubelet starts performing health checks 3 seconds after the Container starts. +The kubelet starts performing health checks 3 seconds after the container starts. So the first couple of health checks will succeed. But after 10 seconds, the health -checks will fail, and the kubelet will kill and restart the Container. +checks will fail, and the kubelet will kill and restart the container. 
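
Putting the fields described above together, the probe portion of the container spec looks roughly like the following; treat it as an illustrative sketch of the referenced manifest rather than its exact contents:

```yaml
livenessProbe:
  httpGet:
    path: /healthz    # handler returns 200 for the first 10 seconds, then 500
    port: 8080
  initialDelaySeconds: 3   # wait 3 seconds before the first probe
  periodSeconds: 3         # probe every 3 seconds
```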
To try the HTTP liveness check, create a Pod: @@ -173,21 +173,21 @@ kubectl apply -f https://k8s.io/examples/pods/probe/http-liveness.yaml ``` After 10 seconds, view Pod events to verify that liveness probes have failed and -the Container has been restarted: +the container has been restarted: ```shell kubectl describe pod liveness-http ``` In releases prior to v1.13 (including v1.13), if the environment variable -`http_proxy` (or `HTTP_PROXY`) is set on the node where a pod is running, +`http_proxy` (or `HTTP_PROXY`) is set on the node where a Pod is running, the HTTP liveness probe uses that proxy. In releases after v1.13, local HTTP proxy environment variable settings do not affect the HTTP liveness probe. ## Define a TCP liveness probe -A third type of liveness probe uses a TCP Socket. With this configuration, the +A third type of liveness probe uses a TCP socket. With this configuration, the kubelet will attempt to open a socket to your container on the specified port. If it can establish a connection, the container is considered healthy, if it can’t it is considered a failure. @@ -197,7 +197,7 @@ can’t it is considered a failure. As you can see, configuration for a TCP check is quite similar to an HTTP check. This example uses both readiness and liveness probes. The kubelet will send the first readiness probe 5 seconds after the container starts. This will attempt to -connect to the `goproxy` container on port 8080. If the probe succeeds, the pod +connect to the `goproxy` container on port 8080. If the probe succeeds, the Pod will be marked as ready. The kubelet will continue to run this check every 10 seconds. @@ -351,7 +351,7 @@ port to perform the check. The kubelet sends the probe to the pod’s IP address unless the address is overridden by the optional `host` field in `httpGet`. If `scheme` field is set to `HTTPS`, the kubelet sends an HTTPS request skipping the certificate verification. In most scenarios, you do not want to set the `host` field. -Here's one scenario where you would set it. Suppose the Container listens on 127.0.0.1 +Here's one scenario where you would set it. Suppose the container listens on 127.0.0.1 and the Pod's `hostNetwork` field is true. Then `host`, under `httpGet`, should be set to 127.0.0.1. If your pod relies on virtual hosts, which is probably the more common case, you should not use `host`, but rather set the `Host` header in `httpHeaders`. @@ -367,7 +367,7 @@ to resolve it. * Learn more about [Container Probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). -### Reference +You can also read the API references for: * [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) * [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) diff --git a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md index dcba78d81a5d8..71fad95de9c60 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -33,6 +33,8 @@ kubectl create configmap ``` where \ is the name you want to assign to the ConfigMap and \ is the directory, file, or literal value to draw the data from. +The name of a ConfigMap object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). 
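
For example, you can create a ConfigMap directly from literal values; the name and keys below are purely illustrative:

```shell
# Create a ConfigMap named example-config with two literal key/value pairs
kubectl create configmap example-config \
  --from-literal=log.level=debug \
  --from-literal=retry.count=3
```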
When you are creating a ConfigMap based on a file, the key in the \ defaults to the basename of the file, and the value defaults to the file content. diff --git a/content/en/docs/tasks/configure-pod-container/configure-runasusername.md b/content/en/docs/tasks/configure-pod-container/configure-runasusername.md index 8fecb6535c1d8..530666e9dc12c 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-runasusername.md +++ b/content/en/docs/tasks/configure-pod-container/configure-runasusername.md @@ -6,13 +6,9 @@ weight: 20 {{% capture overview %}} -{{< feature-state for_k8s_version="v1.17" state="beta" >}} +{{< feature-state for_k8s_version="v1.18" state="stable" >}} -This page shows how to enable and use the `RunAsUserName` feature for pods and containers that will run on Windows nodes. This feature is meant to be the Windows equivalent of the Linux-specific `runAsUser` feature, allowing users to run the container entrypoints with a different username that their default ones. - -{{< note >}} -This feature is in beta. The overall functionality for `RunAsUserName` will not change, but there may be some changes regarding the username validation. -{{< /note >}} +This page shows how to use the `runAsUserName` setting for Pods and containers that will run on Windows nodes. This is roughly equivalent of the Linux-specific `runAsUser` setting, allowing you to run applications in a container as a different username than the default. {{% /capture %}} @@ -60,7 +56,6 @@ The output should be: ContainerUser ``` - ## Set the Username for a Container To specify the username with which to execute a Container's processes, include the `securityContext` field ([SecurityContext](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#securitycontext-v1-core)) in the Container manifest, and within it, the `windowsOptions` ([WindowsSecurityContextOptions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#windowssecuritycontextoptions-v1-core) field containing the `runAsUserName` field. diff --git a/content/en/docs/tasks/configure-pod-container/configure-service-account.md b/content/en/docs/tasks/configure-pod-container/configure-service-account.md index f42bc8e1fd6f3..a86ae91aca960 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/en/docs/tasks/configure-pod-container/configure-service-account.md @@ -95,6 +95,9 @@ metadata: EOF ``` +The name of a ServiceAccount object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + If you get a complete dump of the service account object, like this: ```shell @@ -296,9 +299,67 @@ token available to the pod at a configurable file path, and refresh the token as The application is responsible for reloading the token when it rotates. Periodic reloading (e.g. once every 5 minutes) is sufficient for most usecases. +## Service Account Issuer Discovery + +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +The Service Account Issuer Discovery feature is enabled by enabling the +`ServiceAccountIssuerDiscovery` [feature gate](/docs/reference/command-line-tools-reference/feature) +and then enabling the Service Account Token Projection feature as described +[above](#service-account-token-volume-projection). + +{{< note >}} +The issuer URL must comply with the +[OIDC Discovery Spec](https://openid.net/specs/openid-connect-discovery-1_0.html). 
In +practice, this means it must use the `https` scheme, and should serve an OpenID +provider configuration at `{service-account-issuer}/.well-known/openid-configuration`. + +If the URL does not comply, the `ServiceAccountIssuerDiscovery` endpoints will +not be registered, even if the feature is enabled. +{{< /note >}} + +The Service Account Issuer Discovery feature enables federation of Kubernetes +service account tokens issued by a cluster (the _identity provider_) with +external systems (_relying parties_). + +When enabled, the Kubernetes API server provides an OpenID Provider +Configuration document at `/.well-known/openid-configuration` and the associated +JSON Web Key Set (JWKS) at `/openid/v1/jwks`. The OpenID Provider Configuration +is sometimes referred to as the _discovery document_. + +When enabled, the cluster is also configured with a default RBAC ClusterRole +called `system:service-account-issuer-discovery`. No role bindings are provided +by default. Administrators may, for example, choose whether to bind the role to +`system:authenticated` or `system:unauthenticated` depending on their security +requirements and which external systems they intend to federate with. + +{{< note >}} +The responses served at `/.well-known/openid-configuration` and +`/openid/v1/jwks` are designed to be OIDC compatible, but not strictly OIDC +compliant. Those documents contain only the parameters necessary to perform +validation of Kubernetes service account tokens. +{{< /note >}} + +The JWKS response contains public keys that a relying party can use to validate +the Kubernetes service account tokens. Relying parties first query for the +OpenID Provider Configuration, and use the `jwks_uri` field in the response to +find the JWKS. + +In many cases, Kubernetes API servers are not available on the public internet, +but public endpoints that serve cached responses from the API server can be made +available by users or service providers. In these cases, it is possible to +override the `jwks_uri` in the OpenID Provider Configuration so that it points +to the public endpoint, rather than the API server's address, by passing the +`--service-account-jwks-uri` flag to the API server. Like the issuer URL, the +JWKS URI is required to use the `https` scheme. {{% /capture %}} {{% capture whatsnext %}} -See also the -[Cluster Admin Guide to Service Accounts](/docs/reference/access-authn-authz/service-accounts-admin/). + +See also: + +- [Cluster Admin Guide to Service Accounts](/docs/reference/access-authn-authz/service-accounts-admin/) +- [Service Account Signing Key Retrieval KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/20190730-oidc-discovery.md) +- [OIDC Discovery Spec](https://openid.net/specs/openid-connect-discovery-1_0.html) + {{% /capture %}} diff --git a/content/en/docs/tasks/configure-pod-container/security-context.md b/content/en/docs/tasks/configure-pod-container/security-context.md index bc1fc3a8272bf..038fbcb97fb48 100644 --- a/content/en/docs/tasks/configure-pod-container/security-context.md +++ b/content/en/docs/tasks/configure-pod-container/security-context.md @@ -140,6 +140,45 @@ Exit your shell: exit ``` +## Configure volume permission and ownership change policy for Pods + +{{< feature-state for_k8s_version="v1.18" state="alpha" >}} + +By default, Kubernetes recursively changes ownership and permissions for the contents of each +volume to match the `fsGroup` specified in a Pod's `securityContext` when that volume is +mounted. 
+For large volumes, checking and changing ownership and permissions can take a lot of time, +slowing Pod startup. You can use the `fsGroupChangePolicy` field inside a `securityContext` +to control the way that Kubernetes checks and manages ownership and permissions +for a volume. + +**fsGroupChangePolicy** - `fsGroupChangePolicy` defines behavior for changing ownership and permission of the volume +before being exposed inside a Pod. This field only applies to volume types that support +`fsGroup` controlled ownership and permissions. This field has two possible values: + +* _OnRootMismatch_: Only change permissions and ownership if permission and ownership of root directory does not match with expected permissions of the volume. This could help shorten the time it takes to change ownership and permission of a volume. +* _Always_: Always change permission and ownership of the volume when volume is mounted. + +For example: + +```yaml +securityContext: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + fsGroupChangePolicy: "OnRootMismatch" +``` + +This is an alpha feature. To use it, enable the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `ConfigurableFSGroupPolicy` for the kube-api-server, the kube-controller-manager, and for the kubelet. + +{{< note >}} +This field has no effect on ephemeral volume types such as +[`secret`](https://kubernetes.io/docs/concepts/storage/volumes/#secret), +[`configMap`](https://kubernetes.io/docs/concepts/storage/volumes/#configmap), +and [`emptydir`](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir). +{{< /note >}} + + ## Set the security context for a Container To specify security settings for a Container, include the `securityContext` field diff --git a/content/en/docs/tasks/debug-application-cluster/audit.md b/content/en/docs/tasks/debug-application-cluster/audit.md index ed95d353ac29d..8e7497bdb8007 100644 --- a/content/en/docs/tasks/debug-application-cluster/audit.md +++ b/content/en/docs/tasks/debug-application-cluster/audit.md @@ -235,6 +235,8 @@ spec: ``` For the complete API definition, see [AuditSink](/docs/reference/generated/kubernetes-api/v1.13/#auditsink-v1alpha1-auditregistration). Multiple objects will exist as independent solutions. +The name of an AuditSink object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). Existing static backends that you configure with runtime flags are not affected by this feature. However, the dynamic backends share the truncate options of the static webhook. If webhook truncate options are set with runtime flags, they are applied to all dynamic backends. diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application.md b/content/en/docs/tasks/debug-application-cluster/debug-application.md index 053af8b65456e..f63173e334841 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-application.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-application.md @@ -64,38 +64,8 @@ Again, the information from `kubectl describe ...` should be informative. 
The m #### My pod is crashing or otherwise unhealthy -First, take a look at the logs of -the current container: - -```shell -kubectl logs ${POD_NAME} ${CONTAINER_NAME} -``` - -If your container has previously crashed, you can access the previous container's crash log with: - -```shell -kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME} -``` - -Alternately, you can run commands inside that container with `exec`: - -```shell -kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1} ${ARG2} ... ${ARGN} -``` - -{{< note >}} -`-c ${CONTAINER_NAME}` is optional. You can omit it for Pods that only contain a single container. -{{< /note >}} - -As an example, to look at the logs from a running Cassandra pod, you might run - -```shell -kubectl exec cassandra -- cat /var/log/cassandra/system.log -``` - -If none of these approaches work, you can find the host machine that the pod is running on and SSH into that host, -but this should generally not be necessary given tools in the Kubernetes API. Therefore, if you find yourself needing to ssh into a machine, please file a -feature request on GitHub describing your use case and why these tools are insufficient. +Once your pod has been scheduled, the methods described in [Debug Running Pods]( +/docs/tasks/debug-application-cluster/debug-running-pods/) are available for debugging. #### My pod is running but not doing what I told it to do diff --git a/content/en/docs/tasks/debug-application-cluster/debug-cluster.md b/content/en/docs/tasks/debug-application-cluster/debug-cluster.md index ae56c42411053..473f364361e58 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-cluster.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-cluster.md @@ -29,6 +29,11 @@ kubectl get nodes And verify that all of the nodes you expect to see are present and that they are all in the `Ready` state. +To get detailed information about the overall health of your cluster, you can run: + +```shell +kubectl cluster-info dump +``` ## Looking at logs For now, digging deeper into the cluster requires logging into the relevant machines. Here are the locations @@ -119,7 +124,4 @@ This is an incomplete list of things that could go wrong, and how to adjust your - Mitigates: Node shutdown - Mitigates: Kubelet software fault -- Action: [Multiple independent clusters](/docs/concepts/cluster-administration/federation/) (and avoid making risky changes to all clusters at once) - - Mitigates: Everything listed above. - {{% /capture %}} diff --git a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 56ba566bc6d22..ec84b82fd02fb 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -93,40 +93,9 @@ worker node, but it can't run on that machine. Again, the information from ### My pod is crashing or otherwise unhealthy -First, take a look at the logs of the current container: +Once your pod has been scheduled, the methods described in [Debug Running Pods]( +/docs/tasks/debug-application-cluster/debug-running-pods/) are available for debugging. 
-```shell -kubectl logs ${POD_NAME} ${CONTAINER_NAME} -``` - -If your container has previously crashed, you can access the previous -container's crash log with: - -```shell -kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME} -``` - -Alternately, you can run commands inside that container with `exec`: - -```shell -kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1} ${ARG2} ... ${ARGN} -``` - -{{< note >}} -`-c ${CONTAINER_NAME}` is optional. You can omit it for pods that -only contain a single container. -{{< /note >}} - -As an example, to look at the logs from a running Cassandra pod, you might run: - -```shell -kubectl exec cassandra -- cat /var/log/cassandra/system.log -``` - -If your cluster enabled it, you can also try adding an [ephemeral container](/docs/concepts/workloads/pods/ephemeral-containers/) into the existing pod. You can use the new temporary container to run arbitrary commands, for example, to diagnose problems inside the Pod. See the page about [ephemeral container](/docs/concepts/workloads/pods/ephemeral-containers/) for more details, including feature availability. - -If none of these approaches work, you can find the host machine that the pod is -running on and SSH into that host. ## Debugging ReplicationControllers diff --git a/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md new file mode 100644 index 0000000000000..95065ca595c6d --- /dev/null +++ b/content/en/docs/tasks/debug-application-cluster/debug-running-pod.md @@ -0,0 +1,190 @@ +--- +reviewers: +- verb +- soltysh +title: Debug Running Pods +content_template: templates/task +--- + +{{% capture overview %}} + +This page explains how to debug Pods running (or crashing) on a Node. + +{{% /capture %}} + +{{% capture prerequisites %}} + +* Your {{< glossary_tooltip text="Pod" term_id="pod" >}} should already be + scheduled and running. If your Pod is not yet running, start with [Troubleshoot + Applications](/docs/tasks/debug-application-cluster/debug-application/). +* For some of the advanced debugging steps you need to know on which Node the + Pod is running and have shell access to run commands on that Node. You don't + need that access to run the standard debug steps that use `kubectl`. + +{{% /capture %}} + +{{% capture steps %}} + +## Examining pod logs {#examine-pod-logs} + +First, look at the logs of the affected container: + +```shell +kubectl logs ${POD_NAME} ${CONTAINER_NAME} +``` + +If your container has previously crashed, you can access the previous container's crash log with: + +```shell +kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME} +``` + +## Debugging with container exec {#container-exec} + +If the {{< glossary_tooltip text="container image" term_id="image" >}} includes +debugging utilities, as is the case with images built from Linux and Windows OS +base images, you can run commands inside a specific container with +`kubectl exec`: + +```shell +kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- ${CMD} ${ARG1} ${ARG2} ... ${ARGN} +``` + +{{< note >}} +`-c ${CONTAINER_NAME}` is optional. You can omit it for Pods that only contain a single container. 
+{{< /note >}}
+
+As an example, to look at the logs from a running Cassandra pod, you might run
+
+```shell
+kubectl exec cassandra -- cat /var/log/cassandra/system.log
+```
+
+You can run a shell that's connected to your terminal using the `-i` and `-t`
+arguments to `kubectl exec`, for example:
+
+```shell
+kubectl exec -it cassandra -- sh
+```
+
+For more details, see [Get a Shell to a Running Container](
+/docs/tasks/debug-application-cluster/get-shell-running-container/).
+
+## Debugging with an ephemeral debug container {#ephemeral-container}
+
+{{< feature-state state="alpha" for_k8s_version="v1.18" >}}
+
+{{< glossary_tooltip text="Ephemeral containers" term_id="ephemeral-container" >}}
+are useful for interactive troubleshooting when `kubectl exec` is insufficient
+because a container has crashed or a container image doesn't include debugging
+utilities, such as with [distroless images](
+https://github.com/GoogleContainerTools/distroless). `kubectl` has an alpha
+command that can create ephemeral containers for debugging beginning with version
+`v1.18`.
+
+### Example debugging using ephemeral containers {#ephemeral-container-example}
+
+{{< note >}}
+The examples in this section require the `EphemeralContainers` [feature gate](
+/docs/reference/command-line-tools-reference/feature-gates/) enabled in your
+cluster and `kubectl` version v1.18 or later.
+{{< /note >}}
+
+You can use the `kubectl alpha debug` command to add ephemeral containers to a
+running Pod. First, create a pod for the example:
+
+```shell
+kubectl run ephemeral-demo --image=k8s.gcr.io/pause:3.1 --restart=Never
+```
+
+{{< note >}}
+This section uses the `pause` container image in examples because it does not
+contain userland debugging utilities, but this method works with all container
+images.
+{{< /note >}}
+
+If you attempt to use `kubectl exec` to create a shell you will see an error
+because there is no shell in this container image.
+
+```shell
+kubectl exec -it ephemeral-demo -- sh
+```
+
+```
+OCI runtime exec failed: exec failed: container_linux.go:346: starting container process caused "exec: \"sh\": executable file not found in $PATH": unknown
+```
+
+You can instead add a debugging container using `kubectl alpha debug`. If you
+specify the `-i`/`--interactive` argument, `kubectl` will automatically attach
+to the console of the Ephemeral Container.
+
+```shell
+kubectl alpha debug -it ephemeral-demo --image=busybox --target=ephemeral-demo
+```
+
+```
+Defaulting debug container name to debugger-8xzrl.
+If you don't see a command prompt, try pressing enter.
+/ #
+```
+
+This command adds a new busybox container and attaches to it. The `--target`
+parameter targets the process namespace of another container. It's necessary
+here because `kubectl run` does not enable [process namespace sharing](
+/docs/tasks/configure-pod-container/share-process-namespace/) in the pod it
+creates.
+
+{{< note >}}
+The `--target` parameter must be supported by the {{< glossary_tooltip
+text="Container Runtime" term_id="container-runtime" >}}. When not supported,
+the Ephemeral Container may not be started, or it may be started with an
+isolated process namespace.
+{{< /note >}}
+
+You can view the state of the newly created ephemeral container using `kubectl describe`:
+
+```shell
+kubectl describe pod ephemeral-demo
+```
+
+```
+...
+Ephemeral Containers: + debugger-8xzrl: + Container ID: docker://b888f9adfd15bd5739fefaa39e1df4dd3c617b9902082b1cfdc29c4028ffb2eb + Image: busybox + Image ID: docker-pullable://busybox@sha256:1828edd60c5efd34b2bf5dd3282ec0cc04d47b2ff9caa0b6d4f07a21d1c08084 + Port: + Host Port: + State: Running + Started: Wed, 12 Feb 2020 14:25:42 +0100 + Ready: False + Restart Count: 0 + Environment: + Mounts: +... +``` + +Use `kubectl delete` to remove the Pod when you're finished: + +```shell +kubectl delete pod ephemeral-demo +``` + + + +## Debugging via a shell on the node {#node-shell-session} + +If none of these approaches work, you can find the host machine that the pod is +running on and SSH into that host, but this should generally not be necessary +given tools in the Kubernetes API. Therefore, if you find yourself needing to +ssh into a machine, please file a feature request on GitHub describing your use +case and why these tools are insufficient. + +{{% /capture %}} diff --git a/content/en/docs/tasks/debug-application-cluster/debug-service.md b/content/en/docs/tasks/debug-application-cluster/debug-service.md index e0683fbd02e93..a065c9fa85009 100644 --- a/content/en/docs/tasks/debug-application-cluster/debug-service.md +++ b/content/en/docs/tasks/debug-application-cluster/debug-service.md @@ -8,57 +8,30 @@ title: Debug Services {{% capture overview %}} An issue that comes up rather frequently for new installations of Kubernetes is -that a `Service` is not working properly. You've run your `Deployment` and -created a `Service`, but you get no response when you try to access it. -This document will hopefully help you to figure out what's going wrong. +that a Service is not working properly. You've run your Pods through a +Deployment (or other workload controller) and created a Service, but you +get no response when you try to access it. This document will hopefully help +you to figure out what's going wrong. {{% /capture %}} {{% capture body %}} -## Conventions - -Throughout this doc you will see various commands that you can run. Some -commands need to be run within a `Pod`, others on a Kubernetes `Node`, and others -can run anywhere you have `kubectl` and credentials for the cluster. To make it -clear what is expected, this document will use the following conventions. - -If the command "COMMAND" is expected to run in a `Pod` and produce "OUTPUT": - -```shell -u@pod$ COMMAND -OUTPUT -``` - -If the command "COMMAND" is expected to run on a `Node` and produce "OUTPUT": - -```shell -u@node$ COMMAND -OUTPUT -``` - -If the command is "kubectl ARGS": - -```shell -kubectl ARGS -OUTPUT -``` - ## Running commands in a Pod -For many steps here you will want to see what a `Pod` running in the cluster -sees. The simplest way to do this is to run an interactive alpine `Pod`: +For many steps here you will want to see what a Pod running in the cluster +sees. The simplest way to do this is to run an interactive alpine Pod: ```none kubectl run -it --rm --restart=Never alpine --image=alpine sh -/ # ``` + {{< note >}} If you don't see a command prompt, try pressing enter. {{< /note >}} -If you already have a running `Pod` that you prefer to use, you can run a +If you already have a running Pod that you prefer to use, you can run a command in it using: ```shell @@ -67,21 +40,23 @@ kubectl exec -c -- ## Setup -For the purposes of this walk-through, let's run some `Pods`. 
Since you're -probably debugging your own `Service` you can substitute your own details, or you +For the purposes of this walk-through, let's run some Pods. Since you're +probably debugging your own Service you can substitute your own details, or you can follow along and get a second data point. ```shell kubectl run hostnames --image=k8s.gcr.io/serve_hostname \ - --labels=app=hostnames \ - --port=9376 \ - --replicas=3 + --replicas=3 +``` +```none deployment.apps/hostnames created ``` `kubectl` commands will print the type and name of the resource created or mutated, which can then be used in subsequent commands. + {{< note >}} -This is the same as if you started the `Deployment` with the following YAML: +This is the same as if you had started the Deployment with the following +YAML: ```yaml apiVersion: apps/v1 @@ -91,61 +66,111 @@ metadata: spec: selector: matchLabels: - app: hostnames + run: hostnames replicas: 3 template: metadata: labels: - app: hostnames + run: hostnames spec: containers: - name: hostnames image: k8s.gcr.io/serve_hostname - ports: - - containerPort: 9376 - protocol: TCP ``` + +The label "run" is automatically set by `kubectl run` to the name of the +Deployment. {{< /note >}} -Confirm your `Pods` are running: +You can confirm your Pods are running: ```shell -kubectl get pods -l app=hostnames +kubectl get pods -l run=hostnames +``` +```none NAME READY STATUS RESTARTS AGE hostnames-632524106-bbpiw 1/1 Running 0 2m hostnames-632524106-ly40y 1/1 Running 0 2m hostnames-632524106-tlaok 1/1 Running 0 2m ``` +You can also confirm that your Pods are serving. You can get the list of +Pod IP addresses and test them directly. + +```shell +kubectl get pods -l run=hostnames \ + -o go-template='{{range .items}}{{.status.podIP}}{{"\n"}}{{end}}' +``` +```none +10.244.0.5 +10.244.0.6 +10.244.0.7 +``` + +The example container used for this walk-through simply serves its own hostname +via HTTP on port 9376, but if you are debugging your own app, you'll want to +use whatever port number your Pods are listening on. + +From within a pod: + +```shell +for ep in 10.244.0.5:9376 10.244.0.6:9376 10.244.0.7:9376; do + wget -qO- $ep +done +``` + +This should produce something like: + +``` +hostnames-0uton +hostnames-bvc05 +hostnames-yp2kp +``` + +If you are not getting the responses you expect at this point, your Pods +might not be healthy or might not be listening on the port you think they are. +You might find `kubectl logs` to be useful for seeing what is happening, or +perhaps you need to `kubectl exec` directly into your Pods and debug from +there. + +Assuming everything has gone to plan so far, you can start to investigate why +your Service doesn't work. + ## Does the Service exist? -The astute reader will have noticed that we did not actually create a `Service` +The astute reader will have noticed that you did not actually create a Service yet - that is intentional. This is a step that sometimes gets forgotten, and is the first thing to check. -So what would happen if I tried to access a non-existent `Service`? Assuming you -have another `Pod` that consumes this `Service` by name you would get something -like: +What would happen if you tried to access a non-existent Service? If +you have another Pod that consumes this Service by name you would get +something like: ```shell -u@pod$ wget -O- hostnames +wget -O- hostnames +``` +```none Resolving hostnames (hostnames)... failed: Name or service not known. 
wget: unable to resolve host address 'hostnames' ``` -So the first thing to check is whether that `Service` actually exists: +The first thing to check is whether that Service actually exists: ```shell kubectl get svc hostnames +``` +```none No resources found. Error from server (NotFound): services "hostnames" not found ``` -So we have a culprit, let's create the `Service`. As before, this is for the -walk-through - you can use your own `Service`'s details here. +Let's create the Service. As before, this is for the walk-through - you can +use your own Service's details here. ```shell kubectl expose deployment hostnames --port=80 --target-port=9376 +``` +```none service/hostnames exposed ``` @@ -153,11 +178,16 @@ And read it back, just to be sure: ```shell kubectl get svc hostnames +``` +```none NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE hostnames ClusterIP 10.0.1.175 80/TCP 5s ``` -As before, this is the same as if you had started the `Service` with YAML: +Now you know that the Service exists. + +{{< note >}} +As before, this is the same as if you had started the Service with YAML: ```yaml apiVersion: v1 @@ -166,7 +196,7 @@ metadata: name: hostnames spec: selector: - app: hostnames + run: hostnames ports: - name: default protocol: TCP @@ -174,25 +204,35 @@ spec: targetPort: 9376 ``` -Now you can confirm that the `Service` exists. +In order to highlight the full range of configuration, the Service you created +here uses a different port number than the Pods. For many real-world +Services, these values might be the same. +{{< /note >}} + +## Does the Service work by DNS name? -## Does the Service work by DNS? +One of the most common ways that clients consume a Service is through a DNS +name. -From a `Pod` in the same `Namespace`: +From a Pod in the same Namespace: ```shell -u@pod$ nslookup hostnames +nslookup hostnames +``` +```none Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local Name: hostnames Address 1: 10.0.1.175 hostnames.default.svc.cluster.local ``` -If this fails, perhaps your `Pod` and `Service` are in different -`Namespaces`, try a namespace-qualified name: +If this fails, perhaps your Pod and Service are in different +Namespaces, try a namespace-qualified name (again, from within a Pod): ```shell -u@pod$ nslookup hostnames.default +nslookup hostnames.default +``` +```none Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local Name: hostnames.default @@ -200,11 +240,13 @@ Address 1: 10.0.1.175 hostnames.default.svc.cluster.local ``` If this works, you'll need to adjust your app to use a cross-namespace name, or -run your app and `Service` in the same `Namespace`. If this still fails, try a +run your app and Service in the same Namespace. If this still fails, try a fully-qualified name: ```shell -u@pod$ nslookup hostnames.default.svc.cluster.local +nslookup hostnames.default.svc.cluster.local +``` +```none Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local Name: hostnames.default.svc.cluster.local @@ -212,18 +254,20 @@ Address 1: 10.0.1.175 hostnames.default.svc.cluster.local ``` Note the suffix here: "default.svc.cluster.local". The "default" is the -`Namespace` we're operating in. The "svc" denotes that this is a `Service`. +Namespace you're operating in. The "svc" denotes that this is a Service. The "cluster.local" is your cluster domain, which COULD be different in your own cluster. 
-You can also try this from a `Node` in the cluster: +You can also try this from a Node in the cluster: {{< note >}} -10.0.0.10 is my DNS `Service`, yours might be different. +10.0.0.10 is the cluster's DNS Service IP, yours might be different. {{< /note >}} ```shell -u@node$ nslookup hostnames.default.svc.cluster.local 10.0.0.10 +nslookup hostnames.default.svc.cluster.local 10.0.0.10 +``` +```none Server: 10.0.0.10 Address: 10.0.0.10#53 @@ -232,39 +276,49 @@ Address: 10.0.1.175 ``` If you are able to do a fully-qualified name lookup but not a relative one, you -need to check that your `/etc/resolv.conf` file is correct. +need to check that your `/etc/resolv.conf` file in your Pod is correct. From +within a Pod: ```shell -u@pod$ cat /etc/resolv.conf +cat /etc/resolv.conf +``` + +You should see something like: + +``` nameserver 10.0.0.10 search default.svc.cluster.local svc.cluster.local cluster.local example.com options ndots:5 ``` -The `nameserver` line must indicate your cluster's DNS `Service`. This is +The `nameserver` line must indicate your cluster's DNS Service. This is passed into `kubelet` with the `--cluster-dns` flag. The `search` line must include an appropriate suffix for you to find the -`Service` name. In this case it is looking for `Services` in the local -`Namespace` (`default.svc.cluster.local`), `Services` in all `Namespaces` -(`svc.cluster.local`), and the cluster (`cluster.local`). Depending on your own -install you might have additional records after that (up to 6 total). The -cluster suffix is passed into `kubelet` with the `--cluster-domain` flag. We -assume that is "cluster.local" in this document, but yours might be different, -in which case you should change that in all of the commands above. +Service name. In this case it is looking for Services in the local +Namespace ("default.svc.cluster.local"), Services in all Namespaces +("svc.cluster.local"), and lastly for names in the cluster ("cluster.local"). +Depending on your own install you might have additional records after that (up +to 6 total). The cluster suffix is passed into `kubelet` with the +`--cluster-domain` flag. Throughout this document, the cluster suffix is +assumed to be "cluster.local". Your own clusters might be configured +differently, in which case you should change that in all of the previous +commands. The `options` line must set `ndots` high enough that your DNS client library considers search paths at all. Kubernetes sets this to 5 by default, which is high enough to cover all of the DNS names it generates. -### Does any Service exist in DNS? +### Does any Service work by DNS name? {#does-any-service-exist-in-dns} -If the above still fails - DNS lookups are not working for your `Service` - we +If the above still fails, DNS lookups are not working for your Service. You can take a step back and see what else is not working. The Kubernetes master -`Service` should always work: +Service should always work. From within a Pod: ```shell -u@pod$ nslookup kubernetes.default +nslookup kubernetes.default +``` +```none Server: 10.0.0.10 Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local @@ -272,34 +326,37 @@ Name: kubernetes.default Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local ``` -If this fails, you might need to go to the kube-proxy section of this doc, or -even go back to the top of this document and start over, but instead of -debugging your own `Service`, debug DNS. 
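
If even the `kubernetes.default` lookup fails, it is also worth confirming that
the cluster DNS Pods are running at all. Both kube-dns and CoreDNS deployments
conventionally carry the `k8s-app=kube-dns` label, so a quick check is:

```shell
kubectl get pods --namespace=kube-system -l k8s-app=kube-dns
```

If no Pods are listed, or they are crash-looping, the DNS Service itself is the
thing to debug.
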
+If this fails, please see the [kube-proxy](#is-the-kube-proxy-working) section +of this document, or even go back to the top of this document and start over, +but instead of debugging your own Service, debug the DNS Service. ## Does the Service work by IP? -Assuming we can confirm that DNS works, the next thing to test is whether your -`Service` works at all. From a node in your cluster, access the `Service`'s -IP (from `kubectl get` above). +Assuming you have confirmed that DNS works, the next thing to test is whether your +Service works by its IP address. From a Pod in your cluster, access the +Service's IP (from `kubectl get` above). ```shell -u@node$ curl 10.0.1.175:80 -hostnames-0uton +for i in $(seq 1 3); do + wget -qO- 10.0.1.175:80 +done +``` -u@node$ curl 10.0.1.175:80 -hostnames-yp2kp +This should produce something like: -u@node$ curl 10.0.1.175:80 +``` +hostnames-0uton hostnames-bvc05 +hostnames-yp2kp ``` -If your `Service` is working, you should get correct responses. If not, there +If your Service is working, you should get correct responses. If not, there are a number of things that could be going wrong. Read on. -## Is the Service correct? +## Is the Service defined correctly? It might sound silly, but you should really double and triple check that your -`Service` is correct and matches your `Pod`'s port. Read back your `Service` +Service is correct and matches your Pod's port. Read back your Service and verify it: ```shell @@ -316,7 +373,7 @@ kubectl get service hostnames -o json "resourceVersion": "347189", "creationTimestamp": "2015-07-07T15:24:29Z", "labels": { - "app": "hostnames" + "run": "hostnames" } }, "spec": { @@ -330,7 +387,7 @@ kubectl get service hostnames -o json } ], "selector": { - "app": "hostnames" + "run": "hostnames" }, "clusterIP": "10.0.1.175", "type": "ClusterIP", @@ -342,110 +399,116 @@ kubectl get service hostnames -o json } ``` -* Is the port you are trying to access in `spec.ports[]`? -* Is the `targetPort` correct for your `Pods` (many `Pods` choose to use a different port than the `Service`)? -* If you meant it to be a numeric port, is it a number (9376) or a -string "9376"? -* If you meant it to be a named port, do your `Pods` expose a port -with the same name? -* Is the port's `protocol` the same as the `Pod`'s? +* Is the Service port you are trying to access listed in `spec.ports[]`? +* Is the `targetPort` correct for your Pods (some Pods use a different port than the Service)? +* If you meant to use a numeric port, is it a number (9376) or a string "9376"? +* If you meant to use a named port, do your Pods expose a port with the same name? +* Is the port's `protocol` correct for your Pods? ## Does the Service have any Endpoints? -If you got this far, we assume that you have confirmed that your `Service` -exists and is resolved by DNS. Now let's check that the `Pods` you ran are -actually being selected by the `Service`. +If you got this far, you have confirmed that your Service is correctly +defined and is resolved by DNS. Now let's check that the Pods you ran are +actually being selected by the Service. -Earlier we saw that the `Pods` were running. We can re-check that: +Earlier you saw that the Pods were running. 
You can re-check that: ```shell -kubectl get pods -l app=hostnames +kubectl get pods -l run=hostnames +``` +```none NAME READY STATUS RESTARTS AGE hostnames-0uton 1/1 Running 0 1h hostnames-bvc05 1/1 Running 0 1h hostnames-yp2kp 1/1 Running 0 1h ``` -The "AGE" column says that these `Pods` are about an hour old, which implies that +The `-l run=hostnames` argument is a label selector - just like our Service +has. + +The "AGE" column says that these Pods are about an hour old, which implies that they are running fine and not crashing. -The `-l app=hostnames` argument is a label selector - just like our `Service` -has. Inside the Kubernetes system is a control loop which evaluates the -selector of every `Service` and saves the results into an `Endpoints` object. +The "RESTARTS" column says that these pods are not crashing frequently or being +restarted. Frequent restarts could lead to intermittent connectivity issues. +If the restart count is high, read more about how to [debug pods](/docs/tasks/debug-application-cluster/debug-pod-replication-controller/#debugging-pods). + +Inside the Kubernetes system is a control loop which evaluates the selector of +every Service and saves the results into a corresponding Endpoints object. ```shell kubectl get endpoints hostnames + NAME ENDPOINTS hostnames 10.244.0.5:9376,10.244.0.6:9376,10.244.0.7:9376 ``` -This confirms that the endpoints controller has found the correct `Pods` for -your `Service`. If the `hostnames` row is blank, you should check that the -`spec.selector` field of your `Service` actually selects for `metadata.labels` -values on your `Pods`. A common mistake is to have a typo or other error, such -as the `Service` selecting for `run=hostnames`, but the `Deployment` specifying -`app=hostnames`. +This confirms that the endpoints controller has found the correct Pods for +your Service. If the `ENDPOINTS` column is ``, you should check that +the `spec.selector` field of your Service actually selects for +`metadata.labels` values on your Pods. A common mistake is to have a typo or +other error, such as the Service selecting for `app=hostnames`, but the +Deployment specifying `run=hostnames`. ## Are the Pods working? -At this point, we know that your `Service` exists and has selected your `Pods`. -Let's check that the `Pods` are actually working - we can bypass the `Service` -mechanism and go straight to the `Pods`. +At this point, you know that your Service exists and has selected your Pods. +At the beginning of this walk-through, you verified the Pods themselves. +Let's check again that the Pods are actually working - you can bypass the +Service mechanism and go straight to the Pods, as listed by the Endpoints +above. {{< note >}} -These commands use the `Pod` port (9376), rather than the `Service` port (80). +These commands use the Pod port (9376), rather than the Service port (80). {{< /note >}} +From within a Pod: + ```shell -u@pod$ wget -qO- 10.244.0.5:9376 -hostnames-0uton +for ep in 10.244.0.5:9376 10.244.0.6:9376 10.244.0.7:9376; do + wget -qO- $ep +done +``` -pod $ wget -qO- 10.244.0.6:9376 -hostnames-bvc05 +This should produce something like: -u@pod$ wget -qO- 10.244.0.7:9376 +``` +hostnames-0uton +hostnames-bvc05 hostnames-yp2kp ``` -We expect each `Pod` in the `Endpoints` list to return its own hostname. If +You expect each Pod in the Endpoints list to return its own hostname. If this is not what happens (or whatever the correct behavior is for your own -`Pods`), you should investigate what's happening there. 
You might find -`kubectl logs` to be useful or `kubectl exec` directly to your `Pods` and check -service from there. - -Another thing to check is that your `Pods` are not crashing or being restarted. -Frequent restarts could lead to intermittent connectivity issues. - -```shell -kubectl get pods -l app=hostnames -NAME READY STATUS RESTARTS AGE -hostnames-632524106-bbpiw 1/1 Running 0 2m -hostnames-632524106-ly40y 1/1 Running 0 2m -hostnames-632524106-tlaok 1/1 Running 0 2m -``` - -If the restart count is high, read more about how to [debug -pods](/docs/tasks/debug-application-cluster/debug-pod-replication-controller/#debugging-pods). +Pods), you should investigate what's happening there. ## Is the kube-proxy working? -If you get here, your `Service` is running, has `Endpoints`, and your `Pods` -are actually serving. At this point, the whole `Service` proxy mechanism is +If you get here, your Service is running, has Endpoints, and your Pods +are actually serving. At this point, the whole Service proxy mechanism is suspect. Let's confirm it, piece by piece. +The default implementation of Services, and the one used on most clusters, is +kube-proxy. This is a program that runs on every node and configures one of a +small set of mechanisms for providing the Service abstraction. If your +cluster does not use kube-proxy, the following sections will not apply, and you +will have to investigate whatever implementation of Services you are using. + ### Is kube-proxy running? -Confirm that `kube-proxy` is running on your `Nodes`. You should get something -like the below: +Confirm that `kube-proxy` is running on your Nodes. Running directly on a +Node, you should get something like the below: ```shell -u@node$ ps auxw | grep kube-proxy +ps auxw | grep kube-proxy +``` +```none root 4194 0.4 0.1 101864 17696 ? Sl Jul04 25:43 /usr/local/bin/kube-proxy --master=https://kubernetes-master --kubeconfig=/var/lib/kube-proxy/kubeconfig --v=2 ``` Next, confirm that it is not failing something obvious, like contacting the master. To do this, you'll have to look at the logs. Accessing the logs -depends on your `Node` OS. On some OSes it is a file, such as +depends on your Node OS. On some OSes it is a file, such as /var/log/kube-proxy.log, while other OSes use `journalctl` to access logs. You should see something like: @@ -463,7 +526,7 @@ I1027 22:14:54.040223 5063 proxier.go:294] Adding new service "kube-system/ku ``` If you see error messages about not being able to contact the master, you -should double-check your `Node` configuration and installation steps. +should double-check your Node configuration and installation steps. One of the possible reasons that `kube-proxy` cannot run correctly is that the required `conntrack` binary cannot be found. This may happen on some Linux @@ -472,36 +535,19 @@ installing Kubernetes from scratch. If this is the case, you need to manually install the `conntrack` package (e.g. `sudo apt install conntrack` on Ubuntu) and then retry. -### Is kube-proxy writing iptables rules? +Kube-proxy can run in one of a few modes. In the log listed above, the +line `Using iptables Proxier` indicates that kube-proxy is running in +"iptables" mode. The most common other mode is "ipvs". The older "userspace" +mode has largely been replaced by these. -One of the main responsibilities of `kube-proxy` is to write the `iptables` -rules which implement `Services`. Let's check that those rules are getting -written. 
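
A quick way to confirm which mode your kube-proxy is actually using, assuming
its metrics endpoint is listening on the default `127.0.0.1:10249`, is to query
it from the Node:

```shell
curl localhost:10249/proxyMode
```

This should print the active mode, for example:

```none
iptables
```
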
+#### Iptables mode -The kube-proxy can run in "userspace" mode, "iptables" mode or "ipvs" mode. -Hopefully you are using the "iptables" mode or "ipvs" mode. You -should see one of the following cases. - -#### Userspace +In "iptables" mode, you should see something like the following on a Node: ```shell -u@node$ iptables-save | grep hostnames --A KUBE-PORTALS-CONTAINER -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames:default" -m tcp --dport 80 -j REDIRECT --to-ports 48577 --A KUBE-PORTALS-HOST -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames:default" -m tcp --dport 80 -j DNAT --to-destination 10.240.115.247:48577 +iptables-save | grep hostnames ``` - -There should be 2 rules for each port on your `Service` (just one in this -example) - a "KUBE-PORTALS-CONTAINER" and a "KUBE-PORTALS-HOST". If you do -not see these, try restarting `kube-proxy` with the `-v` flag set to 4, and -then look at the logs again. - -Almost nobody should be using the "userspace" mode any more, so we won't spend -more time on it here. - -#### Iptables - -```shell -u@node$ iptables-save | grep hostnames +```none -A KUBE-SEP-57KPRZ3JQVENLNBR -s 10.244.3.6/32 -m comment --comment "default/hostnames:" -j MARK --set-xmark 0x00004000/0x00004000 -A KUBE-SEP-57KPRZ3JQVENLNBR -p tcp -m comment --comment "default/hostnames:" -m tcp -j DNAT --to-destination 10.244.3.6:9376 -A KUBE-SEP-WNBA2IHDGP2BOBGZ -s 10.244.1.7/32 -m comment --comment "default/hostnames:" -j MARK --set-xmark 0x00004000/0x00004000 @@ -514,15 +560,20 @@ u@node$ iptables-save | grep hostnames -A KUBE-SVC-NWV5X2332I4OT4T3 -m comment --comment "default/hostnames:" -j KUBE-SEP-57KPRZ3JQVENLNBR ``` -There should be 1 rule in `KUBE-SERVICES`, 1 or 2 rules per endpoint in -`KUBE-SVC-(hash)` (depending on `SessionAffinity`), one `KUBE-SEP-(hash)` chain -per endpoint, and a few rules in each `KUBE-SEP-(hash)` chain. The exact rules -will vary based on your exact config (including node-ports and load-balancers). +For each port of each Service, there should be 1 rule in `KUBE-SERVICES` and +one `KUBE-SVC-` chain. For each Pod endpoint, there should be a small +number of rules in that `KUBE-SVC-` and one `KUBE-SEP-` chain with +a small number of rules in it. The exact rules will vary based on your exact +config (including node-ports and load-balancers). -#### IPVS +#### IPVS mode + +In "ipvs" mode, you should see something like the following on a Node: ```shell -u@node$ ipvsadm -ln +ipvsadm -ln +``` +```none Prot LocalAddress:Port Scheduler Flags -> RemoteAddress:Port Forward Weight ActiveConn InActConn ... @@ -533,14 +584,39 @@ TCP 10.0.1.175:80 rr ... ``` -IPVS proxy will create a virtual server for each service address(e.g. Cluster IP, External IP, NodePort IP, Load Balancer IP etc.) and some corresponding real servers for endpoints of the service, if any. In this example, service hostnames(`10.0.1.175:80`) has 3 endpoints(`10.244.0.5:9376`, `10.244.0.6:9376`, `10.244.0.7:9376`) and you'll get results similar to above. +For each port of each Service, plus any NodePorts, external IPs, and +load-balancer IPs, kube-proxy will create a virtual server. For each Pod +endpoint, it will create corresponding real servers. In this example, service +hostnames(`10.0.1.175:80`) has 3 endpoints(`10.244.0.5:9376`, +`10.244.0.6:9376`, `10.244.0.7:9376`). + +#### Userspace mode + +In rare cases, you may be using "userspace" mode. 
From your Node: + +```shell +iptables-save | grep hostnames +``` +```none +-A KUBE-PORTALS-CONTAINER -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames:default" -m tcp --dport 80 -j REDIRECT --to-ports 48577 +-A KUBE-PORTALS-HOST -d 10.0.1.175/32 -p tcp -m comment --comment "default/hostnames:default" -m tcp --dport 80 -j DNAT --to-destination 10.240.115.247:48577 +``` + +There should be 2 rules for each port of your Service (just one in this +example) - a "KUBE-PORTALS-CONTAINER" and a "KUBE-PORTALS-HOST". + +Almost nobody should be using the "userspace" mode any more, so you won't spend +more time on it here. ### Is kube-proxy proxying? -Assuming you do see the above rules, try again to access your `Service` by IP: +Assuming you do see one the above cases, try again to access your Service by +IP from one of your Nodes: ```shell -u@node$ curl 10.0.1.175:80 +curl 10.0.1.175:80 +``` +```none hostnames-0uton ``` @@ -548,31 +624,36 @@ If this fails and you are using the userspace proxy, you can try accessing the proxy directly. If you are using the iptables proxy, skip this section. Look back at the `iptables-save` output above, and extract the -port number that `kube-proxy` is using for your `Service`. In the above +port number that `kube-proxy` is using for your Service. In the above examples it is "48577". Now connect to that: ```shell -u@node$ curl localhost:48577 +curl localhost:48577 +``` +```none hostnames-yp2kp ``` If this still fails, look at the `kube-proxy` logs for specific lines like: -```shell +```none Setting endpoints for default/hostnames:default to [10.244.0.5:9376 10.244.0.6:9376 10.244.0.7:9376] ``` If you don't see those, try restarting `kube-proxy` with the `-v` flag set to 4, and then look at the logs again. -### A Pod cannot reach itself via Service IP +### Edge case: A Pod fails to reach itself via the Service IP {#a-pod-fails-to-reach-itself-via-the-service-ip} + +This might sound unlikely, but it does happen and it is supposed to work. This can happen when the network is not properly configured for "hairpin" traffic, usually when `kube-proxy` is running in `iptables` mode and Pods are connected with bridge network. The `Kubelet` exposes a `hairpin-mode` -[flag](/docs/admin/kubelet/) that allows endpoints of a Service to loadbalance back to themselves -if they try to access their own Service VIP. The `hairpin-mode` flag must either be -set to `hairpin-veth` or `promiscuous-bridge`. +[flag](/docs/admin/kubelet/) that allows endpoints of a Service to loadbalance +back to themselves if they try to access their own Service VIP. The +`hairpin-mode` flag must either be set to `hairpin-veth` or +`promiscuous-bridge`. The common steps to trouble shoot this are as follows: @@ -581,9 +662,10 @@ You should see something like the below. `hairpin-mode` is set to `promiscuous-bridge` in the following example. ```shell -u@node$ ps auxw|grep kubelet +ps auxw | grep kubelet +``` +```none root 3392 1.1 0.8 186804 65208 ? Sl 00:51 11:11 /usr/local/bin/kubelet --enable-debugging-handlers=true --config=/etc/kubernetes/manifests --allow-privileged=True --v=4 --cluster-dns=10.0.0.10 --cluster-domain=cluster.local --configure-cbr0=true --cgroup-root=/ --system-cgroups=/system --hairpin-mode=promiscuous-bridge --runtime-cgroups=/docker-daemon --kubelet-cgroups=/kubelet --babysit-daemons=true --max-pods=110 --serialize-image-pulls=false --outofdisk-transition-frequency=0 - ``` * Confirm the effective `hairpin-mode`. 
To do this, you'll have to look at @@ -594,7 +676,7 @@ match `--hairpin-mode` flag due to compatibility. Check if there is any log lines with key word `hairpin` in kubelet.log. There should be log lines indicating the effective hairpin mode, like something below. -```shell +```none I0629 00:51:43.648698 3252 kubelet.go:380] Hairpin mode set to "promiscuous-bridge" ``` @@ -604,6 +686,8 @@ you should see something like: ```shell for intf in /sys/devices/virtual/net/cbr0/brif/*; do cat $intf/hairpin_mode; done +``` +```none 1 1 1 @@ -615,20 +699,21 @@ has the permission to manipulate linux bridge on node. If `cbr0` bridge is used and configured properly, you should see: ```shell -u@node$ ifconfig cbr0 |grep PROMISC +ifconfig cbr0 |grep PROMISC +``` +```none UP BROADCAST RUNNING PROMISC MULTICAST MTU:1460 Metric:1 - ``` * Seek help if none of above works out. ## Seek help -If you get this far, something very strange is happening. Your `Service` is -running, has `Endpoints`, and your `Pods` are actually serving. You have DNS -working, `iptables` rules installed, and `kube-proxy` does not seem to be -misbehaving. And yet your `Service` is not working. You should probably let -us know, so we can help investigate! +If you get this far, something very strange is happening. Your Service is +running, has Endpoints, and your Pods are actually serving. You have DNS +working, and `kube-proxy` does not seem to be misbehaving. And yet your +Service is not working. Please let us know what is going on, so we can help +investigate! Contact us on [Slack](/docs/troubleshooting/#slack) or diff --git a/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md index 45ad12506f070..072070ab663cf 100644 --- a/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md +++ b/content/en/docs/tasks/debug-application-cluster/resource-metrics-pipeline.md @@ -56,7 +56,7 @@ as a Deployment object. If you use a different Kubernetes setup mechanism you ca Metric server collects metrics from the Summary API, exposed by [Kubelet](/docs/admin/kubelet/) on each node. -Metrics Server registered in the main API server through +Metrics Server is registered with the main API server through [Kubernetes aggregator](/docs/concepts/api-extension/apiserver-aggregation/). Learn more about the metrics server in [the design doc](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/metrics-server.md). diff --git a/content/en/docs/tasks/inject-data-application/podpreset.md b/content/en/docs/tasks/inject-data-application/podpreset.md index beb57754c3661..de41c0f73a5d9 100644 --- a/content/en/docs/tasks/inject-data-application/podpreset.md +++ b/content/en/docs/tasks/inject-data-application/podpreset.md @@ -2,23 +2,19 @@ reviewers: - jessfraz title: Inject Information into Pods Using a PodPreset +min-kubernetes-server-version: v1.10 content_template: templates/task weight: 60 --- {{% capture overview %}} -You can use a `PodPreset` object to inject information like secrets, volume -mounts, and environment variables etc into pods at creation time. -This task shows some examples on using the `PodPreset` resource. +This page shows how to use PodPreset objects to inject information like {{< glossary_tooltip text="Secrets" term_id="secret" >}}, volume mounts, and {{< glossary_tooltip text="environment variables" term_id="container-env-variables" >}} into Pods at creation time. 
{{% /capture %}} {{% capture prerequisites %}} -Get an overview of PodPresets at -[Understanding Pod Presets](/docs/concepts/workloads/pods/podpreset/). - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} {{% /capture %}} @@ -26,157 +22,298 @@ Get an overview of PodPresets at {{% capture steps %}} -## Simple Pod Spec Example +## Use Pod presets to inject environment variables and volumes -This is a simple example to show how a Pod spec is modified by the Pod -Preset. +In this step, you create a preset that has a volume mount and one environment variable. +Here is the manifest for the PodPreset: {{< codenew file="podpreset/preset.yaml" >}} +The name of a PodPreset object must be a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +In the manifest, you can see that the preset has an environment variable definition called `DB_PORT` +and a volume mount definition called `cache-volume` which is mounted under `/cache`. The {{< glossary_tooltip text="selector" term_id="selector" >}} specifies that +the preset will act upon any Pod that is labeled `role:frontend`. + Create the PodPreset: ```shell kubectl apply -f https://k8s.io/examples/podpreset/preset.yaml ``` -Examine the created PodPreset: +Verify that the PodPreset has been created: ```shell kubectl get podpreset ``` ``` -NAME AGE -allow-database 1m +NAME CREATED AT +allow-database 2020-01-24T08:54:29Z ``` -The new PodPreset will act upon any pod that has label `role: frontend`. +This manifest defines a Pod labelled `role: frontend` (matching the PodPreset's selector): {{< codenew file="podpreset/pod.yaml" >}} -Create a pod: +Create the Pod: ```shell kubectl create -f https://k8s.io/examples/podpreset/pod.yaml ``` -List the running Pods: +Verify that the Pod is running: ```shell kubectl get pods ``` + +The output shows that the Pod is running: + ``` NAME READY STATUS RESTARTS AGE website 1/1 Running 0 4m ``` -**Pod spec after admission controller:** - -{{< codenew file="podpreset/merged.yaml" >}} - -To see above output, run the following command: +View the Pod spec altered by the admission controller in order to see the effects of the preset +having been applied: ```shell kubectl get pod website -o yaml ``` -## Pod Spec with ConfigMap Example +{{< codenew file="podpreset/merged.yaml" >}} -This is an example to show how a Pod spec is modified by the Pod Preset -that defines a `ConfigMap` for Environment Variables. +The `DB_PORT` environment variable, the `volumeMount` and the `podpreset.admission.kubernetes.io` annotation +of the Pod verify that the preset has been applied. -**User submitted pod spec:** +## Pod spec with ConfigMap example -{{< codenew file="podpreset/pod.yaml" >}} +This is an example to show how a Pod spec is modified by a Pod preset +that references a ConfigMap containing environment variables. 
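
As a rough sketch of the mechanism used here (field names are from the
`settings.k8s.io/v1alpha1` PodPreset API; the ConfigMap name below is
illustrative, the real manifests are in the referenced files), a preset can
pull environment variables from a ConfigMap with `envFrom`:

```yaml
apiVersion: settings.k8s.io/v1alpha1
kind: PodPreset
metadata:
  name: allow-database
spec:
  selector:
    matchLabels:
      role: frontend
  envFrom:
    - configMapRef:
        name: example-env-config   # illustrative; see the referenced allow-db.yaml for the actual ConfigMap
```
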
-**User submitted `ConfigMap`:** +Here is the manifest containing the definition of the ConfigMap: {{< codenew file="podpreset/configmap.yaml" >}} -**Example Pod Preset:** +Create the ConfigMap: + +```shell +kubectl create -f https://k8s.io/examples/podpreset/configmap.yaml +``` + +Here is a PodPreset manifest referencing that ConfigMap: {{< codenew file="podpreset/allow-db.yaml" >}} -**Pod spec after admission controller:** +Create the preset that references the ConfigMap: + +```shell +kubectl create -f https://k8s.io/examples/podpreset/allow-db.yaml +``` + +The following manifest defines a Pod matching the PodPreset for this example: + +{{< codenew file="podpreset/pod.yaml" >}} + +Create the Pod: + +```shell +kubectl create -f https://k8s.io/examples/podpreset/pod.yaml +``` + +View the Pod spec altered by the admission controller in order to see the effects of the preset +having been applied: + +```shell +kubectl get pod website -o yaml +``` {{< codenew file="podpreset/allow-db-merged.yaml" >}} -## ReplicaSet with Pod Spec Example +The `DB_PORT` environment variable and the `podpreset.admission.kubernetes.io` annotation of the Pod +verify that the preset has been applied. + +## ReplicaSet with Pod spec example + +This is an example to show that only Pod specs are modified by Pod presets. Other workload types +like ReplicaSets or Deployments are unaffected. + +Here is the manifest for the PodPreset for this example: -The following example shows that only the pod spec is modified by the Pod -Preset. +{{< codenew file="podpreset/preset.yaml" >}} + +Create the preset: + +```shell +kubectl apply -f https://k8s.io/examples/podpreset/preset.yaml +``` -**User submitted ReplicaSet:** +This manifest defines a ReplicaSet that manages three application Pods: {{< codenew file="podpreset/replicaset.yaml" >}} -**Example Pod Preset:** +Create the ReplicaSet: -{{< codenew file="podpreset/preset.yaml" >}} +```shell +kubectl create -f https://k8s.io/examples/podpreset/replicaset.yaml +``` + +Verify that the Pods created by the ReplicaSet are running: + +```shell +kubectl get pods +``` + +The output shows that the Pods are running: + +``` +NAME READY STATUS RESTARTS AGE +frontend-2l94q 1/1 Running 0 2m18s +frontend-6vdgn 1/1 Running 0 2m18s +frontend-jzt4p 1/1 Running 0 2m18s +``` -**Pod spec after admission controller:** +View the `spec` of the ReplicaSet: -Note that the ReplicaSet spec was not changed, users have to check individual pods -to validate that the PodPreset has been applied. +```shell +kubectl get replicasets frontend -o yaml +``` + +{{< note >}} +The ReplicaSet object's `spec` was not changed, nor does the ReplicaSet contain a +`podpreset.admission.kubernetes.io` annotation. This is because a PodPreset only +applies to Pod objects. + +To see the effects of the preset having been applied, you need to look at individual Pods. +{{< /note >}} + +The command to view the specs of the affected Pods is: + +```shell +kubectl get pod --selector=role=frontend -o yaml +``` {{< codenew file="podpreset/replicaset-merged.yaml" >}} -## Multiple PodPreset Example +Again the `podpreset.admission.kubernetes.io` annotation of the Pods +verifies that the preset has been applied. -This is an example to show how a Pod spec is modified by multiple Pod -Injection Policies. +## Multiple Pod presets example -**User submitted pod spec:** +This is an example to show how a Pod spec is modified by multiple Pod presets. 
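
After you have followed the steps below, a quick way to see which presets were
applied to a Pod is to read its annotations; the admission controller records
one annotation per applied preset, using the
`podpreset.admission.kubernetes.io/podpreset-<name>` key pattern:

```shell
kubectl get pod website -o jsonpath='{.metadata.annotations}{"\n"}'
```
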
-{{< codenew file="podpreset/pod.yaml" >}} -**Example Pod Preset:** +Here is the manifest for the first PodPreset: {{< codenew file="podpreset/preset.yaml" >}} -**Another Pod Preset:** +Create the first PodPreset for this example: + +```shell +kubectl apply -f https://k8s.io/examples/podpreset/preset.yaml +``` + +Here is the manifest for the second PodPreset: {{< codenew file="podpreset/proxy.yaml" >}} -**Pod spec after admission controller:** +Create the second preset: + +```shell +kubectl apply -f https://k8s.io/examples/podpreset/proxy.yaml +``` + +Here's a manifest containing the definition of an applicable Pod (matched by two PodPresets): + +{{< codenew file="podpreset/pod.yaml" >}} + +Create the Pod: + +```shell +kubectl create -f https://k8s.io/examples/podpreset/pod.yaml +``` + +View the Pod spec altered by the admission controller in order to see the effects of both presets +having been applied: + +```shell +kubectl get pod website -o yaml +``` {{< codenew file="podpreset/multi-merged.yaml" >}} -## Conflict Example +The `DB_PORT` environment variable, the `proxy-volume` VolumeMount and the two `podpreset.admission.kubernetes.io` +annotations of the Pod verify that both presets have been applied. + +## Conflict example + +This is an example to show how a Pod spec is not modified by a Pod preset when there is a conflict. +The conflict in this example consists of a `VolumeMount` in the PodPreset conflicting with a Pod that defines the same `mountPath`. + +Here is the manifest for the PodPreset: -This is an example to show how a Pod spec is not modified by the Pod Preset -when there is a conflict. +{{< codenew file="podpreset/conflict-preset.yaml" >}} + +Note the `mountPath` value of `/cache`. + +Create the preset: + +```shell +kubectl apply -f https://k8s.io/examples/podpreset/conflict-preset.yaml +``` -**User submitted pod spec:** +Here is the manifest for the Pod: {{< codenew file="podpreset/conflict-pod.yaml" >}} -**Example Pod Preset:** +Note the volumeMount element with the same path as in the PodPreset. -{{< codenew file="podpreset/conflict-preset.yaml" >}} +Create the Pod: + +```shell +kubectl create -f https://k8s.io/examples/podpreset/conflict-pod.yaml +``` + +View the Pod spec: -**Pod spec after admission controller will not change because of the conflict:** +```shell +kubectl get pod website -o yaml +``` {{< codenew file="podpreset/conflict-pod.yaml" >}} -**If we run `kubectl describe...` we can see the event:** +You can see there is no preset annotation (`podpreset.admission.kubernetes.io`). Seeing no annotation tells you that no preset has not been applied to the Pod. + +However, the +[PodPreset admission controller](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#podpreset) +logs a warning containing details of the conflict. +You can view the warning using `kubectl`: ```shell -kubectl describe ... +kubectl -n kube-system logs -l=component=kube-apiserver ``` + +The output should look similar to: + ``` -.... -Events: - FirstSeen LastSeen Count From SubobjectPath Reason Message - Tue, 07 Feb 2017 16:56:12 -0700 Tue, 07 Feb 2017 16:56:12 -0700 1 {podpreset.admission.kubernetes.io/podpreset-allow-database } conflict Conflict on pod preset. Duplicate mountPath /cache. 
+W1214 13:00:12.987884 1 admission.go:147] conflict occurred while applying podpresets: allow-database on pod: err: merging volume mounts for allow-database has a conflict on mount path /cache: +v1.VolumeMount{Name:"other-volume", ReadOnly:false, MountPath:"/cache", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""} +does not match +core.VolumeMount{Name:"cache-volume", ReadOnly:false, MountPath:"/cache", SubPath:"", MountPropagation:(*core.MountPropagationMode)(nil), SubPathExpr:""} + in container ``` -## Deleting a Pod Preset +Note the conflict message on the path for the VolumeMount. + +## Deleting a PodPreset -Once you don't need a pod preset anymore, you can delete it with `kubectl`: +Once you don't need a PodPreset anymore, you can delete it with `kubectl`: ```shell kubectl delete podpreset allow-database ``` +The output shows that the PodPreset was deleted: ``` podpreset "allow-database" deleted ``` diff --git a/content/en/docs/tasks/manage-daemon/update-daemon-set.md b/content/en/docs/tasks/manage-daemon/update-daemon-set.md index ab98f2ea4e55b..8eec3c17818cc 100644 --- a/content/en/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/en/docs/tasks/manage-daemon/update-daemon-set.md @@ -57,7 +57,7 @@ If you haven't created the DaemonSet in the system, check your DaemonSet manifest with the following command instead: ```shell -kubectl apply -f ds.yaml --dry-run -o go-template='{{.spec.updateStrategy.type}}{{"\n"}}' +kubectl apply -f ds.yaml --dry-run=client -o go-template='{{.spec.updateStrategy.type}}{{"\n"}}' ``` The output from both commands should be: diff --git a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md index 890170a98869a..ad6b969c87ba5 100644 --- a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md +++ b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md @@ -17,11 +17,11 @@ can consume huge pages and the current limitations. {{% capture prerequisites %}} 1. Kubernetes nodes must pre-allocate huge pages in order for the node to report - its huge page capacity. A node may only pre-allocate huge pages for a single - size. + its huge page capacity. A node can pre-allocate huge pages for multiple + sizes. -The nodes will automatically discover and report all huge page resources as a -schedulable resource. +The nodes will automatically discover and report all huge page resources as +schedulable resources. {{% /capture %}} @@ -30,12 +30,51 @@ schedulable resource. ## API Huge pages can be consumed via container level resource requirements using the -resource name `hugepages-`, where size is the most compact binary notation -using integer values supported on a particular node. For example, if a node -supports 2048KiB page sizes, it will expose a schedulable resource -`hugepages-2Mi`. Unlike CPU or memory, huge pages do not support overcommit. Note -that when requesting hugepage resources, either memory or CPU resources must -be requested as well. +resource name `hugepages-`, where `` is the most compact binary +notation using integer values supported on a particular node. For example, if a +node supports 2048KiB and 1048576KiB page sizes, it will expose a schedulable +resources `hugepages-2Mi` and `hugepages-1Gi`. Unlike CPU or memory, huge pages +do not support overcommit. Note that when requesting hugepage resources, either +memory or CPU resources must be requested as well. 
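
Before requesting huge pages in a Pod spec, you can confirm which huge page
resources a Node actually reports. As a quick sketch (the node name is
illustrative):

```shell
kubectl get node node-1 -o jsonpath='{.status.allocatable}{"\n"}'
```

If the corresponding page sizes were pre-allocated on that node, the output
includes entries such as `hugepages-2Mi` and `hugepages-1Gi` alongside `cpu`
and `memory`.
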
+ +A pod may consume multiple huge page sizes in a single pod spec. In this case it +must use `medium: HugePages-` notation for all volume mounts. + + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: huge-pages-example +spec: + containers: + - name: example + image: fedora:latest + command: + - sleep + - inf + volumeMounts: + - mountPath: /hugepages-2Mi + name: hugepage-2mi + - mountPath: /hugepages-1Gi + name: hugepage-1gi + resources: + limits: + hugepages-2Mi: 100Mi + hugepages-1Gi: 2Gi + memory: 100Mi + requests: + memory: 100Mi + volumes: + - name: hugepage-2mi + emptyDir: + medium: HugePages-2Mi + - name: hugepage-1gi + emptyDir: + medium: HugePages-1Gi +``` + +A pod may use `medium: HugePages` only if it requests huge pages of one size. ```yaml apiVersion: v1 @@ -66,8 +105,7 @@ spec: - Huge page requests must equal the limits. This is the default if limits are specified, but requests are not. -- Huge pages are isolated at a pod scope, container isolation is planned in a - future iteration. +- Huge pages are isolated at a container scope, so each container has own limit on their cgroup sandbox as requested in a container spec. - EmptyDir volumes backed by huge pages may not consume more huge page memory than the pod request. - Applications that consume huge pages via `shmget()` with `SHM_HUGETLB` must @@ -75,10 +113,15 @@ spec: - Huge page usage in a namespace is controllable via ResourceQuota similar to other compute resources like `cpu` or `memory` using the `hugepages-` token. +- Support of multiple sizes huge pages is feature gated. It can be + enabled with the `HugePageStorageMediumSize` [feature +gate](/docs/reference/command-line-tools-reference/feature-gates/) on the {{< +glossary_tooltip text="kubelet" term_id="kubelet" >}} and {{< +glossary_tooltip text="kube-apiserver" +term_id="kube-apiserver" >}} (`--feature-gates=HugePageStorageMediumSize=true`). ## Future -- Support container isolation of huge pages in addition to pod isolation. - NUMA locality guarantees as a feature of quality of service. - LimitRange support. diff --git a/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md b/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md index 0dee8eb60adc4..70b5af9df1868 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/declarative-config.md @@ -119,7 +119,7 @@ metadata: {"apiVersion":"apps/v1","kind":"Deployment", "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, "spec":{"minReadySeconds":5,"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, - "spec":{"containers":[{"image":"nginx:1.7.9","name":"nginx", + "spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx", "ports":[{"containerPort":80}]}]}}}} # ... spec: @@ -136,7 +136,7 @@ spec: app: nginx spec: containers: - - image: nginx:1.7.9 + - image: nginx:1.14.2 # ... name: nginx ports: @@ -199,7 +199,7 @@ metadata: {"apiVersion":"apps/v1","kind":"Deployment", "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, "spec":{"minReadySeconds":5,"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, - "spec":{"containers":[{"image":"nginx:1.7.9","name":"nginx", + "spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx", "ports":[{"containerPort":80}]}]}}}} # ... 
spec: @@ -216,7 +216,7 @@ spec: app: nginx spec: containers: - - image: nginx:1.7.9 + - image: nginx:1.14.2 # ... name: nginx ports: @@ -255,7 +255,7 @@ metadata: {"apiVersion":"apps/v1","kind":"Deployment", "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, "spec":{"minReadySeconds":5,"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, - "spec":{"containers":[{"image":"nginx:1.7.9","name":"nginx", + "spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx", "ports":[{"containerPort":80}]}]}}}} # ... spec: @@ -273,7 +273,7 @@ spec: app: nginx spec: containers: - - image: nginx:1.7.9 + - image: nginx:1.14.2 # ... name: nginx ports: @@ -282,7 +282,7 @@ spec: ``` Update the `simple_deployment.yaml` configuration file to change the image from -`nginx:1.7.9` to `nginx:1.11.9`, and delete the `minReadySeconds` field: +`nginx:1.14.2` to `nginx:1.16.1`, and delete the `minReadySeconds` field: {{< codenew file="application/update_deployment.yaml" >}} @@ -303,7 +303,7 @@ The output shows the following changes to the live configuration: * The `replicas` field retains the value of 2 set by `kubectl scale`. This is possible because it is omitted from the configuration file. -* The `image` field has been updated to `nginx:1.11.9` from `nginx:1.7.9`. +* The `image` field has been updated to `nginx:1.16.1` from `nginx:1.14.2`. * The `last-applied-configuration` annotation has been updated with the new image. * The `minReadySeconds` field has been cleared. * The `last-applied-configuration` annotation no longer contains the `minReadySeconds` field. @@ -320,7 +320,7 @@ metadata: {"apiVersion":"apps/v1","kind":"Deployment", "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, "spec":{"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, - "spec":{"containers":[{"image":"nginx:1.11.9","name":"nginx", + "spec":{"containers":[{"image":"nginx:1.16.1","name":"nginx", "ports":[{"containerPort":80}]}]}}}} # ... spec: @@ -338,7 +338,7 @@ spec: app: nginx spec: containers: - - image: nginx:1.11.9 # Set by `kubectl apply` + - image: nginx:1.16.1 # Set by `kubectl apply` # ... name: nginx ports: @@ -460,7 +460,7 @@ metadata: {"apiVersion":"apps/v1","kind":"Deployment", "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, "spec":{"minReadySeconds":5,"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, - "spec":{"containers":[{"image":"nginx:1.7.9","name":"nginx", + "spec":{"containers":[{"image":"nginx:1.14.2","name":"nginx", "ports":[{"containerPort":80}]}]}}}} # ... spec: @@ -478,7 +478,7 @@ spec: app: nginx spec: containers: - - image: nginx:1.7.9 + - image: nginx:1.14.2 # ... name: nginx ports: @@ -518,7 +518,7 @@ metadata: {"apiVersion":"apps/v1","kind":"Deployment", "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, "spec":{"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, - "spec":{"containers":[{"image":"nginx:1.11.9","name":"nginx", + "spec":{"containers":[{"image":"nginx:1.16.1","name":"nginx", "ports":[{"containerPort":80}]}]}}}} # ... spec: @@ -536,7 +536,7 @@ spec: app: nginx spec: containers: - - image: nginx:1.11.9 # Set by `kubectl apply` + - image: nginx:1.16.1 # Set by `kubectl apply` # ... name: nginx ports: @@ -654,7 +654,7 @@ by `name`. 
# last-applied-configuration value containers: - name: nginx - image: nginx:1.10 + image: nginx:1.16 - name: nginx-helper-a # key: nginx-helper-a; will be deleted in result image: helper:1.3 - name: nginx-helper-b # key: nginx-helper-b; will be retained @@ -663,7 +663,7 @@ by `name`. # configuration file value containers: - name: nginx - image: nginx:1.10 + image: nginx:1.16 - name: nginx-helper-b image: helper:1.3 - name: nginx-helper-c # key: nginx-helper-c; will be added in result @@ -672,7 +672,7 @@ by `name`. # live configuration containers: - name: nginx - image: nginx:1.10 + image: nginx:1.16 - name: nginx-helper-a image: helper:1.3 - name: nginx-helper-b @@ -684,7 +684,7 @@ by `name`. # result after merge containers: - name: nginx - image: nginx:1.10 + image: nginx:1.16 # Element nginx-helper-a was deleted - name: nginx-helper-b image: helper:1.3 @@ -779,7 +779,7 @@ spec: app: nginx spec: containers: - - image: nginx:1.7.9 + - image: nginx:1.14.2 imagePullPolicy: IfNotPresent # defaulted by apiserver name: nginx ports: @@ -819,7 +819,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 @@ -834,7 +834,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 @@ -852,7 +852,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 @@ -870,7 +870,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 ``` diff --git a/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md b/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md index 835bf75faf5a4..6b1357a133bb2 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/imperative-command.md @@ -139,10 +139,10 @@ creation. This is done by piping the output of the `create` command to the `set` command, and then back to the `create` command. Here's an example: ```sh -kubectl create service clusterip my-svc --clusterip="None" -o yaml --dry-run | kubectl set selector --local -f - 'environment=qa' -o yaml | kubectl create -f - +kubectl create service clusterip my-svc --clusterip="None" -o yaml --dry-run=client | kubectl set selector --local -f - 'environment=qa' -o yaml | kubectl create -f - ``` -1. The `kubectl create service -o yaml --dry-run` command creates the configuration for the Service, but prints it to stdout as YAML instead of sending it to the Kubernetes API server. +1. The `kubectl create service -o yaml --dry-run=client` command creates the configuration for the Service, but prints it to stdout as YAML instead of sending it to the Kubernetes API server. 1. The `kubectl set selector --local -f - -o yaml` command reads the configuration from stdin, and writes the updated configuration to stdout as YAML. 1. The `kubectl create -f -` command creates the object using the configuration provided via stdin. @@ -152,7 +152,7 @@ You can use `kubectl create --edit` to make arbitrary changes to an object before it is created. 
Here's an example: ```sh -kubectl create service clusterip my-svc --clusterip="None" -o yaml --dry-run > /tmp/srv.yaml +kubectl create service clusterip my-svc --clusterip="None" -o yaml --dry-run=client > /tmp/srv.yaml kubectl create --edit -f /tmp/srv.yaml ``` diff --git a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md index f9a6ed4b18fd5..61230513a4c78 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -791,6 +791,12 @@ kubectl get -k ./ kubectl describe -k ./ ``` +Run the following command to compare the Deployment object `dev-my-nginx` against the state that the cluster would be in if the manifest was applied: + +```shell +kubectl diff -k ./ +``` + Run the following command to delete the Deployment object `dev-my-nginx`: ```shell diff --git a/content/en/docs/tasks/run-application/configure-pdb.md b/content/en/docs/tasks/run-application/configure-pdb.md index 673823feb7f87..d33dc24364f6d 100644 --- a/content/en/docs/tasks/run-application/configure-pdb.md +++ b/content/en/docs/tasks/run-application/configure-pdb.md @@ -180,8 +180,8 @@ then you'll see something like this: kubectl get poddisruptionbudgets ``` ``` -NAME MIN-AVAILABLE ALLOWED-DISRUPTIONS AGE -zk-pdb 2 0 7s +NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE +zk-pdb 2 N/A 0 7s ``` If there are matching pods (say, 3), then you would see something like this: @@ -190,11 +190,11 @@ If there are matching pods (say, 3), then you would see something like this: kubectl get poddisruptionbudgets ``` ``` -NAME MIN-AVAILABLE ALLOWED-DISRUPTIONS AGE -zk-pdb 2 1 7s +NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE +zk-pdb 2 N/A 1 7s ``` -The non-zero value for `ALLOWED-DISRUPTIONS` means that the disruption controller has seen the pods, +The non-zero value for `ALLOWED DISRUPTIONS` means that the disruption controller has seen the pods, counted the matching pods, and updated the status of the PDB. You can get more information about the status of a PDB with this command: @@ -206,14 +206,15 @@ kubectl get poddisruptionbudgets zk-pdb -o yaml apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - creationTimestamp: 2017-08-28T02:38:26Z + annotations: +… + creationTimestamp: "2020-03-04T04:22:56Z" generation: 1 name: zk-pdb … status: currentHealthy: 3 - desiredHealthy: 3 - disruptedPods: null + desiredHealthy: 2 disruptionsAllowed: 1 expectedPods: 3 observedGeneration: 1 diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 6a9fe4de324d5..800abfea69718 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -369,8 +369,8 @@ label, you can specify the following metric block to scale only on GET requests: type: Object object: metric: - name: `http_requests` - selector: `verb=GET` + name: http_requests + selector: {matchLabels: {verb: GET}} ``` This selector uses the same syntax as the full Kubernetes label selectors. 
The monitoring pipeline diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md index e515206308ff4..b5f7612d75439 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -178,6 +178,8 @@ The beta version, which includes support for scaling on memory and custom metric can be found in `autoscaling/v2beta2`. The new fields introduced in `autoscaling/v2beta2` are preserved as annotations when working with `autoscaling/v1`. +When you create a HorizontalPodAutoscaler API object, make sure the name specified is a valid +[DNS subdomain name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). More details about the API object can be found at [HorizontalPodAutoscaler Object](https://git.k8s.io/community/contributors/design-proposals/autoscaling/horizontal-pod-autoscaler.md#horizontalpodautoscaler-object). @@ -197,13 +199,12 @@ The detailed documentation of `kubectl autoscale` can be found [here](/docs/refe ## Autoscaling during rolling update -Currently in Kubernetes, it is possible to perform a [rolling update](/docs/tasks/run-application/rolling-update-replication-controller/) by managing replication controllers directly, -or by using the deployment object, which manages the underlying replica sets for you. +Currently in Kubernetes, it is possible to perform a rolling update by using the deployment object, which manages the underlying replica sets for you. Horizontal Pod Autoscaler only supports the latter approach: the Horizontal Pod Autoscaler is bound to the deployment object, it sets the size for the deployment object, and the deployment is responsible for setting sizes of underlying replica sets. Horizontal Pod Autoscaler does not work with rolling update using direct manipulation of replication controllers, -i.e. you cannot bind a Horizontal Pod Autoscaler to a replication controller and do rolling update (e.g. using `kubectl rolling-update`). +i.e. you cannot bind a Horizontal Pod Autoscaler to a replication controller and do rolling update. The reason this doesn't work is that when rolling update creates a new replication controller, the Horizontal Pod Autoscaler will not be bound to the new replication controller. @@ -282,6 +283,154 @@ and [external.metrics.k8s.io](https://github.com/kubernetes/community/blob/maste For examples of how to use them see [the walkthrough for using custom metrics](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics) and [the walkthrough for using external metrics](/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-metrics-not-related-to-kubernetes-objects). +## Support for configurable scaling behavior + +Starting from +[v1.18](https://github.com/kubernetes/enhancements/blob/master/keps/sig-autoscaling/20190307-configurable-scale-velocity-for-hpa.md) +the `v2beta2` API allows scaling behavior to be configured through the HPA +`behavior` field. Behaviors are specified separately for scaling up and down in +`scaleUp` or `scaleDown` section under the `behavior` field. A stabilization +window can be specified for both directions which prevents the flapping of the +number of the replicas in the scaling target. Similarly specifing scaling +policies controls the rate of change of replicas while scaling. 
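
To show where the field sits in the object, here is a minimal sketch of an
`autoscaling/v2beta2` HorizontalPodAutoscaler that sets a scale-down behavior
(the target Deployment name and the metric are illustrative):

```yaml
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: example-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: example-deployment
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 50
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
      - type: Pods
        value: 4
        periodSeconds: 60
```
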
+ +### Scaling Policies + +One or more scaling policies can be specified in the `behavior` section of the spec. +When multiple policies are specified the policy which allows the highest amount of +change is the policy which is selected by default. The following example shows this behavior +while scaling down: + +```yaml +behavior: + scaleDown: + policies: + - type: Pods + value: 4 + periodSeconds: 60 + - type: Percent + value: 10 + periodSeconds: 60 +``` + +When the number of pods is more than 40 the second policy will be used for scaling down. +For instance if there are 80 replicas and the target has to be scaled down to 10 replicas +then during the first step 8 replicas will be reduced. In the next iteration when the number +of replicas is 72, 10% of the pods is 7.2 but the number is rounded up to 8. On each loop of +the autoscaler controller the number of pods to be change is re-calculated based on the number +of current replicas. When the number of replicas falls below 40 the first policy_(Pods)_ is applied +and 4 replicas will be reduced at a time. + +`periodSeconds` indicates the length of time in the past for which the policy must hold true. +The first policy allows at most 4 replicas to be scaled down in one minute. The second policy +allows at most 10% of the current replicas to be scaled down in one minute. + +The policy selection can be changed by specifying the `selectPolicy` field for a scaling +direction. By setting the value to `Min` which would select the policy which allows the +smallest change in the replica count. Setting the value to `Disabled` completely disabled +scaling in that direction. + +### Stabilization Window + +The stabilization window is used to retrict the flapping of replicas when the metrics +used for scaling keep fluctuating. The stabilization window is used by the autoscaling +algorithm to consider the computed desired state from the past to prevent scaling. In +the following example the stabilization window is specified for `scaleDown`. + +```yaml +scaleDown: + stabilizationWindowSeconds: 300 +``` + +When the metrics indicate that the target should be scaled down the algorithm looks +into previously computed desired states and uses the highest value from the specified +interval. In above example all desired states from the past 5 minutes will be considered. + +### Default Behavior + +To use the custom scaling not all fields have to be specified. Only values which need to be +customized can be specified. These custom values are merged with default values. The default values +match the existing behavior in the HPA algorithm. + +```yaml +behavior: + scaleDown: + stabilizationWindowSeconds: 300 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + - type: Pods + value: 4 + periodSeconds: 15 + selectPolicy: Max +``` +For scaling down the stabilization window is _300_ seconds(or the value of the +`--horizontal-pod-autoscaler-downscale-stabilization` flag if provided). There is only a single policy +for scaling down which allows a 100% of the currently running replicas to be removed which +means the scaling target can be scaled down to the minimum allowed replicas. +For scaling up there is no stabilization window. When the metrics indicate that the target should be +scaled up the target is scaled up immediately. There are 2 policies which. 
4 pods or 100% of the currently +running replicas will be added every 15 seconds until the HPA reaches its steady state. + +### Example: change downscale stabilization window + +To provide a custom downscale stabilization window of 1 minute, the following +behavior would be added to the HPA: + +```yaml +behavior: + scaleDown: + stabilizationWindowSeconds: 60 +``` + +### Example: limit scale down rate + +To limit the rate at which pods are removed by the HPA to 10% per minute, the +following behavior would be added to the HPA: + +```yaml +behavior: + scaleDown: + policies: + - type: Percent + value: 10 + periodSeconds: 60 +``` + +To still allow a drop of at least 5 pods per minute when 10% of the current replicas +would be fewer than that, another policy can be added; the selection strategy of `Max` +picks whichever policy allows the larger change: + +```yaml +behavior: + scaleDown: + policies: + - type: Percent + value: 10 + periodSeconds: 60 + - type: Pods + value: 5 + periodSeconds: 60 + selectPolicy: Max +``` + +### Example: disable scale down + +The `selectPolicy` value of `Disabled` turns off scaling in the given direction. +So to prevent downscaling, the following policy would be used: + +```yaml +behavior: + scaleDown: + selectPolicy: Disabled +``` + {{% /capture %}} {{% capture whatsnext %}} diff --git a/content/en/docs/tasks/run-application/rolling-update-replication-controller.md b/content/en/docs/tasks/run-application/rolling-update-replication-controller.md deleted file mode 100644 index e70877a9d4a78..0000000000000 --- a/content/en/docs/tasks/run-application/rolling-update-replication-controller.md +++ /dev/null @@ -1,251 +0,0 @@ ---- -reviewers: -- janetkuo -title: Perform Rolling Update Using a Replication Controller -content_template: templates/concept -weight: 80 ---- - -{{% capture overview %}} - -{{< note >}} -The preferred way to create a replicated application is to use a -[Deployment](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#deployment-v1-apps), -which in turn uses a -[ReplicaSet](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#replicaset-v1-apps). -For more information, see -[Running a Stateless Application Using a Deployment](/docs/tasks/run-application/run-stateless-application-deployment/). -{{< /note >}} - -To update a service without an outage, `kubectl` supports what is called [rolling update](/docs/reference/generated/kubectl/kubectl-commands/#rolling-update), which updates one pod at a time, rather than taking down the entire service at the same time. See the [rolling update design document](https://git.k8s.io/community/contributors/design-proposals/cli/simple-rolling-update.md) for more information. - -Note that `kubectl rolling-update` only supports Replication Controllers. However, if you deploy applications with Replication Controllers, -consider switching them to [Deployments](/docs/concepts/workloads/controllers/deployment/). A Deployment is a higher-level controller that automates rolling updates -of applications declaratively, and therefore is recommended. If you still want to keep your Replication Controllers and use `kubectl rolling-update`, keep reading: - -A rolling update applies changes to the configuration of pods being managed by -a replication controller. The changes can be passed as a new replication -controller configuration file; or, if only updating the image, a new container -image can be specified directly. - -A rolling update works by: - -1. Creating a new replication controller with the updated configuration. -2.
Increasing/decreasing the replica count on the new and old controllers until - the correct number of replicas is reached. -3. Deleting the original replication controller. - -Rolling updates are initiated with the `kubectl rolling-update` command: - -```shell -kubectl rolling-update NAME NEW_NAME --image=IMAGE:TAG - -# or read the configuration from a file -kubectl rolling-update NAME -f FILE -``` - -{{% /capture %}} - - -{{% capture body %}} - -## Passing a configuration file - -To initiate a rolling update using a configuration file, pass the new file to -`kubectl rolling-update`: - -```shell -kubectl rolling-update NAME -f FILE -``` - -The configuration file must: - -* Specify a different `metadata.name` value. - -* Overwrite at least one common label in its `spec.selector` field. - -* Use the same `metadata.namespace`. - -Replication controller configuration files are described in -[Creating Replication Controllers](/docs/concepts/workloads/controllers/replicationcontroller/). - -### Examples - -```shell -# Update pods of frontend-v1 using new replication controller data in frontend-v2.json. -kubectl rolling-update frontend-v1 -f frontend-v2.json - -# Update pods of frontend-v1 using JSON data passed into stdin. -cat frontend-v2.json | kubectl rolling-update frontend-v1 -f - -``` - -## Updating the container image - -To update only the container image, pass a new image name and tag with the -`--image` flag and (optionally) a new controller name: - -```shell -kubectl rolling-update NAME NEW_NAME --image=IMAGE:TAG -``` - -The `--image` flag is only supported for single-container pods. Specifying -`--image` with multi-container pods returns an error. - -If you didn't specify a new name, this creates a new replication controller -with a temporary name. Once the rollout is complete, the old controller is -deleted, and the new controller is updated to use the original name. - -The update will fail if `IMAGE:TAG` is identical to the -current value. For this reason, we recommend the use of versioned tags as -opposed to values such as `:latest`. Doing a rolling update from `image:latest` -to a new `image:latest` will fail, even if the image at that tag has changed. -Moreover, the use of `:latest` is not recommended, see -[Best Practices for Configuration](/docs/concepts/configuration/overview/#container-images) for more information. - -### Examples - -```shell -# Update the pods of frontend-v1 to frontend-v2 -kubectl rolling-update frontend-v1 frontend-v2 --image=image:v2 - -# Update the pods of frontend, keeping the replication controller name -kubectl rolling-update frontend --image=image:v2 -``` - -## Required and optional fields - -Required fields are: - -* `NAME`: The name of the replication controller to update. - -as well as either: - -* `-f FILE`: A replication controller configuration file, in either JSON or - YAML format. The configuration file must specify a new top-level `id` value - and include at least one of the existing `spec.selector` key:value pairs. - See the - [Run Stateless AP Replication Controller](/docs/tutorials/stateless-application/run-stateless-ap-replication-controller/#replication-controller-configuration-file) - page for details. -
-
- or: -
-
-* `--image IMAGE:TAG`: The name and tag of the image to update to. Must be - different than the current image:tag currently specified. - -Optional fields are: - -* `NEW_NAME`: Only used in conjunction with `--image` (not with `-f FILE`). The - name to assign to the new replication controller. -* `--poll-interval DURATION`: The time between polling the controller status - after update. Valid units are `ns` (nanoseconds), `us` or `µs` (microseconds), - `ms` (milliseconds), `s` (seconds), `m` (minutes), or `h` (hours). Units can - be combined (e.g. `1m30s`). The default is `3s`. -* `--timeout DURATION`: The maximum time to wait for the controller to update a - pod before exiting. Default is `5m0s`. Valid units are as described for - `--poll-interval` above. -* `--update-period DURATION`: The time to wait between updating pods. Default - is `1m0s`. Valid units are as described for `--poll-interval` above. - -Additional information about the `kubectl rolling-update` command is available -from the [`kubectl` reference](/docs/reference/generated/kubectl/kubectl-commands/#rolling-update). - -## Walkthrough - -Let's say you were running version 1.7.9 of nginx: - -{{< codenew file="controllers/replication-nginx-1.7.9.yaml" >}} - -To update to version 1.9.1, you can use [`kubectl rolling-update --image`](https://git.k8s.io/community/contributors/design-proposals/cli/simple-rolling-update.md) to specify the new image: - -```shell -kubectl rolling-update my-nginx --image=nginx:1.9.1 -``` -``` -Created my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 -``` - -In another window, you can see that `kubectl` added a `deployment` label to the pods, whose value is a hash of the configuration, to distinguish the new pods from the old: - -```shell -kubectl get pods -l app=nginx -L deployment -``` -``` -NAME READY STATUS RESTARTS AGE DEPLOYMENT -my-nginx-ccba8fbd8cc8160970f63f9a2696fc46-k156z 1/1 Running 0 1m ccba8fbd8cc8160970f63f9a2696fc46 -my-nginx-ccba8fbd8cc8160970f63f9a2696fc46-v95yh 1/1 Running 0 35s ccba8fbd8cc8160970f63f9a2696fc46 -my-nginx-divi2 1/1 Running 0 2h 2d1d7a8f682934a254002b56404b813e -my-nginx-o0ef1 1/1 Running 0 2h 2d1d7a8f682934a254002b56404b813e -my-nginx-q6all 1/1 Running 0 8m 2d1d7a8f682934a254002b56404b813e -``` - -`kubectl rolling-update` reports progress as it progresses: - -``` -Scaling up my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 from 0 to 3, scaling down my-nginx from 3 to 0 (keep 3 pods available, don't exceed 4 pods) -Scaling my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 up to 1 -Scaling my-nginx down to 2 -Scaling my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 up to 2 -Scaling my-nginx down to 1 -Scaling my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 up to 3 -Scaling my-nginx down to 0 -Update succeeded. Deleting old controller: my-nginx -Renaming my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 to my-nginx -replicationcontroller "my-nginx" rolling updated -``` - -If you encounter a problem, you can stop the rolling update midway and revert to the previous version using `--rollback`: - -```shell -kubectl rolling-update my-nginx --rollback -``` -``` -Setting "my-nginx" replicas to 1 -Continuing update with existing controller my-nginx. -Scaling up nginx from 1 to 1, scaling down my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) -Scaling my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 down to 0 -Update succeeded. 
Deleting my-nginx-ccba8fbd8cc8160970f63f9a2696fc46 -replicationcontroller "my-nginx" rolling updated -``` - -This is one example where the immutability of containers is a huge asset. - -If you need to update more than just the image (e.g., command arguments, environment variables), you can create a new replication controller, with a new name and distinguishing label value, such as: - -{{< codenew file="controllers/replication-nginx-1.9.2.yaml" >}} - -and roll it out: - -```shell -# Assuming you named the file "my-nginx.yaml" -kubectl rolling-update my-nginx -f ./my-nginx.yaml -``` -``` -Created my-nginx-v4 -Scaling up my-nginx-v4 from 0 to 5, scaling down my-nginx from 4 to 0 (keep 4 pods available, don't exceed 5 pods) -Scaling my-nginx-v4 up to 1 -Scaling my-nginx down to 3 -Scaling my-nginx-v4 up to 2 -Scaling my-nginx down to 2 -Scaling my-nginx-v4 up to 3 -Scaling my-nginx down to 1 -Scaling my-nginx-v4 up to 4 -Scaling my-nginx down to 0 -Scaling my-nginx-v4 up to 5 -Update succeeded. Deleting old controller: my-nginx -replicationcontroller "my-nginx-v4" rolling updated -``` - -## Troubleshooting - -If the `timeout` duration is reached during a rolling update, the operation will -fail with some pods belonging to the new replication controller, and some to the -original controller. - -To continue the update from where it failed, retry using the same command. - -To roll back to the original state before the attempted update, append the -`--rollback=true` flag to the original command. This will revert all changes. - -{{% /capture %}} diff --git a/content/en/docs/tasks/run-application/run-stateless-application-deployment.md b/content/en/docs/tasks/run-application/run-stateless-application-deployment.md index 5da19f0c25876..c9e0aebd51373 100644 --- a/content/en/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/en/docs/tasks/run-application/run-stateless-application-deployment.md @@ -34,7 +34,7 @@ This page shows how to run an application using a Kubernetes Deployment object. You can run an application by creating a Kubernetes Deployment object, and you can describe a Deployment in a YAML file. For example, this YAML file describes -a Deployment that runs the nginx:1.7.9 Docker image: +a Deployment that runs the nginx:1.14.2 Docker image: {{< codenew file="application/deployment.yaml" >}} @@ -64,7 +64,7 @@ a Deployment that runs the nginx:1.7.9 Docker image: Labels: app=nginx Containers: nginx: - Image: nginx:1.7.9 + Image: nginx:1.14.2 Port: 80/TCP Environment: Mounts: diff --git a/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md b/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md index 2fbed683c5022..73268ff71417f 100644 --- a/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md +++ b/content/en/docs/tasks/service-catalog/install-service-catalog-using-helm.md @@ -20,7 +20,7 @@ Use [Helm](https://helm.sh/) to install Service Catalog on your Kubernetes clust * If you are using a cloud-based Kubernetes cluster or {{< glossary_tooltip text="Minikube" term_id="minikube" >}}, you may already have cluster DNS enabled. * If you are using `hack/local-up-cluster.sh`, ensure that the `KUBE_ENABLE_CLUSTER_DNS` environment variable is set, then run the install script. * [Install and setup kubectl](/docs/tasks/tools/install-kubectl/) v1.7 or higher. Make sure it is configured to connect to the Kubernetes cluster. -* Install [Helm](http://helm.sh/) v2.7.0 or newer. 
+* Install [Helm](https://helm.sh/) v2.7.0 or newer. * Follow the [Helm install instructions](https://helm.sh/docs/intro/install/). * If you already have an appropriate version of Helm installed, execute `helm init` to install Tiller, the server-side component of Helm. diff --git a/content/en/docs/tasks/tools/install-kubectl.md b/content/en/docs/tasks/tools/install-kubectl.md index 4a799e5f011b4..4a0be4509ada1 100644 --- a/content/en/docs/tasks/tools/install-kubectl.md +++ b/content/en/docs/tasks/tools/install-kubectl.md @@ -87,7 +87,7 @@ If you are on Ubuntu or another Linux distribution that support [snap](https://s ```shell snap install kubectl --classic -kubectl version +kubectl version --client ``` {{% /tab %}} {{% tab name="Homebrew" %}} @@ -95,7 +95,7 @@ If you are on Linux and using [Homebrew](https://docs.brew.sh/Homebrew-on-Linux) ```shell brew install kubectl -kubectl version +kubectl version --client ``` {{% /tab %}} {{< /tabs >}} @@ -385,6 +385,27 @@ However, the kubectl completion script depends on [**bash-completion**](https:// there are two versions of bash-completion, v1 and v2. V1 is for Bash 3.2 (which is the default on macOS), and v2 is for Bash 4.1+. The kubectl completion script **doesn't work** correctly with bash-completion v1 and Bash 3.2. It requires **bash-completion v2** and **Bash 4.1+**. Thus, to be able to correctly use kubectl completion on macOS, you have to install and use Bash 4.1+ ([*instructions*](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)). The following instructions assume that you use Bash 4.1+ (that is, any Bash version of 4.1 or newer). {{< /warning >}} +### Upgrade Bash + +The instructions here assume you use Bash 4.1+. You can check your Bash's version by running: + +```shell +echo $BASH_VERSION +``` + +If it is too old, you can install/upgrade it using Homebrew: + +```shell +brew install bash +``` + +Reload your shell and verify that the desired version is being used: + +```shell +echo $BASH_VERSION $SHELL +``` + +Homebrew usually installs it at `/usr/local/bin/bash`. ### Install bash-completion diff --git a/content/en/docs/tasks/tools/install-minikube.md b/content/en/docs/tasks/tools/install-minikube.md index 03c3b07cd5cec..50e4436dec88a 100644 --- a/content/en/docs/tasks/tools/install-minikube.md +++ b/content/en/docs/tasks/tools/install-minikube.md @@ -26,7 +26,7 @@ grep -E --color 'vmx|svm' /proc/cpuinfo {{% tab name="macOS" %}} To check if virtualization is supported on macOS, run the following command on your terminal. ``` -sysctl -a | grep -E --color 'machdep.cpu.features|VMX' +sysctl -a | grep -E --color 'machdep.cpu.features|VMX' ``` If you see `VMX` in the output (should be colored), the VT-x feature is enabled in your machine. {{% /tab %}} @@ -74,7 +74,7 @@ If you do not already have a hypervisor installed, install one of these now: • [VirtualBox](https://www.virtualbox.org/wiki/Downloads) -Minikube also supports a `--vm-driver=none` option that runs the Kubernetes components on the host and not in a VM. +Minikube also supports a `--driver=none` option that runs the Kubernetes components on the host and not in a VM. Using this driver requires [Docker](https://www.docker.com/products/docker-desktop) and a Linux environment but not a hypervisor. If you're using the `none` driver in Debian or a derivative, use the `.deb` packages for @@ -83,7 +83,13 @@ You can download `.deb` packages from [Docker](https://www.docker.com/products/d {{< caution >}} The `none` VM driver can result in security and data loss issues. 
-Before using `--vm-driver=none`, consult [this documentation](https://minikube.sigs.k8s.io/docs/reference/drivers/none/) for more information. +Before using `--driver=none`, consult [this documentation](https://minikube.sigs.k8s.io/docs/reference/drivers/none/) for more information. +{{< /caution >}} + +Minikube also supports a `vm-driver=podman` similar to the Docker driver. Podman run as superuser privilege (root user) is the best way to ensure that your containers have full access to any feature available on your system. + +{{< caution >}} +The `podman` driver requires running the containers as root because regular user accounts don’t have full access to all operating system features that their containers might need to run. {{< /caution >}} ### Install Minikube using a package @@ -208,12 +214,12 @@ To confirm successful installation of both a hypervisor and Minikube, you can ru {{< note >}} -For setting the `--vm-driver` with `minikube start`, enter the name of the hypervisor you installed in lowercase letters where `` is mentioned below. A full list of `--vm-driver` values is available in [specifying the VM driver documentation](https://kubernetes.io/docs/setup/learning-environment/minikube/#specifying-the-vm-driver). +For setting the `--driver` with `minikube start`, enter the name of the hypervisor you installed in lowercase letters where `` is mentioned below. A full list of `--driver` values is available in [specifying the VM driver documentation](https://kubernetes.io/docs/setup/learning-environment/minikube/#specifying-the-vm-driver). {{< /note >}} ```shell -minikube start --vm-driver= +minikube start --driver= ``` Once `minikube start` finishes, run the command below to check the status of the cluster: diff --git a/content/en/docs/tutorials/_index.md b/content/en/docs/tutorials/_index.md index 04013216c3dcf..9f8de2129e658 100644 --- a/content/en/docs/tutorials/_index.md +++ b/content/en/docs/tutorials/_index.md @@ -22,8 +22,6 @@ Before walking through each tutorial, you may want to bookmark the * [Kubernetes Basics](/docs/tutorials/kubernetes-basics/) is an in-depth interactive tutorial that helps you understand the Kubernetes system and try out some basic Kubernetes features. -* [Scalable Microservices with Kubernetes (Udacity)](https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615) - * [Introduction to Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) * [Hello Minikube](/docs/tutorials/hello-minikube/) diff --git a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index 2a6af0af4c953..13d3d99758338 100644 --- a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -77,7 +77,7 @@

Cluster Diagram

-

Masters manage the cluster and the nodes are used to host the running applications.

+

Masters manage the cluster and the nodes that are used to host the running applications.

diff --git a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html index 7636f8ea943af..6d7e15a7c44c2 100644 --- a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html +++ b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -17,6 +17,14 @@
+
+
+

+ A Pod is the basic execution unit of a Kubernetes application. Each Pod represents a part of a workload that is running on your cluster. Learn more about Pods. +

+
+
+
diff --git a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html index 8adf05965b5ac..8a7d60dd8774c 100644 --- a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -79,13 +79,8 @@

Services and Labels

  • Embed version tags
  • Classify an object using tags
  • - -
    -
    -
    -

    You can create a Service at the same time you create a Deployment by using
    --expose in kubectl.

    -
    +

    diff --git a/content/en/docs/tutorials/online-training/_index.md b/content/en/docs/tutorials/online-training/_index.md deleted file mode 100755 index 9b4b09f17f83e..0000000000000 --- a/content/en/docs/tutorials/online-training/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: "Online Training Courses" -weight: 20 ---- - diff --git a/content/en/docs/tutorials/online-training/overview.md b/content/en/docs/tutorials/online-training/overview.md deleted file mode 100644 index e76b22481a975..0000000000000 --- a/content/en/docs/tutorials/online-training/overview.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: Overview of Kubernetes Online Training -content_template: templates/concept ---- - -{{% capture overview %}} - -Here are some of the sites that offer online training for Kubernetes: - -{{% /capture %}} - -{{% capture body %}} - -* [AIOps Essentials (Autoscaling Kubernetes with Prometheus Metrics) with Hands-On Labs (Linux Academy)](https://linuxacademy.com/devops/training/course/name/using-machine-learning-to-scale-kubernetes-clusters) - -* [Amazon EKS Deep Dive with Hands-On Labs (Linux Academy)] (https://linuxacademy.com/amazon-web-services/training/course/name/amazon-eks-deep-dive) - -* [Cloud Native Certified Kubernetes Administrator (CKA) with Hands-On Labs & Practice Exams (Linux Academy)](https://linuxacademy.com/linux/training/course/name/cloud-native-certified-kubernetes-administrator-cka) - -* [Certified Kubernetes Administrator (CKA) Preparation Course (CloudYuga)](https://cloudyuga.guru/courses/cka-online-self-paced) - -* [Certified Kubernetes Administrator Preparation Course with Practice Tests (KodeKloud)](https://kodekloud.com/p/certified-kubernetes-administrator-with-practice-tests) - -* [Certified Kubernetes Application Developer (CKAD) with Hands-On Labs & Practice Exams (Linux Academy)] (https://linuxacademy.com/containers/training/course/name/certified-kubernetes-application-developer-ckad/) - -* [Certified Kubernetes Application Developer (CKAD) Preparation Course (CloudYuga)](https://cloudyuga.guru/courses/ckad-online-self-paced) - -* [Certified Kubernetes Application Developer Preparation Course with Practice Tests (KodeKloud)](https://kodekloud.com/p/kubernetes-certification-course) - -* [Getting Started with Google Kubernetes Engine (Coursera)](https://www.coursera.org/learn/google-kubernetes-engine) - -* [Getting Started with Kubernetes (Pluralsight)](https://www.pluralsight.com/courses/getting-started-kubernetes) - -* [Getting Started with Kubernetes Clusters on OCI Oracle Kubernetes Engine (OKE) (Learning Library)](https://apexapps.oracle.com/pls/apex/f?p=44785:50:0:::50:P50_EVENT_ID,P50_COURSE_ID:5935,256) - -* [Google Kubernetes Engine Deep Dive (Linux Academy)] (https://linuxacademy.com/google-cloud-platform/training/course/name/google-kubernetes-engine-deep-dive) - -* [Helm Deep Dive with Hands-On Labs (Linux Academy)] (https://linuxacademy.com/linux/training/course/name/helm-deep-dive-part-1) - -* [Hands-on Introduction to Kubernetes (Instruqt)](https://play.instruqt.com/public/topics/getting-started-with-kubernetes) - -* [Introduction to Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x) - -* [Kubernetes Essentials with Hands-On Labs (Linux Academy)] (https://linuxacademy.com/linux/training/course/name/kubernetes-essentials) - -* [Kubernetes for the Absolute Beginners with Hands-on Labs (KodeKloud)](https://kodekloud.com/p/kubernetes-for-the-absolute-beginners-hands-on) - -* [Kubernetes Fundamentals (LFS258) 
(The Linux Foundation)](https://training.linuxfoundation.org/training/kubernetes-fundamentals/) - -* [Kubernetes Quick Start with Hands-On Labs (Linux Academy)] (https://linuxacademy.com/linux/training/course/name/kubernetes-quick-start) - -* [Kubernetes the Hard Way with Hands-On Labs (Linux Academy)](https://linuxacademy.com/linux/training/course/name/kubernetes-the-hard-way) - -* [Kubernetes Security with Hands-On Labs (Linux Academy)] (https://linuxacademy.com/linux/training/course/name/kubernetes-security) - -* [Launch Your First OpenShift Operator with Hands-On Labs (Linux Academy)] (https://linuxacademy.com/containers/training/course/name/red-hat-open-shift) - -* [Learn Kubernetes by Doing - 100% Hands-On Experience (Linux Academy)] (https://linuxacademy.com/linux/training/course/name/learn-kubernetes-by-doing) - -* [Learn Kubernetes using Interactive Hands-on Scenarios (Katacoda)](https://www.katacoda.com/courses/kubernetes/) - -* [Microservice Applications in Kubernetes - 100% Hands-On Experience (Linux Academy)] (https://linuxacademy.com/devops/training/course/name/learn-microservices-by-doing) - -* [Monitoring Kubernetes With Prometheus with Hands-On Labs (Linux Academy)] (https://linuxacademy.com/linux/training/course/name/kubernetes-and-prometheus) - -* [Service Mesh with Istio with Hands-On Labs (Linux Academy)] (https://linuxacademy.com/linux/training/course/name/service-mesh-with-istio-part-1) - -* [Scalable Microservices with Kubernetes (Udacity)](https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615) - -* [Self-paced Kubernetes online course (Learnk8s Academy)](https://learnk8s.io/academy) -{{% /capture %}} diff --git a/content/en/docs/tutorials/services/source-ip.md b/content/en/docs/tutorials/services/source-ip.md index e1b4876a240d9..ca3a2bb409095 100644 --- a/content/en/docs/tutorials/services/source-ip.md +++ b/content/en/docs/tutorials/services/source-ip.md @@ -1,6 +1,7 @@ --- title: Using Source IP content_template: templates/tutorial +min-kubernetes-server-version: v1.5 --- {{% capture overview %}} @@ -14,26 +15,38 @@ of Services, and how you can toggle this behavior according to your needs. {{% capture prerequisites %}} -{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - -## Terminology +### Terminology This document makes use of the following terms: -* [NAT](https://en.wikipedia.org/wiki/Network_address_translation): network address translation -* [Source NAT](https://en.wikipedia.org/wiki/Network_address_translation#SNAT): replacing the source IP on a packet, usually with a node's IP -* [Destination NAT](https://en.wikipedia.org/wiki/Network_address_translation#DNAT): replacing the destination IP on a packet, usually with a pod IP -* [VIP](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies): a virtual IP, such as the one assigned to every Kubernetes Service -* [Kube-proxy](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies): a network daemon that orchestrates Service VIP management on every node +{{< comment >}} +If localizing this section, link to the equivalent Wikipedia pages for +the target localization. +{{< /comment >}} + +[NAT](https://en.wikipedia.org/wiki/Network_address_translation) +: network address translation + +[Source NAT](https://en.wikipedia.org/wiki/Network_address_translation#SNAT) +: replacing the source IP on a packet; in this page, that usually means replacing with the IP address of a node. 
+ +[Destination NAT](https://en.wikipedia.org/wiki/Network_address_translation#DNAT) +: replacing the destination IP on a packet; in this page, that usually means replacing with the IP address of a {{< glossary_tooltip term_id="pod" >}} + +[VIP](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) +: a virtual IP address, such as the one assigned to every {{< glossary_tooltip text="Service" term_id="service" >}} in Kubernetes + +[kube-proxy](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) +: a network daemon that orchestrates Service VIP management on every node +### Prerequisites -## Prerequisites +{{< include "task-tutorial-prereqs.md" >}} -You must have a working Kubernetes 1.5 cluster to run the examples in this -document. The examples use a small nginx webserver that echoes back the source +The examples use a small nginx webserver that echoes back the source IP of requests it receives through an HTTP header. You can create it as follows: -```console +```shell kubectl create deployment source-ip-app --image=k8s.gcr.io/echoserver:1.4 ``` The output is: @@ -54,12 +67,13 @@ deployment.apps/source-ip-app created {{% capture lessoncontent %}} -## Source IP for Services with Type=ClusterIP +## Source IP for Services with `Type=ClusterIP` Packets sent to ClusterIP from within the cluster are never source NAT'd if -you're running kube-proxy in [iptables mode](/docs/concepts/services-networking/service/#proxy-mode-iptables), -which is the default since Kubernetes 1.2. Kube-proxy exposes its mode through -a `proxyMode` endpoint: +you're running kube-proxy in +[iptables mode](/docs/concepts/services-networking/service/#proxy-mode-iptables), +(the default). You can query the kube-proxy mode by fetching +`http://localhost:10249/proxyMode` on the node where kube-proxy is running. ```console kubectl get nodes @@ -71,9 +85,11 @@ kubernetes-node-6jst Ready 2h v1.13.0 kubernetes-node-cx31 Ready 2h v1.13.0 kubernetes-node-jj1t Ready 2h v1.13.0 ``` -Get the proxy mode on one of the node -```console -kubernetes-node-6jst $ curl localhost:10249/proxyMode + +Get the proxy mode on one of the nodes (kube-proxy listens on port 10249): +```shell +# Run this in a shell on the node you want to query. +curl http://localhost:10249/proxyMode ``` The output is: ``` @@ -82,14 +98,14 @@ iptables You can test source IP preservation by creating a Service over the source IP app: -```console +```shell kubectl expose deployment source-ip-app --name=clusterip --port=80 --target-port=8080 ``` The output is: ``` service/clusterip exposed ``` -```console +```shell kubectl get svc clusterip ``` The output is similar to: @@ -100,7 +116,7 @@ clusterip ClusterIP 10.0.170.92 80/TCP 51s And hitting the `ClusterIP` from a pod in the same cluster: -```console +```shell kubectl run busybox -it --image=busybox --restart=Never --rm ``` The output is similar to this: @@ -108,7 +124,14 @@ The output is similar to this: Waiting for pod default/busybox to be running, status is Pending, pod ready: false If you don't see a command prompt, try pressing enter. -# ip addr +``` +You can then run a command inside that Pod: + +```shell +# Run this inside the terminal from "kubectl run" +ip addr +``` +``` 1: lo: mtu 65536 qdisc noqueue link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 inet 127.0.0.1/8 scope host lo @@ -121,21 +144,28 @@ If you don't see a command prompt, try pressing enter. 
valid_lft forever preferred_lft forever inet6 fe80::188a:84ff:feb0:26a5/64 scope link valid_lft forever preferred_lft forever +``` -# wget -qO - 10.0.170.92 +…then use `wget` to query the local webserver: +```shell +# Replace 10.0.170.92 with the Pod's IPv4 address +wget -qO - 10.0.170.92 +``` +``` CLIENT VALUES: client_address=10.244.3.8 command=GET ... ``` -The client_address is always the client pod's IP address, whether the client pod and server pod are in the same node or in different nodes. +The `client_address` is always the client pod's IP address, whether the client pod and server pod are in the same node or in different nodes. -## Source IP for Services with Type=NodePort +## Source IP for Services with `Type=NodePort` -As of Kubernetes 1.5, packets sent to Services with [Type=NodePort](/docs/concepts/services-networking/service/#nodeport) +Packets sent to Services with +[`Type=NodePort`](/docs/concepts/services-networking/service/#nodeport) are source NAT'd by default. You can test this by creating a `NodePort` Service: -```console +```shell kubectl expose deployment source-ip-app --name=nodeport --port=80 --target-port=8080 --type=NodePort ``` The output is: @@ -143,17 +173,17 @@ The output is: service/nodeport exposed ``` -```console +```shell NODEPORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services nodeport) NODES=$(kubectl get nodes -o jsonpath='{ $.items[*].status.addresses[?(@.type=="ExternalIP")].address }') ``` -If you're running on a cloudprovider, you may need to open up a firewall-rule +If you're running on a cloud provider, you may need to open up a firewall rule for the `nodes:nodeport` reported above. Now you can try reaching the Service from outside the cluster through the node port allocated above. -```console +```shell for node in $NODES; do curl -s $node:$NODEPORT | grep -i client_address; done ``` The output is similar to: @@ -187,18 +217,19 @@ Visually: ``` -To avoid this, Kubernetes has a feature to preserve the client source IP -[(check here for feature availability)](/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip). -Setting `service.spec.externalTrafficPolicy` to the value `Local` will only -proxy requests to local endpoints, never forwarding traffic to other nodes -and thereby preserving the original source IP address. If there are no -local endpoints, packets sent to the node are dropped, so you can rely -on the correct source-ip in any packet processing rules you might apply a -packet that make it through to the endpoint. +To avoid this, Kubernetes has a feature to +[preserve the client source IP](/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip). +If you set `service.spec.externalTrafficPolicy` to the value `Local`, +kube-proxy only proxies requests to local endpoints, and does not +forward traffic to other nodes. This approach preserves the original +source IP address. If there are no local endpoints, packets sent to the +node are dropped, so you can rely on the correct source-ip in any packet +processing rules you might apply to a packet that makes it through to the +endpoint.
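+The tutorial sets this field with `kubectl patch` below; if you manage the Service
+declaratively instead, the same setting can be written in the manifest. A minimal
+sketch (the selector assumes the `app=source-ip-app` label applied by the earlier
+`kubectl create deployment` command):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: nodeport             # same name as the Service created with kubectl expose
+spec:
+  type: NodePort
+  selector:
+    app: source-ip-app       # assumed label on the source-ip-app Pods
+  ports:
+  - port: 80
+    targetPort: 8080
+  externalTrafficPolicy: Local   # only route to endpoints on the receiving node
+```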
Set the `service.spec.externalTrafficPolicy` field as follows: -```console +```shell kubectl patch svc nodeport -p '{"spec":{"externalTrafficPolicy":"Local"}}' ``` The output is: @@ -208,12 +239,12 @@ service/nodeport patched Now, re-run the test: -```console +```shell for node in $NODES; do curl --connect-timeout 1 -s $node:$NODEPORT | grep -i client_address; done ``` -The output is: +The output is similar to: ``` -client_address=104.132.1.79 +client_address=198.51.100.79 ``` Note that you only got one reply, with the *right* client IP, from the one node on which the endpoint pod @@ -242,18 +273,19 @@ Visually: -## Source IP for Services with Type=LoadBalancer +## Source IP for Services with `Type=LoadBalancer` -As of Kubernetes 1.5, packets sent to Services with [Type=LoadBalancer](/docs/concepts/services-networking/service/#loadbalancer) are -source NAT'd by default, because all schedulable Kubernetes nodes in the -`Ready` state are eligible for loadbalanced traffic. So if packets arrive +Packets sent to Services with +[`Type=LoadBalancer`](/docs/concepts/services-networking/service/#loadbalancer) +are source NAT'd by default, because all schedulable Kubernetes nodes in the +`Ready` state are eligible for load-balanced traffic. So if packets arrive at a node without an endpoint, the system proxies it to a node *with* an endpoint, replacing the source IP on the packet with the IP of the node (as described in the previous section). -You can test this by exposing the source-ip-app through a loadbalancer +You can test this by exposing the source-ip-app through a load balancer: -```console +```shell kubectl expose deployment source-ip-app --name=loadbalancer --port=80 --target-port=8080 --type=LoadBalancer ``` The output is: @@ -261,18 +293,20 @@ The output is: service/loadbalancer exposed ``` -Print IPs of the Service: +Print out the IP addresses of the Service: ```console kubectl get svc loadbalancer ``` The output is similar to this: ``` NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -loadbalancer LoadBalancer 10.0.65.118 104.198.149.140 80/TCP 5m +loadbalancer LoadBalancer 10.0.65.118 203.0.113.140 80/TCP 5m ``` -```console -curl 104.198.149.140 +Next, send a request to this Service's external-ip: + +```shell +curl 203.0.113.140 ``` The output is similar to this: ``` @@ -302,25 +336,25 @@ health check ---> node 1 node 2 <--- health check You can test this by setting the annotation: -```console +```shell kubectl patch svc loadbalancer -p '{"spec":{"externalTrafficPolicy":"Local"}}' ``` You should immediately see the `service.spec.healthCheckNodePort` field allocated by Kubernetes: -```console +```shell kubectl get svc loadbalancer -o yaml | grep -i healthCheckNodePort ``` The output is similar to this: -``` +```yaml healthCheckNodePort: 32122 ``` The `service.spec.healthCheckNodePort` field points to a port on every node serving the health check at `/healthz`. You can test this: -```console +```shell kubectl get pod -o wide -l run=source-ip-app ``` The output is similar to this: @@ -328,43 +362,48 @@ The output is similar to this: NAME READY STATUS RESTARTS AGE IP NODE source-ip-app-826191075-qehz4 1/1 Running 0 20h 10.180.1.136 kubernetes-node-6jst ``` -Curl the `/healthz` endpoint on different nodes. 
-```console -kubernetes-node-6jst $ curl localhost:32122/healthz + +Use `curl` to fetch the `/healthz` endpoint on various nodes: +```shell +# Run this locally on a node you choose +curl localhost:32122/healthz ``` -The output is similar to this: ``` 1 Service Endpoints found ``` -```console -kubernetes-node-jj1t $ curl localhost:32122/healthz + +On a different node you might get a different result: +```shell +# Run this locally on a node you choose +curl localhost:32122/healthz ``` -The output is similar to this: ``` No Service Endpoints Found ``` -A service controller running on the master is responsible for allocating the cloud -loadbalancer, and when it does so, it also allocates HTTP health checks -pointing to this port/path on each node. Wait about 10 seconds for the 2 nodes -without endpoints to fail health checks, then curl the lb ip: +A controller running on the +{{< glossary_tooltip text="control plane" term_id="control-plane" >}} is +responsible for allocating the cloud load balancer. The same controller also +allocates HTTP health checks pointing to this port/path on each node. Wait +about 10 seconds for the 2 nodes without endpoints to fail health checks, +then use `curl` to query the IPv4 address of the load balancer: -```console -curl 104.198.149.140 +```shell +curl 203.0.113.140 ``` The output is similar to this: ``` CLIENT VALUES: -client_address=104.132.1.79 +client_address=198.51.100.79 ... ``` -__Cross platform support__ +## Cross-platform support -As of Kubernetes 1.5, support for source IP preservation through Services -with Type=LoadBalancer is only implemented in a subset of cloudproviders -(GCP and Azure). The cloudprovider you're running on might fulfill the -request for a loadbalancer in a few different ways: +Only some cloud providers offer support for source IP preservation through +Services with `Type=LoadBalancer`. +The cloud provider you're running on might fulfill the request for a loadbalancer +in a few different ways: 1. With a proxy that terminates the client connection and opens a new connection to your nodes/endpoints. In such cases the source IP will always be that of the @@ -374,12 +413,14 @@ cloud LB, not that of the client. loadbalancer VIP end up at the node with the source IP of the client, not an intermediate proxy. -Loadbalancers in the first category must use an agreed upon +Load balancers in the first category must use an agreed upon protocol between the loadbalancer and backend to communicate the true client IP -such as the HTTP [X-FORWARDED-FOR](https://en.wikipedia.org/wiki/X-Forwarded-For) -header, or the [proxy protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt). -Loadbalancers in the second category can leverage the feature described above -by simply creating an HTTP health check pointing at the port stored in +such as the HTTP [Forwarded](https://tools.ietf.org/html/rfc7239#section-5.2) +or [X-FORWARDED-FOR](https://en.wikipedia.org/wiki/X-Forwarded-For) +headers, or the +[proxy protocol](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt). +Load balancers in the second category can leverage the feature described above +by creating an HTTP health check pointing at the port stored in the `service.spec.healthCheckNodePort` field on the Service. {{% /capture %}} @@ -388,13 +429,13 @@ the `service.spec.healthCheckNodePort` field on the Service. 
Delete the Services: -```console +```shell kubectl delete svc -l run=source-ip-app ``` Delete the Deployment, ReplicaSet and Pod: -```console +```shell kubectl delete deployment source-ip-app ``` @@ -402,7 +443,6 @@ kubectl delete deployment source-ip-app {{% capture whatsnext %}} * Learn more about [connecting applications via services](/docs/concepts/services-networking/connect-applications-service/) -* Learn more about [loadbalancing](/docs/user-guide/load-balancer) +* Read how to [Create an External Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/) {{% /capture %}} - diff --git a/content/en/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md b/content/en/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md index af24c72999457..94008289ee502 100644 --- a/content/en/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md +++ b/content/en/docs/tutorials/stateless-application/guestbook-logs-metrics-with-elk.md @@ -111,7 +111,7 @@ There are four files to edit to create a k8s secret when you are connecting to s 1. ELASTICSEARCH_USERNAME 1. KIBANA_HOST -Set these with the information for your Elasticsearch cluster and your Kibana host. Here are some examples +Set these with the information for your Elasticsearch cluster and your Kibana host. Here are some examples (also see [*this configuration*](https://stackoverflow.com/questions/59892896/how-to-connect-from-minikube-to-elasticsearch-installed-on-host-local-developme/59892897#59892897)) #### `ELASTICSEARCH_HOSTS` 1. A nodeGroup from the Elastic Elasticsearch Helm Chart: diff --git a/content/en/docs/tutorials/stateless-application/guestbook.md b/content/en/docs/tutorials/stateless-application/guestbook.md index 979a14198a064..f1f93dd98828b 100644 --- a/content/en/docs/tutorials/stateless-application/guestbook.md +++ b/content/en/docs/tutorials/stateless-application/guestbook.md @@ -79,7 +79,7 @@ Replace POD-NAME with the name of your Pod. ### Creating the Redis Master Service -The guestbook applications needs to communicate to the Redis master to write its data. You need to apply a [Service](/docs/concepts/services-networking/service/) to proxy the traffic to the Redis master Pod. A Service defines a policy to access the Pods. +The guestbook application needs to communicate to the Redis master to write its data. You need to apply a [Service](/docs/concepts/services-networking/service/) to proxy the traffic to the Redis master Pod. A Service defines a policy to access the Pods. 
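+In outline, such a Service selects the Redis master Pods by their labels and forwards
+traffic to the Redis port. The sketch below is for orientation only; the label set and
+port are assumptions, and the authoritative manifest used by this tutorial is included
+next:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: redis-master        # illustrative; see the included manifest below
+spec:
+  selector:
+    app: redis              # assumed labels identifying the Redis master Pods
+    role: master
+  ports:
+  - port: 6379              # default Redis port
+    targetPort: 6379
+```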
{{< codenew file="application/guestbook/redis-master-service.yaml" >}} diff --git a/content/en/examples/admin/resource/quota-mem-cpu-pod-2.yaml b/content/en/examples/admin/resource/quota-mem-cpu-pod-2.yaml index 22726c600aaf3..380e900fda52f 100644 --- a/content/en/examples/admin/resource/quota-mem-cpu-pod-2.yaml +++ b/content/en/examples/admin/resource/quota-mem-cpu-pod-2.yaml @@ -9,8 +9,7 @@ spec: resources: limits: memory: "1Gi" - cpu: "800m" + cpu: "800m" requests: memory: "700Mi" cpu: "400m" - diff --git a/content/en/examples/admin/resource/quota-mem-cpu-pod.yaml b/content/en/examples/admin/resource/quota-mem-cpu-pod.yaml index ba27bf5ccfc78..b0fd0a9451bf2 100644 --- a/content/en/examples/admin/resource/quota-mem-cpu-pod.yaml +++ b/content/en/examples/admin/resource/quota-mem-cpu-pod.yaml @@ -9,8 +9,7 @@ spec: resources: limits: memory: "800Mi" - cpu: "800m" + cpu: "800m" requests: memory: "600Mi" cpu: "400m" - diff --git a/content/en/examples/admin/snowflake-deployment.yaml b/content/en/examples/admin/snowflake-deployment.yaml new file mode 100644 index 0000000000000..2f4f267916823 --- /dev/null +++ b/content/en/examples/admin/snowflake-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: snowflake + name: snowflake +spec: + replicas: 2 + selector: + matchLabels: + app: snowflake + template: + metadata: + labels: + app: snowflake + spec: + containers: + - image: k8s.gcr.io/serve_hostname + imagePullPolicy: Always + name: snowflake diff --git a/content/en/examples/application/deployment-scale.yaml b/content/en/examples/application/deployment-scale.yaml index 3bdc7b6f5b8e3..68801c971deb8 100644 --- a/content/en/examples/application/deployment-scale.yaml +++ b/content/en/examples/application/deployment-scale.yaml @@ -14,6 +14,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.8 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/examples/application/deployment-update.yaml b/content/en/examples/application/deployment-update.yaml index 8c683d6dc776e..18e8be65fbd71 100644 --- a/content/en/examples/application/deployment-update.yaml +++ b/content/en/examples/application/deployment-update.yaml @@ -14,6 +14,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.8 # Update the version of nginx from 1.7.9 to 1.8 + image: nginx:1.16.1 # Update the version of nginx from 1.14.2 to 1.16.1 ports: - containerPort: 80 diff --git a/content/en/examples/application/deployment.yaml b/content/en/examples/application/deployment.yaml index 0f526b16c0ad2..2cd599218d01e 100644 --- a/content/en/examples/application/deployment.yaml +++ b/content/en/examples/application/deployment.yaml @@ -14,6 +14,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/examples/application/nginx-app.yaml b/content/en/examples/application/nginx-app.yaml index c3f926b74e752..d00682e1fcbba 100644 --- a/content/en/examples/application/nginx-app.yaml +++ b/content/en/examples/application/nginx-app.yaml @@ -29,6 +29,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/examples/application/nginx/nginx-deployment.yaml b/content/en/examples/application/nginx/nginx-deployment.yaml index f05bfa3c5f557..7f608bc47fa5a 100644 --- a/content/en/examples/application/nginx/nginx-deployment.yaml +++ b/content/en/examples/application/nginx/nginx-deployment.yaml @@ -14,6 +14,6 @@ spec: spec: containers: - 
name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/examples/application/simple_deployment.yaml b/content/en/examples/application/simple_deployment.yaml index 10fa1ddf29999..d9c74af8c577b 100644 --- a/content/en/examples/application/simple_deployment.yaml +++ b/content/en/examples/application/simple_deployment.yaml @@ -14,6 +14,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/examples/application/update_deployment.yaml b/content/en/examples/application/update_deployment.yaml index d53aa3e6d2fc8..2d7603acb956c 100644 --- a/content/en/examples/application/update_deployment.yaml +++ b/content/en/examples/application/update_deployment.yaml @@ -13,6 +13,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.11.9 # update the image + image: nginx:1.16.1 # update the image ports: - containerPort: 80 diff --git a/content/en/examples/controllers/daemonset.yaml b/content/en/examples/controllers/daemonset.yaml index 1bfa082833c72..f291b750c158b 100644 --- a/content/en/examples/controllers/daemonset.yaml +++ b/content/en/examples/controllers/daemonset.yaml @@ -15,6 +15,8 @@ spec: name: fluentd-elasticsearch spec: tolerations: + # this toleration is to have the daemonset runnable on master nodes + # remove it if your masters can't run pods - key: node-role.kubernetes.io/master effect: NoSchedule containers: diff --git a/content/en/examples/controllers/nginx-deployment.yaml b/content/en/examples/controllers/nginx-deployment.yaml index f7f95deebbb23..685c17aa68e1d 100644 --- a/content/en/examples/controllers/nginx-deployment.yaml +++ b/content/en/examples/controllers/nginx-deployment.yaml @@ -16,6 +16,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/examples/controllers/replication-nginx-1.7.9.yaml b/content/en/examples/controllers/replication-nginx-1.14.2.yaml similarity index 89% rename from content/en/examples/controllers/replication-nginx-1.7.9.yaml rename to content/en/examples/controllers/replication-nginx-1.14.2.yaml index 768ab92ca7328..b74bc81547c44 100644 --- a/content/en/examples/controllers/replication-nginx-1.7.9.yaml +++ b/content/en/examples/controllers/replication-nginx-1.14.2.yaml @@ -11,6 +11,6 @@ spec: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/examples/controllers/replication-nginx-1.9.2.yaml b/content/en/examples/controllers/replication-nginx-1.16.1.yaml similarity index 92% rename from content/en/examples/controllers/replication-nginx-1.9.2.yaml rename to content/en/examples/controllers/replication-nginx-1.16.1.yaml index f92f2657ed6eb..0708cae4b50e9 100644 --- a/content/en/examples/controllers/replication-nginx-1.9.2.yaml +++ b/content/en/examples/controllers/replication-nginx-1.16.1.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: nginx - image: nginx:1.9.2 + image: nginx:1.16.1 args: ["nginx", "-T"] ports: - containerPort: 80 diff --git a/content/en/examples/examples_test.go b/content/en/examples/examples_test.go index ee29c3a68b590..7c9664b64c168 100644 --- a/content/en/examples/examples_test.go +++ b/content/en/examples/examples_test.go @@ -34,8 +34,6 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/apis/admissionregistration" - ar_validation 
"k8s.io/kubernetes/pkg/apis/admissionregistration/validation" "k8s.io/kubernetes/pkg/apis/apps" apps_validation "k8s.io/kubernetes/pkg/apis/apps/validation" "k8s.io/kubernetes/pkg/apis/autoscaling" @@ -434,12 +432,6 @@ func TestExampleObjectSchemas(t *testing.T) { "node-problem-detector-configmap": {&apps.DaemonSet{}}, "termination": {&api.Pod{}}, }, - "federation": { - "policy-engine-deployment": {&apps.Deployment{}}, - "policy-engine-service": {&api.Service{}}, - "replicaset-example-policy": {&apps.ReplicaSet{}}, - "scheduling-policy-admission": {&api.ConfigMap{}}, - }, "podpreset": { "allow-db": {&settings.PodPreset{}}, "allow-db-merged": {&api.Pod{}}, @@ -525,9 +517,9 @@ func TestExampleObjectSchemas(t *testing.T) { "redis": {&api.Pod{}}, }, "policy": { - "privileged-psp": {&policy.PodSecurityPolicy{}}, - "restricted-psp": {&policy.PodSecurityPolicy{}}, - "example-psp": {&policy.PodSecurityPolicy{}}, + "privileged-psp": {&policy.PodSecurityPolicy{}}, + "restricted-psp": {&policy.PodSecurityPolicy{}}, + "example-psp": {&policy.PodSecurityPolicy{}}, "zookeeper-pod-disruption-budget-maxunavailable": {&policy.PodDisruptionBudget{}}, "zookeeper-pod-disruption-budget-minunavailable": {&policy.PodDisruptionBudget{}}, }, diff --git a/content/en/examples/federation/policy-engine-deployment.yaml b/content/en/examples/federation/policy-engine-deployment.yaml deleted file mode 100644 index 168af7ba4cf0f..0000000000000 --- a/content/en/examples/federation/policy-engine-deployment.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: opa - name: opa - namespace: federation-system -spec: - replicas: 1 - selector: - matchLabels: - app: opa - template: - metadata: - labels: - app: opa - name: opa - spec: - containers: - - name: opa - image: openpolicyagent/opa:0.4.10 - args: - - "run" - - "--server" - - name: kube-mgmt - image: openpolicyagent/kube-mgmt:0.2 - args: - - "-kubeconfig=/srv/kubernetes/kubeconfig" - - "-cluster=federation/v1beta1/clusters" - volumeMounts: - - name: federation-kubeconfig - mountPath: /srv/kubernetes - readOnly: true - volumes: - - name: federation-kubeconfig - secret: - secretName: federation-controller-manager-kubeconfig diff --git a/content/en/examples/federation/policy-engine-service.yaml b/content/en/examples/federation/policy-engine-service.yaml deleted file mode 100644 index 982870b06b218..0000000000000 --- a/content/en/examples/federation/policy-engine-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: opa - namespace: federation-system -spec: - selector: - app: opa - ports: - - name: http - protocol: TCP - port: 8181 - targetPort: 8181 diff --git a/content/en/examples/federation/replicaset-example-policy.yaml b/content/en/examples/federation/replicaset-example-policy.yaml deleted file mode 100644 index 43dc83b18b200..0000000000000 --- a/content/en/examples/federation/replicaset-example-policy.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: apps/v1 -kind: ReplicaSet -metadata: - labels: - app: nginx-pci - name: nginx-pci - annotations: - requires-pci: "true" -spec: - replicas: 3 - selector: - matchLabels: - app: nginx-pci - template: - metadata: - labels: - app: nginx-pci - spec: - containers: - - image: nginx - name: nginx-pci diff --git a/content/en/examples/federation/scheduling-policy-admission.yaml b/content/en/examples/federation/scheduling-policy-admission.yaml deleted file mode 100644 index a164722425555..0000000000000 --- 
a/content/en/examples/federation/scheduling-policy-admission.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: admission - namespace: federation-system -data: - config.yml: | - apiVersion: apiserver.k8s.io/v1alpha1 - kind: AdmissionConfiguration - plugins: - - name: SchedulingPolicy - path: /etc/kubernetes/admission/scheduling-policy-config.yml - scheduling-policy-config.yml: | - kubeconfig: /etc/kubernetes/admission/opa-kubeconfig - opa-kubeconfig: | - clusters: - - name: opa-api - cluster: - server: http://opa.federation-system.svc.cluster.local:8181/v0/data/kubernetes/placement - users: - - name: scheduling-policy - user: - token: deadbeefsecret - contexts: - - name: default - context: - cluster: opa-api - user: scheduling-policy - current-context: default diff --git a/content/en/examples/podpreset/allow-db-merged.yaml b/content/en/examples/podpreset/allow-db-merged.yaml index 8a0ad101d7d64..7f52cc1fa49c1 100644 --- a/content/en/examples/podpreset/allow-db-merged.yaml +++ b/content/en/examples/podpreset/allow-db-merged.yaml @@ -14,9 +14,6 @@ spec: volumeMounts: - mountPath: /cache name: cache-volume - - mountPath: /etc/app/config.json - readOnly: true - name: secret-volume ports: - containerPort: 80 env: @@ -32,6 +29,3 @@ spec: volumes: - name: cache-volume emptyDir: {} - - name: secret-volume - secret: - secretName: config-details diff --git a/content/en/examples/podpreset/allow-db.yaml b/content/en/examples/podpreset/allow-db.yaml index 0cca13bab2c3d..2c511e650d36e 100644 --- a/content/en/examples/podpreset/allow-db.yaml +++ b/content/en/examples/podpreset/allow-db.yaml @@ -19,12 +19,6 @@ spec: volumeMounts: - mountPath: /cache name: cache-volume - - mountPath: /etc/app/config.json - readOnly: true - name: secret-volume volumes: - name: cache-volume emptyDir: {} - - name: secret-volume - secret: - secretName: config-details diff --git a/content/en/examples/pods/pod-nginx-preferred-affinity.yaml b/content/en/examples/pods/pod-nginx-preferred-affinity.yaml new file mode 100644 index 0000000000000..183ba9f014225 --- /dev/null +++ b/content/en/examples/pods/pod-nginx-preferred-affinity.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: disktype + operator: In + values: + - ssd + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent diff --git a/content/en/examples/pods/pod-nginx-required-affinity.yaml b/content/en/examples/pods/pod-nginx-required-affinity.yaml new file mode 100644 index 0000000000000..a3805eaa8d9c9 --- /dev/null +++ b/content/en/examples/pods/pod-nginx-required-affinity.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: disktype + operator: In + values: + - ssd + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent diff --git a/content/en/examples/pods/simple-pod.yaml b/content/en/examples/pods/simple-pod.yaml index 4208f4b36536d..0e79d8a3c6128 100644 --- a/content/en/examples/pods/simple-pod.yaml +++ b/content/en/examples/pods/simple-pod.yaml @@ -5,6 +5,6 @@ metadata: spec: containers: - name: nginx - image: nginx:1.7.9 + image: nginx:1.14.2 ports: - containerPort: 80 diff --git a/content/en/includes/federated-task-tutorial-prereqs.md 
b/content/en/includes/federated-task-tutorial-prereqs.md deleted file mode 100644 index b254407a676a3..0000000000000 --- a/content/en/includes/federated-task-tutorial-prereqs.md +++ /dev/null @@ -1,5 +0,0 @@ -This guide assumes that you have a running Kubernetes Cluster Federation installation. -If not, then head over to the [federation admin guide](/docs/tutorials/federation/set-up-cluster-federation-kubefed/) to learn how to -bring up a cluster federation (or have your cluster administrator do this for you). -Other tutorials, such as Kelsey Hightower's [Federated Kubernetes Tutorial](https://github.com/kelseyhightower/kubernetes-cluster-federation), -might also help you create a Federated Kubernetes cluster. diff --git a/content/en/training/_index.html b/content/en/training/_index.html new file mode 100644 index 0000000000000..53922a9879030 --- /dev/null +++ b/content/en/training/_index.html @@ -0,0 +1,108 @@ +--- +title: Training +bigheader: Kubernetes Training and Certification +abstract: Training programs, certifications, and partners. +layout: basic +cid: training +class: training +--- + +
    + Build your cloud native career
    + Kubernetes is at the core of the cloud native movement. Training and certifications from the Linux Foundation and our training partners let you invest in your career, learn Kubernetes, and make your cloud native projects successful.
    + Take a free course on edX
    + Introduction to Kubernetes: Want to learn Kubernetes? Get an in-depth primer on this powerful system for managing containerized applications. (Go to Course)
    + Introduction to Cloud Infrastructure Technologies: Learn the fundamentals of building and managing cloud technologies directly from The Linux Foundation, the leader in open source. (Go to Course)
    + Introduction to Linux: Never learned Linux? Want a refresh? Develop a good working knowledge of Linux using both the graphical interface and command line across the major Linux distribution families. (Go to Course)
    + Learn with the Linux Foundation
    + The Linux Foundation offers instructor-led and self-paced courses for all aspects of the Kubernetes application development and operations lifecycle. (See Courses)
    + Get Kubernetes Certified
    + Certified Kubernetes Application Developer (CKAD): The Certified Kubernetes Application Developer exam certifies that users can design, build, configure, and expose cloud native applications for Kubernetes. (Go to Certification)
    + Certified Kubernetes Administrator (CKA): The Certified Kubernetes Administrator (CKA) program provides assurance that CKAs have the skills, knowledge, and competency to perform the responsibilities of Kubernetes administrators. (Go to Certification)
    + Kubernetes Training Partners
    + Our network of Kubernetes Training Partners provides training services for Kubernetes and cloud native projects.
    diff --git a/content/es/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/es/docs/concepts/configuration/organize-cluster-access-kubeconfig.md new file mode 100644 index 0000000000000..dc9f9e14a5015 --- /dev/null +++ b/content/es/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -0,0 +1,153 @@ +--- +title: Organizar el acceso a los clústeres utilizando archivos kubeconfig +content_template: templates/concept +weight: 60 +--- + +{{% capture overview %}} + +Utilice los archivos kubeconfig para organizar la información acerca de los clústeres, los +usuarios, los Namespaces y los mecanismos de autenticación. La herramienta de +línea de comandos `kubectl` utiliza los archivos kubeconfig para hallar la información que +necesita para escoger un clúster y comunicarse con el servidor API de un clúster. + +{{< note >}} +Un archivo utilizado para configurar el acceso a los clústeres se denomina +*archivo kubeconfig*. Esta es una forma genérica de referirse a los archivos de +configuración. Esto no significa que exista un archivo llamado `kubeconfig`. +{{< /note >}} + +Por defecto, `kubectl` busca un archivo llamado `config` en el directorio `$HOME/.kube`. +Puedes especificar otros archivos kubeconfig mediante la configuración de la variable +de entorno `KUBECONFIG` o mediante la configuracion del flag +[`--kubeconfig`](/docs/reference/generated/kubectl/kubectl/). + +Para obtener instrucciones paso a paso acerca de cómo crear y especificar los archivos kubeconfig, +consulte el recurso +[Configurar El Acceso A Múltiples Clústeres](/docs/tasks/access-application-cluster/configure-access-multiple-clusters). + +{{% /capture %}} + +{{% capture body %}} + +## Compatibilidad con múltiples clústeres, usuarios y mecanismos de autenticación + +Suponga que tiene diversos clústeres y que sus usuarios y componentes se autentican +de diversas maneras. Por ejemplo: + +- Un kubelet en ejecución se podría autenticar usando certificados. +- Un usuario se podría autenticar utilizando tokens. +- Los administradores podrían tener un conjunto de certificados que sean suministrados a los usuarios individualmente. + +Con los archivos kubeconfig puedes organizar tus clústeres, usuarios y Namespaces. +También puedes definir diferentes contextos para realizar de forma rápida y +fácil cambios entre clústeres y Namespaces. + +## Contexto + +Un elemento *context* en un archivo kubeconfig se utiliza para agrupar los parámetros de +acceso bajo un nombre apropiado. Cada contexto tiene tres parámetros: clúster, Namespace +y usuario. +Por defecto, la herramienta de línea de comandos `kubectl` utiliza los parámetros del +*contexto actual* para comunicarse con el clúster. + +Para seleccionar el contexto actual: + +```shell +kubectl config use-context +``` + +## Variable de entorno KUBECONFIG + +La variable de entorno `KUBECONFIG` contiene una lista de archivos kubeconfig. +En el caso de Linux y Mac, la lista está delimitada por dos puntos. Si se trata +de Windows, la lista está delimitada por punto y coma. La variable de entorno +`KUBECONFIG` no es indispensable. Si la variable de entorno `KUBECONFIG` no existe, +`kubectl` utiliza el archivo kubeconfig por defecto `$HOME/.kube/config`. + +Si la variable de entorno `KUBECONFIG` existe, `kubectl` utiliza una +configuración eficiente que es el resultado de la fusión de los archivos +listados en la variable de entorno `KUBECONFIG`. 
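As a quick, hedged illustration of the behaviour described above: the following shell sketch shows `kubectl` merging every file listed in `KUBECONFIG`. The file names `config-demo` and `config-demo-2` and the context name `dev-frontend` are made-up placeholders, not files or contexts that ship with Kubernetes.

```shell
# A minimal sketch, assuming two hypothetical kubeconfig files,
# ./config-demo and ./config-demo-2, already exist.

# Linux and macOS delimit the KUBECONFIG list with ":".
export KUBECONFIG="$PWD/config-demo:$PWD/config-demo-2"
# On Windows (PowerShell) the delimiter is ";":
#   $Env:KUBECONFIG = "$PWD\config-demo;$PWD\config-demo-2"

# kubectl now works against the merged view of both files.
kubectl config view            # shows the effective, merged configuration
kubectl config get-contexts    # lists every context defined across both files

# Select a context (cluster + user + namespace) by name; "dev-frontend"
# is a placeholder for a context defined in one of the files.
kubectl config use-context dev-frontend

# Unset the variable to fall back to the default $HOME/.kube/config.
unset KUBECONFIG
```

Because the first file that sets a given value or map key wins during the merge, a common pattern is to list the file with your personal overrides first in `KUBECONFIG`.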
+ +## Fusionando archivos kubeconfig + +Para poder ver su configuración, escriba el siguiente comando: + +```shell +kubectl config view +``` + +Como se ha descrito anteriormente, la respuesta de este comando podría resultar a partir de un solo +archivo kubeconfig, o podría ser el resultado de la fusión de varios archivos kubeconfig. + +A continuación se muestran las reglas que usa `kubectl` cuando fusiona archivos kubeconfig: + +1. Si el flag `--kubeconfig` está activado, usa solamente el archivo especificado. Sin fusionar. + Sólo se permite una instancia con este flag. + + En caso contrario, si la variable de entorno `KUBECONFIG` está activada, sera usada + como un listado de los archivos a ser fusionados. + Fusionar los archivos listados en la variable de entorno `KUBECONFIG` de acuerdo + con estas reglas: + + * Ignorar nombres de archivo vacíos. + * Producir errores para archivos con contenido que no pueden ser deserializados. + * El primer archivo que establezca un valor particular o una clave se impone. + * Nunca cambie el valor o la clave. + Ejemplo: Conserva el contexto del primer archivo para configurar el `contexto actual`. + Ejemplo: Si dos archivos especifican un `red-user`, utilice sólo los valores del primer archivo. + Incluso desechar el segundo archivo aunque tenga registros que no tengan conflictos. + + Para obtener un ejemplo de configuración de la variable de entorno `KUBECONFIG`, consulte la sección + [Configuración de la variable de entorno KUBECONFIG](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable). + + En caso contrario, utilice el archivo kubeconfig predeterminado `$HOME/.kube/config`, sin fusionar. + +2. Determinar el contexto a utilizar con base en el primer acierto en esta secuencia: + + 1. Si es que existe, utilice el flag `---contexto` de la línea de comandos. + 2. Utilice el `contexto actual` procedente de los archivos kubeconfig fusionados. + + En este punto se permite un contexto vacío. + +3. Determinar el clúster y el usuario. En este caso, puede o no haber un contexto. + Determine el clúster y el usuario con base en el primer acierto que se ejecute dos veces en + esta secuencia: una para el usuario y otra para el clúster: + + 1. Si es que existen, utilice el flag `--user` o `--cluster` de la línea de comandos. + 2. Si el contexto no está vacío, tome el usuario o clúster del contexto. + + En este caso el usuario y el clúster pueden estar vacíos. + +4. Determinar la información del clúster a utilizar. En este caso, puede o no haber información del clúster. + Se construye cada pieza de la información del clúster con base en esta secuencia, el primer acierto se impone: + + 1. Si es que existen, use el flag `--server`, `--certificate-authority`, `--insecure-skip-tls-verify` en la línea de comandos. + 2. Si existen atributos de información de clúster procedentes de los archivos kubeconfig fusionados, utilícelos. + 3. Falla si no existe la ubicación del servidor. + +5. Determinar la información del usuario a utilizar. Cree información de usuario utilizando las mismas reglas que + la información de clúster, con la excepción de permitir sólo un mecanismo de autenticación por usuario: + + 1. Si es que existen, utilice el flag `--client-certificate`, `--client-key`, `--username`, `--password`, `--token` de la línea de comandos. + 2. Utilice los campos `user` de los archivos kubeconfig fusionados. + 3. Falla si hay dos mecanismos de autenticación contradictorios. + +6. 
Si todavía falta información, utilice los valores predeterminados y solicite + información de autenticación. + +## Referencias de archivos + +Las referencias, así también como, las rutas de un archivo kubeconfig son relativas a la ubicación del archivo kubeconfig. +Las referencias de un archivo en la línea de comandos son relativas al directorio actual de trabajo. +Dentro de `$HOME/.kube/config`, las rutas relativas se almacenan de manera relativa a la ubicación del archivo kubeconfig , al igual que las rutas absolutas +se almacenan absolutamente. + +{{% /capture %}} + +{{% capture whatsnext %}} + +* [Configurar el acceso a multiples Clústeres](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) +* [`kubectl config`](/docs/reference/generated/kubectl/kubectl-commands#config) + +{{% /capture %}} diff --git a/content/fr/_index.html b/content/fr/_index.html index 026a0f910f34b..89a66f48b6ca7 100644 --- a/content/fr/_index.html +++ b/content/fr/_index.html @@ -3,12 +3,12 @@ abstract: "Déploiement, mise à l'échelle et gestion automatisée des conteneurs" cid: home --- +{{< announcement >}} {{< deprecationwarning >}} {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} - ### [Kubernetes (K8s)]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}}) est un système open-source permettant d'automatiser le déploiement, la mise à l'échelle et la gestion des applications conteneurisées. Les conteneurs qui composent une application sont regroupés dans des unités logiques pour en faciliter la gestion et la découverte. Kubernetes s’appuie sur [15 années d’expérience dans la gestion de charges de travail de production (workloads) chez Google](http://queue.acm.org/detail.cfm?id=2898444), associé aux meilleures idées et pratiques de la communauté. @@ -18,6 +18,7 @@ #### Quel que soit le nombre Conçu selon les mêmes principes qui permettent à Google de gérer des milliards de conteneurs par semaine, Kubernetes peut évoluer sans augmenter votre équipe d'opérations. + {{% /blocks/feature %}} {{% blocks/feature image="blocks" %}} @@ -28,18 +29,15 @@ {{% /blocks/feature %}} {{% blocks/feature image="suitcase" %}} - #### Quel que soit l'endroit -Kubernetes est une solution open-source qui vous permet de tirer parti de vos infrastructures qu'elles soient sur site (on-premises), hybride ou en Cloud publique. -Vous pourrez ainsi répartir sans effort vos workloads là où vous le souhaitez. +Kubernetes est une solution open-source qui vous permet de tirer parti de vos infrastructures qu'elles soient sur site (on-premises), hybride ou en Cloud publique. Vous pourrez ainsi répartir sans effort vos workloads là où vous le souhaitez. {{% /blocks/feature %}} {{< /blocks/section >}} {{< blocks/section id="video" background-image="kub_video_banner_homepage" >}} -
    Les défis de la migration de plus de 150 microservices vers Kubernetes
    Par Sarah Wells, directrice technique des opérations et de la fiabilité, Financial Times
    @@ -47,12 +45,12 @@
    Les défis de la migration de plus de 150 microservices vers Kubernetes
    - Venez au KubeCon San Diego du 18 au 21 Nov 2019 + Venez au KubeCon Amsterdam du 13 au 16 Aout 2020
    - Venez au KubeCon Amsterdam du 30 Mars au 2 Avril 2020 + Venez au KubeCon Boston du 17 au 20 Novembre 2020
    diff --git a/content/fr/docs/concepts/architecture/cloud-controller.md b/content/fr/docs/concepts/architecture/cloud-controller.md index e8a513e42da33..ca0542a2c31f0 100644 --- a/content/fr/docs/concepts/architecture/cloud-controller.md +++ b/content/fr/docs/concepts/architecture/cloud-controller.md @@ -253,6 +253,7 @@ Les fournisseurs de cloud suivants ont implémenté leur CCM: * [AWS](https://github.com/kubernetes/cloud-provider-aws) * [BaiduCloud](https://github.com/baidu/cloud-provider-baiducloud) * [Linode](https://github.com/linode/linode-cloud-controller-manager) +* [Scaleway](https://github.com/scaleway/scaleway-cloud-controller-manager) ## Administration de cluster diff --git a/content/fr/docs/concepts/workloads/pods/init-containers.md b/content/fr/docs/concepts/workloads/pods/init-containers.md index 905b9275db6ec..c2ac521df4970 100644 --- a/content/fr/docs/concepts/workloads/pods/init-containers.md +++ b/content/fr/docs/concepts/workloads/pods/init-containers.md @@ -111,10 +111,10 @@ spec: initContainers: - name: init-myservice image: busybox:1.28 - command: ['sh', '-c', 'until nslookup myservice; do echo "En attente de myservice"; sleep 2; done;'] + command: ['sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo en attente de myservice; sleep 2; done"] - name: init-mydb image: busybox:1.28 - command: ['sh', '-c', 'until nslookup mydb; do echo "En attente de mydb"; sleep 2; done;'] + command: ['sh', '-c', "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo en attente de mydb; sleep 2; done"] ``` Les fichiers YAML suivants résument les services `mydb` et `myservice` : diff --git a/content/fr/docs/contribute/start.md b/content/fr/docs/contribute/start.md index 59cb54cfc24dc..39eee2a39dff3 100644 --- a/content/fr/docs/contribute/start.md +++ b/content/fr/docs/contribute/start.md @@ -70,7 +70,7 @@ Pour plus d'informations sur la contribution à la documentation dans plusieurs Si vous souhaitez démarrer une nouvelle traduction, voir ["Traduction"](/docs/contribute/localization/). -## Créer des demander recevables +## Créer des demandes recevables Toute personne possédant un compte GitHub peut soumettre un problème (rapport de bogue) à la documentation de Kubernetes. Si vous voyez quelque chose qui ne va pas, même si vous ne savez pas comment le réparer, [ouvrez un ticket](#how-to-file-an-issue). diff --git a/content/fr/docs/reference/kubectl/cheatsheet.md b/content/fr/docs/reference/kubectl/cheatsheet.md index 0c320717a2ea2..918debad70e4e 100644 --- a/content/fr/docs/reference/kubectl/cheatsheet.md +++ b/content/fr/docs/reference/kubectl/cheatsheet.md @@ -1,11 +1,14 @@ --- title: Aide-mémoire kubectl description: Cheatsheet kubectl aide-mémoire +reviewers: +- rbenzair +- feloy +- remyleone content_template: templates/concept card: name: reference weight: 30 -original: d4d55a703 --- {{% capture overview %}} @@ -46,6 +49,8 @@ echo "if [ $commands[kubectl] ]; then source <(kubectl completion zsh); fi" >> ~ ## Contexte et configuration de Kubectl Indique avec quel cluster Kubernetes `kubectl` communique et modifie les informations de configuration. Voir la documentation [Authentification multi-clusters avec kubeconfig](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) pour des informations détaillées sur le fichier de configuration. +Information. 
Voir la documentation [Authentification à travers des clusters avec kubeconfig](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) +pour des informations détaillées sur le fichier de configuration. ```bash kubectl config view # Affiche les paramètres fusionnés de kubeconfig @@ -58,6 +63,7 @@ kubectl config view # Affiche le mot de passe pour l'utilisateur e2e kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' +kubectl config view -o jsonpath='{.users[].name}' # Affiche le premier utilisateur kubectl config view -o jsonpath='{.users[*].name}' # Affiche une liste d'utilisateurs kubectl config get-contexts # Affiche la liste des contextes kubectl config current-context # Affiche le contexte courant (current-context) @@ -77,9 +83,7 @@ kubectl config unset users.foo # Supprime l'utilisateur fo ``` ## Apply -`apply` gère des applications en utilisant des fichiers définissant des ressources Kubernetes. -Elle crée et met à jour des ressources dans un cluster en exécutant `kubectl apply`. -C'est la manière recommandée de gérer des applications Kubernetes en production. Voir le [Livre Kubectl](https://kubectl.docs.kubernetes.io). +`apply` gère des applications en utilisant des fichiers définissant des ressources Kubernetes. Elle crée et met à jour des ressources dans un cluster en exécutant `kubectl apply`. C'est la manière recommandée de gérer des applications Kubernetes en production. Voir le [Livre Kubectl](https://kubectl.docs.kubernetes.io). ## Création d'objets @@ -92,7 +96,7 @@ kubectl apply -f ./my1.yaml -f ./my2.yaml # Crée depuis plusieurs fichiers kubectl apply -f ./dir # Crée une ou plusieurs ressources depuis tous les manifests dans dir kubectl apply -f https://git.io/vPieo # Crée une ou plusieurs ressources depuis une url kubectl create deployment nginx --image=nginx # Démarre une instance unique de nginx -kubectl explain pods,svc # Affiche la documentation pour les manifests pod et svc +kubectl explain pods # Affiche la documentation pour les manifests pod # Crée plusieurs objets YAML depuis l'entrée standard (stdin) cat </dev/null; printf "\n"; done - -# Ou cette commande peut aussi être utilisée pour obtenir tous les labels associés aux pods kubectl get pods --show-labels # Vérifie quels noeuds sont prêts @@ -191,8 +192,15 @@ JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.ty # Liste tous les Secrets actuellement utilisés par un pod kubectl get pods -o json | jq '.items[].spec.containers[].env[]?.valueFrom.secretKeyRef.name' | grep -v null | sort | uniq +# Liste les containerIDs des initContainer de tous les Pods +# Utile lors du nettoyage des conteneurs arrêtés, tout en évitant de retirer les initContainers. +kubectl get pods --all-namespaces -o jsonpath='{range .items[*].status.initContainerStatuses[*]}{.containerID}{"\n"}{end}' | cut -d/ -f3 + # Liste les événements (Events) classés par timestamp kubectl get events --sort-by=.metadata.creationTimestamp + +# Compare l'état actuel du cluster à l'état du cluster si le manifeste était appliqué. 
+kubectl diff -f ./my-manifest.yaml ``` ## Mise à jour de ressources @@ -205,12 +213,8 @@ kubectl rollout history deployment/frontend # Vérifie l'hi kubectl rollout undo deployment/frontend # Rollback du déploiement précédent kubectl rollout undo deployment/frontend --to-revision=2 # Rollback à une version spécifique kubectl rollout status -w deployment/frontend # Écoute (Watch) le status du rolling update du déploiement "frontend" jusqu'à ce qu'il se termine +kubectl rollout restart deployment/frontend # Rolling restart du déploiement "frontend" -# déprécié depuis la version 1.11 -kubectl rolling-update frontend-v1 -f frontend-v2.json # (déprécié) Rolling update des pods de frontend-v1 -kubectl rolling-update frontend-v1 frontend-v2 --image=image:v2 # (déprécié) Modifie le nom de la ressource et met à jour l'image -kubectl rolling-update frontend --image=image:v2 # (déprécié) Met à jour l'image du pod du déploiement frontend -kubectl rolling-update frontend-v1 frontend-v2 --rollback # (déprécié) Annule (rollback) le rollout en cours cat pod.json | kubectl replace -f - # Remplace un pod, en utilisant un JSON passé en entrée standard @@ -231,6 +235,7 @@ kubectl autoscale deployment foo --min=2 --max=10 # Mise à l'éc ## Mise à jour partielle de ressources ```bash +# Mise à jour partielle d'un node kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' # Met à jour partiellement un noeud # Met à jour l'image d'un conteneur ; spec.containers[*].name est requis car c'est une clé du merge @@ -287,6 +292,11 @@ kubectl logs -f my-pod # Fait défiler (stream) les kubectl logs -f my-pod -c my-container # Fait défiler (stream) les logs d'un conteneur particulier du pod (stdout, cas d'un pod multi-conteneurs) kubectl logs -f -l name=myLabel --all-containers # Fait défiler (stream) les logs de tous les pods ayant le label name=myLabel (stdout) kubectl run -i --tty busybox --image=busybox -- sh # Exécute un pod comme un shell interactif +kubectl run nginx --image=nginx --restart=Never -n +mynamespace # Run pod nginx in a specific namespace +kubectl run nginx --image=nginx --restart=Never # Run pod nginx and write its spec into a file called pod.yaml +--dry-run -o yaml > pod.yaml + kubectl attach my-pod -i # Attache à un conteneur en cours d'exécution kubectl port-forward my-pod 5000:6000 # Écoute le port 5000 de la machine locale et forwarde vers le port 6000 de my-pod kubectl exec my-pod -- ls / # Exécute une commande dans un pod existant (cas d'un seul conteneur) @@ -332,31 +342,33 @@ kubectl api-resources --api-group=extensions # Toutes les ressources dans le gro Pour afficher les détails sur votre terminal dans un format spécifique, vous pouvez utiliser une des options `-o` ou `--output` avec les commandes `kubectl` qui les prennent en charge. -| Format d'affichage | Description | -|-------------------------------------|-----------------------------------------------------------------------------------------------------------------------| -| `-o=custom-columns=` | Affiche un tableau en spécifiant une liste de colonnes séparées par des virgules | -| `-o=custom-columns-file=` | Affiche un tableau en utilisant les colonnes spécifiées dans le fichier `` | -| `-o=json` | Affiche un objet de l'API formaté en JSON | -| `-o=jsonpath=