Skip to content

Commit

Permalink
chore: only show minor versions on website (#4001)
Browse files Browse the repository at this point in the history
  • Loading branch information
bwagner5 committed Jun 7, 2023
1 parent 62b1069 commit d90f472
Show file tree
Hide file tree
Showing 194 changed files with 24,007 additions and 47 deletions.
44 changes: 21 additions & 23 deletions hack/release/common.sh
Expand Up @@ -27,6 +27,7 @@ versionData(){
RELEASE_VERSION_MINOR="${VERSION#*.}"
RELEASE_VERSION_MINOR="${RELEASE_VERSION_MINOR%.*}"
RELEASE_VERSION_PATCH="${VERSION##*.}"
RELEASE_MINOR_VERSION="v${RELEASE_VERSION_MAJOR}.${RELEASE_VERSION_MINOR}"
}

release() {
Expand Down Expand Up @@ -152,37 +153,33 @@ publishHelmChart() {

# createNewWebsiteDirectory copies the "preview" docs tree into a
# minor-version directory (e.g. website/content/en/v0.27) and pins all
# version placeholders in it to the concrete release.
# Globals:   RELEASE_VERSION (written), RELEASE_MINOR_VERSION (via versionData)
# Arguments: $1 - full release version tag (e.g. v0.27.5)
createNewWebsiteDirectory() {
  RELEASE_VERSION=$1
  versionData "${RELEASE_VERSION}"

  mkdir -p "website/content/en/${RELEASE_MINOR_VERSION}"
  cp -r website/content/en/preview/* "website/content/en/${RELEASE_MINOR_VERSION}/"
  # Replace the Hugo latest_release_version placeholder with the release tag
  find "website/content/en/${RELEASE_MINOR_VERSION}/" -type f | xargs perl -i -p -e "s/{{< param \"latest_release_version\" >}}/${RELEASE_VERSION}/g;"
  # YAML assets reference the "preview" channel; pin them to this release
  find website/content/en/${RELEASE_MINOR_VERSION}/*/*/*.yaml -type f | xargs perl -i -p -e "s/preview/${RELEASE_VERSION}/g;"
  # Resolve the githubRelRef shortcode to a concrete /vX.Y.Z/ path segment
  find "website/content/en/${RELEASE_MINOR_VERSION}/" -type f | xargs perl -i -p -e "s/{{< githubRelRef >}}/\/${RELEASE_VERSION}\//g;"
}

# deleteMinorVersionWebsiteDirectory removes sibling directories of the
# release's minor version and rebuilds website/content/en/docs from the
# minor-version docs tree.
# Globals:   RELEASE_VERSION (written); RELEASE_VERSION_MAJOR,
#            RELEASE_VERSION_MINOR, RELEASE_MINOR_VERSION (set by versionData)
# Arguments: $1 - full release version tag (e.g. v0.27.5)
deleteMinorVersionWebsiteDirectory() {
# Record the tag and derive major/minor components from it.
RELEASE_VERSION=$1
versionData "$RELEASE_VERSION"
# Delete every directory matching this major.minor prefix except those whose
# path contains the full release tag.
# NOTE(review): with minor-only directory names (e.g. v0.27), a path will not
# contain the full tag (v0.27.5), so the minor dir itself may be matched and
# removed before the cp below — confirm whether the filter should use
# "$RELEASE_MINOR_VERSION" instead, or whether this function is no longer
# called from prepare-website.sh.
find website/content/en/* -type d -name "v${RELEASE_VERSION_MAJOR}.${RELEASE_VERSION_MINOR}*" -maxdepth 0 | grep -v "$RELEASE_VERSION" | xargs -r -n 1 rm -r
# Rebuild the "docs" alias tree from the minor-version directory.
rm -rf website/content/en/docs
mkdir -p website/content/en/docs
cp -r website/content/en/${RELEASE_MINOR_VERSION}/* website/content/en/docs/
}

# removeOldWebsiteDirectories prunes versioned docs directories under
# website/content/en, keeping only the newest n version directories plus the
# special "preview" and "docs" trees.
# Globals:   none
# Arguments: none
removeOldWebsiteDirectories() {
  local n=5
  # Keep set: the last n version directories sorted earliest -> latest,
  # excluding the non-version "preview" and "docs" trees...
  last_n_versions=$(find website/content/en/* -type d -name "*" -maxdepth 0 | grep -v "preview\|docs" | sort | tail -n "$n")
  # ...then explicitly add "preview" and "docs" back to the keep set.
  last_n_versions+=$(echo -e "\nwebsite/content/en/preview")
  last_n_versions+=$(echo -e "\nwebsite/content/en/docs")
  all=$(find website/content/en/* -type d -name "*" -maxdepth 0)
  # Symmetric difference of keep set vs. all directories = directories to
  # delete. Here-strings must be quoted: an unquoted expansion is word-split
  # and re-joined with spaces, collapsing the multi-line lists onto one line
  # and defeating the sort/comm comparison.
  comm -3 <(sort <<< "$last_n_versions") <(sort <<< "$all") | tr -d '\t' | xargs -r -n 1 rm -r
}

# editWebsiteConfig points the website at a new release: refreshes the
# /docs/* redirect and updates latest_release_version and the Docs menu URL
# in the Hugo config.
# Globals:   RELEASE_VERSION (written)
# Arguments: $1 - full release version tag (e.g. v0.27.5)
editWebsiteConfig() {
  RELEASE_VERSION=$1

  local redirects_file="website/static/_redirects"
  # Drop the stale /docs/* redirect line. BSD sed on macOS requires an
  # explicit (empty) backup-suffix argument for in-place editing.
  if [[ "$OSTYPE" == darwin* ]]; then
    sed -i '' '/^\/docs\/\*/d' "$redirects_file"
  else
    sed -i '/^\/docs\/\*/d' "$redirects_file"
  fi

  # Re-add the redirect, now targeting the new release's docs tree.
  echo "/docs/* /${RELEASE_VERSION}/:splat" >> "$redirects_file"

  # Update the Hugo site parameters and the Docs menu entry.
  yq -i ".params.latest_release_version = \"${RELEASE_VERSION}\"" website/config.yaml
  yq -i ".menu.main[] |=select(.name == \"Docs\") .url = \"${RELEASE_VERSION}\"" website/config.yaml
}

# editWebsiteVersionsMenu sets relevant releases in the version dropdown menu of the website
Expand All @@ -192,7 +189,8 @@ editWebsiteConfig() {
# a selected minor releases we can maintain that list in the repo and use it in here
editWebsiteVersionsMenu() {
RELEASE_VERSION=$1
VERSIONS=(${RELEASE_VERSION})
versionData "${RELEASE_VERSION}"
VERSIONS=(${RELEASE_MINOR_VERSION})
while IFS= read -r LINE; do
SANITIZED_VERSION=$(echo "${LINE}" | sed -e 's/["-]//g' -e 's/ *//g')
VERSIONS+=("${SANITIZED_VERSION}")
Expand Down
1 change: 0 additions & 1 deletion hack/release/prepare-website.sh
Expand Up @@ -14,7 +14,6 @@ fi
echo "Prep website files for ${GIT_TAG}"

createNewWebsiteDirectory "$GIT_TAG"
deleteMinorVersionWebsiteDirectory "$GIT_TAG"
removeOldWebsiteDirectories
editWebsiteConfig "$GIT_TAG"
editWebsiteVersionsMenu "$GIT_TAG"
2 changes: 1 addition & 1 deletion website/assets/js/versionWarning/index.js
Expand Up @@ -2,7 +2,7 @@ export function versionWarning() {
const viewingVersion = window.location.pathname.split('/')[1]

// only alert if we recognize the version string
if (!viewingVersion || !viewingVersion.match(/^v\d+\.\d+\.\d+$/)) {
if (!viewingVersion || !viewingVersion.match(/^v\d+\.\d+$/)) {
return
}

Expand Down
6 changes: 3 additions & 3 deletions website/config.yaml
Expand Up @@ -68,8 +68,8 @@ params:
desc: 'Chat with us on Slack in the #aws-provider channel'
latest_release_version: v0.27.5
versions:
- v0.27.5
- v0.26.1
- v0.27
- v0.26
- preview
menu:
main:
Expand All @@ -79,5 +79,5 @@ menu:
pre: <i class='fab fa-github'></i>
- name: Docs
weight: 20
url: 'v0.27.5'
url: 'docs'
pre: <i class='fas fa-book'></i>
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
Expand Up @@ -44,7 +44,7 @@ authenticate properly by running `aws sts get-caller-identity`.
After setting up the tools, set the Karpenter version number:

```bash
export KARPENTER_VERSION={{< param "latest_release_version" >}}
export KARPENTER_VERSION=v0.27.5
```

Then set the following environment variable:
Expand Down
@@ -1,4 +1,4 @@
curl -fsSL https://karpenter.sh/"${KARPENTER_VERSION}"/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
Expand Down
@@ -1,6 +1,6 @@
TEMPOUT=$(mktemp)

curl -fsSL https://karpenter.sh/"${KARPENTER_VERSION}"/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
Expand Down
File renamed without changes.
File renamed without changes.
@@ -1,4 +1,4 @@
curl -fsSL https://karpenter.sh/"${KARPENTER_VERSION}"/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
Expand Down
@@ -1,6 +1,6 @@
TEMPOUT=$(mktemp)

curl -fsSL https://karpenter.sh/"${KARPENTER_VERSION}"/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
Expand Down
File renamed without changes.
File renamed without changes.
Expand Up @@ -15,7 +15,7 @@ AWS is the first cloud provider supported by Karpenter, although it is designed

### Can I write my own cloud provider for Karpenter?
Yes, but there is no documentation yet for it.
Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree{{< githubRelRef >}}pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too.
Start with Karpenter's GitHub [cloudprovider](https://github.com/aws/karpenter-core/tree/v0.26.1/pkg/cloudprovider) documentation to see how the AWS provider is built, but there are other sections of the code that will require changes too.

### What operating system nodes does Karpenter deploy?
By default, Karpenter uses Amazon Linux 2 images.
Expand All @@ -28,7 +28,7 @@ Karpenter is flexible to multi architecture configurations using [well known lab

### What RBAC access is required?
All of the required RBAC rules can be found in the helm chart template.
See [clusterrolebinding.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrolebinding.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob{{< githubRelRef >}}charts/karpenter/templates/role.yaml) files for details.
See [clusterrolebinding.yaml](https://github.com/aws/karpenter/blob/v0.26.1/charts/karpenter/templates/clusterrolebinding.yaml), [clusterrole.yaml](https://github.com/aws/karpenter/blob/v0.26.1/charts/karpenter/templates/clusterrole.yaml), [rolebinding.yaml](https://github.com/aws/karpenter/blob/v0.26.1/charts/karpenter/templates/rolebinding.yaml), and [role.yaml](https://github.com/aws/karpenter/blob/v0.26.1/charts/karpenter/templates/role.yaml) files for details.

### Can I run Karpenter outside of a Kubernetes cluster?
Yes, as long as the controller has network and IAM/RBAC access to the Kubernetes API and your provider API.
Expand Down
@@ -1,4 +1,4 @@
curl -fsSL https://karpenter.sh/"${KARPENTER_VERSION}"/getting-started/getting-started-with-eksctl/cloudformation.yaml > $TEMPOUT \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
Expand Down
@@ -1,6 +1,6 @@
TEMPOUT=$(mktemp)

curl -fsSL https://karpenter.sh/"${KARPENTER_VERSION}"/getting-started/getting-started-with-eksctl/cloudformation.yaml > $TEMPOUT \
curl -fsSL https://raw.githubusercontent.com/aws/karpenter/"${KARPENTER_VERSION}"/website/content/en/preview/getting-started/getting-started-with-karpenter/cloudformation.yaml > $TEMPOUT \
&& aws cloudformation deploy \
--stack-name "Karpenter-${CLUSTER_NAME}" \
--template-file "${TEMPOUT}" \
Expand Down
Expand Up @@ -131,7 +131,7 @@ Now that our deployment is ready we can create the karpenter namespace, create t
## Create default provisioner

We need to create a default provisioner so Karpenter knows what types of nodes we want for unscheduled workloads.
You can refer to some of the [example provisioners](https://github.com/aws/karpenter/tree{{< githubRelRef >}}examples/provisioner) for specific needs.
You can refer to some of the [example provisioners](https://github.com/aws/karpenter/tree/v0.26.1/examples/provisioner) for specific needs.

{{% script file="./content/en/{VERSION}/getting-started/migrating-from-cas/scripts/step11-create-provisioner.sh" language="bash" %}}

Expand Down
File renamed without changes.
Expand Up @@ -51,9 +51,9 @@ If you get the error `invalid ownership metadata; label validation error:` while
In general, you can reapply the CRDs in the `crds` directory of the Karpenter helm chart:

```shell
kubectl replace -f https://raw.githubusercontent.com/aws/karpenter{{< githubRelRef >}}pkg/apis/crds/karpenter.sh_provisioners.yaml
kubectl replace -f https://raw.githubusercontent.com/aws/karpenter/v0.26.1/pkg/apis/crds/karpenter.sh_provisioners.yaml

kubectl replace -f https://raw.githubusercontent.com/aws/karpenter{{< githubRelRef >}}pkg/apis/crds/karpenter.k8s.aws_awsnodetemplates.yaml
kubectl replace -f https://raw.githubusercontent.com/aws/karpenter/v0.26.1/pkg/apis/crds/karpenter.k8s.aws_awsnodetemplates.yaml
```

## How Do We Break Incompatibility?
Expand Down Expand Up @@ -125,7 +125,7 @@ By adopting this practice we allow our users who are early adopters to test out
* The karpenter webhook and controller containers are combined into a single binary, which requires changes to the helm chart. If your Karpenter installation (helm or otherwise) currently customizes the karpenter webhook, your deployment tooling may require minor changes.
* Karpenter now supports native interruption handling. If you were previously using Node Termination Handler for spot interruption handling and health events, you will need to remove the component from your cluster before enabling `aws.interruptionQueueName`. For more details on Karpenter's interruption handling, see the [Interruption Handling Docs]({{< ref "./concepts/deprovisioning/#interruption" >}}). For common questions on the migration process, see the [FAQ]({{< ref "./faq/#interruption-handling" >}})
* Instance category defaults are now explicitly persisted in the Provisioner, rather than handled implicitly in memory. By default, Provisioners will limit instance category to c,m,r. If any instance type constraints are applied, it will override this default. If you have created Provisioners in the past with unconstrained instance type, family, or category, Karpenter will now more flexibly use instance types than before. If you would like to apply these constraints, they must be included in the Provisioner CRD.
* Karpenter CRD raw YAML URLs have migrated from `https://raw.githubusercontent.com/aws/karpenter{{< githubRelRef >}}charts/karpenter/crds/...` to `https://raw.githubusercontent.com/aws/karpenter{{< githubRelRef >}}pkg/apis/crds/...`. If you reference static Karpenter CRDs or rely on `kubectl replace -f` to apply these CRDs from their remote location, you will need to migrate to the new location.
* Karpenter CRD raw YAML URLs have migrated from `https://raw.githubusercontent.com/aws/karpenter/v0.26.1/charts/karpenter/crds/...` to `https://raw.githubusercontent.com/aws/karpenter/v0.26.1/pkg/apis/crds/...`. If you reference static Karpenter CRDs or rely on `kubectl replace -f` to apply these CRDs from their remote location, you will need to migrate to the new location.
* Pods without an ownerRef (also called "controllerless" or "naked" pods) will now be evicted by default during node termination and consolidation. Users can prevent controllerless pods from being voluntarily disrupted by applying the `karpenter.sh/do-not-evict: "true"` annotation to the pods in question.
* The following CLI options/environment variables are now removed and replaced in favor of pulling settings dynamically from the `karpenter-global-settings` ConfigMap. See the [Settings docs]({{<ref "./concepts/settings/#environment-variables--cli-flags" >}}) for more details on configuring the new values in the ConfigMap.

Expand Down
40 changes: 40 additions & 0 deletions website/content/en/v0.27/_index.md
@@ -0,0 +1,40 @@

---
title: "Documentation"
linkTitle: "Docs"
weight: 20
cascade:
type: docs
tags:
- preview
---
Karpenter is an open-source node provisioning project built for Kubernetes.
Adding Karpenter to a Kubernetes cluster can dramatically improve the efficiency and cost of running workloads on that cluster.
Karpenter works by:

* **Watching** for pods that the Kubernetes scheduler has marked as unschedulable
* **Evaluating** scheduling constraints (resource requests, nodeselectors, affinities, tolerations, and topology spread constraints) requested by the pods
* **Provisioning** nodes that meet the requirements of the pods
* **Removing** the nodes when the nodes are no longer needed

As someone using Karpenter, once your Kubernetes cluster and the Karpenter controller are up and running (see [Getting Started]({{<ref "./getting-started" >}})), you can:

* **Set up provisioners**: By applying a provisioner to Karpenter, you can configure constraints on node provisioning and set timeout values for node expiry or Kubelet configuration values.
Provisioner-level constraints related to Kubernetes and your cloud provider (AWS, for example) include:

- Taints (`taints`): Identify taints to add to provisioned nodes. If a pod doesn't have a matching toleration for the taint, the effect set by the taint occurs (NoSchedule, PreferNoSchedule, or NoExecute). See Kubernetes [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for details.
- Labels (`labels`): Apply arbitrary key-value pairs to nodes that can be matched by pods.
- Requirements (`requirements`): Set acceptable (`In`) and unacceptable (`Out`) Kubernetes and Karpenter values for node provisioning based on [Well-Known Labels](https://kubernetes.io/docs/reference/labels-annotations-taints/) and [cloud-specific settings]({{<ref "./concepts/node-templates" >}}). These can include [instance types](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type), [zones](https://kubernetes.io/docs/reference/labels-annotations-taints/#topologykubernetesiozone), [computer architecture](https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetes-io-arch), and [capacity type]({{<ref "./concepts/provisioners/#capacity-type" >}}) (such as AWS spot or on-demand).
- Limits (`limits`): Lets you set limits on the total CPU and Memory that can be used by the cluster, effectively stopping further node provisioning when those limits have been reached.

* **Deploy workloads**: When deploying workloads, you can request that scheduling constraints be met to direct which nodes Karpenter provisions for those workloads. Use any of the following Pod spec constraints when you deploy pods:

- Resources (`resources`): Make requests and set limits for memory and CPU for a Pod. See [Requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits) for details.
- Nodes (`nodeSelector`): Use [nodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) to ask to match a node that includes one or more selected key-value pairs. These can be arbitrary labels you define, Kubernetes well-known labels, or Karpenter labels.
- Node affinity (`NodeAffinity`): Set [nodeAffinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) to have the Pod run on nodes that have matching `nodeSelectorTerms` set or not set. Matching affinity can be a particular operating system or zone. You can set the node affinity to be required or simply preferred. `NotIn` and `DoesNotExist` allow you to define node anti-affinity behavior.
- Pod affinity and anti-affinity (`podAffinity/podAntiAffinity`): Choose to run a pod on a node based on whether certain pods are running (`podAffinity`) or not running (`podAntiAffinity`) on the node. See [Inter-pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity) for details.
- Tolerations (`tolerations`): Identify that a pod must match (tolerate) a taint on a node before the pod will run on it. Without the toleration, the effect set by the taint occurs (NoSchedule, PreferNoSchedule, or NoExecute). See Kubernetes [Taints and Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for details.
- Topology spread (`topologySpreadConstraints`): Request that pods be spread across zones (`topology.kubernetes.io/zone`) or hosts (`kubernetes.io/hostname`), or cloud provider capacity types (`karpenter.sh/capacity-type`). See [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for details.
- Persistent volume topology: Indicate that the Pod has a storage requirement that requires a node running in a particular zone that can make that storage available to the node.

Learn more about Karpenter and how to get started below.

0 comments on commit d90f472

Please sign in to comment.