136 changes: 136 additions & 0 deletions .github/workflows/dd-build.yml
@@ -0,0 +1,136 @@
name: Build and Push k8s Release

on:
push:
# Sequence of patterns matched against refs/heads
tags:
# Push events on datadog tags
- "*-dd*"
permissions: write-all
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
platform: ["linux/arm64","linux/amd64"]
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.24
- name: Set env
run: echo SANITIZED_TARGET_PLATFORM=${KUBE_BUILD_PLATFORM/\//-} >> $GITHUB_ENV
env:
KUBE_BUILD_PLATFORM: ${{ matrix.platform }}
- name: Cleanup disk space
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf /usr/local/share/boost
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
sudo rm -rf /usr/local/.ghcup
- name: Build
env:
GOFLAGS: "-tags=fips"
KUBE_BUILD_PLATFORMS: ${{ matrix.platform }}
KUBE_RELEASE_RUN_TESTS: n
run: make quick-release CGO_ENABLED=1 KUBE_CGO_OVERRIDES="kube-apiserver kube-controller-manager kube-scheduler kubelet" KUBE_BUILD_PLATFORMS=$KUBE_BUILD_PLATFORMS GOFLAGS=$GOFLAGS
- name: Calculate checksums
id: calculate_checksums
shell: bash
working-directory: _output/release-tars
env:
KUBE_BUILD_PLATFORM: ${{ matrix.platform }}
run: |
TARGET_PLATFORM="${KUBE_BUILD_PLATFORM/\//-}"
for TARGET_FILE in *"${TARGET_PLATFORM}".tar.gz
do
sha256sum "$TARGET_FILE" > "${TARGET_FILE}.sha256sum"
done
- uses: actions/upload-artifact@v4
with:
name: k8s_output_${{ env.SANITIZED_TARGET_PLATFORM }}
path: _output/release-tars
env:
SANITIZED_TARGET_PLATFORM: ${{ env.SANITIZED_TARGET_PLATFORM }}
release:
permissions:
contents: write
runs-on: ubuntu-latest
needs: build
outputs:
upload_url: ${{ steps.create_release_branch.outputs.upload_url }}${{ steps.create_release_tags.outputs.upload_url }}
steps:
- name: Extract branch name
shell: bash
run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: extract_branch
env:
GITHUB_REF: ${{ github.ref }}
if: startsWith(github.ref, 'refs/heads/')
- name: Create Release for Branch
id: create_release_branch
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/heads/')
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
token: ${{ secrets.GITHUB_TOKEN }}
name: branch@${{ steps.extract_branch.outputs.branch }}
tag_name: branch@${{ steps.extract_branch.outputs.branch }}
draft: false
prerelease: false
- name: Extract tags name
shell: bash
run: echo "##[set-output name=tags;]$(echo ${GITHUB_REF#refs/tags/})"
id: extract_tags
env:
GITHUB_REF: ${{ github.ref }}
if: startsWith(github.ref, 'refs/tags/')
- name: Create Release for Tags
id: create_release_tags
uses: softprops/action-gh-release@v2
if: ${{ startsWith(github.ref, 'refs/tags/') }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
token: ${{ secrets.GITHUB_TOKEN }}
name: ${{ steps.extract_tags.outputs.tags }}
tag_name: ${{ steps.extract_tags.outputs.tags }}
release_name: ${{ steps.extract_tags.outputs.tags }}
draft: false
prerelease: false
releaseassetsarm:
runs-on: ubuntu-latest
needs: release
strategy:
matrix:
assets: [
"kubernetes-client",
"kubernetes-node",
"kubernetes-server"
]
platform: ["linux-arm64","linux-amd64"]
extension: ["tar.gz", "tar.gz.sha256sum"]
steps:
- uses: actions/download-artifact@v4
with:
name: k8s_output_${{ matrix.platform }}
path: _output/release-tars
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Display structure of downloaded files
run: ls -R
working-directory: _output
- name: Upload Release Asset
id: upload-release-asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.release.outputs.upload_url }}
asset_path: ./_output/release-tars/${{ matrix.assets }}-${{ matrix.platform }}.${{ matrix.extension}}
asset_name: ${{ matrix.assets }}-${{ matrix.platform }}.${{ matrix.extension }}
asset_content_type: application/tar+gzip
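
The "Calculate checksums" step writes a companion file named "<tarball>.sha256sum" next to each release tarball, in sha256sum's output format (hex digest, whitespace, file name). As a consumer-side illustration only — the asset names below are hypothetical and not part of this PR — a minimal Go sketch that verifies a downloaded tarball against that file:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// verify compares a tarball's SHA-256 digest with the first field of the
// "<hash>  <name>" line produced by sha256sum in the release workflow.
func verify(tarball, sumFile string) error {
	sum, err := os.ReadFile(sumFile)
	if err != nil {
		return err
	}
	fields := strings.Fields(string(sum))
	if len(fields) == 0 {
		return fmt.Errorf("empty checksum file %s", sumFile)
	}
	want := fields[0]

	f, err := os.Open(tarball)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	got := hex.EncodeToString(h.Sum(nil))

	if got != want {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, want)
	}
	return nil
}

func main() {
	// Hypothetical asset names following the release job's naming scheme.
	if err := verify("kubernetes-server-linux-amd64.tar.gz",
		"kubernetes-server-linux-amd64.tar.gz.sha256sum"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("checksum OK")
}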
5 changes: 5 additions & 0 deletions build/build-image/Dockerfile
@@ -55,3 +55,8 @@ ADD rsyncd.password /
RUN chmod a+r /rsyncd.password
ADD rsyncd.sh /
RUN chmod a+rx /rsyncd.sh

# Enable fips build
ENV GOEXPERIMENT=boringcrypto
# Enable debug to keep symbols around, allowing us to do go tool nm
ENV DBG=1
6 changes: 6 additions & 0 deletions cmd/kube-apiserver/fips.go
@@ -0,0 +1,6 @@
//go:build fips

package main

// enforce fips compliance if boringcrypto is enabled
import _ "crypto/tls/fipsonly"
6 changes: 6 additions & 0 deletions cmd/kube-controller-manager/fips.go
@@ -0,0 +1,6 @@
//go:build fips

package main

// enforce fips compliance if boringcrypto is enabled
import _ "crypto/tls/fipsonly"
6 changes: 6 additions & 0 deletions cmd/kube-scheduler/fips.go
@@ -0,0 +1,6 @@
//go:build fips

package main

// enforce fips compliance if boringcrypto is enabled
import _ "crypto/tls/fipsonly"
6 changes: 6 additions & 0 deletions cmd/kubectl/fips.go
@@ -0,0 +1,6 @@
//go:build fips

package main

// enforce fips compliance if boringcrypto is enabled
import _ "crypto/tls/fipsonly"
6 changes: 6 additions & 0 deletions cmd/kubelet/fips.go
@@ -0,0 +1,6 @@
//go:build fips

package main

// enforce fips compliance if boringcrypto is enabled
import _ "crypto/tls/fipsonly"
35 changes: 31 additions & 4 deletions pkg/controller/daemon/daemon_controller.go
@@ -1289,8 +1289,14 @@ func NodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (bool, bool) {
}

taints := node.Spec.Taints
fitsNodeName, fitsNodeAffinity, fitsTaints := predicates(pod, node, taints)
if !fitsNodeName || !fitsNodeAffinity {
fitsNodeName := len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == node.Name
if !fitsNodeName {
return false, false
}

fitsNodeName, fitsNodeSelector, fitsNodeAffinity, fitsTaints := predicates(pod, node, taints)

if !fitsNodeName || !fitsNodeSelector {
return false, false
}

@@ -1302,14 +1308,35 @@ func NodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (bool, bool) {
return false, !hasUntoleratedTaint
}

if !fitsNodeAffinity {
// IgnoredDuringExecution means that if the node labels change after Kubernetes schedules the Pod, the Pod continues to run.
return false, true
}

return true, true
}

// predicates checks if a DaemonSet's pod can run on a node.
func predicates(pod *v1.Pod, node *v1.Node, taints []v1.Taint) (fitsNodeName, fitsNodeAffinity, fitsTaints bool) {
func predicates(pod *v1.Pod, node *v1.Node, taints []v1.Taint) (fitsNodeName, fitsNodeSelector, fitsNodeAffinity, fitsTaints bool) {
fitsNodeName = len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == node.Name

if len(pod.Spec.NodeSelector) > 0 {
selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
fitsNodeSelector = selector.Matches(labels.Set(node.Labels))
} else {
fitsNodeSelector = true
}

if pod.Spec.Affinity != nil &&
pod.Spec.Affinity.NodeAffinity != nil &&
pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
affinity := nodeaffinity.NewLazyErrorNodeSelector(pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
fitsNodeAffinity, _ = affinity.Match(node)
} else {
fitsNodeAffinity = true
}

// Ignore parsing errors for backwards compatibility.
fitsNodeAffinity, _ = nodeaffinity.GetRequiredNodeAffinity(pod).Match(node)
_, hasUntoleratedTaint := v1helper.FindMatchingUntoleratedTaint(taints, pod.Spec.Tolerations, func(t *v1.Taint) bool {
return t.Effect == v1.TaintEffectNoExecute || t.Effect == v1.TaintEffectNoSchedule
})
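
With this change, a node-selector mismatch still returns (false, false), while a failed RequiredDuringSchedulingIgnoredDuringExecution affinity now returns (false, true): the pod should not be (re)scheduled onto the node, but an already-running daemon pod is left alone. A small illustration of the matcher the new code relies on, nodeaffinity.NewLazyErrorNodeSelector, using a hypothetical color=blue requirement mirroring the tests below; this sketch is not part of the PR:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
)

func main() {
	// RequiredDuringSchedulingIgnoredDuringExecution term requiring color=blue.
	required := &v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{{
			MatchExpressions: []v1.NodeSelectorRequirement{{
				Key:      "color",
				Operator: v1.NodeSelectorOpIn,
				Values:   []string{"blue"},
			}},
		}},
	}
	selector := nodeaffinity.NewLazyErrorNodeSelector(required)

	labeled := &v1.Node{}
	labeled.Labels = map[string]string{"color": "blue"}
	unlabeled := &v1.Node{} // the same node after its labels were removed

	fitsBefore, _ := selector.Match(labeled)  // true: new daemon pods may be placed here
	fitsAfter, _ := selector.Match(unlabeled) // false: NodeShouldRunDaemonPod now reports (false, true),
	                                          // so the existing daemon pod keeps running instead of being evicted
	fmt.Println(fitsBefore, fitsAfter)
}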
80 changes: 78 additions & 2 deletions pkg/controller/daemon/daemon_controller_test.go
@@ -1640,6 +1640,52 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
}
}

// RequiredDuringSchedulingIgnoredDuringExecution means that if the node labels change after Kubernetes schedules the Pod, the Pod continues to run.
func TestNodeAffinityAndChangeNodeLabels(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
for _, strategy := range updateStrategies() {
daemon := newDaemonSet("foo")
daemon.Spec.UpdateStrategy = *strategy
daemon.Spec.Template.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "color",
Operator: v1.NodeSelectorOpIn,
Values: []string{simpleNodeLabel["color"]},
},
},
},
},
},
},
}
_, ctx := ktesting.NewTestContext(t)

manager, podControl, _, err := newTestController(ctx, daemon)
if err != nil {
t.Fatalf("error creating DaemonSetsController: %v", err)
}
node1 := newNode("node-1", simpleNodeLabel)
node2 := newNode("node-2", simpleNodeLabel)
manager.nodeStore.Add(node1)
manager.nodeStore.Add(node2)
err = manager.dsStore.Add(daemon)
if err != nil {
t.Fatal(err)
}
expectSyncDaemonSets(t, manager, daemon, podControl, 2, 0, 0)
oldNode := node1.DeepCopy()
node1.Labels = nil
manager.updateNode(logger, oldNode, node1)
manager.nodeStore.Add(newNode("node-3", nil))
expectSyncDaemonSets(t, manager, daemon, podControl, 2, 0, 0)
}
}

func TestNumberReadyStatus(t *testing.T) {
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
@@ -2284,7 +2330,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
shouldContinueRunning: true,
},
{
predicateName: "ErrPodAffinityNotMatch",
predicateName: "PodAffinityNotMatchDuringExecution",
ds: &apps.DaemonSet{
Spec: apps.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
@@ -2315,7 +2361,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
},
},
shouldRun: false,
shouldContinueRunning: false,
shouldContinueRunning: true,
},
{
predicateName: "ShouldRunDaemonPod",
@@ -2497,6 +2543,36 @@ func TestUpdateNode(t *testing.T) {
expectedCreates: func() int { return 0 },
preExistingPod: true,
},
{
test: "Node labels changed, ds with NodeAffinity ",
oldNode: newNode("node1", simpleNodeLabel),
newNode: newNode("node1", simpleNodeLabel2),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("ds")
ds.Spec.Template.Spec.Affinity = &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "color",
Operator: v1.NodeSelectorOpIn,
Values: []string{"blue"},
},
},
},
},
},
},
}
return ds
}(),
shouldEnqueue: true,
expectedCreates: func() int {
return 1
},
},
}
for _, c := range cases {
for _, strategy := range updateStrategies() {
14 changes: 14 additions & 0 deletions pkg/kubelet/kubelet.go
@@ -123,6 +123,7 @@ import (
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/userns"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/util/manager"
"k8s.io/kubernetes/pkg/kubelet/util/queue"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
@@ -2900,6 +2901,8 @@ func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) {
// TODO: reconcile being calculated in the config manager is questionable, and avoiding
// extra syncs may no longer be necessary. Reevaluate whether Reconcile and Sync can be
// merged (after resolving the next two TODOs).
sidecarsStatus := status.GetSidecarsStatus(pod)
klog.Infof("Pod: %s, status: Present=%v,Ready=%v,ContainersWaiting=%v", format.Pod(pod), sidecarsStatus.SidecarsPresent, sidecarsStatus.SidecarsReady, sidecarsStatus.ContainersWaiting)

// Reconcile Pod "Ready" condition if necessary. Trigger sync pod for reconciliation.
// TODO: this should be unnecessary today - determine what is the cause for this to
@@ -2912,6 +2915,17 @@ func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) {
UpdateType: kubetypes.SyncPodSync,
StartTime: start,
})
} else if sidecarsStatus.ContainersWaiting {
// if containers aren't running and the sidecars are all ready trigger a sync so that the containers get started
if sidecarsStatus.SidecarsPresent && sidecarsStatus.SidecarsReady {
klog.Infof("Pod: %s: sidecars: sidecars are ready, dispatching work", format.Pod(pod))
kl.podWorkers.UpdatePod(UpdatePodOptions{
Pod: pod,
MirrorPod: mirrorPod,
UpdateType: kubetypes.SyncPodSync,
StartTime: start,
})
}
}

// After an evicted pod is synced, all dead containers in the pod can be removed.