feat(k8s): add cluster-buildkit buildMode
This adds a new `cluster-buildkit` buildMode to the kubernetes provider,
which uses the [buildkit](https://github.com/moby/buildkit) project.

It also works differently from the existing `cluster-docker` and `kaniko`
build modes in that it doesn't require any cluster-wide components,
such as the ever-frustrating NFS provisioner.

When configured, a `garden-buildkit` Deployment with an ephemeral
volume is deployed on demand to _the project namespace_. This greatly
simplifies administration and operation while still offering a
performance benefit over `kaniko`, and the deployment registry
(in-cluster or otherwise) continues to serve as a shared cache between
users/namespaces.

It can also be configured to run in "rootless" mode, i.e. run without
elevated privileges. This has some caveats (see the buildkit docs for
details) but offers a much improved security profile compared to the
privileged Docker and Kaniko builders.
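
For reference, a minimal project config sketch enabling the new mode — the exact `clusterBuildkit.rootless` field name is an assumption based on the provider schema:

```yaml
# Minimal sketch — field names assumed; adjust context/environments to your cluster.
kind: Project
name: my-project
environments:
  - name: dev
providers:
  - name: kubernetes
    environments: [dev]
    context: my-cluster
    buildMode: cluster-buildkit
    # Optional: run buildkitd without elevated privileges (see caveats above)
    clusterBuildkit:
      rootless: true
```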
edvald authored and thsig committed Feb 9, 2021
1 parent c9d0a58 commit 15f2ab5
Showing 45 changed files with 1,626 additions and 387 deletions.
4 changes: 3 additions & 1 deletion .circleci/config.yml
@@ -595,6 +595,7 @@ jobs:
machine:
image: 'ubuntu-1604:202004-01'
docker_layer_caching: true
resource_class: large
parameters:
minikubeVersion:
description: The Minikube version to use
@@ -639,11 +640,12 @@
--extra-config=kubeadm.ignore-preflight-errors=RSRC_INSUFFICIENT_CORES \
--kubernetes-version=$K8S_VERSION \
--vm-driver=none \
--cpus 2 \
--cpus 3 \
--memory 4096
sudo chown -R circleci:circleci /home/circleci/.kube /home/circleci/.minikube /etc/kubernetes
# Work around annoying issue on recent minikubes where namespaces take a long time to generate default service account
kubectl create namespace container-default
sleep 10
- run:
name: Integ tests
# Note: We skip tests that only work for remote environments
2 changes: 1 addition & 1 deletion core/package.json
@@ -241,7 +241,7 @@
"lint": "tslint -p .",
"migration:generate": "typeorm migration:generate --config ormconfig.js -n",
"integ": "mocha --opts test/mocha.integ.opts",
"integ-kind": "GARDEN_INTEG_TEST_MODE=local GARDEN_SKIP_TESTS=\"cluster-docker kaniko remote-only\" mocha --opts test/mocha.integ.opts",
"integ-kind": "GARDEN_INTEG_TEST_MODE=local GARDEN_SKIP_TESTS=\"cluster-docker cluster-buildkit cluster-buildkit-rootless kaniko remote-only\" mocha --opts test/mocha.integ.opts",
"integ-local": "GARDEN_INTEG_TEST_MODE=local GARDEN_SKIP_TESTS=remote-only mocha --opts test/mocha.integ.opts",
"integ-remote": "GARDEN_INTEG_TEST_MODE=remote GARDEN_SKIP_TESTS=local-only mocha --opts test/mocha.integ.opts",
"e2e": "cd test/e2e && ../../../bin/garden test",
30 changes: 18 additions & 12 deletions core/src/plugins/container/build.ts
@@ -82,27 +82,33 @@ export async function buildContainerModule({ ctx, module, log }: BuildModulePara
export function getDockerBuildFlags(module: ContainerModule) {
const args: string[] = []

for (const arg of getDockerBuildArgs(module)) {
args.push("--build-arg", arg)
}

if (module.spec.build.targetImage) {
args.push("--target", module.spec.build.targetImage)
}

args.push(...(module.spec.extraFlags || []))

return args
}

export function getDockerBuildArgs(module: ContainerModule) {
const buildArgs: PrimitiveMap = {
GARDEN_MODULE_VERSION: module.version.versionString,
...module.spec.buildArgs,
}

for (const [key, value] of Object.entries(buildArgs)) {
return Object.entries(buildArgs).map(([key, value]) => {
// 0 is falsy
if (value || value === 0) {
args.push("--build-arg", `${key}=${value}`)
return `${key}=${value}`
} else {
// If the value of a build-arg is null, Docker pulls it from
// the environment: https://docs.docker.com/engine/reference/commandline/build/
args.push("--build-arg", `${key}`)
return key
}
}

if (module.spec.build.targetImage) {
args.push("--target", module.spec.build.targetImage)
}

args.push(...(module.spec.extraFlags || []))

return args
})
}
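
To make the refactor concrete, a hypothetical walkthrough of the two helpers (plain object instead of the real `ContainerModule`):

```ts
// Hypothetical input — not the real ContainerModule type; values illustrative.
const buildArgs: Record<string, string | number | null> = {
  GARDEN_MODULE_VERSION: "v-0123456789", // always injected first
  NODE_ENV: "production",                // explicit value
  NPM_TOKEN: null,                       // null -> Docker reads it from the environment
}

// What getDockerBuildArgs() returns for this map:
const args = Object.entries(buildArgs).map(([key, value]) =>
  value || value === 0 ? `${key}=${value}` : key
)
// ["GARDEN_MODULE_VERSION=v-0123456789", "NODE_ENV=production", "NPM_TOKEN"]

// ...and getDockerBuildFlags() then prefixes each entry:
const flags = args.flatMap((arg) => ["--build-arg", arg])
// ["--build-arg", "GARDEN_MODULE_VERSION=v-0123456789", "--build-arg", "NODE_ENV=production", "--build-arg", "NPM_TOKEN"]
```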
28 changes: 28 additions & 0 deletions core/src/plugins/kubernetes/api.ts
@@ -28,6 +28,7 @@ import {
V1Status,
Exec,
Attach,
V1Deployment,
} from "@kubernetes/client-node"
import AsyncLock = require("async-lock")
import request = require("request-promise")
@@ -106,6 +107,14 @@ const crudMap = {
replace: "replaceNamespacedSecret",
delete: "deleteNamespacedSecret",
},
Deployment: {
cls: new V1Deployment(),
group: "apps",
read: "readNamespacedDeployment",
create: "createNamespacedDeployment",
replace: "replaceNamespacedDeployment",
delete: "deleteNamespacedDeployment",
},
}

type CrudMap = typeof crudMap
@@ -323,6 +332,9 @@ export class KubeApi {
}
}

/**
* Given a manifest, attempt to read the matching resource from the cluster.
*/
async readBySpec({ log, namespace, manifest }: { log: LogEntry; namespace: string; manifest: KubernetesResource }) {
log.silly(`Fetching Kubernetes resource ${manifest.apiVersion}/${manifest.kind}/${manifest.metadata.name}`)

@@ -332,6 +344,22 @@
return res.body
}

/**
* Same as readBySpec() but returns null if the resource is missing.
*/
async readOrNull(params: { log: LogEntry; namespace: string; manifest: KubernetesResource }) {
try {
const resource = await this.readBySpec(params)
return resource
} catch (err) {
if (err.statusCode === 404) {
return null
} else {
throw err
}
}
}

async listResources<T extends KubernetesResource>({
log,
apiVersion,
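A hypothetical call site for the new `readOrNull()` helper, using only the signature shown above:

```ts
// Hypothetical usage — a 404 from the cluster becomes null instead of an exception:
const existing = await api.readOrNull({ log, namespace, manifest })

if (!existing) {
  // The resource (e.g. the on-demand garden-buildkit Deployment) doesn't exist yet,
  // so it's safe to create it.
}
```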
@@ -25,7 +25,7 @@ import { apply } from "../kubectl"
import { waitForResources } from "../status/status"
import { execInWorkload } from "../container/exec"
import { dedent, deline } from "../../../util/string"
import { buildSyncDeploymentName } from "../container/build/common"
import { sharedBuildSyncDeploymentName } from "../container/build/common"
import { getDeploymentPod } from "../util"
import { getSystemNamespace } from "../namespace"
import { PluginContext } from "../../../plugin-context"
@@ -427,7 +427,7 @@ async function cleanupBuildSyncVolume({
status: "active",
})

const pod = await getDeploymentPod({ api, deploymentName: buildSyncDeploymentName, namespace: systemNamespace })
const pod = await getDeploymentPod({ api, deploymentName: sharedBuildSyncDeploymentName, namespace: systemNamespace })

const runner = new PodRunner({
api,
128 changes: 46 additions & 82 deletions core/src/plugins/kubernetes/commands/pull-image.ts
@@ -8,7 +8,7 @@

import fs from "fs"
import tmp from "tmp-promise"
import { KubernetesPluginContext, KubernetesProvider } from "../config"
import { KubernetesPluginContext } from "../config"
import { PluginError, ParameterError } from "../../../exceptions"
import { PluginCommand } from "../../../types/plugin/command"
import chalk from "chalk"
@@ -20,12 +20,12 @@ import { LogEntry } from "../../../logger/log-entry"
import { containerHelpers } from "../../container/helpers"
import { RuntimeError } from "../../../exceptions"
import { PodRunner } from "../run"
import { inClusterRegistryHostname } from "../constants"
import { inClusterRegistryHostname, gardenUtilDaemonDeploymentName } from "../constants"
import { getAppNamespace, getSystemNamespace } from "../namespace"
import { makePodName, getSkopeoContainer, getDockerAuthVolume } from "../util"
import { getDeploymentPod } from "../util"
import { getRegistryPortForward } from "../container/util"
import { PluginContext } from "../../../plugin-context"
import { KubernetesPod } from "../types"
import { buildkitDeploymentName } from "../container/build/buildkit"

export const pullImage: PluginCommand = {
name: "pull-image",
@@ -140,11 +140,21 @@ async function pullFromExternalRegistry(
localId: string
) {
const api = await KubeApi.factory(log, ctx, ctx.provider)
const namespace = await getAppNamespace(ctx, log, ctx.provider)
const podName = makePodName("skopeo", namespace, module.name)
const systemNamespace = await getSystemNamespace(ctx, ctx.provider, log)
const buildMode = ctx.provider.config.buildMode

let namespace: string
let deploymentName: string

if (buildMode === "cluster-buildkit") {
namespace = await getAppNamespace(ctx, log, ctx.provider)
deploymentName = buildkitDeploymentName
} else {
namespace = await getSystemNamespace(ctx, ctx.provider, log)
deploymentName = gardenUtilDaemonDeploymentName
}

const imageId = containerHelpers.getDeploymentImageId(module, module.version, ctx.provider.config.deploymentRegistry)
const tarName = `${module.name}-${module.version.versionString}`
const tarName = `/tmp/${module.name}-${module.version.versionString}`

const skopeoCommand = [
"skopeo",
@@ -155,18 +165,29 @@
`docker-archive:${tarName}`,
]

const runner = await launchSkopeoContainer({
const pod = await getDeploymentPod({
api,
deploymentName,
namespace,
})
const runner = new PodRunner({
api,
ctx,
provider: ctx.provider,
api,
podName,
systemNamespace,
namespace,
pod,
})

await runner.exec({
command: ["sh", "-c", skopeoCommand.join(" ")],
containerName: "util",
log,
timeoutSec: 60 * 5, // 5 minutes
})

try {
await pullImageFromRegistry(runner, skopeoCommand.join(" "), log)
await importImage({ module, runner, tarName, imageId, log, ctx })

await containerHelpers.dockerCli({ cwd: module.buildPath, args: ["tag", imageId, localId], log, ctx })
await containerHelpers.dockerCli({ cwd: module.buildPath, args: ["rmi", imageId], log, ctx })
} catch (err) {
@@ -175,7 +196,15 @@
imageId,
})
} finally {
await runner.stop()
try {
await runner.exec({
command: ["rm", "-rf", tarName],
containerName: "util",
log,
})
} catch (err) {
log.warn("Failed cleaning up temporary file: " + err.message)
}
}
}

@@ -194,14 +223,14 @@
log: LogEntry
ctx: PluginContext
}) {
const sourcePath = `/${tarName}`
const getOutputCommand = ["cat", sourcePath]
const getOutputCommand = ["cat", tarName]

await tmp.withFile(async ({ path }) => {
let writeStream = fs.createWriteStream(path)

await runner.exec({
command: getOutputCommand,
containerName: "skopeo",
containerName: "util",
log,
stdout: writeStream,
})
@@ -210,68 +239,3 @@
await containerHelpers.dockerCli({ cwd: module.buildPath, args, log, ctx })
})
}
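
Pulling the hunks above together, the import path amounts to this sketch — the elided `args` passed to `dockerCli` are assumed to be a `docker load`:

```ts
// Sketch only: stream the tarball out of the util container, then load it locally.
await tmp.withFile(async ({ path }) => {
  const writeStream = fs.createWriteStream(path)

  await runner.exec({
    command: ["cat", tarName], // tarName is the /tmp/<module>-<version> archive written by skopeo
    containerName: "util",
    log,
    stdout: writeStream,
  })

  // Assumed flags — the diff elides where `args` is assembled:
  await containerHelpers.dockerCli({ cwd: module.buildPath, args: ["load", "-i", path], log, ctx })
})
```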

async function pullImageFromRegistry(runner: PodRunner, command: string, log: LogEntry) {
// TODO: make this timeout configurable
await runner.exec({
command: ["sh", "-c", command],
containerName: "skopeo",
log,
timeoutSec: 60 * 1000 * 5, // 5 minutes,
})
}

async function launchSkopeoContainer({
ctx,
provider,
api,
podName,
systemNamespace,
log,
}: {
ctx: PluginContext
provider: KubernetesProvider
api: KubeApi
podName: string
systemNamespace: string
log: LogEntry
}): Promise<PodRunner> {
const sleepCommand = "sleep 86400"

const pod: KubernetesPod = {
apiVersion: "v1",
kind: "Pod",
metadata: {
name: podName,
namespace: systemNamespace,
},
spec: {
shareProcessNamespace: true,
volumes: [
// Mount the docker auth secret, so skopeo can inspect private registries.
getDockerAuthVolume(),
],
containers: [getSkopeoContainer(sleepCommand)],
},
}

const runner = new PodRunner({
ctx,
api,
pod,
provider,
namespace: systemNamespace,
})

const { status } = await runner.start({
log,
})

if (status.state !== "ready") {
throw new RuntimeError("Failed to start skopeo container", {
status,
})
}

return runner
}
