
Commit

Merge pull request #8884 from mattfarina/new-image-location
Moving Tiller to new location
mattfarina committed Oct 15, 2020
2 parents d9977b8 + 01dc62e commit 7dc8737
Showing 8 changed files with 72 additions and 44 deletions.
64 changes: 46 additions & 18 deletions .circleci/deploy.sh
@@ -29,8 +29,9 @@ VERSION=
if [[ -n "${CIRCLE_TAG:-}" ]]; then
VERSION="${CIRCLE_TAG}"
else
echo "Skipping deploy step; this is not a tag"
exit
# Canary version is used with helm init --canary-image flag.
# Does not push canary binary which is Helm v3.
VERSION="canary"
fi

echo "Install docker client"
@@ -44,35 +45,62 @@ export CLOUDSDK_CORE_DISABLE_PROMPTS=1
curl https://sdk.cloud.google.com | bash
${HOME}/google-cloud-sdk/bin/gcloud --quiet components update

echo "Configuring GitHub Container Repository configuration"
echo ${GH_TOKEN_PUSH_TILLER} | docker login ghcr.io -u helm-bot --password-stdin

echo "Configuring gcloud authentication"
echo "${GCLOUD_SERVICE_KEY}" | base64 --decode > "${HOME}/gcloud-service-key.json"
${HOME}/google-cloud-sdk/bin/gcloud auth activate-service-account --key-file "${HOME}/gcloud-service-key.json"
${HOME}/google-cloud-sdk/bin/gcloud config set project "${PROJECT_NAME}"
docker login -u _json_key -p "$(cat ${HOME}/gcloud-service-key.json)" https://gcr.io

echo "Installing Azure CLI"
apt update
apt install -y apt-transport-https
echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ stretch main" | tee /etc/apt/sources.list.d/azure-cli.list
curl -L https://packages.microsoft.com/keys/microsoft.asc | apt-key add
apt update
apt install -y azure-cli
echo "Configuring Docker Hub configuration"
echo ${DOCKER_PASS} | docker login -u ${DOCKER_USER} --password-stdin

echo "Configuring Quay configuration"
echo ${QUAY_PASS} | docker login quay.io -u ${QUAY_USER} --password-stdin

echo "Building the tiller image"
make docker-build VERSION="${VERSION}"

# The image is pushed to the GitHub Container Registry (ghcr.io),
# GCR, Docker Hub, and Quay.
echo "Pushing image to ghcr.io"
docker push "ghcr.io/helm/tiller:${VERSION}"

echo "Pushing image to gcr.io"
docker tag "ghcr.io/helm/tiller:${VERSION}" "gcr.io/kubernetes-helm/tiller:${VERSION}"
docker push "gcr.io/kubernetes-helm/tiller:${VERSION}"

echo "Building helm binaries"
make build-cross
make dist checksum VERSION="${VERSION}"
echo "Pushing image to Docker Hub"
docker tag "ghcr.io/helm/tiller:${VERSION}" "helmpack/tiller:${VERSION}"
docker push "helmpack/tiller:${VERSION}"

echo "Pushing binaries to gs bucket"
${HOME}/google-cloud-sdk/bin/gsutil cp ./_dist/* "gs://${PROJECT_NAME}"
echo "Pushing image to Quay"
docker tag "ghcr.io/helm/tiller:${VERSION}" "quay.io/helmpack/tiller:${VERSION}"
docker push "quay.io/helmpack/tiller:${VERSION}"

echo "Pushing binaries to Azure"
az storage blob upload-batch -s _dist/ -d "$AZURE_STORAGE_CONTAINER_NAME" --pattern 'helm-*' --connection-string "$AZURE_STORAGE_CONNECTION_STRING"
# Canary version is used with helm init --canary-image flag.
# Does not push canary binary which is Helm v3.
if [ "$VERSION" != "canary" ]; then
echo "Installing Azure CLI"
apt update
apt install -y apt-transport-https
echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ stretch main" | tee /etc/apt/sources.list.d/azure-cli.list
curl -L https://packages.microsoft.com/keys/microsoft.asc | apt-key add
apt update
apt install -y azure-cli

echo "Pushing KEYS file to Azure"
az storage blob upload -f "KEYS" -n "KEYS" -c "$AZURE_STORAGE_CONTAINER_NAME" --connection-string "$AZURE_STORAGE_CONNECTION_STRING"
echo "Building helm binaries"
make build-cross
make dist checksum VERSION="${VERSION}"

echo "Pushing binaries to gs bucket"
${HOME}/google-cloud-sdk/bin/gsutil cp ./_dist/* "gs://${PROJECT_NAME}"

echo "Pushing binaries to Azure"
az storage blob upload-batch -s _dist/ -d "$AZURE_STORAGE_CONTAINER_NAME" --pattern 'helm-*' --connection-string "$AZURE_STORAGE_CONNECTION_STRING"

echo "Pushing KEYS file to Azure"
az storage blob upload -f "KEYS" -n "KEYS" -c "$AZURE_STORAGE_CONTAINER_NAME" --connection-string "$AZURE_STORAGE_CONNECTION_STRING"
fi
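
With this change the deploy script builds a single Tiller image tagged for ghcr.io and re-tags it for GCR, Docker Hub, and Quay. A hedged spot-check of the four destinations for a tagged release might look like this (the v2.17.0 tag is borrowed from the init.go hunk below; public pull access is assumed):

```console
$ docker pull ghcr.io/helm/tiller:v2.17.0
$ docker pull gcr.io/kubernetes-helm/tiller:v2.17.0
$ docker pull helmpack/tiller:v2.17.0
$ docker pull quay.io/helmpack/tiller:v2.17.0
```
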
4 changes: 2 additions & 2 deletions Makefile
@@ -1,5 +1,5 @@
DOCKER_REGISTRY ?= gcr.io
IMAGE_PREFIX ?= kubernetes-helm
DOCKER_REGISTRY ?= ghcr.io
IMAGE_PREFIX ?= helm
DEV_IMAGE ?= golang:1.14
SHORT_NAME ?= tiller
SHORT_NAME_RUDDER ?= rudder
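
The new defaults move the image that `make docker-build` produces. As a rough sketch — assuming the image reference is composed as $(DOCKER_REGISTRY)/$(IMAGE_PREFIX)/$(SHORT_NAME):&lt;version&gt;, which matches what deploy.sh pushes, since the rule itself is not shown in this hunk — the defaults and an override would behave like this:

```console
# New defaults yield ghcr.io/helm/tiller:canary
$ make docker-build VERSION=canary

# The old location remains reachable by overriding the variables
$ make docker-build VERSION=canary DOCKER_REGISTRY=gcr.io IMAGE_PREFIX=kubernetes-helm
```
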
2 changes: 1 addition & 1 deletion cmd/helm/init.go
@@ -316,7 +316,7 @@ func (i *initCmd) run() error {
fmt.Fprintf(i.out, "\nWarning: You appear to be using an unreleased version of Helm. Please either use the\n"+
"--canary-image flag, or specify your desired tiller version with --tiller-image.\n\n"+
"Ex:\n"+
"$ helm init --tiller-image gcr.io/kubernetes-helm/tiller:v2.8.2\n\n")
"$ helm init --tiller-image ghcr.io/helm/tiller:v2.17.0\n\n")
}

return nil
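
For anyone running an unreleased build, the two flags named in this warning (and in deploy.sh above) would be used roughly like this; a sketch, not an exhaustive list of options:

```console
# Track the canary image from the new registry
$ helm init --canary-image

# Or pin an exact Tiller release
$ helm init --tiller-image ghcr.io/helm/tiller:v2.17.0
```
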
32 changes: 16 additions & 16 deletions cmd/helm/installer/install_test.go
@@ -24,7 +24,7 @@ import (

"github.com/ghodss/yaml"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
@@ -42,8 +42,8 @@ func TestDeployment(t *testing.T) {
expect string
imagePullPolicy v1.PullPolicy
}{
{"default", "", false, "gcr.io/kubernetes-helm/tiller:" + version.Version, "IfNotPresent"},
{"canary", "example.com/tiller", true, "gcr.io/kubernetes-helm/tiller:canary", "Always"},
{"default", "", false, "ghcr.io/helm/tiller:" + version.Version, "IfNotPresent"},
{"canary", "example.com/tiller", true, "ghcr.io/helm/tiller:canary", "Always"},
{"custom", "example.com/tiller:latest", false, "example.com/tiller:latest", "IfNotPresent"},
}

@@ -55,7 +55,7 @@ func TestDeployment(t *testing.T) {

// Unreleased versions of helm don't have a release image. See issue 3370
if tt.name == "default" && version.BuildMetadata == "unreleased" {
tt.expect = "gcr.io/kubernetes-helm/tiller:canary"
tt.expect = "ghcr.io/helm/tiller:canary"
}
if got := dep.Spec.Template.Spec.Containers[0].Image; got != tt.expect {
t.Errorf("%s: expected image %q, got %q", tt.name, tt.expect, got)
@@ -80,8 +80,8 @@ func TestDeploymentForServiceAccount(t *testing.T) {
imagePullPolicy v1.PullPolicy
serviceAccount string
}{
{"withSA", "", false, "gcr.io/kubernetes-helm/tiller:latest", "IfNotPresent", "service-account"},
{"withoutSA", "", false, "gcr.io/kubernetes-helm/tiller:latest", "IfNotPresent", ""},
{"withSA", "", false, "ghcr.io/helm/tiller:latest", "IfNotPresent", "service-account"},
{"withoutSA", "", false, "ghcr.io/helm/tiller:latest", "IfNotPresent", ""},
}
for _, tt := range tests {
opts := &Options{Namespace: v1.NamespaceDefault, ImageSpec: tt.image, UseCanary: tt.canary, ServiceAccount: tt.serviceAccount}
@@ -187,7 +187,7 @@ func TestSecretManifest(t *testing.T) {
}

func TestInstall(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
image := "ghcr.io/helm/tiller:v2.0.0"

fc := &fake.Clientset{}
fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
@@ -234,7 +234,7 @@ func TestInstall(t *testing.T) {
}

func TestInstallHA(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
image := "ghcr.io/helm/tiller:v2.0.0"

fc := &fake.Clientset{}
fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
@@ -257,7 +257,7 @@ func TestInstallHA(t *testing.T) {
}

func TestInstall_WithTLS(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
image := "ghcr.io/helm/tiller:v2.0.0"
name := "tiller-secret"

fc := &fake.Clientset{}
@@ -332,7 +332,7 @@ func TestInstall_canary(t *testing.T) {
fc.AddReactor("create", "deployments", func(action testcore.Action) (bool, runtime.Object, error) {
obj := action.(testcore.CreateAction).GetObject().(*appsv1.Deployment)
i := obj.Spec.Template.Spec.Containers[0].Image
if i != "gcr.io/kubernetes-helm/tiller:canary" {
if i != "ghcr.io/helm/tiller:canary" {
t.Errorf("expected canary image, got '%s'", i)
}
return true, obj, nil
@@ -353,7 +353,7 @@ func TestInstall_canary(t *testing.T) {
}

func TestUpgrade(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
image := "ghcr.io/helm/tiller:v2.0.0"
serviceAccount := "newServiceAccount"
existingDeployment, _ := generateDeployment(&Options{
Namespace: v1.NamespaceDefault,
@@ -394,7 +394,7 @@ func TestUpgrade(t *testing.T) {
}

func TestUpgrade_serviceNotFound(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
image := "ghcr.io/helm/tiller:v2.0.0"

existingDeployment, _ := generateDeployment(&Options{
Namespace: v1.NamespaceDefault,
@@ -437,7 +437,7 @@ func TestUpgrade_serviceNotFound(t *testing.T) {
}

func TestUgrade_newerVersion(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
image := "ghcr.io/helm/tiller:v2.0.0"
serviceAccount := "newServiceAccount"
existingDeployment, _ := generateDeployment(&Options{
Namespace: v1.NamespaceDefault,
@@ -497,7 +497,7 @@ func TestUgrade_newerVersion(t *testing.T) {
}

func TestUpgrade_identical(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
image := "ghcr.io/helm/tiller:v2.0.0"
serviceAccount := "newServiceAccount"
existingDeployment, _ := generateDeployment(&Options{
Namespace: v1.NamespaceDefault,
@@ -538,7 +538,7 @@ func TestUpgrade_identical(t *testing.T) {
}

func TestUpgrade_canaryClient(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:canary"
image := "ghcr.io/helm/tiller:canary"
serviceAccount := "newServiceAccount"
existingDeployment, _ := generateDeployment(&Options{
Namespace: v1.NamespaceDefault,
@@ -579,7 +579,7 @@ func TestUpgrade_canaryClient(t *testing.T) {
}

func TestUpgrade_canaryServer(t *testing.T) {
image := "gcr.io/kubernetes-helm/tiller:v2.0.0"
image := "ghcr.io/helm/tiller:v2.0.0"
serviceAccount := "newServiceAccount"
existingDeployment, _ := generateDeployment(&Options{
Namespace: v1.NamespaceDefault,
4 changes: 2 additions & 2 deletions cmd/helm/installer/options.go
@@ -19,13 +19,13 @@ package installer // import "k8s.io/helm/cmd/helm/installer"
import (
"fmt"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/helm/pkg/strvals"
"k8s.io/helm/pkg/version"
)

const (
defaultImage = "gcr.io/kubernetes-helm/tiller"
defaultImage = "ghcr.io/helm/tiller"

fmtJSON OutputFormat = "json"
fmtYAML OutputFormat = "yaml"
2 changes: 1 addition & 1 deletion docs/developers.md
@@ -92,7 +92,7 @@ development may not be available in Google Cloud Container Registry. If you're getting
image pull errors, you can override the version of Tiller. Example:

```console
helm init --tiller-image=gcr.io/kubernetes-helm/tiller:2.7.2
helm init --tiller-image=ghcr.io/helm/tiller:2.17.0
```

Or use the latest version:
2 changes: 1 addition & 1 deletion docs/install.md
@@ -242,7 +242,7 @@ the Tiller image:

```console
$ export TILLER_TAG=v2.0.0-beta.1 # Or whatever version you want
$ kubectl --namespace=kube-system set image deployments/tiller-deploy tiller=gcr.io/kubernetes-helm/tiller:$TILLER_TAG
$ kubectl --namespace=kube-system set image deployments/tiller-deploy tiller=ghcr.io/helm/tiller:$TILLER_TAG
deployment "tiller-deploy" image updated
```
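
One hedged way to confirm the rollout picked up the relocated image (namespace and deployment name are taken from the command above; the output line is illustrative):

```console
$ kubectl --namespace=kube-system get deployment tiller-deploy \
    -o jsonpath='{.spec.template.spec.containers[0].image}'
ghcr.io/helm/tiller:v2.0.0-beta.1
```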

6 changes: 3 additions & 3 deletions pkg/helm/portforwarder/portforwarder_test.go
@@ -19,7 +19,7 @@ package portforwarder
import (
"testing"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
@@ -99,11 +99,11 @@ func TestGetTillerPodImage(t *testing.T) {
Containers: []v1.Container{
{
Name: "tiller",
Image: "gcr.io/kubernetes-helm/tiller:v2.0.0",
Image: "ghcr.io/helm/tiller:v2.0.0",
},
},
},
expected: "gcr.io/kubernetes-helm/tiller:v2.0.0",
expected: "ghcr.io/helm/tiller:v2.0.0",
err: false,
},
{
