chore: azure e2e
Add code to support azure e2e

Signed-off-by: Noel Georgi <git@frezbo.dev>
frezbo committed Mar 23, 2024
1 parent 55dd41c commit ee51f04
Showing 10 changed files with 213 additions and 130 deletions.
91 changes: 79 additions & 12 deletions .drone.jsonnet
@@ -739,16 +739,17 @@ local integration_pipelines = [
Pipeline('cron-integration-qemu-csi', default_pipeline_steps + [integration_qemu_csi], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-integration-images', default_pipeline_steps + [integration_images], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-integration-reproducibility-test', default_pipeline_steps + [integration_reproducibility_test], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-image-factory', default_pipeline_steps + [
integration_factory_16_iso,
integration_factory_16_image,
integration_factory_16_pxe,
integration_factory_16_secureboot,
integration_factory_15_iso,
integration_factory_13_iso,
integration_factory_13_image,
],
[default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-image-factory',
default_pipeline_steps + [
integration_factory_16_iso,
integration_factory_16_image,
integration_factory_16_pxe,
integration_factory_16_secureboot,
integration_factory_15_iso,
integration_factory_13_iso,
integration_factory_13_image,
],
[default_cron_pipeline]) + cron_trigger(['nightly']),
];


@@ -835,11 +836,75 @@ local E2EAWS(target) =

targets;

local E2EAzure() =
local depends_on = [load_artifacts];

local e2e_azure_prepare = Step(
'e2e-azure-prepare',
depends_on=depends_on,
environment=creds_env_vars {
IMAGE_REGISTRY: local_registry,
},
extra_commands=[
'az login --service-principal -u "$${AZURE_CLIENT_ID}" -p "$${AZURE_CLIENT_SECRET}" --tenant "$${AZURE_TENANT_ID}"',
'az storage blob upload-batch --overwrite -s _out --pattern "e2e-azure-generated/*" -d "${CI_COMMIT_SHA}${DRONE_TAG//./-}"',
]
);

local tf_apply = TriggerDownstream(
'tf-apply',
'e2e-talos-tf-apply',
['siderolabs/contrib@main'],
params=[
'BUCKET_PATH=${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'TYPE=azure',
],
depends_on=[e2e_azure_prepare],
);

local e2e_azure_tf_apply_post = Step(
'e2e-azure-download-artifacts',
with_make=false,
environment=creds_env_vars,
extra_commands=[
'az login --service-principal -u "$${AZURE_CLIENT_ID}" -p "$${AZURE_CLIENT_SECRET}" --tenant "$${AZURE_TENANT_ID}"',
'az storage blob download -f _out/e2e-azure-talosconfig -n e2e-azure-talosconfig -c ${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'az storage blob download -f _out/e2e-azure-kubeconfig -n e2e-azure-kubeconfig -c ${CI_COMMIT_SHA}${DRONE_TAG//./-}',
],
depends_on=[tf_apply],
);

local e2e_azure = Step(
'e2e-azure',
depends_on=[e2e_azure_tf_apply_post],
environment=creds_env_vars {}
);

local tf_destroy = TriggerDownstream(
'tf-destroy',
'e2e-talos-tf-destroy',
['siderolabs/contrib@main'],
params=[
'BUCKET_PATH=${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'TYPE=azure',
],
depends_on=[e2e_azure],
when={
status: [
'failure',
'success',
],
},
);

local targets = [e2e_azure_prepare, tf_apply, e2e_azure_tf_apply_post, e2e_azure, tf_destroy];

targets;
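
Note on the container naming above: "${CI_COMMIT_SHA}${DRONE_TAG//./-}" is bash parameter expansion that appends the Drone tag, with dots replaced by dashes, to the commit SHA, giving one storage container name shared by the prepare, download, and terraform steps. A minimal Go sketch of the same construction (the bucketPath helper and sample values are illustrative only, not part of this commit):

// Illustrative sketch of how the storage container / bucket path above is composed.
package main

import (
	"fmt"
	"strings"
)

func bucketPath(commitSHA, droneTag string) string {
	// mirrors "${CI_COMMIT_SHA}${DRONE_TAG//./-}": dots in the tag become dashes
	return commitSHA + strings.ReplaceAll(droneTag, ".", "-")
}

func main() {
	fmt.Println(bucketPath("ee51f04", "v1.7.0")) // ee51f04v1-7-0
}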


local e2e_aws = [step for step in E2EAWS('default')];
local e2e_aws_nvidia_oss = [step for step in E2EAWS('nvidia-oss')];

local e2e_azure = Step('e2e-azure', depends_on=[e2e_capi], environment=creds_env_vars);
local e2e_azure = [step for step in E2EAzure()];
local e2e_gcp = Step('e2e-gcp', depends_on=[e2e_capi], environment=creds_env_vars);

local e2e_trigger(names) = {
@@ -854,10 +919,12 @@ local e2e_pipelines = [
// regular pipelines, triggered on promote events
Pipeline('e2e-aws', default_pipeline_steps + e2e_aws) + e2e_trigger(['e2e-aws']),
Pipeline('e2e-aws-nvidia-oss', default_pipeline_steps + e2e_aws_nvidia_oss) + e2e_trigger(['e2e-aws-nvidia-oss']),
Pipeline('e2e-azure', default_pipeline_steps + e2e_azure) + e2e_trigger(['e2e-azure']),
Pipeline('e2e-gcp', default_pipeline_steps + [capi_docker, e2e_capi, e2e_gcp]) + e2e_trigger(['e2e-gcp']),

// cron pipelines, triggered on schedule events
Pipeline('cron-e2e-aws', default_pipeline_steps + e2e_aws, [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
Pipeline('cron-e2e-azure', default_pipeline_steps + e2e_azure, [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
Pipeline('cron-e2e-gcp', default_pipeline_steps + [capi_docker, e2e_capi, e2e_gcp], [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
];

2 changes: 1 addition & 1 deletion Makefile
@@ -370,7 +370,7 @@ image-%: ## Builds the specified image. Valid options are aws, azure, digital-oc
docker run --rm -t -v /dev:/dev -v $(PWD)/$(ARTIFACTS):/secureboot:ro -v $(PWD)/$(ARTIFACTS):/out --network=host --privileged $(REGISTRY_AND_USERNAME)/imager:$(IMAGE_TAG) $* --arch $$arch $(IMAGER_ARGS) ; \
done

images-essential: image-aws image-gcp image-metal secureboot-installer ## Builds only essential images used in the CI (AWS, GCP, and Metal).
images-essential: image-aws image-azure image-gcp image-metal secureboot-installer ## Builds only essential images used in the CI (AWS, GCP, and Metal).

images: image-aws image-azure image-digital-ocean image-exoscale image-gcp image-hcloud image-iso image-metal image-nocloud image-opennebula image-openstack image-oracle image-scaleway image-upcloud image-vmware image-vultr ## Builds all known images (AWS, Azure, DigitalOcean, Exoscale, GCP, HCloud, Metal, NoCloud, OpenNebula, Openstack, Oracle, Scaleway, UpCloud, Vultr and VMware).

2 changes: 1 addition & 1 deletion hack/cloud-image-uploader.sh
@@ -4,4 +4,4 @@ set -e

cd hack/cloud-image-uploader

go run . --artifacts-path="../../${ARTIFACTS}" --tag="${TAG}" --abbrev-tag="${ABBREV_TAG}" "$@"
go run . --artifacts-path="../../${ARTIFACTS}" --tag="${TAG}" "$@"
125 changes: 60 additions & 65 deletions hack/cloud-image-uploader/azure.go
@@ -27,6 +27,7 @@ import (
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/blang/semver/v4"
"github.com/siderolabs/gen/channel"
"github.com/siderolabs/gen/xslices"
"github.com/ulikunitz/xz"
"golang.org/x/sync/errgroup"
)
@@ -47,29 +48,31 @@ var azureArchitectures = map[string]string{
type AzureUploader struct {
Options Options

preRelease bool

helper azureHelper
}

// extractVersion extracts the version number in the format of int.int.int for Azure and assigns to the Options.AzureTag value.
func (azu *AzureUploader) setVersion() error {
v, err := semver.ParseTolerant(azu.Options.AzureAbbrevTag)
v, err := semver.ParseTolerant(azu.Options.Tag)
if err != nil {
return err
}

versionCore := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)

if fmt.Sprintf("v%s", versionCore) != azu.Options.AzureAbbrevTag {
azu.helper.version = versionCore
azu.Options.AzureGalleryName = "SideroLabs"

if fmt.Sprintf("v%s", versionCore) != azu.Options.Tag {
azu.preRelease = true
azu.Options.AzureGalleryName = "SideroGalleryTest"
azu.Options.AzureCoreTag = versionCore
fmt.Println(azu.Options.AzureGalleryName)
} else {
azu.Options.AzureGalleryName = "SideroLabs"
azu.Options.AzureCoreTag = versionCore
fmt.Println(azu.Options.AzureGalleryName)
}

return err
log.Println("azure: using Azure Gallery:", azu.Options.AzureGalleryName)

return nil
}
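
For context, the pre-release check above hinges on semver.ParseTolerant: if the reconstructed "vMAJOR.MINOR.PATCH" core no longer equals the original tag, the tag carries a pre-release suffix and the test gallery is used. A standalone sketch of that check (the tags below are illustrative, not from the commit):

// Standalone sketch of the pre-release detection used in setVersion.
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func isPreRelease(tag string) (bool, error) {
	v, err := semver.ParseTolerant(tag)
	if err != nil {
		return false, err
	}

	core := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)

	// "v1.7.0-beta.1" reduces to core "1.7.0"; "v" + core no longer equals the tag,
	// so it is treated as a pre-release
	return fmt.Sprintf("v%s", core) != tag, nil
}

func main() {
	fmt.Println(isPreRelease("v1.7.0"))        // false <nil>
	fmt.Println(isPreRelease("v1.7.0-beta.1")) // true <nil>
}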

// AzureGalleryUpload uploads the image to Azure.
@@ -91,11 +94,13 @@ func (azu *AzureUploader) AzureGalleryUpload(ctx context.Context) error {
return fmt.Errorf("error setting default Azure credentials: %w", err)
}

log.Printf("azure: getting locations")
if len(azu.Options.AzureRegions) == 0 {
regions, err := azu.helper.getAzureLocations(ctx)
if err != nil {
return fmt.Errorf("azure: error setting default Azure credentials: %w", err)
}

err = azu.helper.getAzureLocations(ctx)
if err != nil {
return fmt.Errorf("azure: error setting default Azure credentials: %w", err)
azu.Options.AzureRegions = regions
}

// Upload blob
@@ -245,14 +250,16 @@
}

func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch string) error {
targetRegions := make([]*armcompute.TargetRegion, 0, len(azu.helper.locations))

for _, region := range azu.helper.locations {
targetRegions = append(targetRegions, &armcompute.TargetRegion{
Name: to.Ptr(region.Name),
ExcludeFromLatest: to.Ptr(false),
RegionalReplicaCount: to.Ptr[int32](1),
StorageAccountType: to.Ptr(armcompute.StorageAccountTypeStandardLRS),
var targetRegions []*armcompute.TargetRegion

if !azu.preRelease {
targetRegions = xslices.Map(azu.Options.AzureRegions, func(region string) *armcompute.TargetRegion {
return &armcompute.TargetRegion{
Name: to.Ptr(region),
ExcludeFromLatest: to.Ptr(false),
RegionalReplicaCount: to.Ptr[int32](1),
StorageAccountType: to.Ptr(armcompute.StorageAccountTypeStandardLRS),
}
})
}

@@ -265,8 +272,8 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
}

for _, v := range page.Value {
if *v.Name == azu.Options.AzureCoreTag {
log.Printf("azure: image version exists for %s\n azure: removing old image version\n", *v.Name)
if *v.Name == azu.helper.version {
log.Printf("azure: image version exists for %s\n", *v.Name)

err = azu.deleteImageVersion(ctx, arch)
if err != nil {
Expand All @@ -283,7 +290,7 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
resourceGroupName,
azu.Options.AzureGalleryName,
fmt.Sprintf("talos-%s", azureArchitectures[arch]),
azu.Options.AzureCoreTag,
azu.helper.version,
armcompute.GalleryImageVersion{
Location: to.Ptr(defaultRegion),
Properties: &armcompute.GalleryImageVersionProperties{
@@ -309,21 +316,34 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
return fmt.Errorf("azure: failed to create image version: %w", err)
}

_, err = poller.PollUntilDone(ctx, nil)
res, err := poller.PollUntilDone(ctx, nil)
if err != nil {
return fmt.Errorf("azure: failed to pull the result for image version creation: %w", err)
}

return err
for _, region := range azu.Options.AzureRegions {
pushResult(CloudImage{
Cloud: "azure",
Tag: azu.Options.Tag,
Region: region,
Arch: arch,
Type: "vhd",
ID: *res.ID,
})
}

return nil
}
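
An aside on to.Ptr, used throughout the image-version properties above: the ARM compute structs take pointer fields, and to.Ptr (the generic pointer helper from the Azure SDK's azcore/to package, which the surrounding code is assumed to import) simply returns a pointer to its argument. A minimal sketch:

// Minimal sketch of the to.Ptr helper; assumes the azcore/to package the
// surrounding code appears to rely on.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

func main() {
	replicas := to.Ptr[int32](1) // *int32 pointing at 1
	exclude := to.Ptr(false)     // type parameter inferred, yields *bool

	fmt.Println(*replicas, *exclude) // 1 false
}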

func (azu *AzureUploader) deleteImageVersion(ctx context.Context, arch string) error {
log.Println("azure: removing old image version")

poller, err := azu.helper.clientFactory.NewGalleryImageVersionsClient().BeginDelete(
ctx,
resourceGroupName,
azu.Options.AzureGalleryName,
fmt.Sprintf("talos-%s", azureArchitectures[arch]),
azu.Options.AzureCoreTag,
azu.helper.version,
nil)
if err != nil {
return fmt.Errorf("azure: failed to delete image: %w", err)
@@ -334,16 +354,16 @@ func (azu *AzureUploader) deleteImageVersion(ctx context.Context, arch string) e
return fmt.Errorf("azure: failed to pull the result for image deletion: %w", err)
}

return err
return nil
}

type azureHelper struct {
version string
subscriptionID string
clientFactory *armcompute.ClientFactory
cred *azidentity.DefaultAzureCredential
authorizer autorest.Authorizer
providersClient resources.ProvidersClient
locations map[string]Location
}

func (helper *azureHelper) setDefaultAzureCreds() error {
@@ -385,52 +405,27 @@ func (helper *azureHelper) setDefaultAzureCreds() error {
return nil
}

//nolint:gocyclo
func (helper *azureHelper) getAzureLocations(ctx context.Context) error {
providers, err := helper.listProviders(ctx)
if err != nil {
return err
}

var computeProvider resources.Provider
func (helper *azureHelper) getAzureLocations(ctx context.Context) ([]string, error) {
var regions []string

for _, provider := range providers {
if provider.Namespace != nil && *provider.Namespace == "Microsoft.Compute" {
computeProvider = provider

break
}
result, err := helper.providersClient.Get(ctx, "Microsoft.Compute", "")
if err != nil {
return nil, fmt.Errorf("azure: error getting Microsoft.Compute: %w", err)
}

helper.locations = make(map[string]Location)

if computeProvider.ResourceTypes != nil {
for _, rt := range *computeProvider.ResourceTypes {
if result.ResourceTypes != nil {
for _, rt := range *result.ResourceTypes {
if rt.ResourceType != nil && *rt.ResourceType == "virtualMachines" {
if rt.Locations != nil {
for _, region := range *rt.Locations {
abbr := strings.ReplaceAll(region, " ", "")
abbr = strings.ToLower(abbr)
helper.locations[abbr] = Location{Abbreviation: abbr, Name: region}
}
regions = xslices.Map(*rt.Locations, func(s string) string {
return strings.ToLower(strings.ReplaceAll(s, " ", ""))
})
}

break
}
}
}

return err
}

func (helper *azureHelper) listProviders(ctx context.Context) (result []resources.Provider, err error) {
for list, err := helper.providersClient.List(ctx, ""); list.NotDone(); err = list.NextWithContext(ctx) {
if err != nil {
return nil, fmt.Errorf("azure: error getting providers list: %v", err)
}

result = append(result, list.Values()...)
}

return
return regions, nil
}
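
The slice returned here becomes Options.AzureRegions and, further up, the image-version target regions. A minimal sketch of the xslices.Map normalization used above, with illustrative Azure location display names:

// Minimal sketch of the region-name normalization in getAzureLocations;
// the location display names are illustrative.
package main

import (
	"fmt"
	"strings"

	"github.com/siderolabs/gen/xslices"
)

func main() {
	locations := []string{"East US", "West US 2", "North Europe"}

	regions := xslices.Map(locations, func(s string) string {
		return strings.ToLower(strings.ReplaceAll(s, " ", ""))
	})

	fmt.Println(regions) // [eastus westus2 northeurope]
}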
