Do not set AWS_PROFILE env in generated kubeconfig (#1082)
### Proposed changes

AWS_PROFILE values can vary between users even when the differing
profiles grant the same level of access to the AWS resources. Always
setting the profile name in the generated kubeconfig means that every
other user of the Pulumi program must make their profile names match as
well, which isn't ideal.

This change enables users to switch profiles without needing to
regenerate a kubeconfig, since the underlying `eks get-token` command
will use the ambient `AWS_PROFILE` env var to determine which profile
to use.
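
As a rough sketch of the resulting workflow (the profile names below are hypothetical, not taken from this change):

```typescript
import * as eks from "@pulumi/eks";

// "team-alpha" is a made-up profile name used purely for illustration.
const cluster = new eks.Cluster("demo", {
    providerCredentialOpts: { profileName: "team-alpha" },
});

// The exported kubeconfig no longer pins AWS_PROFILE, so another user with
// equivalent access can consume it under their own ambient profile, e.g.:
//   AWS_PROFILE=team-bravo kubectl --kubeconfig ./kubeconfig get nodes
export const kubeconfig = cluster.kubeconfig;
```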

I manually validated that this change works locally by explicitly
specifying a custom profile on the first `pulumi up`, renaming the
profile, and then creating an on-cluster deployment to confirm that the
generated kubeconfig handles the profile name change.

### Related issues (optional)

Fixes: #868

---------

Co-authored-by: Pulumi Bot <30351955+pulumi-bot@users.noreply.github.com>
rquitales and pulumi-bot committed Mar 22, 2024
1 parent 990aa23 commit fc90daf
Showing 28 changed files with 27,494 additions and 27,433 deletions.
3 changes: 3 additions & 0 deletions examples/aws-profile-py/__main__.py
@@ -29,3 +29,6 @@
 
 # Export the cluster kubeconfig.
 pulumi.export("kubeconfig", cluster.kubeconfig)
+
+# Export the cluster kubeconfig with the AWS_PROFILE set.
+pulumi.export("kubeconfig_with_profile", cluster.get_kubeconfig(profile_name=profile_name))
3 changes: 3 additions & 0 deletions examples/aws-profile/index.ts
@@ -38,3 +38,6 @@ const cluster = new eks.Cluster(`${projectName}`, {
 
 // Export the cluster kubeconfig.
 export const kubeconfig = cluster.kubeconfig;
+
+// Export the cluster kubeconfig with the AWS_PROFILE set.
+export const kubeconfigWithProfile = cluster.getKubeconfig({profileName: profileName})
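
As illustrative context for the example above (not part of this commit): the plain `kubeconfig` output defers to the caller's ambient `AWS_PROFILE`, while the profile-pinned variant can still back a provider directly, assuming `getKubeconfig` returns the stringified config as in the nodejs SDK:

```typescript
import * as k8s from "@pulumi/kubernetes";

// Illustrative only: a provider built from the profile-pinned kubeconfig
// behaves like the pre-change output, with AWS_PROFILE baked in.
const pinnedProvider = new k8s.Provider("with-profile", {
    kubeconfig: cluster.getKubeconfig({ profileName: profileName }),
});
```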
6 changes: 5 additions & 1 deletion examples/examples_nodejs_test.go
@@ -285,9 +285,13 @@ func TestAccAwsProfile(t *testing.T) {
 		With(integration.ProgramTestOptions{
 			Dir: path.Join(getCwd(t), "aws-profile"),
 			ExtraRuntimeValidation: func(t *testing.T, info integration.RuntimeValidationStackInfo) {
+				// The `cluster.kubeconfig` output should fail as it does not have the right AWS_PROFILE set.
+				t.Logf("Ensuring cluster.kubeconfig fails without AWS_PROFILE envvar set")
+				utils.EnsureKubeconfigFails(t, info.Outputs["kubeconfig"])
+
 				utils.RunEKSSmokeTest(t,
 					info.Deployment.Resources,
-					info.Outputs["kubeconfig"],
+					info.Outputs["kubeconfigWithProfile"],
 				)
 			},
 			NoParallel: true,
6 changes: 5 additions & 1 deletion examples/examples_py_test.go
@@ -32,9 +32,13 @@ func TestAccAwsProfilePy(t *testing.T) {
 		NoParallel: true,
 		Dir: filepath.Join(getCwd(t), "aws-profile-py"),
 		ExtraRuntimeValidation: func(t *testing.T, info integration.RuntimeValidationStackInfo) {
+			// The `cluster.kubeconfig` output should fail as it does not have the right AWS_PROFILE set.
+			t.Logf("Ensuring cluster.kubeconfig fails without AWS_PROFILE envvar set")
+			utils.EnsureKubeconfigFails(t, info.Outputs["kubeconfig"])
+
 			utils.RunEKSSmokeTest(t,
 				info.Deployment.Resources,
-				info.Outputs["kubeconfig"],
+				info.Outputs["kubeconfig_with_profile"],
 			)
 		},
 	})
33 changes: 29 additions & 4 deletions examples/utils/utils.go
@@ -514,11 +514,19 @@ func mapClusterToKubeAccess(kubeconfigs ...interface{}) (clusterKubeAccessMap, error) {
 	// Map EKS cluster names to its KubeAccess.
 	clusterToKubeAccess := make(clusterKubeAccessMap)
 	for _, kubeconfig := range kubeconfigs {
-		// Convert kubconfig to KubeAccess
-		kc, err := json.Marshal(kubeconfig)
-		if err != nil {
-			return nil, err
+		// Convert kubeconfig to KubeAccess. We might need to work with either the raw
+		// or jsonified kubeconfig data.
+		var kc []byte
+		var err error
+		if kubeStr, ok := kubeconfig.(string); !ok {
+			kc, err = json.Marshal(kubeconfig)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			kc = []byte(kubeStr)
 		}
+
 		kubeAccess, err := KubeconfigToKubeAccess(kc)
 		if err != nil {
 			return nil, err
@@ -652,3 +660,20 @@ func clientSetFromKubeconfig(kubeconfig any) (*kubernetes.Clientset, error) {
 	}
 	return clientSet, nil
 }
+
+// EnsureKubeconfigFails ensures that the provided kubeconfig fails to authenticate.
+func EnsureKubeconfigFails(t *testing.T, kubeconfig any) {
+	kc, err := json.Marshal(kubeconfig)
+	if err != nil {
+		t.Errorf("unable to marshal provided kubeconfig: %s", err)
+	}
+	kubeAccess, err := KubeconfigToKubeAccess(kc)
+	if err != nil {
+		t.Errorf("unable to create KubeAccess from kubeconfig: %s", err)
+	}
+
+	_, err = kubeAccess.Clientset.Discovery().ServerVersion()
+	if err == nil {
+		t.Errorf("expected kubeconfig to fail, but it succeeded to return the server version")
+	}
+}
30 changes: 22 additions & 8 deletions nodejs/eks/cluster.ts
@@ -199,6 +199,7 @@ interface ExecEnvVar {
 export function generateKubeconfig(
     clusterName: pulumi.Input<string>,
     clusterEndpoint: pulumi.Input<string>,
+    includeProfile: boolean,
     certData?: pulumi.Input<string>,
     opts?: KubeconfigOptions,
 ) {
@@ -213,11 +214,9 @@ export function generateKubeconfig(
     if (opts?.roleArn) {
         args = [...args, "--role", opts.roleArn];
     }
-    if (opts?.profileName) {
-        env.push({
-            name: "AWS_PROFILE",
-            value: opts.profileName,
-        });
+
+    if (includeProfile && opts?.profileName) {
+        env.push({ name: "AWS_PROFILE", value: opts.profileName });
     }
 
     return pulumi.all([args, env]).apply(([tokenArgs, envvars]) => {
@@ -653,7 +652,8 @@ export function createCore(
     // Compute the required kubeconfig. Note that we do not export this value: we want the exported config to
     // depend on the autoscaling group we'll create later so that nothing attempts to use the EKS cluster before
     // its worker nodes have come up.
-    const kubeconfig = pulumi
+    const genKubeconfig = (useProfileName: boolean) => {
+        const kubeconfig = pulumi
         .all([
             eksCluster.name,
             endpoint,
@@ -675,6 +675,7 @@ export function createCore(
                 return generateKubeconfig(
                     clusterName,
                     clusterEndpoint,
+                    useProfileName,
                     clusterCertificateAuthority?.data,
                     opts,
                 );
@@ -683,19 +684,30 @@ export function createCore(
                 config = generateKubeconfig(
                     clusterName,
                     clusterEndpoint,
+                    useProfileName,
                     clusterCertificateAuthority?.data,
                     providerCredentialOpts,
                 );
             } else {
                 config = generateKubeconfig(
                     clusterName,
                     clusterEndpoint,
+                    useProfileName,
                     clusterCertificateAuthority?.data,
                 );
             }
             return config;
         },
     );
+
+        return kubeconfig;
+    }
+
+    // We need 2 forms of kubeconfig, one with the profile name and one without. The one with the profile name
+    // is required to interact with the cluster by this provider. The one without is used by the user to interact
+    // with the cluster and enable multi-user access.
+    const kubeconfig = genKubeconfig(true);
+    const kubeconfigWithoutProfile = genKubeconfig(false);
+
     const k8sProvider = new k8s.Provider(
         `${name}-eks-k8s`,
@@ -950,7 +962,7 @@ export function createCore(
         });
         const getAnnosOutputStr = getAnnosOutput.toString();
         // See if getAnnosOutputStr contains the annotation we're looking for.
-        if (!getAnnosOutputStr.includes("eks.amazonaws.com/compute-type") ) {
+        if (!getAnnosOutputStr.includes("eks.amazonaws.com/compute-type")) {
             // No need to patch the deployment object since the annotation is not present. However, we need to re-create the CoreDNS pods since
             // the existing pods were created before the FargateProfile was created, and therefore will not have been scheduled by fargate-scheduler.
             // See: https://github.com/pulumi/pulumi-eks/issues/1030.
@@ -1018,7 +1030,7 @@ export function createCore(
         cluster: eksCluster,
         endpoint: endpoint,
         nodeGroupOptions: nodeGroupOptions,
-        kubeconfig: kubeconfig,
+        kubeconfig: kubeconfigWithoutProfile,
         provider: k8sProvider,
         awsProvider: provider,
         vpcCni: vpcCni,
@@ -1695,6 +1707,7 @@ export class Cluster extends pulumi.ComponentResource {
         const kc = generateKubeconfig(
             this.eksCluster.name,
             this.eksCluster.endpoint,
+            true,
             this.eksCluster.certificateAuthority?.data,
             args,
         );
@@ -1898,6 +1911,7 @@ export class ClusterInternal extends pulumi.ComponentResource {
         const kc = generateKubeconfig(
             this.eksCluster.name,
             this.eksCluster.endpoint,
+            true,
             this.eksCluster.certificateAuthority?.data,
             args,
         );
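
Taken together, the cluster.ts changes reduce to the following behavior; this is a condensed sketch under the names used in the diff, not the provider's actual code:

```typescript
interface ExecEnvVar {
    name: string;
    value: string;
}

// Condensed restatement of the new includeProfile switch in generateKubeconfig.
function execEnv(includeProfile: boolean, profileName?: string): ExecEnvVar[] {
    const env: ExecEnvVar[] = [];
    if (includeProfile && profileName) {
        // Only the provider-internal kubeconfig pins AWS_PROFILE.
        env.push({ name: "AWS_PROFILE", value: profileName });
    }
    // The user-facing kubeconfig omits it and defers to the ambient env var.
    return env;
}

// createCore now builds both forms: genKubeconfig(true) backs the internal
// k8s.Provider, while genKubeconfig(false) is what the component exports.
```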