eksctl-anywhere cluster config generation with parameters for bare metal and vSphere #7983

Open. Wants to merge 1 commit into base: main.
271 changes: 249 additions & 22 deletions cmd/eksctl-anywhere/cmd/generateclusterconfig.go
@@ -1,8 +1,10 @@
package cmd

import (
"encoding/csv"
"fmt"
"log"
"os"
"strings"

"github.com/spf13/cobra"
@@ -50,6 +52,7 @@ func preRunGenerateClusterConfig(cmd *cobra.Command, args []string) {
func init() {
generateCmd.AddCommand(generateClusterConfigCmd)
generateClusterConfigCmd.Flags().StringP("provider", "p", "", fmt.Sprintf("Provider to use (%s)", strings.Join(constants.SupportedProviders, " or ")))
generateClusterConfigCmd.Flags().StringP("paramsFile", "m", "", "parameters file (vsphere or tinkerbell)")
err := generateClusterConfigCmd.MarkFlagRequired("provider")
if err != nil {
log.Fatalf("marking flag as required: %v", err)
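Note for readers: the flag is registered on the Cobra command but read back through Viper below (`viper.IsSet("paramsFile")`), which relies on the command's flags being bound into Viper before the run. A minimal sketch of what the `preRunGenerateClusterConfig` hook referenced above plausibly does (an assumption; the hook body is collapsed in this diff):

```go
package cmd

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

// Sketch: bind every Cobra flag on this command into Viper so that
// viper.IsSet("paramsFile") and viper.GetString("paramsFile") in
// generateClusterConfig reflect what was passed on the command line.
func preRunGenerateClusterConfig(cmd *cobra.Command, _ []string) {
	cmd.Flags().VisitAll(func(flag *pflag.Flag) {
		if err := viper.BindPFlag(flag.Name, flag); err != nil {
			log.Fatalf("initializing flags: %v", err)
		}
	})
}
```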
@@ -61,6 +64,34 @@ func generateClusterConfig(clusterName string) error {
var datacenterYaml []byte
var machineGroupYaml [][]byte
var clusterConfigOpts []v1alpha1.ClusterGenerateOpt
var kubernetesVersion string
var tinkerbellTemplateConfigTemplate string
var podsCidrBlocks []string
var servicesCidrBlocks []string
var paramsData []byte
var err error

// use cluster name as the default management cluster name.
managementClusterName := clusterName

if viper.IsSet("paramsFile") {
paramsFile := viper.GetString("paramsFile")
paramsData, err = os.ReadFile(paramsFile)

switch strings.ToLower(viper.GetString("provider")) {
case constants.VSphereProviderName:
if err != nil {
return fmt.Errorf("reading paramsFile: %v\nSample paramsFile has content:\n%s", err, GetDefaultVSphereParamsTemplate())
}
case constants.TinkerbellProviderName:
if err != nil {
return fmt.Errorf("reading paramsFile: %v\nSample paramsFile has content:\n%s", err, GetDefaultTinkerbellParamsTemplate())
}
default:
return fmt.Errorf("parameter file is only supported for vsphere and tinkerbell")
}
}
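The vSphere branch above embeds `GetDefaultVSphereParamsTemplate()` in its error message; that helper is not part of this diff. A plausible shape for it, mirroring the Tinkerbell defaults file added at the bottom of this PR (every field name and default here is an assumption inferred from how `vSphereParams` is used below):

```go
package cmd

// Assumed sketch only: the real template ships in a sibling file not shown
// in this diff. Keys mirror the VSphereClusterConfigParams usage below.
const defaultVSphereParamsTemplate = `managementClusterName: <management cluster name>
podsCidrBlocks:
- 192.168.64.0/18
servicesCidrBlocks:
- 10.96.0.0/12
kubernetesVersion: "1.26"
cpCount: 2
etcdCount: 3
workerCount: 2
cpEndpointHost: <control plane endpoint host ip>
datacenter: <vSphere datacenter>
network: <vSphere network>
server: <vCenter server>
thumbprint: <vCenter thumbprint>
insecure: false
datastore: <vSphere datastore>
folder: <vSphere folder>
resourcePool: <vSphere resource pool>
template: <VM template for the chosen Kubernetes version>
osFamily: ubuntu
sshAuthorizedKeyFile: <sshKey.pub file>
`

// GetDefaultVSphereParamsTemplate returns the default VSphereParamsTemplate.
func GetDefaultVSphereParamsTemplate() string {
	return defaultVSphereParamsTemplate
}
```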

switch strings.ToLower(viper.GetString("provider")) {
case constants.DockerProviderName:
datacenterConfig := v1alpha1.NewDockerDatacenterConfigGenerate(clusterName)
@@ -77,25 +108,76 @@ func generateClusterConfig(clusterName string) error {
}
datacenterYaml = dcyaml
case constants.VSphereProviderName:
-clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
-datacenterConfig := v1alpha1.NewVSphereDatacenterConfigGenerate(clusterName)
var vSphereParams v1alpha1.VSphereClusterConfigParams
err = yaml.Unmarshal(paramsData, &vSphereParams)
if err != nil {
return fmt.Errorf("unmarshal vSphereParams: %v", err)
}

if vSphereParams.ManagementClusterName != "" {
// override the management cluster name with that from parameter file.
managementClusterName = vSphereParams.ManagementClusterName
}

// set podsCidrBlocks and servicesCidrBlocks to the values from parameter file.
podsCidrBlocks = vSphereParams.PodsCidrBlocks
servicesCidrBlocks = vSphereParams.ServicesCidrBlocks

if vSphereParams.CPEndpointHost != "" {
// add control plane endpoint config with host from parameter file.
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpointHost(vSphereParams.CPEndpointHost))
} else {
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
}
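`WithClusterEndpointHost` is new alongside the existing `WithClusterEndpoint`. Assuming the usual functional-options pattern for `ClusterGenerateOpt`, the two options would look roughly like this (a sketch for illustration, not this PR's actual `v1alpha1` source; all types are assumed minimal shapes):

```go
// Assumed minimal shapes, for illustration only.
type Endpoint struct{ Host string }
type ControlPlaneConfiguration struct{ Endpoint *Endpoint }
type ClusterSpec struct{ ControlPlaneConfiguration ControlPlaneConfiguration }
type ClusterGenerate struct{ Spec ClusterSpec }

// ClusterGenerateOpt mutates the generated cluster config.
type ClusterGenerateOpt func(c *ClusterGenerate)

// WithClusterEndpoint leaves an empty endpoint host for the user to fill in.
func WithClusterEndpoint() ClusterGenerateOpt {
	return func(c *ClusterGenerate) {
		c.Spec.ControlPlaneConfiguration.Endpoint = &Endpoint{Host: ""}
	}
}

// WithClusterEndpointHost pre-populates the endpoint host from the params file.
func WithClusterEndpointHost(host string) ClusterGenerateOpt {
	return func(c *ClusterGenerate) {
		c.Spec.ControlPlaneConfiguration.Endpoint = &Endpoint{Host: host}
	}
}
```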

// create datacenter config with values from parameter file
datacenterConfig := v1alpha1.NewVSphereDatacenterConfigGenerate(clusterName, vSphereParams.Datacenter, vSphereParams.Network, vSphereParams.Server, vSphereParams.Thumbprint, vSphereParams.Insecure)
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
// default counts of CP nodes, Etcd nodes and worker nodes.
cpCount := 2
etcdCount := 3
workerCount := 2

if vSphereParams.CPCount != 0 {
// override counts of CP nodes with value from parameter file.
cpCount = vSphereParams.CPCount
}

if vSphereParams.EtcdCount != 0 {
// override counts of Etcd nodes with value from parameter file.
etcdCount = vSphereParams.EtcdCount
}

if vSphereParams.WorkerCount != 0 {
// override counts of Worker nodes with value from parameter file.
workerCount = vSphereParams.WorkerCount
}
clusterConfigOpts = append(clusterConfigOpts,
-v1alpha1.ControlPlaneConfigCount(2),
-v1alpha1.ExternalETCDConfigCount(3),
-v1alpha1.WorkerNodeConfigCount(2),
+v1alpha1.ControlPlaneConfigCount(cpCount),
+v1alpha1.ExternalETCDConfigCount(etcdCount),
+v1alpha1.WorkerNodeConfigCount(workerCount),
v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
)
dcyaml, err := yaml.Marshal(datacenterConfig)
if err != nil {
return fmt.Errorf("generating cluster yaml: %v", err)
}
datacenterYaml = dcyaml
var sshAuthorizedKey string
if vSphereParams.SSHAuthorizedKeyFile != "" {
b, err := os.ReadFile(vSphereParams.SSHAuthorizedKeyFile)
if err != nil {
return fmt.Errorf("open sshAuthorizedKeyFile file: %v", err)
}
sshAuthorizedKey = string(b)
}

kubernetesVersion = vSphereParams.KubernetesVersion
// need to default control plane config name to something different from the cluster name based on assumption
// in controller code
-cpMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
-workerMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(clusterName)
-etcdMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetEtcdNodeName(clusterName))
+cpMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName), vSphereParams.Datastore, vSphereParams.Folder, vSphereParams.ResourcePool, vSphereParams.Template, sshAuthorizedKey, vSphereParams.OSFamily, vSphereParams.CPDiskGiB, vSphereParams.CPNumCPUs, vSphereParams.CPMemoryMiB)
+workerMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(clusterName, vSphereParams.Datastore, vSphereParams.Folder, vSphereParams.ResourcePool, vSphereParams.Template, sshAuthorizedKey, vSphereParams.OSFamily, vSphereParams.WorkerDiskGiB, vSphereParams.WorkerNumCPUs, vSphereParams.WorkerMemoryMiB)
+etcdMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetEtcdNodeName(clusterName), vSphereParams.Datastore, vSphereParams.Folder, vSphereParams.ResourcePool, vSphereParams.Template, sshAuthorizedKey, vSphereParams.OSFamily, vSphereParams.EtcdDiskGiB, vSphereParams.EtcdNumCPUs, vSphereParams.EtcdMemoryMiB)
clusterConfigOpts = append(clusterConfigOpts,
v1alpha1.WithCPMachineGroupRef(cpMachineConfig),
v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
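The `VSphereClusterConfigParams` type consumed above is defined in the `v1alpha1` package and not shown in this diff. Reconstructed from its usage, it would need roughly the following fields (a sketch; names and YAML tags are assumptions inferred from the code and the params template):

```go
// Assumed sketch of the params-file schema for vSphere, inferred from usage
// in generateClusterConfig; the authoritative definition lives in v1alpha1.
type VSphereClusterConfigParams struct {
	ManagementClusterName string   `yaml:"managementClusterName"`
	PodsCidrBlocks        []string `yaml:"podsCidrBlocks"`
	ServicesCidrBlocks    []string `yaml:"servicesCidrBlocks"`
	KubernetesVersion     string   `yaml:"kubernetesVersion"`
	CPEndpointHost        string   `yaml:"cpEndpointHost"`
	CPCount               int      `yaml:"cpCount"`
	EtcdCount             int      `yaml:"etcdCount"`
	WorkerCount           int      `yaml:"workerCount"`

	// Datacenter config.
	Datacenter string `yaml:"datacenter"`
	Network    string `yaml:"network"`
	Server     string `yaml:"server"`
	Thumbprint string `yaml:"thumbprint"`
	Insecure   bool   `yaml:"insecure"`

	// Machine config: shared placement plus per-role sizing.
	Datastore            string `yaml:"datastore"`
	Folder               string `yaml:"folder"`
	ResourcePool         string `yaml:"resourcePool"`
	Template             string `yaml:"template"`
	OSFamily             string `yaml:"osFamily"` // v1alpha1.OSFamily in the real code
	SSHAuthorizedKeyFile string `yaml:"sshAuthorizedKeyFile"`
	CPDiskGiB            int    `yaml:"cpDiskGiB"`
	CPNumCPUs            int    `yaml:"cpNumCPUs"`
	CPMemoryMiB          int    `yaml:"cpMemoryMiB"`
	WorkerDiskGiB        int    `yaml:"workerDiskGiB"`
	WorkerNumCPUs        int    `yaml:"workerNumCPUs"`
	WorkerMemoryMiB      int    `yaml:"workerMemoryMiB"`
	EtcdDiskGiB          int    `yaml:"etcdDiskGiB"`
	EtcdNumCPUs          int    `yaml:"etcdNumCPUs"`
	EtcdMemoryMiB        int    `yaml:"etcdMemoryMiB"`
}
```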
@@ -183,35 +265,172 @@ func generateClusterConfig(clusterName string) error {
}
machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml, etcdMcYaml)
case constants.TinkerbellProviderName:
-clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
-datacenterConfig := v1alpha1.NewTinkerbellDatacenterConfigGenerate(clusterName)
var tinkerbellParams v1alpha1.TinkerbellClusterConfigParams
err = yaml.Unmarshal(paramsData, &tinkerbellParams)
if err != nil {
return fmt.Errorf("unmarshal tinkerbellParams: %v", err)
}

if tinkerbellParams.ManagementClusterName != "" {
// override the management cluster name with that from parameter file.
managementClusterName = tinkerbellParams.ManagementClusterName
}

// set podsCidrBlocks and servicesCidrBlocks to the values from parameter file.
podsCidrBlocks = tinkerbellParams.PodsCidrBlocks
servicesCidrBlocks = tinkerbellParams.ServicesCidrBlocks

if tinkerbellParams.CPEndpointHost != "" {
// add control plane endpoint config with host from parameter file.
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpointHost(tinkerbellParams.CPEndpointHost))
} else {
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
}

kubernetesVersion = tinkerbellParams.KubernetesVersion

adminIP := tinkerbellParams.AdminIP
tinkerbellIP := tinkerbellParams.TinkerbellIP
osImageURL := tinkerbellParams.OSImageURL

// create datacenter config with values from parameter file
datacenterConfig := v1alpha1.NewTinkerbellDatacenterConfigGenerate(clusterName, tinkerbellIP, osImageURL)
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
// default counts of CP nodes, Etcd nodes and worker nodes.
cpCount := 1
workerCount := 1
if tinkerbellParams.HardwareCSV != "" {
// parse hardware.csv file to get counts of CP/worker nodes
f, err := os.Open(tinkerbellParams.HardwareCSV)
if err != nil {
return fmt.Errorf("open hardware file: %v", err)
}
defer f.Close()
csvReader := csv.NewReader(f)
data, err := csvReader.ReadAll()
if err != nil {
return fmt.Errorf("read hardware file: %v", err)
}
macIndex := -1
ipIndex := -1
labelsIndex := -1
cpCount = 0
workerCount = 0
for i, line := range data {
if i == 0 {
// from the header (first line), find the index of
// MAC, IP, labels.
for j, field := range line {
if strings.EqualFold(field, "mac") {
macIndex = j
} else if strings.EqualFold(field, "ip_address") {
ipIndex = j
} else if strings.EqualFold(field, "labels") {
labelsIndex = j
}
}
if macIndex == -1 {
return fmt.Errorf("no mac header found in hardware file")
}
if ipIndex == -1 {
return fmt.Errorf("no ip header found in hardware file")
}
if labelsIndex == -1 {
return fmt.Errorf("no labels header found in hardware file")
}
} else {
// for the remaining rows, increment the CP or worker node count.
if strings.ToLower(line[labelsIndex]) == "type=cp" {
cpCount = cpCount + 1
} else {
workerCount = workerCount + 1
}
}
}
}
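For reference, the CSV walk above requires a header row containing `mac`, `ip_address`, and `labels` columns (matched case-insensitively) and counts a row toward the control plane only when its labels cell is exactly `type=cp`; anything else, including multi-label values, is counted as a worker. A hypothetical input it would accept (hostnames, MACs, and addresses are made up, and the extra columns are assumptions):

```go
// Hypothetical hardware.csv: the two type=cp rows yield cpCount=2,
// the remaining row yields workerCount=1.
const sampleHardwareCSV = `hostname,mac,ip_address,netmask,gateway,nameservers,labels,disk
cp-node-1,00:00:00:00:00:01,10.0.0.11,255.255.255.0,10.0.0.1,8.8.8.8,type=cp,/dev/sda
cp-node-2,00:00:00:00:00:02,10.0.0.12,255.255.255.0,10.0.0.1,8.8.8.8,type=cp,/dev/sda
worker-1,00:00:00:00:00:03,10.0.0.21,255.255.255.0,10.0.0.1,8.8.8.8,type=worker,/dev/sda
`
```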

if tinkerbellParams.CPCount != 0 {
// override counts of CP nodes with value from parameter file.
cpCount = tinkerbellParams.CPCount
}

if tinkerbellParams.WorkerCount != 0 {
// override counts of Worker nodes with value from parameter file.
workerCount = tinkerbellParams.WorkerCount
}

clusterConfigOpts = append(clusterConfigOpts,
-v1alpha1.ControlPlaneConfigCount(1),
-v1alpha1.WorkerNodeConfigCount(1),
-v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
+v1alpha1.ControlPlaneConfigCount(cpCount),
)
if workerCount > 0 {
// only generate worker node group config when worker count > 0.
clusterConfigOpts = append(clusterConfigOpts,
v1alpha1.WorkerNodeConfigCount(workerCount),
v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
)
}
dcyaml, err := yaml.Marshal(datacenterConfig)
if err != nil {
return fmt.Errorf("generating cluster yaml: %v", err)
}
datacenterYaml = dcyaml

-cpMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
-workerMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(clusterName)
var sshAuthorizedKey string
if tinkerbellParams.SSHAuthorizedKeyFile != "" {
b, err := os.ReadFile(tinkerbellParams.SSHAuthorizedKeyFile)
if err != nil {
return fmt.Errorf("open sshAuthorizedKeyFile file: %v", err)
}
sshAuthorizedKey = string(b)
}

+cpMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(clusterName, providers.GetControlPlaneNodeName(clusterName), "cp", sshAuthorizedKey, tinkerbellParams.OSFamily)
clusterConfigOpts = append(clusterConfigOpts,
v1alpha1.WithCPMachineGroupRef(cpMachineConfig),
-v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
)
cpMcYaml, err := yaml.Marshal(cpMachineConfig)
if err != nil {
return fmt.Errorf("generating cluster yaml: %v", err)
}
-workerMcYaml, err := yaml.Marshal(workerMachineConfig)
-if err != nil {
-return fmt.Errorf("generating cluster yaml: %v", err)
-}
+machineGroupYaml = append(machineGroupYaml, cpMcYaml)

if workerCount > 0 {
workerMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(clusterName, clusterName, "worker", sshAuthorizedKey, tinkerbellParams.OSFamily)
// only generate worker machine group reference when worker count > 0.
clusterConfigOpts = append(clusterConfigOpts,
v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
)
// only generate worker machine config YAML when worker count > 0.
workerMcYaml, err := yaml.Marshal(workerMachineConfig)
if err != nil {
return fmt.Errorf("generating cluster yaml: %v", err)
}
machineGroupYaml = append(machineGroupYaml, workerMcYaml)
}

if viper.IsSet("paramsFile") {
if tinkerbellParams.TinkerbellTemplateConfigTemplateFile != "" {
b, err := os.ReadFile(tinkerbellParams.TinkerbellTemplateConfigTemplateFile)
if err != nil {
if tinkerbellParams.OSFamily == v1alpha1.Ubuntu {
return fmt.Errorf("open tinkerbellTemplateConfigTemplateFile file: %v\nSample TinkerbellTemplateConfigTemplateFile has content:%s", err, GetDefaultTinkerbellTemplateConfigTemplateUbuntu())
} else if tinkerbellParams.OSFamily == v1alpha1.Bottlerocket {
return fmt.Errorf("open tinkerbellTemplateConfigTemplateFile file: %v\nSample TinkerbellTemplateConfigTemplateFile has content:%s", err, GetDefaultTinkerbellTemplateConfigTemplateBottlerocket())
}
return fmt.Errorf("open tinkerbellTemplateConfigTemplateFile file: %v", err)
}
tinkerbellTemplateConfigTemplate = string(b)
} else if tinkerbellParams.OSFamily == v1alpha1.Ubuntu {
tinkerbellTemplateConfigTemplate = GetDefaultTinkerbellTemplateConfigTemplateUbuntu()
} else if tinkerbellParams.OSFamily == v1alpha1.Bottlerocket {
tinkerbellTemplateConfigTemplate = GetDefaultTinkerbellTemplateConfigTemplateBottlerocket()
}

tinkerbellTemplateConfigTemplate = strings.Replace(tinkerbellTemplateConfigTemplate, "$$NAME", clusterName, -1)
tinkerbellTemplateConfigTemplate = strings.Replace(tinkerbellTemplateConfigTemplate, "$$IMG_URL", osImageURL, -1)
tinkerbellTemplateConfigTemplate = strings.Replace(tinkerbellTemplateConfigTemplate, "$$ADMIN_IP", adminIP, -1)
tinkerbellTemplateConfigTemplate = strings.Replace(tinkerbellTemplateConfigTemplate, "$$TINKERBELL_IP", tinkerbellIP, -1)
}
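The `$$`-prefixed placeholders let this command inject cluster-specific values while leaving the rest of the TinkerbellTemplateConfig untouched for a later templating pass. The four sequential `strings.Replace(..., -1)` calls could equivalently be collapsed into a single pass with `strings.NewReplacer`, e.g.:

```go
// Equivalent one-pass substitution of the $$ placeholders above.
replacer := strings.NewReplacer(
	"$$NAME", clusterName,
	"$$IMG_URL", osImageURL,
	"$$ADMIN_IP", adminIP,
	"$$TINKERBELL_IP", tinkerbellIP,
)
tinkerbellTemplateConfigTemplate = replacer.Replace(tinkerbellTemplateConfigTemplate)
```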
-machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml)
case constants.NutanixProviderName:
datacenterConfig := v1alpha1.NewNutanixDatacenterConfigGenerate(clusterName)
dcYaml, err := yaml.Marshal(datacenterConfig)
@@ -257,7 +476,8 @@ func generateClusterConfig(clusterName string) error {
default:
return fmt.Errorf("not a valid provider")
}
-config := v1alpha1.NewClusterGenerate(clusterName, clusterConfigOpts...)

+config := v1alpha1.NewClusterGenerate(clusterName, managementClusterName, kubernetesVersion, podsCidrBlocks, servicesCidrBlocks, clusterConfigOpts...)

configMarshal, err := yaml.Marshal(config)
if err != nil {
@@ -272,6 +492,13 @@ func generateClusterConfig(clusterName string) error {
resources = append(resources, machineGroupYaml...)
}

-fmt.Println(string(templater.AppendYamlResources(resources...)))
+fmt.Print(string(templater.AppendYamlResources(resources...)))

if tinkerbellTemplateConfigTemplate != "" {
fmt.Println(tinkerbellTemplateConfigTemplate)
} else {
fmt.Println("")
}

return nil
}
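As with vSphere, the `TinkerbellClusterConfigParams` type is defined elsewhere in `v1alpha1`. Inferred from its usage above and the params template below, it would need roughly these fields (a sketch; names and YAML tags are assumptions):

```go
// Assumed sketch of the Tinkerbell params-file schema, inferred from usage
// in generateClusterConfig and from defaultTinkerbellParamsTemplate below.
type TinkerbellClusterConfigParams struct {
	ManagementClusterName                string   `yaml:"managementClusterName"`
	PodsCidrBlocks                       []string `yaml:"podsCidrBlocks"`
	ServicesCidrBlocks                   []string `yaml:"servicesCidrBlocks"`
	KubernetesVersion                    string   `yaml:"kubernetesVersion"`
	CPCount                              int      `yaml:"cpCount"`
	WorkerCount                          int      `yaml:"workerCount"`
	CPEndpointHost                       string   `yaml:"cpEndpointHost"`
	TinkerbellIP                         string   `yaml:"tinkerbellIP"`
	AdminIP                              string   `yaml:"adminIP"`
	OSFamily                             string   `yaml:"osFamily"` // v1alpha1.OSFamily in the real code
	OSImageURL                           string   `yaml:"osImageURL"`
	HardwareCSV                          string   `yaml:"hardwareCSV"`
	SSHAuthorizedKeyFile                 string   `yaml:"sshAuthorizedKeyFile"`
	TinkerbellTemplateConfigTemplateFile string   `yaml:"tinkerbellTemplateConfigTemplateFile"`
}
```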
26 changes: 26 additions & 0 deletions cmd/eksctl-anywhere/cmd/tinkerbellparams_defaults_template.go
@@ -0,0 +1,26 @@
package cmd

const (
defaultTinkerbellParamsTemplate = `managementClusterName: <management cluster name>
podsCidrBlocks:
- 192.168.64.0/18
servicesCidrBlocks:
- 10.96.0.0/12
kubernetesVersion: 1.26
cpCount: 1
workerCount: 2
cpEndpointHost: <control plane endpoint host ip>
tinkerbellIP: <tinkerbellIP>
adminIP: <admin machine ip>
osFamily: ubuntu
osImageURL: <osImageURL of K8s 1.26>
hardwareCSV: <hardware CSV file>
sshAuthorizedKeyFile: <sshKey.pub file>
tinkerbellTemplateConfigTemplateFile: tinkerbellTemplateConfigTemplate.yaml
`
)

// GetDefaultTinkerbellParamsTemplate returns the default TinkerbellParamsTemplate.
func GetDefaultTinkerbellParamsTemplate() string {
return string(defaultTinkerbellParamsTemplate)
}
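One cheap guard against the default template and the params struct drifting apart is a test that the template stays parseable; a minimal sketch, assuming `sigs.k8s.io/yaml` is available:

```go
package cmd

import (
	"testing"

	"sigs.k8s.io/yaml"
)

// Sketch of a guard test: the default params template should always be
// valid YAML, so placeholder edits that break it are caught at test time.
func TestDefaultTinkerbellParamsTemplateIsValidYAML(t *testing.T) {
	var doc map[string]interface{}
	if err := yaml.Unmarshal([]byte(GetDefaultTinkerbellParamsTemplate()), &doc); err != nil {
		t.Fatalf("default Tinkerbell params template is not valid YAML: %v", err)
	}
	for _, key := range []string{"osFamily", "tinkerbellIP", "hardwareCSV"} {
		if _, ok := doc[key]; !ok {
			t.Fatalf("template is missing the %q key", key)
		}
	}
}
```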