MGMT-14810: Remove API and Ingress VIP #5501

Merged
21 changes: 21 additions & 0 deletions cmd/agentbasedinstaller/register.go
@@ -13,6 +13,7 @@ import (
"github.com/openshift/assisted-service/client"
"github.com/openshift/assisted-service/client/installer"
"github.com/openshift/assisted-service/client/manifests"
"github.com/openshift/assisted-service/internal/cluster/validations"
"github.com/openshift/assisted-service/internal/controller/controllers"
"github.com/openshift/assisted-service/internal/oc"
"github.com/openshift/assisted-service/models"
@@ -52,6 +53,26 @@ func RegisterCluster(ctx context.Context, log *log.Logger, bmInventory *client.A
return nil, aciErr
}

desiredApiVips, err := validations.HandleApiVipBackwardsCompatibility(
nil,
aci.Spec.APIVIP,
controllers.ApiVipsEntriesToArray(aci.Spec.APIVIPs))
if err != nil {
return nil, err
}
aci.Spec.APIVIPs = controllers.ApiVipsArrayToStrings(desiredApiVips)
aci.Spec.APIVIP = ""

desiredIngressVips, err := validations.HandleIngressVipBackwardsCompatibility(
nil,
aci.Spec.IngressVIP,
controllers.IngressVipsEntriesToArray(aci.Spec.IngressVIPs))
if err != nil {
return nil, err
}
aci.Spec.IngressVIPs = controllers.IngressVipsArrayToStrings(desiredIngressVips)
aci.Spec.IngressVIP = ""

releaseImage, releaseError := getReleaseVersion(clusterImageSetPath)
if releaseError != nil {
return nil, releaseError
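For readers skimming the hunk above: the agent-based installer now normalizes the deprecated singular `APIVIP`/`IngressVIP` spec fields onto the plural `APIVIPs`/`IngressVIPs` lists before registering the cluster, then blanks the singular fields so only the lists travel further. Below is a minimal, self-contained sketch of that normalization, using a hypothetical `mergeDeprecatedVIP` helper in place of `validations.HandleApiVipBackwardsCompatibility` and the `controllers` conversion helpers (which work with the service's VIP model entries rather than plain strings):

```go
package main

import (
	"errors"
	"fmt"
)

// mergeDeprecatedVIP folds a deprecated single-VIP field into the newer list field.
// It keeps the list when both agree, promotes the single value when only it is set,
// and rejects contradictory input.
func mergeDeprecatedVIP(deprecated string, vips []string) ([]string, error) {
	if deprecated == "" {
		return vips, nil
	}
	if len(vips) == 0 {
		return []string{deprecated}, nil
	}
	if vips[0] != deprecated {
		return nil, errors.New("single VIP field does not match the first entry of the VIPs list")
	}
	return vips, nil
}

func main() {
	// Spec that still sets only the deprecated singular field.
	apiVIPs, err := mergeDeprecatedVIP("192.0.2.10", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(apiVIPs) // [192.0.2.10]

	// Spec that sets both fields consistently: the list wins, the singular field can be cleared.
	ingressVIPs, err := mergeDeprecatedVIP("192.0.2.11", []string{"192.0.2.11", "fd2e:6f44:5dd8::11"})
	if err != nil {
		panic(err)
	}
	fmt.Println(ingressVIPs) // [192.0.2.11 fd2e:6f44:5dd8::11]
}
```

The error branch is only illustrative; how the real helper reports a conflict between the two fields is not shown in this hunk.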
6 changes: 4 additions & 2 deletions deploy/operator/ztp/agentClusterInstall.j2
@@ -9,10 +9,12 @@ spec:
  imageSetRef:
    name: '{{ cluster_image_set_name }}'
{% if spoke_api_vip %}
  apiVIP: '{{ spoke_api_vip }}'
  apiVIPs:
    - '{{ spoke_api_vip }}'
{% endif %}
{% if spoke_ingress_vip %}
  ingressVIP: '{{ spoke_ingress_vip }}'
  ingressVIPs:
    - '{{ spoke_ingress_vip }}'
{% endif %}
  networking:
    clusterNetwork:
@@ -7,11 +7,15 @@ metadata:
  namespace: spoke-cluster
spec:
  apiVIP: 1.2.3.8
  apiVIPs:
    - 1.2.3.8
  clusterDeploymentRef:
    name: test-cluster
  imageSetRef:
    name: openshift-v4.9.0
  ingressVIP: 1.2.3.9
  ingressVIPs:
    - 1.2.3.9
  platformType: BareMetal
  networking:
    clusterNetwork:
4 changes: 4 additions & 0 deletions docs/hive-integration/crds/agentClusterInstall.yaml
@@ -5,11 +5,15 @@ metadata:
  namespace: spoke-cluster
spec:
  apiVIP: 1.2.3.8
  apiVIPs:
    - 1.2.3.8
  clusterDeploymentRef:
    name: test-cluster
  imageSetRef:
    name: openshift-v4.9.0
  ingressVIP: 1.2.3.9
  ingressVIPs:
    - 1.2.3.9
  platformType: BareMetal
  networking:
    clusterNetwork:
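Both CRD examples above keep the deprecated `apiVIP`/`ingressVIP` keys and add the new list form carrying the same single address. The list form exists so dual-stack clusters can supply one VIP per IP family. A short, self-contained sketch of that shape check, assuming a hypothetical `checkVIPList` helper; the service's own validation lives elsewhere and may differ in detail:

```go
package main

import (
	"fmt"
	"net/netip"
)

// checkVIPList accepts a single VIP, or an IPv4+IPv6 pair for dual-stack.
func checkVIPList(name string, vips []string) error {
	if len(vips) == 0 || len(vips) > 2 {
		return fmt.Errorf("%s: expected one VIP, or two for dual-stack, got %d", name, len(vips))
	}
	addrs := make([]netip.Addr, 0, len(vips))
	for _, v := range vips {
		a, err := netip.ParseAddr(v)
		if err != nil {
			return fmt.Errorf("%s: %q is not a valid IP: %w", name, v, err)
		}
		addrs = append(addrs, a)
	}
	if len(addrs) == 2 && addrs[0].Is4() == addrs[1].Is4() {
		return fmt.Errorf("%s: two VIPs must belong to different IP families", name)
	}
	return nil
}

func main() {
	fmt.Println(checkVIPList("apiVIPs", []string{"1.2.3.8"}))                     // <nil>
	fmt.Println(checkVIPList("ingressVIPs", []string{"1.2.3.9", "fd2e:6f44::9"})) // <nil>
	fmt.Println(checkVIPList("apiVIPs", []string{"1.2.3.8", "1.2.3.9"}))          // error: same family
}
```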
39 changes: 5 additions & 34 deletions internal/bminventory/inventory.go
@@ -305,14 +305,6 @@ func (b *bareMetalInventory) updatePullSecret(pullSecret string, log logrus.Fiel
func (b *bareMetalInventory) setDefaultRegisterClusterParams(ctx context.Context, params installer.V2RegisterClusterParams, id strfmt.UUID) (installer.V2RegisterClusterParams, error) {
log := logutil.FromContext(ctx, b.log)

if params.NewClusterParams.APIVip != "" && len(params.NewClusterParams.APIVips) == 0 {
params.NewClusterParams.APIVips = []*models.APIVip{{IP: models.IP(params.NewClusterParams.APIVip), ClusterID: id}}
}

if params.NewClusterParams.IngressVip != "" && len(params.NewClusterParams.IngressVip) == 0 {
params.NewClusterParams.IngressVips = []*models.IngressVip{{IP: models.IP(params.NewClusterParams.IngressVip), ClusterID: id}}
}

if params.NewClusterParams.ClusterNetworks == nil {
params.NewClusterParams.ClusterNetworks = []*models.ClusterNetwork{
{Cidr: models.Subnet(b.Config.DefaultClusterNetworkCidr), HostPrefix: b.Config.DefaultClusterNetworkHostPrefix},
@@ -590,10 +582,8 @@ func (b *bareMetalInventory) RegisterClusterInternal(
ID: &id,
Href: swag.String(url.String()),
Kind: swag.String(models.ClusterKindCluster),
APIVip: params.NewClusterParams.APIVip,
APIVips: params.NewClusterParams.APIVips,
BaseDNSDomain: params.NewClusterParams.BaseDNSDomain,
IngressVip: params.NewClusterParams.IngressVip,
IngressVips: params.NewClusterParams.IngressVips,
Name: swag.StringValue(params.NewClusterParams.Name),
OpenshiftVersion: *releaseImage.Version,
@@ -2140,15 +2130,9 @@ func (b *bareMetalInventory) updateNonDhcpNetworkParams(updates map[string]inter
}
reqDualStack := network.CheckIfClusterIsDualStack(&targetConfiguration)

if params.ClusterUpdateParams.APIVip != nil {
updates["api_vip"] = *params.ClusterUpdateParams.APIVip
}
if params.ClusterUpdateParams.APIVips != nil {
targetConfiguration.APIVips = params.ClusterUpdateParams.APIVips
}
if params.ClusterUpdateParams.IngressVip != nil {
updates["ingress_vip"] = *params.ClusterUpdateParams.IngressVip
}
if params.ClusterUpdateParams.IngressVips != nil {
targetConfiguration.IngressVips = params.ClusterUpdateParams.IngressVips
}
@@ -2234,8 +2218,7 @@ func (b *bareMetalInventory) updateNonDhcpNetworkParams(updates map[string]inter
}

func (b *bareMetalInventory) updateDhcpNetworkParams(db *gorm.DB, id *strfmt.UUID, updates map[string]interface{}, params installer.V2UpdateClusterParams, primaryMachineCIDR string) error {
if err := validations.ValidateVIPsWereNotSetDhcpMode(swag.StringValue(params.ClusterUpdateParams.APIVip), swag.StringValue(params.ClusterUpdateParams.IngressVip),
params.ClusterUpdateParams.APIVips, params.ClusterUpdateParams.IngressVips); err != nil {
if err := validations.ValidateVIPsWereNotSetDhcpMode(params.ClusterUpdateParams.APIVips, params.ClusterUpdateParams.IngressVips); err != nil {
return common.NewApiError(http.StatusBadRequest, err)
}
// VIPs are always allocated from the first provided machine network. We want to trigger
@@ -2247,8 +2230,6 @@ func (b *bareMetalInventory) updateDhcpNetworkParams(db *gorm.DB, id *strfmt.UUI
// Ref.: https://bugzilla.redhat.com/show_bug.cgi?id=1999297
// Ref.: https://github.com/openshift/assisted-service/pull/2512
if params.ClusterUpdateParams.MachineNetworks != nil && params.ClusterUpdateParams.MachineNetworks[0] != nil && string(params.ClusterUpdateParams.MachineNetworks[0].Cidr) != primaryMachineCIDR {
updates["api_vip"] = ""
updates["ingress_vip"] = ""
emptyCluster := common.Cluster{Cluster: models.Cluster{ID: id}}
if err := network.UpdateVipsTables(db, &emptyCluster, true, true); err != nil {
return err
@@ -2372,11 +2353,7 @@ func (b *bareMetalInventory) updateClusterData(_ context.Context, cluster *commo
return nil
}

func wereClusterVipsUpdated(clusterVip string, paramVip *string, clusterVips []string, paramVips []string) bool {
if paramVip != nil && clusterVip != swag.StringValue(paramVip) {
return true
}

func wereClusterVipsUpdated(clusterVips []string, paramVips []string) bool {
if paramVips == nil {
return false
}
@@ -2402,15 +2379,13 @@ func (b *bareMetalInventory) updateVips(db *gorm.DB, params installer.V2UpdateCl
},
}

if wereClusterVipsUpdated(cluster.APIVip, params.ClusterUpdateParams.APIVip, network.GetApiVips(cluster), network.GetApiVips(&paramVips)) {
if wereClusterVipsUpdated(network.GetApiVips(cluster), network.GetApiVips(&paramVips)) {
apiVipUpdated = true
cluster.APIVip = swag.StringValue(params.ClusterUpdateParams.APIVip)
cluster.APIVips = params.ClusterUpdateParams.APIVips
}

if wereClusterVipsUpdated(cluster.IngressVip, params.ClusterUpdateParams.IngressVip, network.GetIngressVips(cluster), network.GetIngressVips(&paramVips)) {
if wereClusterVipsUpdated(network.GetIngressVips(cluster), network.GetIngressVips(&paramVips)) {
ingressVipUpdated = true
cluster.IngressVip = swag.StringValue(params.ClusterUpdateParams.IngressVip)
cluster.IngressVips = params.ClusterUpdateParams.IngressVips
}

@@ -2619,8 +2594,6 @@ func (b *bareMetalInventory) updateNetworkParams(params installer.V2UpdateCluste
vipDhcpAllocation = swag.BoolValue(params.ClusterUpdateParams.VipDhcpAllocation)
updates["vip_dhcp_allocation"] = vipDhcpAllocation
updates["machine_network_cidr_updated_at"] = time.Now()
updates["api_vip"] = ""
updates["ingress_vip"] = ""
cluster.MachineNetworks = []*models.MachineNetwork{}
emptyCluster := common.Cluster{Cluster: models.Cluster{ID: cluster.ID}}
if err = network.UpdateVipsTables(db, &emptyCluster, true, true); err != nil {
@@ -2663,8 +2636,6 @@ func setCommonUserNetworkManagedParams(db *gorm.DB, id *strfmt.UUID, params *mod
return err, false
}
updates["vip_dhcp_allocation"] = false
updates["api_vip"] = ""
updates["ingress_vip"] = ""
emptyCluster := common.Cluster{Cluster: models.Cluster{ID: id}}
if err = network.UpdateVipsTables(db, &emptyCluster, true, true); err != nil {
return err, false
@@ -2693,7 +2664,7 @@ func (b *bareMetalInventory) updateNtpSources(params installer.V2UpdateClusterPa
}

func validateUserManagedNetworkConflicts(params *models.V2ClusterUpdateParams, log logrus.FieldLogger) error {
if err := validations.ValidateVIPsWereNotSetUserManagedNetworking(swag.StringValue(params.APIVip), swag.StringValue(params.IngressVip), params.APIVips, params.IngressVips, swag.BoolValue(params.VipDhcpAllocation)); err != nil {
if err := validations.ValidateVIPsWereNotSetUserManagedNetworking(params.APIVips, params.IngressVips, swag.BoolValue(params.VipDhcpAllocation)); err != nil {
log.WithError(err)
return common.NewApiError(http.StatusBadRequest, err)
}
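Net effect of the inventory.go changes: the singular `api_vip`/`ingress_vip` columns and string parameters drop out of the update flow, and helpers such as `wereClusterVipsUpdated` and the DHCP/user-managed-networking validators now take only the VIP lists. A minimal sketch, with a hypothetical `vipsUpdated` helper, of the nil-versus-provided convention the simplified signature relies on; the real helper's remaining comparison is elided in the hunk above:

```go
package main

import (
	"fmt"
	"slices"
)

// vipsUpdated reports whether an update request actually changes the VIP list.
// A nil slice means the field was omitted from the request; a non-nil slice,
// even an empty one, is an explicit new value.
func vipsUpdated(current, requested []string) bool {
	if requested == nil {
		return false
	}
	return !slices.Equal(current, requested)
}

func main() {
	current := []string{"192.0.2.10"}
	fmt.Println(vipsUpdated(current, nil))                    // false: field not in the request
	fmt.Println(vipsUpdated(current, []string{"192.0.2.10"})) // false: same value
	fmt.Println(vipsUpdated(current, []string{"192.0.2.20"})) // true: changed
	fmt.Println(vipsUpdated(current, []string{}))             // true: explicit clear
}
```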