MGMT-17080: enable infrastructure operator when MCE and storage operators are selected (#6037)

Signed-off-by: Riccardo Piccoli <rpiccoli@redhat.com>
rccrdpccl committed Mar 21, 2024
1 parent 2f7c5ca commit 41ad865
Showing 46 changed files with 7,663 additions and 275 deletions.
6 changes: 6 additions & 0 deletions docs/user-guide/additional-operator-notes.md
@@ -4,3 +4,9 @@
- When deploying CNV on Single Node OpenShift (SNO), [hostpath-provisioner](https://github.com/kubevirt/hostpath-provisioner) (part of the CNV product) storage is automatically opted in and set up, to enable persisting VM disks.
  This reflects the assumption that most virtualization use cases require persistence.
  The hostpath-provisioner is set up to use an LSO PV as the backing storage for provisioning dynamic hostPath volumes.

## Multi-Cluster Engine (MCE)

- When deploying MCE together with a storage operator (ODF or LVM), the Infrastructure Operator will be automatically
  enabled. This adds extra disk space requirements for the storage operator.
  When selecting MCE with both ODF and LVM, ODF takes priority and its storage class will be used to provision the Infrastructure Operator's PVCs (see the sketch after this section).
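
The priority rule can be illustrated with a short Go sketch. The helper function and the storage-class names below are hypothetical, not part of this commit; only `HasOperator` and the `odf`/`lvm` operator definitions come from the code being merged:

```go
package main

import (
	"fmt"

	operatorscommon "github.com/openshift/assisted-service/internal/operators/common"
	"github.com/openshift/assisted-service/internal/operators/lvm"
	"github.com/openshift/assisted-service/internal/operators/odf"
	"github.com/openshift/assisted-service/models"
)

// storageClassForInfraOperator is a hypothetical helper illustrating the
// priority rule above: when both ODF and LVM are selected, ODF wins and its
// storage class is used for the Infrastructure Operator's PVCs.
func storageClassForInfraOperator(monitored []*models.MonitoredOperator) (string, bool) {
	if operatorscommon.HasOperator(monitored, odf.Operator.Name) {
		return "ocs-storagecluster-ceph-rbd", true // assumed ODF storage class name
	}
	if operatorscommon.HasOperator(monitored, lvm.Operator.Name) {
		return "lvms-vg1", true // assumed LVM Storage class name
	}
	return "", false // no storage operator selected
}

func main() {
	selected := []*models.MonitoredOperator{&odf.Operator, &lvm.Operator}
	if name, ok := storageClassForInfraOperator(selected); ok {
		fmt.Println("Infrastructure Operator PVCs will use:", name)
	}
}
```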
1 change: 1 addition & 0 deletions go.mod
@@ -65,6 +65,7 @@ require (
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.57.0
github.com/prometheus/client_golang v1.17.0
github.com/rs/cors v1.10.1
github.com/samber/lo v1.39.0
github.com/segmentio/kafka-go v0.4.38
github.com/sirupsen/logrus v1.9.3
github.com/slok/go-http-metrics v0.11.0
2 changes: 2 additions & 0 deletions go.sum
@@ -1489,6 +1489,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
5 changes: 3 additions & 2 deletions internal/bminventory/inventory.go
@@ -44,6 +44,7 @@ import (
"github.com/openshift/assisted-service/internal/metrics"
"github.com/openshift/assisted-service/internal/network"
"github.com/openshift/assisted-service/internal/operators"
operatorscommon "github.com/openshift/assisted-service/internal/operators/common"
"github.com/openshift/assisted-service/internal/operators/lvm"
"github.com/openshift/assisted-service/internal/provider"
"github.com/openshift/assisted-service/internal/provider/registry"
@@ -2906,7 +2907,7 @@ func (b *bareMetalInventory) updateOperatorsData(ctx context.Context, cluster *c
continue
}

if !operators.IsEnabled(updateOLMOperators, clusterOperator.Name) {
if !operatorscommon.HasOperator(updateOLMOperators, clusterOperator.Name) {
removedOLMOperators = append(removedOLMOperators, clusterOperator)
if err = db.Where("name = ? and cluster_id = ?", clusterOperator.Name, params.ClusterID).Delete(&models.MonitoredOperator{}).Error; err != nil {
err = errors.Wrapf(err, "failed to delete operator %s of cluster %s", clusterOperator.Name, params.ClusterID)
@@ -3859,7 +3860,7 @@ func (b *bareMetalInventory) GetCredentialsInternal(ctx context.Context, params
return nil, err
}
var consoleURL string
if b.clusterApi.IsOperatorMonitored(&cluster, operators.OperatorConsole.Name) {
if operatorscommon.HasOperator(cluster.Cluster.MonitoredOperators, operators.OperatorConsole.Name) {
if !b.clusterApi.IsOperatorAvailable(&cluster, operators.OperatorConsole.Name) {
err := errors.New("console-url isn't available yet, it will be once console operator is ready as part of cluster finalizing stage")
log.WithError(err)
39 changes: 24 additions & 15 deletions internal/bminventory/inventory_test.go
@@ -17451,22 +17451,12 @@ var _ = Describe("GetCredentials", func() {
bm *bareMetalInventory
db *gorm.DB
dbName string
c common.Cluster
c *common.Cluster
)

BeforeEach(func() {
db, dbName = common.PrepareTestDB()
bm = createInventory(db, cfg)

clusterID := strfmt.UUID(uuid.New().String())
c = common.Cluster{
Cluster: models.Cluster{
ID: &clusterID,
Name: "my-cluster",
BaseDNSDomain: "my-domain",
},
}
Expect(db.Create(&c).Error).ShouldNot(HaveOccurred())
})

AfterEach(func() {
@@ -17475,7 +17465,7 @@ var _ = Describe("GetCredentials", func() {
})

It("Console operator available", func() {
mockClusterApi.EXPECT().IsOperatorMonitored(gomock.Any(), operators.OperatorConsole.Name).Return(true)
c = createClusterWithMonitoredOperator(db, operators.OperatorConsole)
mockClusterApi.EXPECT().IsOperatorAvailable(gomock.Any(), operators.OperatorConsole.Name).Return(true)
objectName := fmt.Sprintf("%s/%s", *c.ID, "kubeadmin-password")
mockS3Client.EXPECT().Download(ctx, objectName).Return(io.NopCloser(strings.NewReader("my_password")), int64(0), nil)
@@ -17485,15 +17475,15 @@ var _ = Describe("GetCredentials", func() {
})

It("Console operator not available", func() {
mockClusterApi.EXPECT().IsOperatorMonitored(gomock.Any(), operators.OperatorConsole.Name).Return(true)
c = createClusterWithMonitoredOperator(db, operators.OperatorConsole)
mockClusterApi.EXPECT().IsOperatorAvailable(gomock.Any(), operators.OperatorConsole.Name).Return(false)

reply := bm.V2GetCredentials(ctx, installer.V2GetCredentialsParams{ClusterID: *c.ID})
verifyApiError(reply, http.StatusConflict)
})

It("Returns credentials and no console URL if the console capability is disabled", func() {
mockClusterApi.EXPECT().IsOperatorMonitored(gomock.Any(), operators.OperatorConsole.Name).Return(false)
c = createCluster(db, models.ClusterStatusInstalled)
objectName := fmt.Sprintf("%s/%s", *c.ID, "kubeadmin-password")
mockS3Client.EXPECT().Download(ctx, objectName).Return(io.NopCloser(strings.NewReader("my_password")), int64(0), nil)

@@ -17508,7 +17498,7 @@ var _ = Describe("GetCredentials", func() {
})

It("Returns credentials and console URL if the console capability is enabled", func() {
mockClusterApi.EXPECT().IsOperatorMonitored(gomock.Any(), operators.OperatorConsole.Name).Return(true)
c = createClusterWithMonitoredOperator(db, operators.OperatorConsole)
mockClusterApi.EXPECT().IsOperatorAvailable(gomock.Any(), operators.OperatorConsole.Name).Return(true)
objectName := fmt.Sprintf("%s/%s", *c.ID, "kubeadmin-password")
mockS3Client.EXPECT().Download(ctx, objectName).Return(io.NopCloser(strings.NewReader("my_password")), int64(0), nil)
@@ -18892,3 +18882,22 @@ var _ = Describe("V2UpdateHostIgnition unbound blabla", func() {
Expect(response).To(BeAssignableToTypeOf(&installer.V2UpdateHostIgnitionCreated{}))
})
})

func getDummyCluster() common.Cluster {
clusterID := strfmt.UUID(uuid.New().String())
c := common.Cluster{
Cluster: models.Cluster{
ID: &clusterID,
Name: "my-cluster",
BaseDNSDomain: "my-domain",
},
}
return c
}

func createClusterWithMonitoredOperator(db *gorm.DB, operator models.MonitoredOperator) *common.Cluster {
c := getDummyCluster()
c.MonitoredOperators = append(c.MonitoredOperators, &operator)
Expect(db.Create(&c).Error).ShouldNot(HaveOccurred())
return &c
}
10 changes: 0 additions & 10 deletions internal/cluster/cluster.go
@@ -103,7 +103,6 @@ type API interface {
// Refresh state in case of hosts update
RefreshStatus(ctx context.Context, c *common.Cluster, db *gorm.DB) (*common.Cluster, error)
ClusterMonitoring()
IsOperatorMonitored(c *common.Cluster, operatorName string) bool
IsOperatorAvailable(c *common.Cluster, operatorName string) bool
UploadIngressCert(c *common.Cluster) (err error)
VerifyClusterUpdatability(c *common.Cluster) (err error)
@@ -687,15 +686,6 @@ func CanDownloadKubeconfig(c *common.Cluster) (err error) {
return err
}

func (m *Manager) IsOperatorMonitored(c *common.Cluster, operatorName string) bool {
for _, o := range c.MonitoredOperators {
if o.Name == operatorName {
return true
}
}
return false
}

func (m *Manager) IsOperatorAvailable(c *common.Cluster, operatorName string) bool {
// TODO: MGMT-4458
// Backward-compatible solution for clusters that don't have monitored operators data
14 changes: 0 additions & 14 deletions internal/cluster/mock_cluster_api.go

Some generated files are not rendered by default.

7 changes: 7 additions & 0 deletions internal/operators/api/api.go
@@ -56,3 +56,10 @@ type Operator interface {
// GetFeatureSupportID returns the operator unique feature-support ID
GetFeatureSupportID() models.FeatureSupportLevelID
}

// StorageOperator provides a generic API for storage operators
type StorageOperator interface {
Operator
StorageClassName() string
SetAdditionalDiskRequirements(additionalSizeGB int64)
}
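
For context, here is a minimal sketch of what an implementation might look like. The type, the class name, and the interface embedding are assumptions for illustration, not code from this commit:

```go
package example

import (
	api "github.com/openshift/assisted-service/internal/operators/api"
)

// sketchStorageOperator is a hypothetical StorageOperator implementation.
// Embedding api.Operator stands in for a concrete operator (e.g. ODF or LVM)
// that already satisfies the base interface.
type sketchStorageOperator struct {
	api.Operator
	additionalDiskGB int64
}

// Compile-time check that the sketch satisfies the new interface.
var _ api.StorageOperator = &sketchStorageOperator{}

// StorageClassName reports the class used to provision dependent PVCs.
func (s *sketchStorageOperator) StorageClassName() string {
	return "sketch-storage-class" // assumed name
}

// SetAdditionalDiskRequirements lets a co-selected operator (such as MCE)
// ask this storage operator to account for extra disk capacity.
func (s *sketchStorageOperator) SetAdditionalDiskRequirements(additionalSizeGB int64) {
	s.additionalDiskGB = additionalSizeGB
}
```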
33 changes: 33 additions & 0 deletions internal/operators/common/common.go
@@ -0,0 +1,33 @@
package common

import (
"github.com/openshift/assisted-service/models"
"github.com/openshift/assisted-service/pkg/conversions"
)

// NonInstallationDiskCount returns the number of SSD/HDD disks, excluding the installation disk,
// that meet the minimum size requirement (eligible disks) and the number that do not (available disks)
func NonInstallationDiskCount(disks []*models.Disk, installationDiskID string, minSizeGB int64) (int64, int64) {
var eligibleDisks int64
var availableDisks int64

for _, disk := range disks {
if (disk.DriveType == models.DriveTypeSSD || disk.DriveType == models.DriveTypeHDD) && installationDiskID != disk.ID && disk.SizeBytes != 0 {
if disk.SizeBytes >= conversions.GbToBytes(minSizeGB) {
eligibleDisks++
} else {
availableDisks++
}
}
}
return eligibleDisks, availableDisks
}

func HasOperator(operators []*models.MonitoredOperator, operatorName string) bool {
for _, o := range operators {
if o.Name == operatorName {
return true
}
}
return false
}
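
A runnable usage sketch of `NonInstallationDiskCount`; the disk values are chosen to exercise both counters, and everything except the package APIs shown above is illustrative:

```go
package main

import (
	"fmt"

	operatorscommon "github.com/openshift/assisted-service/internal/operators/common"
	"github.com/openshift/assisted-service/models"
	"github.com/openshift/assisted-service/pkg/conversions"
)

func main() {
	disks := []*models.Disk{
		{ID: "/dev/disk/by-id/disk-1", DriveType: models.DriveTypeSSD, SizeBytes: 200 * conversions.GB}, // installation disk, skipped
		{ID: "/dev/disk/by-id/disk-2", DriveType: models.DriveTypeHDD, SizeBytes: 50 * conversions.GB},  // meets the 25 GB minimum
		{ID: "/dev/disk/by-id/disk-3", DriveType: models.DriveTypeSSD, SizeBytes: 20 * conversions.GB},  // too small, counted as available
	}
	eligible, available := operatorscommon.NonInstallationDiskCount(disks, "/dev/disk/by-id/disk-1", 25)
	fmt.Println(eligible, available) // prints: 1 1
}
```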
61 changes: 61 additions & 0 deletions internal/operators/common/common_test.go
@@ -0,0 +1,61 @@
package common_test

import (
"testing"

. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/openshift/assisted-service/internal/operators/common"
"github.com/openshift/assisted-service/internal/operators/mce"
"github.com/openshift/assisted-service/internal/operators/odf"
"github.com/openshift/assisted-service/models"
"github.com/openshift/assisted-service/pkg/conversions"
)

var _ = DescribeTable(
"Get valid disk count",
func(disks []*models.Disk, diskID string, minSize, expectedEligibleDisks, expectedAvailableDisks int64) {
eligibleDisks, availableDisks := common.NonInstallationDiskCount(disks, diskID, minSize)
Expect(eligibleDisks).To(Equal(expectedEligibleDisks))
Expect(availableDisks).To(Equal(expectedAvailableDisks))
},
Entry("no disk provided", []*models.Disk{}, "", int64(0), int64(0), int64(0)),
Entry("no valid disk provided", []*models.Disk{
{SizeBytes: 20 * conversions.GB, DriveType: models.DriveTypeUnknown, ID: "/dev/disk/by-id/disk-1"},
{SizeBytes: 20 * conversions.GB, DriveType: models.DriveTypeVirtual, ID: "/dev/disk/by-id/disk-2"},
}, "", int64(0), int64(0), int64(0)),
Entry("valid disk provided, but wrong size", []*models.Disk{
{SizeBytes: 20 * conversions.GB, DriveType: models.DriveTypeSSD, ID: "/dev/disk/by-id/disk-1"},
{SizeBytes: 20 * conversions.GB, DriveType: models.DriveTypeSSD, ID: "/dev/disk/by-id/disk-2"},
}, "", int64(25), int64(0), int64(2)),
Entry("only one valid disk provided with the right size, but chosen for install", []*models.Disk{
{SizeBytes: 20 * conversions.GB, DriveType: models.DriveTypeSSD, ID: "/dev/disk/by-id/disk-1"},
{SizeBytes: 200 * conversions.GB, DriveType: models.DriveTypeSSD, ID: "/dev/disk/by-id/disk-2"},
}, "/dev/disk/by-id/disk-2", int64(25), int64(0), int64(1)),
Entry("only one valid disk provided with the right size", []*models.Disk{
{SizeBytes: 50 * conversions.GB, DriveType: models.DriveTypeHDD, ID: "/dev/disk/by-id/disk-1"},
{SizeBytes: 200 * conversions.GB, DriveType: models.DriveTypeSSD, ID: "/dev/disk/by-id/disk-2"},
}, "/dev/disk/by-id/disk-2", int64(25), int64(1), int64(0)),
Entry("two valid disks provided with the right size", []*models.Disk{
{SizeBytes: 50 * conversions.GB, DriveType: models.DriveTypeHDD, ID: "/dev/disk/by-id/disk-1"},
{SizeBytes: 200 * conversions.GB, DriveType: models.DriveTypeSSD, ID: "/dev/disk/by-id/disk-2"},
{SizeBytes: 50 * conversions.GB, DriveType: models.DriveTypeSSD, ID: "/dev/disk/by-id/disk-3"},
}, "/dev/disk/by-id/disk-2", int64(25), int64(2), int64(0)),
)

var _ = DescribeTable(
"has operator",
func(operators []*models.MonitoredOperator, operatorName string, isExpected bool) {
found := common.HasOperator(operators, operatorName)
Expect(found).To(Equal(isExpected))
},
Entry("no operators", []*models.MonitoredOperator{}, mce.Operator.Name, false),
Entry("not matching any operator", []*models.MonitoredOperator{&odf.Operator}, mce.Operator.Name, false),
Entry("matching a operator", []*models.MonitoredOperator{&odf.Operator, &mce.Operator}, mce.Operator.Name, true),
)

func TestHandler(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Operators common test suite")
}
29 changes: 3 additions & 26 deletions internal/operators/lvm/config.go
@@ -1,40 +1,17 @@
package lvm

import (
"github.com/openshift/assisted-service/models"
)

const (
// LvmMinOpenshiftVersion is the minimum OCP version in which lvmo is supported
// Any changes here should be updated at line 16 too.
LvmoMinOpenshiftVersion string = "4.11.0"
LvmsMinOpenshiftVersion4_12 string = "4.12.0"
LvmsMinOpenshiftVersion_ForNewResourceRequirements string = "4.13.0"
LvmMinMultiNodeSupportVersion string = "4.15.0"

LvmoSubscriptionName string = "odf-lvm-operator"
LvmsSubscriptionName string = "lvms-operator"

// LvmsMemoryRequirement int64 = 400
// LvmsMemoryRequirementBefore4_13 int64 = 1200

)

type Config struct {
LvmCPUPerHost int64 `envconfig:"LVM_CPU_PER_HOST" default:"1"`
LvmMemoryPerHostMiB int64 `envconfig:"LVM_MEMORY_PER_HOST_MIB" default:"400"`
LvmMemoryPerHostMiBBefore4_13 int64 `envconfig:"LVM_MEMORY_PER_HOST_MIB" default:"1200"`
LvmMinOpenshiftVersion string `envconfig:"LVM_MIN_OPENSHIFT_VERSION" default:"4.11.0"`
}

// count all disks of drive type ssd or hdd
func (o *operator) getValidDiskCount(disks []*models.Disk, installationDiskID string) int64 {
var countDisks int64

for _, disk := range disks {
if (disk.DriveType == models.DriveTypeSSD || disk.DriveType == models.DriveTypeHDD) && installationDiskID != disk.ID && disk.SizeBytes != 0 {
countDisks++
}
}
return countDisks
LvmCPUPerHost int64 `envconfig:"LVM_CPU_PER_HOST" default:"1"`
LvmMemoryPerHostMiB int64 `envconfig:"LVM_MEMORY_PER_HOST_MIB" default:"400"`
LvmMemoryPerHostMiBBefore4_13 int64 `envconfig:"LVM_MEMORY_PER_HOST_MIB" default:"1200"`
}
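
The envconfig struct tags suggest this Config is populated from environment variables. A minimal loading sketch, assuming the kelseyhightower/envconfig package that these tags conventionally belong to:

```go
package main

import (
	"fmt"

	"github.com/kelseyhightower/envconfig"
	"github.com/openshift/assisted-service/internal/operators/lvm"
)

func main() {
	var cfg lvm.Config
	// Reads LVM_CPU_PER_HOST and LVM_MEMORY_PER_HOST_MIB, falling back to the
	// struct-tag defaults when the variables are unset.
	if err := envconfig.Process("", &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("cpu=%d memMiB=%d\n", cfg.LvmCPUPerHost, cfg.LvmMemoryPerHostMiB)
}
```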
