Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[release-4.14] MGMT-16517: Add Env Var Deployment Type & Set ABI #5987

Merged
merged 3 commits on Feb 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 4 additions & 0 deletions cmd/main.go
Expand Up @@ -398,6 +398,10 @@ func main() {
failOnError(autoMigrationWithLeader(autoMigrationLeader, db, log), "Failed auto migration process")

Options.UploaderConfig.AssistedServiceVersion = versions.GetRevision()
Options.UploaderConfig.Versions = Options.Versions
if Options.GeneratorConfig.InstallInvoker == "agent-installer" {
Options.UploaderConfig.DeploymentType = "ABI"
}
uploadClient := uploader.NewClient(&Options.UploaderConfig, db, log, ocpClient)

hostApi := host.NewManager(log.WithField("pkg", "host-state"), db, notificationStream, eventsHandler, hwValidator,
Expand Down
2 changes: 2 additions & 0 deletions deploy/assisted-service.yaml
Expand Up @@ -44,6 +44,8 @@ spec:
value: "ai-kafka-0.ai-kafka-headless.assisted-installer.svc.cluster.local:9092"
- name: KAFKA_EVENT_STREAM_TOPIC
value: "events-stream"
- name: DEPLOYMENT_TYPE
value: "SaaS"
- name: DB_HOST
valueFrom:
secretKeyRef:
Expand Down
1 change: 1 addition & 0 deletions deploy/podman/configmap-disconnected.yml
Expand Up @@ -12,6 +12,7 @@ data:
DB_PORT: "5432"
DB_USER: admin
DEPLOY_TARGET: onprem
DEPLOYMENT_TYPE: "Podman"
DISK_ENCRYPTION_SUPPORT: "true"
DUMMY_IGNITION: "false"
ENABLE_SINGLE_NODE_DNSMASQ: "true"
Expand Down
1 change: 1 addition & 0 deletions deploy/podman/configmap.yml
Expand Up @@ -12,6 +12,7 @@ data:
DB_PORT: "5432"
DB_USER: admin
DEPLOY_TARGET: onprem
DEPLOYMENT_TYPE: "Podman"
DISK_ENCRYPTION_SUPPORT: "true"
DUMMY_IGNITION: "false"
ENABLE_SINGLE_NODE_DNSMASQ: "true"
Expand Down
1 change: 1 addition & 0 deletions deploy/podman/configmap_tls.yml
Expand Up @@ -16,6 +16,7 @@ data:
DB_PORT: "5432"
DB_USER: admin
DEPLOY_TARGET: onprem
DEPLOYMENT_TYPE: "Podman"
DISK_ENCRYPTION_SUPPORT: "true"
DUMMY_IGNITION: "false"
ENABLE_SINGLE_NODE_DNSMASQ: "true"
Expand Down
25 changes: 23 additions & 2 deletions internal/uploader/events_uploader.go
Expand Up @@ -17,6 +17,7 @@ import (
"github.com/openshift/assisted-service/internal/cluster/validations"
"github.com/openshift/assisted-service/internal/common"
eventsapi "github.com/openshift/assisted-service/internal/events/api"
"github.com/openshift/assisted-service/internal/versions"
"github.com/openshift/assisted-service/models"
"github.com/openshift/assisted-service/pkg/k8sclient"
"github.com/pkg/errors"
Expand All @@ -39,7 +40,7 @@ func (e *eventsUploader) UploadEvents(ctx context.Context, cluster *common.Clust
if err != nil {
return errors.Wrapf(err, "failed to get pull secret to upload event data for cluster %s", cluster.ID)
}
buffer, err := prepareFiles(ctx, e.db, cluster, eventsHandler, pullSecret)
buffer, err := prepareFiles(ctx, e.db, cluster, eventsHandler, pullSecret, e.Config)
if err != nil {
return errors.Wrapf(err, "failed to prepare files to upload for cluster %s", cluster.ID)
}
Expand Down Expand Up @@ -118,7 +119,8 @@ func (e *eventsUploader) sendRequest(req *http.Request) error {
return nil
}

func prepareFiles(ctx context.Context, db *gorm.DB, cluster *common.Cluster, eventsHandler eventsapi.Handler, pullSecret *validations.PullSecretCreds) (*bytes.Buffer, error) {
func prepareFiles(ctx context.Context, db *gorm.DB, cluster *common.Cluster, eventsHandler eventsapi.Handler, pullSecret *validations.PullSecretCreds,
config Config) (*bytes.Buffer, error) {
buffer := &bytes.Buffer{}
gz := gzip.NewWriter(buffer)
tw := tar.NewWriter(gz)
Expand Down Expand Up @@ -148,6 +150,14 @@ func prepareFiles(ctx context.Context, db *gorm.DB, cluster *common.Cluster, eve
return nil, errors.Errorf("no event data files created for cluster %s", cluster.ID)
}

// Add versions file to bundle
if versionsJson, err := json.Marshal(versions.GetModelVersions(config.Versions)); err == nil {
addFile(tw, versionsJson, fmt.Sprintf("%s/versions.json", *cluster.ID)) //nolint:errcheck // errors adding this file shouldn't prevent the data from being sent
}

// Add metadata file to bundle
metadataFile(tw, cluster.ID, config)

// produce tar
if err := tw.Close(); err != nil {
return nil, errors.Wrap(err, "failed closing tar file")
Expand All @@ -159,6 +169,17 @@ func prepareFiles(ctx context.Context, db *gorm.DB, cluster *common.Cluster, eve
return buffer, nil
}

// metadataFile adds a metadata.json entry to the tar bundle for the given
// cluster. The entry contains the service component versions plus the
// deployment details (type, version, and the service git ref) taken from
// config. This is strictly best-effort: a marshaling or archiving failure
// is ignored so that a bad metadata file never blocks the upload of the
// rest of the bundle.
func metadataFile(tw *tar.Writer, clusterID *strfmt.UUID, config Config) {
	contents := versions.GetModelVersions(config.Versions)
	contents["deployment-type"] = config.DeploymentType
	contents["deployment-version"] = config.DeploymentVersion
	contents["git-ref"] = config.AssistedServiceVersion

	data, err := json.Marshal(contents)
	if err != nil {
		// Skip the metadata file rather than fail the whole upload.
		return
	}
	addFile(tw, data, fmt.Sprintf("%s/metadata.json", *clusterID)) //nolint:errcheck // errors adding this file shouldn't prevent the data from being sent
}

func eventsFile(ctx context.Context, clusterID *strfmt.UUID, eventsHandler eventsapi.Handler, tw *tar.Writer) error {
if eventsHandler == nil {
return errors.Errorf("failed to get events for cluster %s, events handler is nil", clusterID)
Expand Down
105 changes: 87 additions & 18 deletions internal/uploader/events_uploader_test.go
Expand Up @@ -27,6 +27,7 @@ import (
"github.com/openshift/assisted-service/internal/events"
eventsapi "github.com/openshift/assisted-service/internal/events/api"
"github.com/openshift/assisted-service/internal/events/eventstest"
"github.com/openshift/assisted-service/internal/versions"
"github.com/openshift/assisted-service/models"
"github.com/openshift/assisted-service/pkg/k8sclient"
"github.com/pkg/errors"
Expand Down Expand Up @@ -150,15 +151,17 @@ var _ = Describe("prepareBody", func() {

var _ = Describe("prepareFiles", func() {
var (
ctrl *gomock.Controller
ctx context.Context
db *gorm.DB
dbName string
token string
clusterID strfmt.UUID
mockEvents *eventsapi.MockHandler
hostID strfmt.UUID
infraEnvID strfmt.UUID
ctrl *gomock.Controller
ctx context.Context
db *gorm.DB
dbName string
token string
clusterID strfmt.UUID
mockEvents *eventsapi.MockHandler
hostID strfmt.UUID
infraEnvID strfmt.UUID
cfg Config
serviceVersion versions.Versions
)

BeforeEach(func() {
Expand All @@ -170,6 +173,16 @@ var _ = Describe("prepareFiles", func() {
infraEnvID = strfmt.UUID(uuid.New().String())
hostID = strfmt.UUID(uuid.New().String())
token = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, "thePassword")))
serviceVersion = versions.Versions{
SelfVersion: "self-version",
AgentDockerImg: "agent-image",
InstallerImage: "installer-image",
ControllerImage: "controller-image",
ReleaseTag: "v1.2.3",
}
cfg = Config{
Versions: serviceVersion,
}
})

AfterEach(func() {
Expand All @@ -182,21 +195,25 @@ var _ = Describe("prepareFiles", func() {

cluster := createTestObjects(db, &clusterID, &hostID, &infraEnvID)
pullSecret := validations.PullSecretCreds{AuthRaw: token, Email: fmt.Sprintf("testemail@%s", emailDomain), Username: username}
buf, err := prepareFiles(ctx, db, cluster, mockEvents, &pullSecret)
buf, err := prepareFiles(ctx, db, cluster, mockEvents, &pullSecret, cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.Bytes()).NotTo(BeEmpty())
testFiles := map[string]*testFile{
"cluster": {expected: true},
"infraenv": {expected: true},
"hosts": {expected: true},
"events": {expected: true},
"versions": {expected: true},
"metadata": {expected: true},
}

readtgzFiles(testFiles, clusterID, buf.Bytes())
checkHostsFile(db, testFiles["hosts"], clusterID)
checkClusterFile(db, testFiles["cluster"], clusterID, username, emailDomain)
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkEventsFile(testFiles["events"], []string{}, 0)
checkVersionsFile(testFiles["versions"], serviceVersion)
checkMetadataFile(testFiles["metadata"], cfg)
})
It("prepares only the event data for the current cluster", func() {
clusterID2 := strfmt.UUID(uuid.New().String())
Expand All @@ -206,63 +223,75 @@ var _ = Describe("prepareFiles", func() {
cluster := createTestObjects(db, &clusterID, &hostID, &infraEnvID)
createTestObjects(db, &clusterID2, nil, nil)
pullSecret := validations.PullSecretCreds{AuthRaw: token, Email: fmt.Sprintf("testemail@%s", emailDomain), Username: username}
buf, err := prepareFiles(ctx, db, cluster, eventsHandler, &pullSecret)
buf, err := prepareFiles(ctx, db, cluster, eventsHandler, &pullSecret, cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.Bytes()).NotTo(BeEmpty())
testFiles := map[string]*testFile{
"cluster": {expected: true},
"infraenv": {expected: true},
"hosts": {expected: true},
"events": {expected: true},
"versions": {expected: true},
"metadata": {expected: true},
}

readtgzFiles(testFiles, clusterID, buf.Bytes())
checkHostsFile(db, testFiles["hosts"], clusterID)
checkClusterFile(db, testFiles["cluster"], clusterID, username, emailDomain)
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkEventsFile(testFiles["events"], []string{models.ClusterStatusAddingHosts}, 1)
checkVersionsFile(testFiles["versions"], serviceVersion)
checkMetadataFile(testFiles["metadata"], cfg)
})
It("prepares only the cluster, host, and event data when missing infraEnv ID", func() {
mockEvents.EXPECT().V2GetEvents(
ctx, common.GetDefaultV2GetEventsParams(&clusterID, nil, nil, models.EventCategoryMetrics, models.EventCategoryUser)).Return(&common.V2GetEventsResponse{}, nil).Times(1)

cluster := createTestObjects(db, &clusterID, &hostID, nil)
pullSecret := validations.PullSecretCreds{AuthRaw: token, Email: fmt.Sprintf("testemail@%s", emailDomain), Username: username}
buf, err := prepareFiles(ctx, db, cluster, mockEvents, &pullSecret)
buf, err := prepareFiles(ctx, db, cluster, mockEvents, &pullSecret, cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.Bytes()).NotTo(BeEmpty())
testFiles := map[string]*testFile{
"cluster": {expected: true},
"infraenv": {expected: false},
"hosts": {expected: true},
"events": {expected: true},
"versions": {expected: true},
"metadata": {expected: true},
}

readtgzFiles(testFiles, clusterID, buf.Bytes())
checkHostsFile(db, testFiles["hosts"], clusterID)
checkClusterFile(db, testFiles["cluster"], clusterID, username, emailDomain)
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkEventsFile(testFiles["events"], []string{}, 0)
checkVersionsFile(testFiles["versions"], serviceVersion)
checkMetadataFile(testFiles["metadata"], cfg)
})
It("fails to prepare files when there is no data", func() {
mockEvents.EXPECT().V2GetEvents(ctx, common.GetDefaultV2GetEventsParams(nil, nil, nil, models.EventCategoryMetrics, models.EventCategoryUser)).Return(
nil, errors.New("no events found")).Times(1)
pullSecret := validations.PullSecretCreds{AuthRaw: token, Email: fmt.Sprintf("testemail@%s", emailDomain)}
buf, err := prepareFiles(ctx, db, &common.Cluster{}, mockEvents, &pullSecret)
buf, err := prepareFiles(ctx, db, &common.Cluster{}, mockEvents, &pullSecret, cfg)
Expect(err).To(HaveOccurred())
Expect(buf).To(BeNil())
testFiles := map[string]*testFile{
"cluster": {expected: false},
"infraenv": {expected: false},
"hosts": {expected: false},
"events": {expected: false},
"versions": {expected: false},
"metadata": {expected: false},
}

readtgzFiles(testFiles, clusterID, nil)
checkHostsFile(db, testFiles["hosts"], clusterID)
checkClusterFile(db, testFiles["cluster"], clusterID, username, emailDomain)
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkEventsFile(testFiles["events"], []string{}, 0)
checkVersionsFile(testFiles["versions"], serviceVersion)
checkMetadataFile(testFiles["metadata"], cfg)
})
})

Expand All @@ -284,6 +313,7 @@ var _ = Describe("UploadEvents", func() {
uploader *eventsUploader
dataUploadServer func([]string, int, map[string]*testFile) http.HandlerFunc
mockEvents *eventsapi.MockHandler
servicesVersion versions.Versions
)

BeforeEach(func() {
Expand All @@ -296,7 +326,17 @@ var _ = Describe("UploadEvents", func() {
infraEnvID = strfmt.UUID(uuid.New().String())
hostID = strfmt.UUID(uuid.New().String())
token = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, "thePassword")))

servicesVersion = versions.Versions{
SelfVersion: "self-version",
AgentDockerImg: "agent-image",
InstallerImage: "installer-image",
ControllerImage: "controller-image",
ReleaseTag: "v1.2.3",
}
cfg := Config{
AssistedServiceVersion: serviceVersion,
Versions: servicesVersion,
}
dataUploadServer = func(expectedEvents []string, expectedNumberOfEvents int, testFiles map[string]*testFile) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal(http.MethodPost))
Expand All @@ -320,17 +360,17 @@ var _ = Describe("UploadEvents", func() {
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkHostsFile(db, testFiles["hosts"], clusterID)
checkEventsFile(testFiles["events"], expectedEvents, expectedNumberOfEvents)
checkVersionsFile(testFiles["versions"], servicesVersion)
checkMetadataFile(testFiles["metadata"], cfg)
}
})
}
cfg := &Config{
AssistedServiceVersion: serviceVersion,
}

uploader = &eventsUploader{
db: db,
log: common.GetTestLog(),
client: mockK8sClient,
Config: *cfg,
Config: cfg,
}
})
AfterEach(func() {
Expand All @@ -355,9 +395,12 @@ var _ = Describe("UploadEvents", func() {
"hosts": {expected: true},
"infraenv": {expected: true},
"events": {expected: true},
"versions": {expected: true},
"metadata": {expected: true},
}
server := httptest.NewServer(dataUploadServer([]string{models.ClusterStatusAddingHosts}, 1, testFiles))
uploader.Config.DataUploadEndpoint = fmt.Sprintf("%s/%s", server.URL, "upload/test")
uploader.Config.Versions = servicesVersion

cluster := createTestObjects(db, &clusterID, &hostID, &infraEnvID)
err := uploader.UploadEvents(ctx, cluster, mockEvents)
Expand Down Expand Up @@ -460,6 +503,10 @@ func readFiles(tr *tar.Reader, testFiles map[string]*testFile, clusterID strfmt.
fileName = "cluster"
case fmt.Sprintf("%s/events.json", clusterID):
fileName = "events"
case fmt.Sprintf("%s/versions.json", clusterID):
fileName = "versions"
case fmt.Sprintf("%s/metadata.json", clusterID):
fileName = "metadata"
}
if fileName != "" {
fileContents, err := io.ReadAll(tr)
Expand Down Expand Up @@ -554,3 +601,25 @@ func checkEventsFile(eventsFile *testFile, expectedEvents []string, expectedNumb
}
}
}

// checkVersionsFile asserts that the versions.json entry was present in the
// bundle exactly when expected, and that its contents round-trip to the
// model derived from expectedVersions.
func checkVersionsFile(versionsFile *testFile, expectedVersions versions.Versions) {
	Expect(versionsFile.exists).To(Equal(versionsFile.expected))
	if !versionsFile.expected {
		return
	}
	var got models.Versions
	Expect(json.Unmarshal(versionsFile.contents, &got)).ShouldNot(HaveOccurred())
	Expect(got).To(BeEquivalentTo(versions.GetModelVersions(expectedVersions)))
}

// checkMetadataFile asserts that the metadata.json entry was present in the
// bundle exactly when expected, and that its contents equal the model
// versions from cfg augmented with the deployment type, deployment version,
// and service git ref — mirroring what metadataFile writes.
func checkMetadataFile(metadataFile *testFile, cfg Config) {
	Expect(metadataFile.exists).To(Equal(metadataFile.expected))
	if !metadataFile.expected {
		return
	}
	var got models.Versions
	Expect(json.Unmarshal(metadataFile.contents, &got)).ShouldNot(HaveOccurred())

	want := versions.GetModelVersions(cfg.Versions)
	want["deployment-type"] = cfg.DeploymentType
	want["deployment-version"] = cfg.DeploymentVersion
	want["git-ref"] = cfg.AssistedServiceVersion
	Expect(got).To(BeEquivalentTo(want))
}
4 changes: 4 additions & 0 deletions internal/uploader/uploader.go
Expand Up @@ -5,6 +5,7 @@ import (

"github.com/openshift/assisted-service/internal/common"
eventsapi "github.com/openshift/assisted-service/internal/events/api"
"github.com/openshift/assisted-service/internal/versions"
"github.com/openshift/assisted-service/pkg/k8sclient"
"github.com/sirupsen/logrus"
"gorm.io/gorm"
Expand All @@ -17,7 +18,10 @@ type Client interface {
}

// Config holds the uploader's runtime configuration. Tagged fields are
// populated from environment variables via envconfig; untagged fields are
// set programmatically at startup (see cmd/main.go).
type Config struct {
	// Versions carries the service component versions that are bundled
	// into the uploaded versions/metadata files; set in main, no env var.
	Versions versions.Versions
	// DataUploadEndpoint is the ingress URL the event bundle is sent to.
	DataUploadEndpoint string `envconfig:"DATA_UPLOAD_ENDPOINT" default:"https://console.redhat.com/api/ingress/v1/upload"`
	// DeploymentType labels how the service is deployed (e.g. "SaaS",
	// "Podman", "ABI"); empty when unknown.
	DeploymentType string `envconfig:"DEPLOYMENT_TYPE" default:""`
	// DeploymentVersion is the version of the deployment itself; empty
	// when unknown.
	DeploymentVersion string `envconfig:"DEPLOYMENT_VERSION" default:""`
	// AssistedServiceVersion is the service git revision; set in main
	// from versions.GetRevision(), no env var.
	AssistedServiceVersion string
	// EnableDataCollection toggles uploading event data entirely.
	EnableDataCollection bool `envconfig:"ENABLE_DATA_COLLECTION" default:"true"`
}
Expand Down
4 changes: 4 additions & 0 deletions openshift/template.yaml
Expand Up @@ -195,6 +195,8 @@ parameters:
- name: ENABLE_DATA_COLLECTION
value: "false"
required: false
- name: DEPLOYMENT_TYPE
value: "SaaS"
- name: INSTALLER_CACHE_CAPACITY
value: "6442450944"
required: false
Expand Down Expand Up @@ -457,6 +459,8 @@ objects:
value: ${ENABLE_REJECT_UNKNOWN_FIELDS}
- name: ENABLE_DATA_COLLECTION
value: ${ENABLE_DATA_COLLECTION}
- name: DEPLOYMENT_TYPE
value: ${DEPLOYMENT_TYPE}
- name: INSTALLER_CACHE_CAPACITY
value: ${INSTALLER_CACHE_CAPACITY}
- name: ENABLE_OKD_SUPPORT
Expand Down