MGMT-16517: Add Env Var for On-Prem Data Deployment Type (openshift#5874)

* MGMT-16517: Add Env Var for On-Prem Data Deployment Type

https://issues.redhat.com/browse/MGMT-16517
Adds the environment variables `DEPLOYMENT_TYPE`
and `DEPLOYMENT_VERSION` to record how assisted-service
was deployed. Expected values for the deployment type
include ACM, MCE, ABI, podman, and standalone operator.

* MGMT-16517: Add Env Var to podman and SaaS templates

https://issues.redhat.com/browse/MGMT-16517
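
As a quick illustration of how these variables are consumed: the uploader's `Config` (see `internal/uploader/uploader.go` below) is populated from the environment via `envconfig` struct tags. A minimal, self-contained sketch — the trimmed struct and `main` wrapper are illustrative only, not part of this commit:

package main

import (
	"fmt"

	"github.com/kelseyhightower/envconfig"
)

// Trimmed copy of the uploader Config for illustration; the real struct
// lives in internal/uploader/uploader.go.
type Config struct {
	DeploymentType    string `envconfig:"DEPLOYMENT_TYPE" default:""`
	DeploymentVersion string `envconfig:"DEPLOYMENT_VERSION" default:""`
}

func main() {
	// With DEPLOYMENT_TYPE=Podman exported in the environment,
	// envconfig fills DeploymentType with "Podman"; unset variables
	// fall back to the empty-string defaults declared in the tags.
	var cfg Config
	if err := envconfig.Process("", &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("deployment-type=%q deployment-version=%q\n",
		cfg.DeploymentType, cfg.DeploymentVersion)
}

Running this with DEPLOYMENT_TYPE=Podman in the environment prints the picked-up value, matching what the podman configmaps in this commit set.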
CrystalChun committed Feb 13, 2024
1 parent 4a46b20 commit b48c666
Showing 8 changed files with 67 additions and 12 deletions.
2 changes: 2 additions & 0 deletions deploy/assisted-service.yaml
@@ -44,6 +44,8 @@ spec:
value: "ai-kafka-0.ai-kafka-headless.assisted-installer.svc.cluster.local:9092"
- name: KAFKA_EVENT_STREAM_TOPIC
value: "events-stream"
- name: DEPLOYMENT_TYPE
value: "SaaS"
- name: DB_HOST
valueFrom:
secretKeyRef:
1 change: 1 addition & 0 deletions deploy/podman/configmap-disconnected.yml
@@ -12,6 +12,7 @@ data:
DB_PORT: "5432"
DB_USER: admin
DEPLOY_TARGET: onprem
DEPLOYMENT_TYPE: "Podman"
DISK_ENCRYPTION_SUPPORT: "true"
DUMMY_IGNITION: "false"
ENABLE_SINGLE_NODE_DNSMASQ: "true"
1 change: 1 addition & 0 deletions deploy/podman/configmap.yml
@@ -12,6 +12,7 @@ data:
DB_PORT: "5432"
DB_USER: admin
DEPLOY_TARGET: onprem
DEPLOYMENT_TYPE: "Podman"
DISK_ENCRYPTION_SUPPORT: "true"
DUMMY_IGNITION: "false"
ENABLE_SINGLE_NODE_DNSMASQ: "true"
1 change: 1 addition & 0 deletions deploy/podman/configmap_tls.yml
@@ -16,6 +16,7 @@ data:
DB_PORT: "5432"
DB_USER: admin
DEPLOY_TARGET: onprem
DEPLOYMENT_TYPE: "Podman"
DISK_ENCRYPTION_SUPPORT: "true"
DUMMY_IGNITION: "false"
ENABLE_SINGLE_NODE_DNSMASQ: "true"
20 changes: 17 additions & 3 deletions internal/uploader/events_uploader.go
@@ -40,7 +40,7 @@ func (e *eventsUploader) UploadEvents(ctx context.Context, cluster *common.Clust
if err != nil {
return errors.Wrapf(err, "failed to get pull secret to upload event data for cluster %s", cluster.ID)
}
buffer, err := prepareFiles(ctx, e.db, cluster, eventsHandler, pullSecret, e.Config.Versions)
buffer, err := prepareFiles(ctx, e.db, cluster, eventsHandler, pullSecret, e.Config)
if err != nil {
return errors.Wrapf(err, "failed to prepare files to upload for cluster %s", cluster.ID)
}
@@ -120,7 +120,7 @@ func (e *eventsUploader) sendRequest(req *http.Request) error {
}

func prepareFiles(ctx context.Context, db *gorm.DB, cluster *common.Cluster, eventsHandler eventsapi.Handler, pullSecret *validations.PullSecretCreds,
serviceVersions versions.Versions) (*bytes.Buffer, error) {
config Config) (*bytes.Buffer, error) {
buffer := &bytes.Buffer{}
gz := gzip.NewWriter(buffer)
tw := tar.NewWriter(gz)
@@ -151,10 +151,13 @@ func prepareFiles(ctx context.Context, db *gorm.DB, cluster *common.Cluster, eve
}

// Add versions file to bundle
if versionsJson, err := json.Marshal(versions.GetModelVersions(serviceVersions)); err == nil {
if versionsJson, err := json.Marshal(versions.GetModelVersions(config.Versions)); err == nil {
addFile(tw, versionsJson, fmt.Sprintf("%s/versions.json", *cluster.ID)) //nolint:errcheck // errors adding this file shouldn't prevent the data from being sent
}

// Add metadata file to bundle
metadataFile(tw, cluster.ID, config)

// produce tar
if err := tw.Close(); err != nil {
return nil, errors.Wrap(err, "failed closing tar file")
@@ -166,6 +169,17 @@ func prepareFiles(ctx context.Context, db *gorm.DB, cluster *common.Cluster, eve
return buffer, nil
}

func metadataFile(tw *tar.Writer, clusterID *strfmt.UUID, config Config) {
metadata := versions.GetModelVersions(config.Versions)
metadata["deployment-type"] = config.DeploymentType
metadata["deployment-version"] = config.DeploymentVersion
metadata["git-ref"] = config.AssistedServiceVersion

if metadataJson, err := json.Marshal(metadata); err == nil {
addFile(tw, metadataJson, fmt.Sprintf("%s/metadata.json", *clusterID)) //nolint:errcheck // errors adding this file shouldn't prevent the data from being sent
}
}

func eventsFile(ctx context.Context, clusterID *strfmt.UUID, eventsHandler eventsapi.Handler, tw *tar.Writer) error {
if eventsHandler == nil {
return errors.Errorf("failed to get events for cluster %s, events handler is nil", clusterID)
48 changes: 39 additions & 9 deletions internal/uploader/events_uploader_test.go
@@ -160,6 +160,7 @@ var _ = Describe("prepareFiles", func() {
mockEvents *eventsapi.MockHandler
hostID strfmt.UUID
infraEnvID strfmt.UUID
cfg Config
serviceVersion versions.Versions
)

@@ -179,6 +180,9 @@
ControllerImage: "controller-image",
ReleaseTag: "v1.2.3",
}
cfg = Config{
Versions: serviceVersion,
}
})

AfterEach(func() {
@@ -191,7 +195,7 @@

cluster := createTestObjects(db, &clusterID, &hostID, &infraEnvID)
pullSecret := validations.PullSecretCreds{AuthRaw: token, Email: fmt.Sprintf("testemail@%s", emailDomain), Username: username}
buf, err := prepareFiles(ctx, db, cluster, mockEvents, &pullSecret, serviceVersion)
buf, err := prepareFiles(ctx, db, cluster, mockEvents, &pullSecret, cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.Bytes()).NotTo(BeEmpty())
testFiles := map[string]*testFile{
@@ -200,6 +204,7 @@
"hosts": {expected: true},
"events": {expected: true},
"versions": {expected: true},
"metadata": {expected: true},
}

readtgzFiles(testFiles, clusterID, buf.Bytes())
@@ -208,6 +213,7 @@
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkEventsFile(testFiles["events"], []string{}, 0)
checkVersionsFile(testFiles["versions"], serviceVersion)
checkMetadataFile(testFiles["metadata"], cfg)
})
It("prepares only the event data for the current cluster", func() {
clusterID2 := strfmt.UUID(uuid.New().String())
@@ -217,7 +223,7 @@
cluster := createTestObjects(db, &clusterID, &hostID, &infraEnvID)
createTestObjects(db, &clusterID2, nil, nil)
pullSecret := validations.PullSecretCreds{AuthRaw: token, Email: fmt.Sprintf("testemail@%s", emailDomain), Username: username}
buf, err := prepareFiles(ctx, db, cluster, eventsHandler, &pullSecret, serviceVersion)
buf, err := prepareFiles(ctx, db, cluster, eventsHandler, &pullSecret, cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.Bytes()).NotTo(BeEmpty())
testFiles := map[string]*testFile{
@@ -226,6 +232,7 @@
"hosts": {expected: true},
"events": {expected: true},
"versions": {expected: true},
"metadata": {expected: true},
}

readtgzFiles(testFiles, clusterID, buf.Bytes())
@@ -234,14 +241,15 @@
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkEventsFile(testFiles["events"], []string{models.ClusterStatusAddingHosts}, 1)
checkVersionsFile(testFiles["versions"], serviceVersion)
checkMetadataFile(testFiles["metadata"], cfg)
})
It("prepares only the cluster, host, and event data when missing infraEnv ID", func() {
mockEvents.EXPECT().V2GetEvents(
ctx, common.GetDefaultV2GetEventsParams(&clusterID, nil, nil, models.EventCategoryMetrics, models.EventCategoryUser)).Return(&common.V2GetEventsResponse{}, nil).Times(1)

cluster := createTestObjects(db, &clusterID, &hostID, nil)
pullSecret := validations.PullSecretCreds{AuthRaw: token, Email: fmt.Sprintf("testemail@%s", emailDomain), Username: username}
buf, err := prepareFiles(ctx, db, cluster, mockEvents, &pullSecret, serviceVersion)
buf, err := prepareFiles(ctx, db, cluster, mockEvents, &pullSecret, cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.Bytes()).NotTo(BeEmpty())
testFiles := map[string]*testFile{
@@ -250,6 +258,7 @@
"hosts": {expected: true},
"events": {expected: true},
"versions": {expected: true},
"metadata": {expected: true},
}

readtgzFiles(testFiles, clusterID, buf.Bytes())
@@ -258,12 +267,13 @@
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkEventsFile(testFiles["events"], []string{}, 0)
checkVersionsFile(testFiles["versions"], serviceVersion)
checkMetadataFile(testFiles["metadata"], cfg)
})
It("fails to prepare files when there is no data", func() {
mockEvents.EXPECT().V2GetEvents(ctx, common.GetDefaultV2GetEventsParams(nil, nil, nil, models.EventCategoryMetrics, models.EventCategoryUser)).Return(
nil, errors.New("no events found")).Times(1)
pullSecret := validations.PullSecretCreds{AuthRaw: token, Email: fmt.Sprintf("testemail@%s", emailDomain)}
buf, err := prepareFiles(ctx, db, &common.Cluster{}, mockEvents, &pullSecret, serviceVersion)
buf, err := prepareFiles(ctx, db, &common.Cluster{}, mockEvents, &pullSecret, cfg)
Expect(err).To(HaveOccurred())
Expect(buf).To(BeNil())
testFiles := map[string]*testFile{
@@ -272,6 +282,7 @@
"hosts": {expected: false},
"events": {expected: false},
"versions": {expected: false},
"metadata": {expected: false},
}

readtgzFiles(testFiles, clusterID, nil)
@@ -280,6 +291,7 @@
checkInfraEnvFile(db, testFiles["infraenv"], infraEnvID)
checkEventsFile(testFiles["events"], []string{}, 0)
checkVersionsFile(testFiles["versions"], serviceVersion)
checkMetadataFile(testFiles["metadata"], cfg)
})
})

@@ -321,7 +333,10 @@ var _ = Describe("UploadEvents", func() {
ControllerImage: "controller-image",
ReleaseTag: "v1.2.3",
}

cfg := Config{
AssistedServiceVersion: serviceVersion,
Versions: servicesVersion,
}
dataUploadServer = func(expectedEvents []string, expectedNumberOfEvents int, testFiles map[string]*testFile) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.Method).To(Equal(http.MethodPost))
@@ -346,17 +361,16 @@
checkHostsFile(db, testFiles["hosts"], clusterID)
checkEventsFile(testFiles["events"], expectedEvents, expectedNumberOfEvents)
checkVersionsFile(testFiles["versions"], servicesVersion)
checkMetadataFile(testFiles["metadata"], cfg)
}
})
}
cfg := &Config{
AssistedServiceVersion: serviceVersion,
}

uploader = &eventsUploader{
db: db,
log: common.GetTestLog(),
client: mockK8sClient,
Config: *cfg,
Config: cfg,
}
})
AfterEach(func() {
@@ -382,6 +396,7 @@
"infraenv": {expected: true},
"events": {expected: true},
"versions": {expected: true},
"metadata": {expected: true},
}
server := httptest.NewServer(dataUploadServer([]string{models.ClusterStatusAddingHosts}, 1, testFiles))
uploader.Config.DataUploadEndpoint = fmt.Sprintf("%s/%s", server.URL, "upload/test")
@@ -490,6 +505,8 @@ func readFiles(tr *tar.Reader, testFiles map[string]*testFile, clusterID strfmt.
fileName = "events"
case fmt.Sprintf("%s/versions.json", clusterID):
fileName = "versions"
case fmt.Sprintf("%s/metadata.json", clusterID):
fileName = "metadata"
}
if fileName != "" {
fileContents, err := io.ReadAll(tr)
@@ -593,3 +610,16 @@ func checkVersionsFile(versionsFile *testFile, expectedVersions versions.Version
Expect(serviceVersion).To(BeEquivalentTo(versions.GetModelVersions(expectedVersions)))
}
}

func checkMetadataFile(metadataFile *testFile, cfg Config) {
Expect(metadataFile.expected).To(Equal(metadataFile.exists))
if metadataFile.expected {
var metadataContents models.Versions
Expect(json.Unmarshal(metadataFile.contents, &metadataContents)).ShouldNot(HaveOccurred())
expectedMetadata := versions.GetModelVersions(cfg.Versions)
expectedMetadata["deployment-type"] = cfg.DeploymentType
expectedMetadata["deployment-version"] = cfg.DeploymentVersion
expectedMetadata["git-ref"] = cfg.AssistedServiceVersion
Expect(metadataContents).To(BeEquivalentTo(expectedMetadata))
}
}
2 changes: 2 additions & 0 deletions internal/uploader/uploader.go
@@ -20,6 +20,8 @@ type Client interface {
type Config struct {
Versions versions.Versions
DataUploadEndpoint string `envconfig:"DATA_UPLOAD_ENDPOINT" default:"https://console.redhat.com/api/ingress/v1/upload"`
DeploymentType string `envconfig:"DEPLOYMENT_TYPE" default:""`
DeploymentVersion string `envconfig:"DEPLOYMENT_VERSION" default:""`
AssistedServiceVersion string
EnableDataCollection bool `envconfig:"ENABLE_DATA_COLLECTION" default:"true"`
}
4 changes: 4 additions & 0 deletions openshift/template.yaml
@@ -195,6 +195,8 @@ parameters:
- name: ENABLE_DATA_COLLECTION
value: "false"
required: false
- name: DEPLOYMENT_TYPE
value: "SaaS"
- name: INSTALLER_CACHE_CAPACITY
value: "6442450944"
required: false
@@ -457,6 +459,8 @@ objects:
value: ${ENABLE_REJECT_UNKNOWN_FIELDS}
- name: ENABLE_DATA_COLLECTION
value: ${ENABLE_DATA_COLLECTION}
- name: DEPLOYMENT_TYPE
value: ${DEPLOYMENT_TYPE}
- name: INSTALLER_CACHE_CAPACITY
value: ${INSTALLER_CACHE_CAPACITY}
- name: ENABLE_OKD_SUPPORT
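
For completeness, one way to confirm that the new `metadata.json` entry lands in an uploaded bundle is to unpack an archive produced by `prepareFiles` and decode the entry. A minimal sketch, assuming the bundle was written to a local file (the file name is hypothetical):

package main

import (
	"archive/tar"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

func main() {
	// Hypothetical path to a gzipped tar bundle produced by prepareFiles.
	f, err := os.Open("cluster-bundle.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	gz, err := gzip.NewReader(f)
	if err != nil {
		panic(err)
	}
	tr := tar.NewReader(gz)

	// Entries are named "<cluster-id>/metadata.json", "<cluster-id>/events.json", etc.
	for {
		hdr, err := tr.Next()
		if err != nil {
			break // io.EOF once the archive is exhausted
		}
		if strings.HasSuffix(hdr.Name, "/metadata.json") {
			// metadataFile marshals a map of strings (the model versions
			// plus deployment-type, deployment-version, and git-ref).
			var metadata map[string]string
			if err := json.NewDecoder(tr).Decode(&metadata); err != nil {
				panic(err)
			}
			fmt.Println("deployment-type:", metadata["deployment-type"])
			fmt.Println("deployment-version:", metadata["deployment-version"])
			fmt.Println("git-ref:", metadata["git-ref"])
		}
	}
}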
