Skip to content

Commit

Permalink
MGMT-4475: suggested auto-assign #1
Browse files Browse the repository at this point in the history
First phase of early auto assign implementation:
- add role info to record the reason for the role assignment
- add usage
  • Loading branch information
slaviered committed Aug 26, 2021
1 parent dabadc7 commit 0ee840b
Show file tree
Hide file tree
Showing 14 changed files with 252 additions and 45 deletions.
21 changes: 19 additions & 2 deletions internal/bminventory/inventory.go
Expand Up @@ -1448,6 +1448,21 @@ func (b *bareMetalInventory) InstallClusterInternal(ctx context.Context, params
return nil, err
}

// Usage for auto role selection is measured only for day-1 clusters with more
// than 3 hosts (a cluster with exactly the minimum host count would have all
// hosts assigned as masters automatically when the hardware is sufficient, so
// no meaningful auto-assignment decision is made there).
// The transaction's error is deliberately discarded: usage accounting is
// best-effort and must never block cluster installation.
_ = b.db.Transaction(func(tx *gorm.DB) error {
	// Record the metric only when the stored feature-usage blob parses
	// successfully (the original `uerr != nil` inverted this check, so the
	// metric was only "recorded" — against invalid data — on parse failure).
	if usages, uerr := usage.Unmarshal(cluster.Cluster.FeatureUsage); uerr == nil {
		// At least one host must have a role that was NOT explicitly set by
		// the user for auto-assignment to have actually been exercised.
		if cluster.Cluster.EnabledHostCount > common.MinMasterHostsNeededForInstallation &&
			len(funk.Filter(cluster.Cluster.Hosts, func(h *models.Host) bool {
				return h.RoleInfo != models.RoleInfoUserAssigned
			}).([]*models.Host)) > 0 {
			b.setUsage(true, usage.AutoAssignRoleUsage, nil, usages)
			b.usageApi.Save(tx, *cluster.ID, usages)
		}
	}
	return nil
})

if err = b.refreshAllHosts(ctx, cluster); err != nil {
return nil, err
}
Expand Down Expand Up @@ -2693,7 +2708,7 @@ func (b *bareMetalInventory) updateHostRoles(ctx context.Context, params install
hostRole.ID, params.ClusterID)
return common.NewApiError(http.StatusNotFound, err)
}
err = b.hostApi.UpdateRole(ctx, &host.Host, models.HostRole(hostRole.Role), db)
err = b.hostApi.UpdateRole(ctx, &host.Host, models.HostRole(hostRole.Role), models.RoleInfoUserAssigned, db)
if err != nil {
log.WithError(err).Errorf("failed to set role <%s> host <%s> in cluster <%s>",
hostRole.Role, hostRole.ID,
Expand Down Expand Up @@ -5585,6 +5600,7 @@ func (b *bareMetalInventory) V2RegisterHost(ctx context.Context, params installe
DiscoveryAgentVersion: params.NewHostParams.DiscoveryAgentVersion,
UserName: ocm.UserNameFromContext(ctx),
Role: defaultRole,
RoleInfo: models.RoleInfoUserAssigned, //TBD: Should the default info be 'auto-assigned'?
InfraEnvID: infraEnv.ID,
}

Expand All @@ -5607,6 +5623,7 @@ func (b *bareMetalInventory) V2RegisterHost(ctx context.Context, params installe
}
if common.IsSingleNodeCluster(cluster) {
host.Role = models.HostRoleMaster
host.RoleInfo = models.RoleInfoMinimalMasterCount
}
if swag.StringValue(cluster.Kind) == models.ClusterKindAddHostsCluster {
host.Kind = swag.String(models.HostKindAddToExistingClusterHost)
Expand Down Expand Up @@ -6162,7 +6179,7 @@ func (b *bareMetalInventory) updateHostRole(ctx context.Context, host *common.Ho
log.Errorf(msg)
return common.NewApiError(http.StatusBadRequest, fmt.Errorf(msg))
}
err := b.hostApi.UpdateRole(ctx, &host.Host, models.HostRole(*hostRole), db)
err := b.hostApi.UpdateRole(ctx, &host.Host, models.HostRole(*hostRole), models.RoleInfoUserAssigned, db)
if err != nil {
log.WithError(err).Errorf("failed to set role <%s> host <%s>, infra env <%s>",
*hostRole, host.ID, host.InfraEnvID)
Expand Down
24 changes: 13 additions & 11 deletions internal/bminventory/inventory_test.go
Expand Up @@ -1094,9 +1094,10 @@ var _ = Describe("RegisterHost", func() {
for _, test := range []struct {
availability string
expectedRole models.HostRole
expectedInfo models.RoleInfo
}{
{availability: models.ClusterHighAvailabilityModeFull, expectedRole: models.HostRoleAutoAssign},
{availability: models.ClusterHighAvailabilityModeNone, expectedRole: models.HostRoleMaster},
{availability: models.ClusterHighAvailabilityModeFull, expectedRole: models.HostRoleAutoAssign, expectedInfo: models.RoleInfoUserAssigned},
{availability: models.ClusterHighAvailabilityModeNone, expectedRole: models.HostRoleMaster, expectedInfo: models.RoleInfoMinimalMasterCount},
} {
test := test

Expand All @@ -1110,6 +1111,7 @@ var _ = Describe("RegisterHost", func() {
DoAndReturn(func(ctx context.Context, h *models.Host, db *gorm.DB) error {
// validate that host is registered with auto-assign role
Expect(h.Role).Should(Equal(test.expectedRole))
Expect(h.RoleInfo).Should(Equal(test.expectedInfo))
Expect(h.InfraEnvID).Should(Equal(infraEnv.ID))
return nil
}).Times(1)
Expand Down Expand Up @@ -6017,7 +6019,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update host role success", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), models.HostRole("master"), gomock.Any()).Return(nil).Times(1)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), models.HostRole("master"), models.RoleInfoUserAssigned, gomock.Any()).Return(nil).Times(1)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
Expand All @@ -6034,7 +6036,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update host role failure", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), models.HostRole("master"), gomock.Any()).Return(fmt.Errorf("some error")).Times(1)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), models.HostRole("master"), models.RoleInfoUserAssigned, gomock.Any()).Return(fmt.Errorf("some error")).Times(1)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
Expand All @@ -6050,7 +6052,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update host name success", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), models.RoleInfoUserAssigned, gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), "somehostname", gomock.Any()).Return(nil).Times(1)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
Expand All @@ -6067,7 +6069,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update host name failure", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), models.RoleInfoUserAssigned, gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), "somehostname", gomock.Any()).Return(fmt.Errorf("some error")).Times(1)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
Expand All @@ -6083,7 +6085,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update host name invalid format", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), models.RoleInfoUserAssigned, gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
Expand All @@ -6099,7 +6101,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update disks config success", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), models.RoleInfoUserAssigned, gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), "somehostname", gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), diskID1).Return(nil).Times(1)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
Expand All @@ -6119,7 +6121,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update disks config invalid config, multiple boot disk", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), models.RoleInfoUserAssigned, gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), "somehostname", gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
Expand All @@ -6138,7 +6140,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update machine pool success", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), models.RoleInfoUserAssigned, gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), "machinepool").Return(nil).Times(1)
Expand All @@ -6155,7 +6157,7 @@ var _ = Describe("infraEnvs host", func() {
})

It("update machine pool failure", func() {
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateRole(gomock.Any(), gomock.Any(), gomock.Any(), models.RoleInfoUserAssigned, gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateHostname(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateInstallationDisk(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
mockHostApi.EXPECT().UpdateMachineConfigPoolName(gomock.Any(), gomock.Any(), gomock.Any(), "machinepool").Return(fmt.Errorf("some error")).Times(1)
Expand Down
4 changes: 2 additions & 2 deletions internal/host/common.go
Expand Up @@ -76,7 +76,7 @@ func refreshHostStageUpdateTime(
}

// update host role with an option to update only if the current role is srcRole to prevent races
func updateRole(log logrus.FieldLogger, h *models.Host, role models.HostRole, db *gorm.DB, srcRole *string) error {
func updateRole(log logrus.FieldLogger, h *models.Host, role models.HostRole, info models.RoleInfo, db *gorm.DB, srcRole *string) error {
hostStatus := swag.StringValue(h.Status)
allowedStatuses := append(hostStatusesBeforeInstallation[:], hostStatusesInInfraEnv[:]...)
if !funk.ContainsString(allowedStatuses, hostStatus) {
Expand All @@ -85,7 +85,7 @@ func updateRole(log logrus.FieldLogger, h *models.Host, role models.HostRole, db
hostStatus, hostStatusesBeforeInstallation[:]))
}

extras := append(make([]interface{}, 0), "role", role)
extras := append(make([]interface{}, 0), "role", role, "role_info", info)

if hostutil.IsDay2Host(h) && (h.MachineConfigPoolName == "" || h.MachineConfigPoolName == *srcRole) {
extras = append(extras, "machine_config_pool_name", role)
Expand Down
30 changes: 16 additions & 14 deletions internal/host/host.go
Expand Up @@ -138,7 +138,7 @@ type API interface {
PermanentHostsDeletion(olderThan strfmt.DateTime) error
ReportValidationFailedMetrics(ctx context.Context, h *models.Host, ocpVersion, emailDomain string) error

UpdateRole(ctx context.Context, h *models.Host, role models.HostRole, db *gorm.DB) error
UpdateRole(ctx context.Context, h *models.Host, role models.HostRole, info models.RoleInfo, db *gorm.DB) error
UpdateHostname(ctx context.Context, h *models.Host, hostname string, db *gorm.DB) error
UpdateInventory(ctx context.Context, h *models.Host, inventory string) error
RefreshInventory(ctx context.Context, cluster *common.Cluster, h *models.Host, db *gorm.DB) error
Expand Down Expand Up @@ -611,16 +611,16 @@ func (m *Manager) UpdateApiVipConnectivityReport(ctx context.Context, h *models.
return nil
}

func (m *Manager) UpdateRole(ctx context.Context, h *models.Host, role models.HostRole, db *gorm.DB) error {
func (m *Manager) UpdateRole(ctx context.Context, h *models.Host, role models.HostRole, info models.RoleInfo, db *gorm.DB) error {
cdb := m.db
if db != nil {
cdb = db
}

if h.Role == "" {
return updateRole(m.log, h, role, cdb, nil)
return updateRole(m.log, h, role, info, cdb, nil)
} else {
return updateRole(m.log, h, role, cdb, swag.String(string(h.Role)))
return updateRole(m.log, h, role, info, cdb, swag.String(string(h.Role)))
}
}

Expand Down Expand Up @@ -984,22 +984,22 @@ func (m *Manager) autoRoleSelection(ctx context.Context, h *models.Host, db *gor
return errors.Errorf("host %s from cluster %s don't have hardware info",
h.ID.String(), h.ClusterID.String())
}
role, err := m.selectRole(ctx, h, db)
role, info, err := m.selectRole(ctx, h, db)
if err != nil {
return err
}
// use sourced role to prevent races with user role setting
if err := updateRole(m.log, h, role, db, swag.String(string(models.HostRoleAutoAssign))); err != nil {
if err := updateRole(m.log, h, role, info, db, swag.String(string(models.HostRoleAutoAssign))); err != nil {
log.WithError(err).Errorf("failed to update role %s for host %s cluster %s",
role, h.ID.String(), h.ClusterID.String())
}
log.Infof("Auto selected role %s for host %s cluster %s", role, h.ID.String(), h.ClusterID.String())
log.Infof("Auto selected role %s for host %s cluster %s with reason %s", role, h.ID.String(), h.ClusterID.String(), info)
// pointer was changed in selectRole or after the update - need to take the host again
return db.Model(&models.Host{}).
Take(h, "id = ? and infra_env_id = ?", h.ID.String(), h.InfraEnvID.String()).Error
}

func (m *Manager) selectRole(ctx context.Context, h *models.Host, db *gorm.DB) (models.HostRole, error) {
func (m *Manager) selectRole(ctx context.Context, h *models.Host, db *gorm.DB) (models.HostRole, models.RoleInfo, error) {
var (
autoSelectedRole = models.HostRoleWorker
log = logutil.FromContext(ctx, m.log)
Expand All @@ -1008,35 +1008,37 @@ func (m *Manager) selectRole(ctx context.Context, h *models.Host, db *gorm.DB) (
)

if hostutil.IsDay2Host(h) {
return autoSelectedRole, nil
return autoSelectedRole, models.RoleInfoDay2, nil
}

// count already existing masters
mastersCount := 0
if err = db.Model(&models.Host{}).Where("cluster_id = ? and status != ? and role = ?",
h.ClusterID, models.HostStatusDisabled, models.HostRoleMaster).Count(&mastersCount).Error; err != nil {
log.WithError(err).Errorf("failed to count masters in cluster %s", h.ClusterID.String())
return autoSelectedRole, err
return autoSelectedRole, models.RoleInfoOther, err
}

if mastersCount < common.MinMasterHostsNeededForInstallation {
h.Role = models.HostRoleMaster
vc, err = newValidationContext(h, nil, nil, db, m.hwValidator)
if err != nil {
log.WithError(err).Errorf("failed to create new validation context for host %s", h.ID.String())
return autoSelectedRole, err
return autoSelectedRole, models.RoleInfoOther, err
}
conditions, _, err := m.rp.preprocess(vc)
if err != nil {
log.WithError(err).Errorf("failed to run validations on host %s", h.ID.String())
return autoSelectedRole, err
return autoSelectedRole, models.RoleInfoOther, err
}
if m.canBeMaster(conditions) {
return models.HostRoleMaster, nil
return models.HostRoleMaster, models.RoleInfoHwRequirements, nil
} else {
return autoSelectedRole, models.RoleInfoHwRequirements, nil
}
}

return autoSelectedRole, nil
return autoSelectedRole, models.RoleInfoMinimalMasterCount, nil
}

func (m *Manager) IsValidMasterCandidate(h *models.Host, c *common.Cluster, db *gorm.DB, log logrus.FieldLogger) (bool, error) {
Expand Down
22 changes: 15 additions & 7 deletions internal/host/host_test.go
Expand Up @@ -72,17 +72,19 @@ var _ = Describe("update_role", func() {
success := func(srcState string) {
host = hostutil.GenerateTestHost(id, infraEnvID, clusterID, srcState)
Expect(db.Create(&host).Error).ShouldNot(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, nil)).ShouldNot(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, models.RoleInfoUserAssigned, nil)).ShouldNot(HaveOccurred())
h := hostutil.GetHostFromDB(id, infraEnvID, db)
Expect(h.Role).To(Equal(models.HostRoleMaster))
Expect(h.RoleInfo).To(Equal(models.RoleInfoUserAssigned))
}

failure := func(srcState string) {
host = hostutil.GenerateTestHost(id, infraEnvID, clusterID, srcState)
Expect(db.Create(&host).Error).ShouldNot(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, nil)).To(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, models.RoleInfoUserAssigned, nil)).To(HaveOccurred())
h := hostutil.GetHostFromDB(id, infraEnvID, db)
Expect(h.Role).To(Equal(models.HostRoleWorker))
Expect(h.RoleInfo).To(Equal(models.RoleInfoUserAssigned))
}

tests := []struct {
Expand Down Expand Up @@ -151,30 +153,36 @@ var _ = Describe("update_role", func() {
By("rollback transaction", func() {
tx := db.Begin()
Expect(tx.Error).ShouldNot(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, tx)).NotTo(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, models.RoleInfoHwRequirements, tx)).NotTo(HaveOccurred())
Expect(tx.Rollback().Error).ShouldNot(HaveOccurred())
h := hostutil.GetHostFromDB(id, infraEnvID, db)
//verify that we are back to the initial values as generated by GenerateTestHost
Expect(h.Role).Should(Equal(models.HostRoleWorker))
Expect(h.RoleInfo).To(Equal(models.RoleInfoUserAssigned))
})
By("commit transaction", func() {
tx := db.Begin()
Expect(tx.Error).ShouldNot(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, tx)).NotTo(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, models.RoleInfoHwRequirements, tx)).NotTo(HaveOccurred())
Expect(tx.Commit().Error).ShouldNot(HaveOccurred())
h := hostutil.GetHostFromDB(id, infraEnvID, db)
//verify that the role fields have been updated
Expect(h.Role).Should(Equal(models.HostRoleMaster))
Expect(h.RoleInfo).To(Equal(models.RoleInfoHwRequirements))
})
})

It("update role master to worker", func() {
host = hostutil.GenerateTestHost(id, infraEnvID, clusterID, models.HostStatusKnown)
Expect(db.Create(&host).Error).ShouldNot(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, nil)).NotTo(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, models.HostRoleMaster, models.RoleInfoUserAssigned, nil)).NotTo(HaveOccurred())
h := hostutil.GetHostFromDB(id, infraEnvID, db)
Expect(h.Role).To(Equal(models.HostRoleMaster))
Expect(state.UpdateRole(ctx, &host, models.HostRoleWorker, nil)).NotTo(HaveOccurred())
Expect(h.RoleInfo).To(Equal(models.RoleInfoUserAssigned))
Expect(state.UpdateRole(ctx, &host, models.HostRoleWorker, models.RoleInfoUserAssigned, nil)).NotTo(HaveOccurred())
h = hostutil.GetHostFromDB(id, infraEnvID, db)
Expect(h.Role).To(Equal(models.HostRoleWorker))
Expect(h.RoleInfo).To(Equal(models.RoleInfoUserAssigned))
})

Context("update machine config pool", func() {
Expand Down Expand Up @@ -252,7 +260,7 @@ var _ = Describe("update_role", func() {
Expect(db.Create(&host).Error).ShouldNot(HaveOccurred())

// Test
Expect(state.UpdateRole(ctx, &host, t.role, nil)).NotTo(HaveOccurred())
Expect(state.UpdateRole(ctx, &host, t.role, models.RoleInfoUserAssigned, nil)).NotTo(HaveOccurred())
h := hostutil.GetHostFromDB(*host.ID, host.InfraEnvID, db)
Expect(h.Role).To(Equal(t.role))
Expect(h.MachineConfigPoolName).Should(Equal(t.expectedMachineConfigPool))
Expand Down

0 comments on commit 0ee840b

Please sign in to comment.