Merge pull request #9 from sapcc/revert-8-orphaned-gauge-vec-fix
Revert "Cleanup orphaned gauge vectors"
talal committed Sep 17, 2020
2 parents ddb0d7a + 36492ed commit 8945ec4
Showing 4 changed files with 22 additions and 27 deletions.
14 changes: 1 addition & 13 deletions CHANGELOG.md
@@ -7,17 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
-## [0.6.1] - 2020-09-16
-
-### Changed
-
-- Delete the reconcile time metric for orphaned `PrometheusRule` in advance,
-  regardless of the cleanup error status.
-
-### Fixed
-
-- Use a single loop for controller worker.
-
 ## [0.6.0] - 2020-08-26
 
 ### Added
@@ -74,8 +63,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 - Initial release.
 
-[unreleased]: https://github.com/sapcc/absent-metrics-operator/compare/v0.6.1...HEAD
-[0.6.1]: https://github.com/sapcc/absent-metrics-operator/compare/v0.6.0...v0.6.1
+[unreleased]: https://github.com/sapcc/absent-metrics-operator/compare/v0.6.0...HEAD
 [0.6.0]: https://github.com/sapcc/absent-metrics-operator/compare/v0.5.2...v0.6.0
 [0.5.2]: https://github.com/sapcc/absent-metrics-operator/compare/v0.5.1...v0.5.2
 [0.5.1]: https://github.com/sapcc/absent-metrics-operator/compare/v0.5.0...v0.5.1
1 change: 0 additions & 1 deletion internal/controller/absent_prometheusrule.go
@@ -149,7 +149,6 @@ OuterLoop:
 			// This RuleGroup should be carried over as is.
 			new = append(new, oldG)
 		}
-
 		// Add the pending RuleGroups.
 		for _, g := range absentAlertRuleGroups {
 			if !updated[g.Name] {
2 changes: 1 addition & 1 deletion internal/controller/cleanup.go
@@ -84,10 +84,10 @@ func (c *Controller) cleanUpOrphanedAbsentAlertsCluster() error {
 
 			aPR := &absentPrometheusRule{PrometheusRule: pr}
 			for n := range cleanup {
-				c.metrics.SuccessfulPrometheusRuleReconcileTime.DeleteLabelValues(namespace, n)
 				if err := c.cleanUpOrphanedAbsentAlerts(n, aPR); err != nil {
 					return err
 				}
+				c.metrics.SuccessfulPrometheusRuleReconcileTime.DeleteLabelValues(namespace, n)
 			}
 		}
 	}
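The cleanup.go hunk above restores the ordering in which the per-rule reconcile-time series is removed from the gauge vector only after the orphaned absent alerts were cleaned up without error. A minimal, self-contained sketch of that pattern follows; the metric name, labels, and cleanup callback are illustrative stand-ins, not the operator's actual types.

// Sketch of the "delete the series only after a successful cleanup" ordering
// restored by this revert. All names below are hypothetical examples.
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var reconcileTime = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "example_reconcile_time_seconds", // hypothetical metric name
		Help: "Time of the last successful reconciliation per rule.",
	},
	[]string{"namespace", "name"},
)

// cleanUpOrphaned stands in for the controller's cleanup call: on failure the
// series is kept (and the error bubbles up), on success the series is dropped.
func cleanUpOrphaned(namespace, name string, clean func() error) error {
	if err := clean(); err != nil {
		return err
	}
	reconcileTime.DeleteLabelValues(namespace, name)
	return nil
}

func main() {
	reconcileTime.WithLabelValues("monitoring", "orphaned-rule").Set(42)

	// A failed cleanup leaves the series in place.
	_ = cleanUpOrphaned("monitoring", "orphaned-rule", func() error { return errors.New("boom") })

	// A successful cleanup removes it.
	fmt.Println(cleanUpOrphaned("monitoring", "orphaned-rule", func() error { return nil }))
}

The flip side is that a cleanup error now leaves the stale series exported until a later run succeeds, which is what the reverted 0.6.1 change had avoided by deleting it up front.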
32 changes: 20 additions & 12 deletions internal/controller/controller.go
@@ -235,21 +235,27 @@ func (c *Controller) runWorker() {
 	defer reconcileT.Stop()
 	maintenanceT := time.NewTicker(maintenancePeriod)
 	defer maintenanceT.Stop()
-	for {
-		select {
-		case <-reconcileT.C:
-			c.enqueueAllObjects()
-		case <-maintenanceT.C:
-			if err := c.cleanUpOrphanedAbsentAlertsCluster(); err != nil {
-				c.logger.ErrorWithBackoff("msg", "could not cleanup orphaned absent alerts from cluster",
-					"err", err)
-			}
-		default:
-			if ok := c.processNextWorkItem(); !ok {
+	done := make(chan struct{})
+	go func() {
+		for {
+			select {
+			case <-done:
 				return
+			case <-reconcileT.C:
+				c.enqueueAllObjects()
+			case <-maintenanceT.C:
+				if err := c.cleanUpOrphanedAbsentAlertsCluster(); err != nil {
+					c.logger.ErrorWithBackoff("msg", "could not cleanup orphaned absent alerts from cluster",
+						"err", err)
+				}
 			}
 		}
+	}()
+
+	for c.processNextWorkItem() {
 	}
+
+	done <- struct{}{}
 }
 
 // processNextWorkItem will read a single work item off the workqueue and
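The runWorker hunk above reinstates the two-loop worker: the tickers run in a helper goroutine that is stopped through a done channel, while the worker itself blocks on draining the work queue. A stripped-down, runnable sketch of that shape follows; the queue, periods, and task bodies are placeholders rather than the controller's real workqueue and methods.

// Sketch of the restored worker shape: periodic tasks in a goroutine stopped via
// a done channel, plus a blocking loop that drains work items. Names are placeholders.
package main

import (
	"fmt"
	"time"
)

type worker struct {
	items chan string // stands in for the controller's workqueue
}

// processNextWorkItem handles one item; it returns false once the queue is closed,
// signalling runWorker to shut down.
func (w *worker) processNextWorkItem() bool {
	item, ok := <-w.items
	if !ok {
		return false
	}
	fmt.Println("reconciling", item)
	return true
}

func (w *worker) runWorker() {
	reconcileT := time.NewTicker(50 * time.Millisecond) // placeholder periods
	defer reconcileT.Stop()
	maintenanceT := time.NewTicker(80 * time.Millisecond)
	defer maintenanceT.Stop()

	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-done:
				return
			case <-reconcileT.C:
				fmt.Println("enqueue all objects") // periodic resync
			case <-maintenanceT.C:
				fmt.Println("clean up orphaned absent alerts") // periodic maintenance
			}
		}
	}()

	// Block here until the queue is shut down.
	for w.processNextWorkItem() {
	}

	// Stop the ticker goroutine before returning.
	done <- struct{}{}
}

func main() {
	w := &worker{items: make(chan string, 2)}
	w.items <- "monitoring/some-rule"
	w.items <- "monitoring/another-rule"
	go func() {
		time.Sleep(200 * time.Millisecond)
		close(w.items) // simulate queue shutdown
	}()
	w.runWorker()
}

One practical difference, assuming processNextWorkItem blocks on the queue: in the reverted single-loop variant the ticker cases could only be serviced between work items, whereas here the helper goroutine handles them independently of the blocking queue read.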
@@ -309,8 +315,10 @@ func (c *Controller) syncHandler(key string) error {
 		// The resource may no longer exist, in which case we clean up any
 		// orphaned absent alerts.
 		c.logger.Debug("msg", "PrometheusRule no longer exists", "key", key)
-		c.metrics.SuccessfulPrometheusRuleReconcileTime.DeleteLabelValues(namespace, name)
 		err = c.cleanUpOrphanedAbsentAlertsNamespace(name, namespace)
+		if err == nil {
+			c.metrics.SuccessfulPrometheusRuleReconcileTime.DeleteLabelValues(namespace, name)
+		}
 	default:
 		// Requeue object for later processing.
 		return err
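The syncHandler hunk likewise drops the reconcile-time series only once the orphan cleanup returned without error. If one wanted to check that behaviour in isolation, client_golang's testutil package can count the series a collector currently exports; a rough sketch against a hypothetical gauge vector, not the operator's metrics struct:

// Rough sketch of verifying that a per-rule series disappears after a successful
// cleanup, using prometheus/client_golang's testutil helpers on a hypothetical vector.
package example

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestSeriesRemovedAfterSuccessfulCleanup(t *testing.T) {
	vec := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "example_reconcile_time_seconds", Help: "example"},
		[]string{"namespace", "name"},
	)
	vec.WithLabelValues("monitoring", "orphaned-rule").Set(1)

	if got := testutil.CollectAndCount(vec); got != 1 {
		t.Fatalf("expected 1 series before cleanup, got %d", got)
	}

	// Simulate the err == nil path: cleanup succeeded, so the series is dropped.
	vec.DeleteLabelValues("monitoring", "orphaned-rule")

	if got := testutil.CollectAndCount(vec); got != 0 {
		t.Fatalf("expected 0 series after cleanup, got %d", got)
	}
}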
