Skip to content

Commit

Permalink
Alertmanager: Allow usage with local storage type, with appropriate…
Browse files Browse the repository at this point in the history
… warnings. (#1836)

An oversight when we removed non-sharding modes of operation is that the `local`
storage type stopped working. Unfortunately it is not conceptually simple to
support this type fully, as alertmanager requires remote storage shared between
all replicas, to support recovering tenant state to an arbitrary replica
following an all-replica outage.

To support provisioning of alerts with `local` storage, but persisting of state
to remote storage, we would need to allow different storage configurations.

This change fixes the issue in a more naive way, so that the alertmanager can at
least be started up for testing or development purposes, but persisting state
will always fail. A second PR will propose allowing the `Persister` to be
disabled.

Although this configuration is not recommended for production use, as long as
the number of replicas is equal to the replication factor, then tenants will
never move between replicas, and so the local snapshot behaviour of the upstream
alertmanager will be sufficient.

Fixes #1638
  • Loading branch information
stevesg committed May 10, 2022
1 parent 00f7738 commit 8738cc8
Show file tree
Hide file tree
Showing 9 changed files with 76 additions and 18 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@
* [BUGFIX] Multikv: Fix watching for runtime config changes in `multi` KV store in ruler and querier. #1665
* [BUGFIX] Memcached: allow to use CNAME DNS records for the memcached backend addresses. #1654
* [BUGFIX] Querier: fixed temporary partial query results when shuffle sharding is enabled and hash ring backend storage is flushed / reset. #1829
* [BUGFIX] Alertmanager: Allow usage with `-alertmanager-storage.backend=local`. Note that when using this storage type, the Alertmanager is not able to persist state remotely, so it is not recommended for production use. #1836
* [BUGFIX] Alertmanager: Do not validate alertmanager configuration if it's not running. #1835

### Mixin
Expand Down
37 changes: 37 additions & 0 deletions integration/alertmanager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,43 @@ func TestAlertmanager(t *testing.T) {
require.Equal(t, "Accept-Encoding", res.Header.Get("Vary"))
}

// TestAlertmanagerLocalStore verifies that the Alertmanager starts and serves
// tenant configuration when backed by the `local` storage type (configs are
// read from the shared directory rather than an object store).
func TestAlertmanagerLocalStore(t *testing.T) {
	scenario, err := e2e.NewScenario(networkName)
	require.NoError(t, err)
	defer scenario.Close()

	// The sharding ring still needs a KV store even with a single replica.
	kvStore := e2edb.NewConsul()
	require.NoError(t, scenario.StartAndWaitReady(kvStore))

	// Provision the tenant config where the local store expects to find it.
	require.NoError(t, writeFileToSharedDir(scenario, "alertmanager_configs/user-1.yaml", []byte(mimirAlertmanagerUserConfigYaml)))

	flags := mergeFlags(
		AlertmanagerFlags(),
		AlertmanagerLocalFlags(),
		AlertmanagerShardingFlags(kvStore.NetworkHTTPEndpoint(), 1),
	)
	am := e2emimir.NewAlertmanager("alertmanager", flags)
	require.NoError(t, scenario.StartAndWaitReady(am))

	// Wait until the tenant config has been loaded successfully.
	require.NoError(t, am.WaitSumMetrics(e2e.Equals(1), "cortex_alertmanager_config_last_reload_successful"))
	require.NoError(t, am.WaitSumMetrics(e2e.Greater(0), "cortex_alertmanager_config_hash"))

	client, err := e2emimir.NewClient("", "", am.HTTPEndpoint(), "", "user-1")
	require.NoError(t, err)

	amCfg, err := client.GetAlertmanagerConfig(context.Background())
	require.NoError(t, err)

	// Ensure the returned status config matches alertmanager_test_fixtures/user-1.yaml
	require.NotNil(t, amCfg)
	require.Equal(t, "example_receiver", amCfg.Route.Receiver)
	require.Len(t, amCfg.Route.GroupByStr, 1)
	require.Equal(t, "example_groupby", amCfg.Route.GroupByStr[0])
	require.Len(t, amCfg.Receivers, 1)
	require.Equal(t, "example_receiver", amCfg.Receivers[0].Name)
}

func TestAlertmanagerStoreAPI(t *testing.T) {
s, err := e2e.NewScenario(networkName)
require.NoError(t, err)
Expand Down
9 changes: 9 additions & 0 deletions integration/configs.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,12 @@ package integration
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"text/template"

e2e "github.com/grafana/e2e"
e2edb "github.com/grafana/e2e/db"
)

Expand Down Expand Up @@ -95,6 +97,13 @@ var (
}
}

// AlertmanagerLocalFlags returns the CLI flags to run the Alertmanager with
// the `local` storage backend, reading tenant configs from the
// "alertmanager_configs" directory inside the shared e2e container dir.
AlertmanagerLocalFlags = func() map[string]string {
	return map[string]string{
		"-alertmanager-storage.backend":    "local",
		"-alertmanager-storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"),
	}
}

AlertmanagerS3Flags = func() map[string]string {
return map[string]string{
"-alertmanager-storage.backend": "s3",
Expand Down
10 changes: 0 additions & 10 deletions pkg/alertmanager/alertstore/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,3 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.Local.RegisterFlagsWithPrefix(prefix, f)
cfg.RegisterFlagsWithPrefixAndDefaultDirectory(prefix, "alertmanager", f)
}

// IsFullStateSupported returns if the given configuration supports access to
// FullState objects, i.e. whether the configured backend is one of the
// supported object-store bucket backends.
func (cfg *Config) IsFullStateSupported() bool {
	supported := false
	for _, b := range bucket.SupportedBackends {
		if b == cfg.Backend {
			supported = true
			break
		}
	}
	return supported
}
4 changes: 2 additions & 2 deletions pkg/alertmanager/alertstore/local/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,12 +108,12 @@ func (f *Store) DeleteAlertConfig(_ context.Context, user string) error {

// ListUsersWithFullState implements alertstore.AlertStore.
func (f *Store) ListUsersWithFullState(ctx context.Context) ([]string, error) {
return nil, errState
return []string{}, nil
}

// GetFullState implements alertstore.AlertStore.
func (f *Store) GetFullState(ctx context.Context, user string) (alertspb.FullStateDesc, error) {
return alertspb.FullStateDesc{}, errState
return alertspb.FullStateDesc{}, alertspb.ErrNotFound
}

// SetFullState implements alertstore.AlertStore.
Expand Down
24 changes: 24 additions & 0 deletions pkg/alertmanager/alertstore/local/store_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,30 @@ func TestStore_GetAlertConfigs(t *testing.T) {
}
}

// TestStore_FullState verifies the local store's FullState contract: reads
// behave as if nothing is persisted, and writes always fail.
func TestStore_FullState(t *testing.T) {
	ctx := context.Background()
	localStore, _ := prepareLocalStore(t)

	// FullState not persisted - List always returns no users.
	users, err := localStore.ListUsersWithFullState(ctx)
	require.NoError(t, err)
	assert.Empty(t, users)

	// FullState not persisted - Get always returns NotFound.
	if _, err = localStore.GetFullState(ctx, "user-1"); true {
		require.ErrorIs(t, err, alertspb.ErrNotFound)
	}

	// Any attempt to write the store fails.
	require.ErrorIs(t, localStore.SetFullState(ctx, "user-1", alertspb.FullStateDesc{}), errState)
	require.ErrorIs(t, localStore.DeleteFullState(ctx, "user-1"), errState)
}

func prepareLocalStore(t *testing.T) (store *Store, storeDir string) {
var err error

Expand Down
1 change: 1 addition & 0 deletions pkg/alertmanager/alertstore/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ type AlertStore interface {
// NewAlertStore returns a alertmanager store backend client based on the provided cfg.
func NewAlertStore(ctx context.Context, cfg Config, cfgProvider bucket.TenantConfigProvider, logger log.Logger, reg prometheus.Registerer) (AlertStore, error) {
if cfg.Backend == local.Name {
level.Warn(logger).Log("msg", "-alertmanager-storage.backend=local is not suitable for persisting alertmanager state between replicas (silences, notifications); you should switch to an external object store for production use")
return local.NewStore(cfg.Local)
}

Expand Down
4 changes: 0 additions & 4 deletions pkg/alertmanager/multitenant.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,6 @@ const (
var (
errEmptyExternalURL = errors.New("-alertmanager.web.external-url cannot be empty")
errInvalidExternalURL = errors.New("the configured external URL is invalid: should not end with /")
errShardingUnsupportedStorage = errors.New("the configured alertmanager storage backend is not supported when sharding is enabled")
errZoneAwarenessEnabledWithoutZoneInfo = errors.New("the configured alertmanager has zone awareness enabled but zone is not set")
errNotUploadingFallback = errors.New("not uploading fallback configuration")
)
Expand Down Expand Up @@ -129,9 +128,6 @@ func (cfg *MultitenantAlertmanagerConfig) Validate(storageCfg alertstore.Config)
return err
}

if !storageCfg.IsFullStateSupported() {
return errShardingUnsupportedStorage
}
if cfg.ShardingRing.ZoneAwarenessEnabled && cfg.ShardingRing.InstanceZone == "" {
return errZoneAwarenessEnabledWithoutZoneInfo
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/alertmanager/multitenant_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -165,11 +165,11 @@ func TestMultitenantAlertmanagerConfig_Validate(t *testing.T) {
},
expected: nil,
},
"should fail if new storage store configuration given with local type": {
"should succeed if new storage store configuration given with local type": {
setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig, storageCfg *alertstore.Config) {
storageCfg.Backend = "local"
},
expected: errShardingUnsupportedStorage,
expected: nil,
},
"should fail if zone aware is enabled but zone is not set": {
setup: func(t *testing.T, cfg *MultitenantAlertmanagerConfig, storageCfg *alertstore.Config) {
Expand Down

0 comments on commit 8738cc8

Please sign in to comment.