Skip to content

Commit

Permalink
*: Only use PGParameters from the cluster spec.
Browse files Browse the repository at this point in the history
With this patch the postgresql.conf will only include the defined
pgParameters in the cluster spec and not include other config files
anymore.

This makes the postgres config handling more consistent and lets us really
handle it in a centralized fashion without surprises.

When initializing a cluster we'd like to retain the previous pg
parameters:

* When initMode is new: the ones created by initdb when initializing a new db
* When initMode is existing: the current instance parameters
* When handling point in time recovery: the parameters provided by the
restored instance.

For doing this, the keeper will, during initialization, rename and
include the existing `postgresql.conf`, get the list of the modified
parameters, report them to the sentinel that will merge them back in the
cluster spec.
  • Loading branch information
sgotti committed Nov 9, 2016
1 parent eeef070 commit b312f3c
Show file tree
Hide file tree
Showing 12 changed files with 170 additions and 208 deletions.
58 changes: 35 additions & 23 deletions cmd/keeper/keeper.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,6 @@ type config struct {
pgListenAddress string
pgPort string
pgBinPath string
pgConfDir string
pgReplUsername string
pgReplPassword string
pgReplPasswordFile string
Expand Down Expand Up @@ -108,7 +107,6 @@ func init() {
cmdKeeper.PersistentFlags().StringVar(&cfg.pgListenAddress, "pg-listen-address", "localhost", "postgresql instance listening address")
cmdKeeper.PersistentFlags().StringVar(&cfg.pgPort, "pg-port", "5432", "postgresql instance listening port")
cmdKeeper.PersistentFlags().StringVar(&cfg.pgBinPath, "pg-bin-path", "", "absolute path to postgresql binaries. If empty they will be searched in the current PATH")
cmdKeeper.PersistentFlags().StringVar(&cfg.pgConfDir, "pg-conf-dir", "", "absolute path to user provided postgres configuration. If empty a default dir under $dataDir/postgres/conf.d will be used")
cmdKeeper.PersistentFlags().StringVar(&cfg.pgReplUsername, "pg-repl-username", "", "postgres replication user name. Required. It'll be created on db initialization. Requires --pg-repl-password or --pg-repl-passwordfile. Must be the same for all keepers.")
cmdKeeper.PersistentFlags().StringVar(&cfg.pgReplPassword, "pg-repl-password", "", "postgres replication user password. Required. Only one of --pg-repl-password or --pg-repl-passwordfile is required. Must be the same for all keepers.")
cmdKeeper.PersistentFlags().StringVar(&cfg.pgReplPasswordFile, "pg-repl-passwordfile", "", "postgres replication user password file. Only one of --pg-repl-password or --pg-repl-passwordfile is required. Must be the same for all keepers.")
Expand All @@ -118,13 +116,26 @@ func init() {
cmdKeeper.PersistentFlags().BoolVar(&cfg.debug, "debug", false, "enable debug logging")
}

var mandatoryPGParameters = pg.Parameters{
// mandatoryPGParameters are the postgres parameters that the keeper always
// sets, since they are required for stolon's streaming-replication / hot
// standby operation.
var mandatoryPGParameters = common.Parameters{
	"unix_socket_directories": "/tmp",
	"wal_level":               "hot_standby",
	"wal_keep_segments":       "8",
	"hot_standby":             "on",
}

// managedPGParameters lists the postgres parameters directly managed by
// stolon. They are filtered out of the parameters reported in the keeper's
// PG state (see GetPGState) so the sentinel won't merge them back into the
// cluster spec's pgParameters.
var managedPGParameters = []string{
	"unix_socket_directories",
	"wal_level",
	"wal_keep_segments",
	"hot_standby",
	"listen_addresses",
	"port",
	"max_replication_slots",
	"max_wal_senders",
	"wal_log_hints",
	"synchronous_standby_names",
}

func readPasswordFromFile(filepath string) (string, error) {
fi, err := os.Lstat(filepath)
if err != nil {
Expand Down Expand Up @@ -188,8 +199,8 @@ func (p *PostgresKeeper) getOurReplConnParams() pg.ConnParams {
}
}

func (p *PostgresKeeper) createPGParameters(db *cluster.DB) pg.Parameters {
parameters := pg.Parameters{}
func (p *PostgresKeeper) createPGParameters(db *cluster.DB) common.Parameters {
parameters := common.Parameters{}
// Copy user defined pg parameters
for k, v := range db.Spec.PGParameters {
parameters[k] = v
Expand Down Expand Up @@ -243,7 +254,6 @@ type PostgresKeeper struct {
pgListenAddress string
pgPort string
pgBinPath string
pgConfDir string
pgReplUsername string
pgReplPassword string
pgSUUsername string
Expand Down Expand Up @@ -289,7 +299,6 @@ func NewPostgresKeeper(cfg *config, stop chan bool, end chan error) (*PostgresKe
pgListenAddress: cfg.pgListenAddress,
pgPort: cfg.pgPort,
pgBinPath: cfg.pgBinPath,
pgConfDir: cfg.pgConfDir,
pgReplUsername: cfg.pgReplUsername,
pgReplPassword: cfg.pgReplPassword,
pgSUUsername: cfg.pgSUUsername,
Expand Down Expand Up @@ -414,6 +423,21 @@ func (p *PostgresKeeper) GetPGState(pctx context.Context) (*cluster.PostgresStat
return nil, err
}
if initialized {
pgParameters, err := p.pgm.GetConfigFilePGParameters()
if err != nil {
log.Errorf("error: %v", err)
return pgState, nil
}
log.Debugf("pgParameters: %v", pgParameters)
filteredPGParameters := common.Parameters{}
for k, v := range pgParameters {
if !util.StringInSlice(managedPGParameters, k) {
filteredPGParameters[k] = v
}
}
log.Debugf("filteredPGParameters: %v", filteredPGParameters)
pgState.PGParameters = filteredPGParameters

sd, err := p.pgm.GetSystemData()
if err != nil {
pgState.Healthy = false
Expand Down Expand Up @@ -482,8 +506,8 @@ func (p *PostgresKeeper) Start() {

// TODO(sgotti) reconfigure the various configurations options
// (RequestTimeout) after a changed cluster config
pgParameters := make(postgresql.Parameters)
pgm := postgresql.NewManager(p.pgBinPath, p.dataDir, p.pgConfDir, pgParameters, p.getLocalConnParams(), p.getOurReplConnParams(), p.pgSUUsername, p.pgSUPassword, p.pgReplUsername, p.pgReplPassword, p.requestTimeout)
pgParameters := make(common.Parameters)
pgm := postgresql.NewManager(p.pgBinPath, p.dataDir, pgParameters, p.getLocalConnParams(), p.getOurReplConnParams(), p.pgSUUsername, p.pgSUPassword, p.pgReplUsername, p.pgReplPassword, p.requestTimeout)
p.pgm = pgm

p.pgm.Stop(true)
Expand Down Expand Up @@ -669,6 +693,8 @@ func (p *PostgresKeeper) postgresKeeperSM(pctx context.Context) {
// update pgm postgres parameters
pgm.SetParameters(pgParameters)

pgm.SetIncludeConfig(db.Spec.IncludeConfig)

dbls := p.dbLocalState
if dbls.Initializing {
// If we are here this means that the db initialization or
Expand Down Expand Up @@ -1127,20 +1153,6 @@ func keeper(cmd *cobra.Command, args []string) {
log.Fatalf("error: %v", err)
}

if cfg.pgConfDir != "" {
if !filepath.IsAbs(cfg.pgConfDir) {
log.Fatalf("pg-conf-dir must be an absolute path")
}
var fi os.FileInfo
fi, err = os.Stat(cfg.pgConfDir)
if err != nil {
log.Fatalf("cannot stat pg-conf-dir: %v", err)
}
if !fi.IsDir() {
log.Fatalf("pg-conf-dir is not a directory")
}
}

if cfg.pgReplUsername == "" {
log.Fatalf("--pg-repl-username is required")
}
Expand Down
31 changes: 23 additions & 8 deletions cmd/sentinel/sentinel.go
Original file line number Diff line number Diff line change
Expand Up @@ -472,6 +472,7 @@ func (s *Sentinel) updateDBsStatus(cd *cluster.ClusterData, dbStates map[string]
db.Status.ListenAddress = dbs.ListenAddress
db.Status.Port = dbs.Port
db.Status.CurrentGeneration = dbs.Generation
db.Status.PGParameters = cluster.PGParameters(dbs.PGParameters)
if dbs.Healthy {
db.CleanError()
db.Status.SystemID = dbs.SystemID
Expand Down Expand Up @@ -532,10 +533,11 @@ func (s *Sentinel) updateCluster(cd *cluster.ClusterData) (*cluster.ClusterData,
Generation: cluster.InitialGeneration,
ChangeTime: time.Now(),
Spec: &cluster.DBSpec{
KeeperUID: k.UID,
InitMode: cluster.DBInitModeNew,
Role: common.RoleMaster,
Followers: []string{},
KeeperUID: k.UID,
InitMode: cluster.DBInitModeNew,
Role: common.RoleMaster,
Followers: []string{},
IncludeConfig: *cd.Cluster.Spec.MergePgParameters,
},
}
newcd.DBs[db.UID] = db
Expand All @@ -553,6 +555,12 @@ func (s *Sentinel) updateCluster(cd *cluster.ClusterData) (*cluster.ClusterData,
log.Infof("db %q on keeper %q initialized", db.UID, db.Spec.KeeperUID)
// Set db initMode to none, not needed but just a security measure
db.Spec.InitMode = cluster.DBInitModeNone
// Don't include previous config anymore
db.Spec.IncludeConfig = false
// Replace reported pg parameters in cluster spec
if *cd.Cluster.Spec.MergePgParameters {
newcd.Cluster.Spec.PGParameters = db.Status.PGParameters
}
// Cluster initialized, switch to Normal state
newcd.Cluster.Status.Phase = cluster.ClusterPhaseNormal
case Converging:
Expand Down Expand Up @@ -582,10 +590,11 @@ func (s *Sentinel) updateCluster(cd *cluster.ClusterData) (*cluster.ClusterData,
Generation: cluster.InitialGeneration,
ChangeTime: time.Now(),
Spec: &cluster.DBSpec{
KeeperUID: k.UID,
InitMode: cluster.DBInitModeNone,
Role: common.RoleMaster,
Followers: []string{},
KeeperUID: k.UID,
InitMode: cluster.DBInitModeNone,
Role: common.RoleMaster,
Followers: []string{},
IncludeConfig: *cd.Cluster.Spec.MergePgParameters,
},
}
newcd.DBs[db.UID] = db
Expand All @@ -600,6 +609,12 @@ func (s *Sentinel) updateCluster(cd *cluster.ClusterData) (*cluster.ClusterData,
// TODO(sgotti) set a timeout (the max time for a noop operation, just a start/restart)
if s.dbConvergenceState(cd, db, 0) == Converged {
log.Infof("db %q on keeper %q initialized", db.UID, db.Spec.KeeperUID)
// Don't include previous config anymore
db.Spec.IncludeConfig = false
// Replace reported pg parameters in cluster spec
if *cd.Cluster.Spec.MergePgParameters {
newcd.Cluster.Spec.PGParameters = db.Status.PGParameters
}
// Cluster initialized, switch to Normal state
newcd.Cluster.Status.Phase = cluster.ClusterPhaseNormal
}
Expand Down
12 changes: 12 additions & 0 deletions cmd/sentinel/sentinel_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ import (
"reflect"
)

var valTrue = true

func TestUpdateCluster(t *testing.T) {
tests := []struct {
cd *cluster.ClusterData
Expand All @@ -47,6 +49,7 @@ func TestUpdateCluster(t *testing.T) {
UsePgrewind: true,
PGParameters: cluster.PGParameters{"param01": "value01", "param02": "value02"},
InitMode: cluster.ClusterInitModeNew,
MergePgParameters: &valTrue,
},
Status: cluster.ClusterStatus{
CurrentGeneration: 1,
Expand All @@ -70,6 +73,7 @@ func TestUpdateCluster(t *testing.T) {
UsePgrewind: true,
PGParameters: cluster.PGParameters{"param01": "value01", "param02": "value02"},
InitMode: cluster.ClusterInitModeNew,
MergePgParameters: &valTrue,
},
Status: cluster.ClusterStatus{
CurrentGeneration: 1,
Expand Down Expand Up @@ -97,6 +101,7 @@ func TestUpdateCluster(t *testing.T) {
UsePgrewind: true,
PGParameters: cluster.PGParameters{"param01": "value01", "param02": "value02"},
InitMode: cluster.ClusterInitModeNew,
MergePgParameters: &valTrue,
},
Status: cluster.ClusterStatus{
CurrentGeneration: 1,
Expand Down Expand Up @@ -128,6 +133,7 @@ func TestUpdateCluster(t *testing.T) {
UsePgrewind: true,
PGParameters: cluster.PGParameters{"param01": "value01", "param02": "value02"},
InitMode: cluster.ClusterInitModeNew,
MergePgParameters: &valTrue,
},
Status: cluster.ClusterStatus{
CurrentGeneration: 1,
Expand Down Expand Up @@ -159,6 +165,7 @@ func TestUpdateCluster(t *testing.T) {
InitMode: cluster.DBInitModeNew,
Role: common.RoleMaster,
Followers: []string{},
IncludeConfig: true,
},
},
},
Expand All @@ -180,6 +187,7 @@ func TestUpdateCluster(t *testing.T) {
UsePgrewind: true,
PGParameters: cluster.PGParameters{"param01": "value01", "param02": "value02"},
InitMode: cluster.ClusterInitModeNew,
MergePgParameters: &valTrue,
},
Status: cluster.ClusterStatus{
CurrentGeneration: 1,
Expand Down Expand Up @@ -218,6 +226,7 @@ func TestUpdateCluster(t *testing.T) {
UsePgrewind: true,
PGParameters: cluster.PGParameters{"param01": "value01", "param02": "value02"},
InitMode: cluster.ClusterInitModeNew,
MergePgParameters: &valTrue,
},
Status: cluster.ClusterStatus{
CurrentGeneration: 1,
Expand Down Expand Up @@ -256,6 +265,7 @@ func TestUpdateCluster(t *testing.T) {
InitMode: cluster.DBInitModeNew,
Role: common.RoleMaster,
Followers: []string{},
IncludeConfig: true,
},
},
},
Expand All @@ -272,6 +282,7 @@ func TestUpdateCluster(t *testing.T) {
ConvergenceTimeout: cluster.Duration{Duration: cluster.DefaultConvergenceTimeout},
InitTimeout: cluster.Duration{Duration: cluster.DefaultInitTimeout},
InitMode: cluster.ClusterInitModeNew,
MergePgParameters: &valTrue,
},
Status: cluster.ClusterStatus{
CurrentGeneration: 1,
Expand Down Expand Up @@ -321,6 +332,7 @@ func TestUpdateCluster(t *testing.T) {
ConvergenceTimeout: cluster.Duration{Duration: cluster.DefaultConvergenceTimeout},
InitTimeout: cluster.Duration{Duration: cluster.DefaultInitTimeout},
InitMode: cluster.ClusterInitModeNew,
MergePgParameters: &valTrue,
},
Status: cluster.ClusterStatus{
CurrentGeneration: 1,
Expand Down
7 changes: 7 additions & 0 deletions common/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ package common

import (
"fmt"
"reflect"
"strings"

"github.com/satori/go.uuid"
Expand Down Expand Up @@ -54,3 +55,9 @@ func NameFromStolonName(stolonName string) string {
// IsStolonName reports whether name is a stolon-internal name, i.e. whether
// it starts with the stolon name prefix.
func IsStolonName(name string) bool {
	return strings.HasPrefix(name, stolonPrefix)
}

// Parameters is a set of postgres configuration parameters, mapping each
// parameter name to its (unquoted) value.
type Parameters map[string]string

// Equals reports whether p and other contain exactly the same
// parameter/value pairs. Like reflect.DeepEqual, it distinguishes a nil
// map from an empty one.
func (p Parameters) Equals(other Parameters) bool {
	return reflect.DeepEqual(p, other)
}
3 changes: 2 additions & 1 deletion doc/cluster_spec.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,8 @@ Some options in a running cluster specification can be changed to update the des
| usePgrewind | try to use pg_rewind for faster instance resyncronization. | no | bool | false |
| initMode | The cluster initialization mode. Can be *new* or *existing*. *new* means that a new db cluster will be created on a random keeper and the other keepers will sync with it. *existing* means that a keeper (that needs to have an already created db cluster) will be chosen as the initial master and the other keepers will sync with it. In this case the `existingConfig` object needs to be populated. | yes | string | |
| existingConfig | configuration for initMode of type "existing" | if initMode is "existing" | ExistingConfig | |
| pgParameters | a map containing the postgres server parameters and their values. | no | map[string]string | |
| mergePgParameters | merge the pgParameters of the initialized db cluster back into the cluster spec; useful to retain the initdb-generated parameters when initMode is new, or the current instance parameters when initMode is existing or pitr. | no | bool | true |
| pgParameters | a map containing the postgres server parameters and their values. The parameter values don't have to be quoted and single quotes don't have to be doubled, since this is already done by the keeper when writing the postgresql.conf file | no | map[string]string | |

#### ExistingConfig

Expand Down
4 changes: 4 additions & 0 deletions doc/initialization.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ This is the same as passing a cluster specification with `initMode` set to `new`
stolonctl init '{ "initMode": "new" }'
```

The postgres parameters generated by the `initdb` command will be merged back inside the cluster specification `pgParameters` map. See the related [postgres parameters](postgres_parameters.md) documentation.

### Initialize a new stolon cluster using an existing keeper

This can be useful if you want, for whatever reason, to force a new master (with all the possible problems caused by doing this).
Expand All @@ -26,6 +28,8 @@ Given the declarative nature of the cluster specification you cannot force a new
stolonctl init '{ "initMode": "existing", "existingConfig": { "keeperUID": "keeper01" } }'
```

The existing instance postgres parameters will be merged back inside the cluster specification `pgParameters` map. See the related [postgres parameters](postgres_parameters.md) documentation.

### First time initialization without stolonctl

You can also provide the `--initial-cluster-spec` option to the `stolon-sentinel` but this will work only when the clusterdata in the store is empty.
Expand Down
Loading

0 comments on commit b312f3c

Please sign in to comment.