Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 3 additions & 6 deletions cmd/mysql-helper/apphelper/apphelper.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,12 +147,9 @@ func waitForMysqlReady() error {
}

func configReadOnly() error {
var query string
if tb.NodeRole() == "master" {
query = "SET GLOBAL READ_ONLY = 0"
} else {
query = "SET GLOBAL SUPER_READ_ONLY = 1"
}

query := "SET GLOBAL SUPER_READ_ONLY = 1"

if err := tb.RunQuery(query); err != nil {
return fmt.Errorf("failed to set read_only config, err: %s", err)
}
Expand Down
4 changes: 4 additions & 0 deletions deploy/mysqlclusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,10 @@ spec:
type: integer
required:
- maxQueryTime
readOnly:
description: Makes the cluster READ ONLY. Set the master to writable
or ReadOnly
type: boolean
replicas:
description: The number of pods. This updates replicas filed Defaults
to 0
Expand Down
6 changes: 6 additions & 0 deletions pkg/apis/mysql/v1alpha1/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,10 @@ type ClusterSpec struct {
// QueryLimits represents limits for a query
// +optional
QueryLimits *QueryLimits `json:"queryLimits,omitempty"`

// Makes the cluster READ ONLY. Set the master to writable or ReadOnly
// +optional
ReadOnly bool `json:"readOnly,omitempty"`
}

type MysqlConf map[string]string
Expand Down Expand Up @@ -139,6 +143,7 @@ type ClusterConditionType string
const (
ClusterConditionReady ClusterConditionType = "Ready"
ClusterConditionFailoverAck = "PendingFailoverAck"
ClusterConditionReadOnly = "ReadOnly"
)

type NodeStatus struct {
Expand All @@ -158,6 +163,7 @@ const (
NodeConditionLagged NodeConditionType = "Lagged"
NodeConditionReplicating = "Replicating"
NodeConditionMaster = "Master"
NodeConditionReadOnly = "ReadOnly"
)

type PodSpec struct {
Expand Down
166 changes: 160 additions & 6 deletions pkg/mysqlcluster/orc_reconciliation.go
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,14 @@ func (f *cFactory) SyncOrchestratorStatus(ctx context.Context) error {
// Try to get cluster from orchestrator if cluster is not present then
// register nodes into orchestrator.
if insts, err := f.orcClient.Cluster(f.getClusterAlias()); err == nil {

err = f.updateNodesReadOnlyFlagInOrc(insts)
if err != nil {
glog.Infof("Error setting Master readOnly/writable %s", err)
}

f.updateStatusFromOrc(insts)

} else {
glog.Errorf("Fail to get cluster from orchestrator: %s. Now tries to register nodes.", err)
return f.registerNodesInOrc()
Expand Down Expand Up @@ -75,11 +82,145 @@ func (f *cFactory) SyncOrchestratorStatus(ctx context.Context) error {
return nil
}

// getInstance returns the orchestrator instance whose hostname matches
// the given one, searching the provided instance list. An error is
// returned when no instance matches.
func getInstance(hostname string, insts []orc.Instance) (*orc.Instance, error) {
	for i := range insts {
		if insts[i].Key.Hostname == hostname {
			// Return a pointer to a copy, keeping the original
			// slice element unaliased.
			found := insts[i]
			return &found, nil
		}
	}

	return nil, fmt.Errorf("the element was not found")
}

// getMaster walks the replication topology upwards from node and returns
// its root master. For a co-master, the node's direct master (its peer)
// is returned without recursing, since recursing would loop forever
// between the two co-masters. A node with no master link is its own root.
func getMaster(node *orc.Instance, insts []orc.Instance) (*orc.Instance, error) {
	// Regular replica: follow the master link one level up and recurse.
	if len(node.MasterKey.Hostname) != 0 && !node.IsCoMaster {
		next, err := getInstance(node.MasterKey.Hostname, insts)
		if err != nil {
			return nil, err
		}
		return getMaster(next, insts)
	}

	// Co-master: its master is the peer co-master; do not recurse.
	if node.IsCoMaster {
		next, err := getInstance(node.MasterKey.Hostname, insts)
		if err != nil {
			return nil, err
		}
		return next, nil
	}

	// No master link: this node is the root of the topology.
	return node, nil
}

// determineMasterFor computes the master of the cluster by resolving the
// replication root of every instance; all instances must agree on a
// single root, otherwise an error is returned.
//
// NOTE: the error messages "multiple masters" and "0 elements in
// instance array" are matched by string in updateNodesReadOnlyFlagInOrc,
// so they must not be changed.
func determineMasterFor(insts []orc.Instance) (*orc.Instance, error) {
	if len(insts) == 0 {
		return nil, fmt.Errorf("0 elements in instance array")
	}

	var master *orc.Instance
	for i := range insts {
		root, err := getMaster(&insts[i], insts)
		if err != nil {
			return nil, fmt.Errorf("not able to retrieve the root of this node %s", insts[i].Key.Hostname)
		}
		if master == nil {
			// First root found becomes the candidate master.
			master = root
		} else if master.Key.Hostname != root.Key.Hostname {
			// Two nodes resolved to different roots: split brain.
			return nil, fmt.Errorf("multiple masters")
		}
	}

	return master, nil
}

// setInstWritable makes the given instance writable in orchestrator,
// skipping the API call when the instance is already writable.
func (f *cFactory) setInstWritable(inst orc.Instance) error {
	if !inst.ReadOnly {
		// Already writable; nothing to do.
		return nil
	}

	glog.V(2).Infof("set instance %s writable", inst.Key.Hostname)
	return f.orcClient.SetHostWritable(inst.Key)
}

// putNodeInMaintenance marks the instance as in maintenance in
// orchestrator, owned by "mysqlcontroller" with reason "clusterReadOnly".
func (f *cFactory) putNodeInMaintenance(inst orc.Instance) error {
	glog.V(2).Infof("set instance %s in maintenance", inst.Key.Hostname)

	return f.orcClient.BeginMaintenance(inst.Key, "mysqlcontroller", "clusterReadOnly")
}

// getNodeOutOfMaintenance ends any orchestrator maintenance window for
// the given instance.
func (f *cFactory) getNodeOutOfMaintenance(inst orc.Instance) error {
	glog.V(2).Infof("set instance %s out of maintenance", inst.Key.Hostname)

	return f.orcClient.EndMaintenance(inst.Key)
}

// setInstReadOnly makes the given instance read only in orchestrator,
// skipping the API call when the instance is already read only.
// (Replaces the original double-negative condition `!inst.ReadOnly == true`.)
func (f *cFactory) setInstReadOnly(inst orc.Instance) error {
	if inst.ReadOnly {
		// Already read only; nothing to do.
		return nil
	}

	glog.V(2).Infof("set instance %s read only", inst.Key.Hostname)
	return f.orcClient.SetHostReadOnly(inst.Key)
}

// updateNodesReadOnlyFlagInOrc reconciles the read-only flag of every
// instance against the cluster spec. When multiple masters are detected
// (split brain) the whole cluster is frozen read only as a safety
// measure. Per-node orchestrator failures are logged but do not abort
// the reconciliation of the remaining nodes.
func (f *cFactory) updateNodesReadOnlyFlagInOrc(insts []orc.Instance) error {
	master, err := determineMasterFor(insts)
	// TODO: replace this fragile string match with a sentinel error
	// shared with determineMasterFor.
	if err != nil && err.Error() == "multiple masters" {
		// More than one master was found (split brain): put every
		// node in maintenance and make it read only.
		for _, inst := range insts {
			if err := f.putNodeInMaintenance(inst); err != nil {
				glog.Errorf("fail to put node %s in maintenance: %s", inst.Key.Hostname, err)
			}
			if err := f.setInstReadOnly(inst); err != nil {
				glog.Errorf("fail to set node %s read only: %s", inst.Key.Hostname, err)
			}
		}
		return nil
	}
	if err != nil {
		return err
	}

	// A single master was determined; apply the spec to every node.
	for _, inst := range insts {
		if f.cluster.Spec.ReadOnly {
			// The whole cluster, master included, must be read only.
			if err := f.putNodeInMaintenance(inst); err != nil {
				glog.Errorf("fail to put node %s in maintenance: %s", inst.Key.Hostname, err)
			}
			if err := f.setInstReadOnly(inst); err != nil {
				glog.Errorf("fail to set node %s read only: %s", inst.Key.Hostname, err)
			}
			continue
		}

		// Normal operation: master writable, replicas read only.
		if err := f.getNodeOutOfMaintenance(inst); err != nil {
			glog.Errorf("fail to get node %s out of maintenance: %s", inst.Key.Hostname, err)
		}
		if inst.Key.Hostname == master.Key.Hostname {
			if err := f.setInstWritable(inst); err != nil {
				glog.Errorf("fail to set node %s writable: %s", inst.Key.Hostname, err)
			}
		} else {
			if err := f.setInstReadOnly(inst); err != nil {
				glog.Errorf("fail to set node %s read only: %s", inst.Key.Hostname, err)
			}
		}
	}

	return nil
}

func (f *cFactory) updateStatusFromOrc(insts []orc.Instance) {
// TODO: improve this code by computing differences between what
// orchestrator knows and what we know

updatedNodes := []string{}

var isReadOnly bool = true
for _, node := range insts {
host := node.Key.Hostname
updatedNodes = append(updatedNodes, host)
Expand All @@ -92,30 +233,43 @@ func (f *cFactory) updateStatusFromOrc(insts []orc.Instance) {
}
continue
}

maxSlaveLatency := defaultMaxSlaveLatency
if f.cluster.Spec.MaxSlaveLatency != nil {
maxSlaveLatency = *f.cluster.Spec.MaxSlaveLatency
}

if !node.SlaveLagSeconds.Valid {
f.updateNodeCondition(host, api.NodeConditionLagged, core.ConditionUnknown)
} else if node.SlaveLagSeconds.Int64 <= maxSlaveLatency {
f.updateNodeCondition(host, api.NodeConditionLagged, core.ConditionFalse)
} else { // node is behind master
f.updateNodeCondition(host, api.NodeConditionLagged, core.ConditionTrue)
}

if node.Slave_SQL_Running && node.Slave_IO_Running {
f.updateNodeCondition(host, api.NodeConditionReplicating, core.ConditionTrue)
} else {
f.updateNodeCondition(host, api.NodeConditionReplicating, core.ConditionFalse)
}
f.updateNodeCondition(host, api.NodeConditionMaster, core.ConditionFalse)
isReadOnly = isReadOnly && node.ReadOnly
if node.ReadOnly == true {
f.updateNodeCondition(host, api.NodeConditionReadOnly, core.ConditionTrue)
} else {
f.updateNodeCondition(host, api.NodeConditionReadOnly, core.ConditionFalse)
}
}

master, err := determineMasterFor(insts)
if err != nil {
glog.Errorf("Error acquiring master name %s", err)
} else {
f.updateNodeCondition(master.Key.Hostname, api.NodeConditionMaster, core.ConditionTrue)

if !node.ReadOnly {
f.updateNodeCondition(host, api.NodeConditionMaster, core.ConditionTrue)
if isReadOnly == true {
f.cluster.UpdateStatusCondition(api.ClusterConditionReadOnly,
core.ConditionTrue, "initializedTrue", "settingReadOnlyTrue")
} else {
f.updateNodeCondition(host, api.NodeConditionMaster, core.ConditionFalse)
f.cluster.UpdateStatusCondition(api.ClusterConditionReadOnly,
core.ConditionFalse, "initializedFalse", "settingReadOnlyFalse")
}
}

Expand Down
Loading