chore: unnecessary use of fmt.Sprintf #1494

Open · wants to merge 1 commit into base: master
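The change is mechanical: wherever fmt.Sprintf is called with a format string that contains no verbs, the call simply returns its argument, so the literal can be used directly. A minimal standalone sketch of the pattern (illustrative values, not lines from this diff):

package main

import "fmt"

func main() {
	// Before: the format string has no verbs, so Sprintf copies the
	// literal through at the cost of an extra call and a vararg slice.
	before := fmt.Sprintf("located errant GTID")

	// After: the plain literal is equivalent.
	after := "located errant GTID"

	fmt.Println(before == after) // prints: true
}

Linters such as staticcheck flag this pattern; for verb-free format strings the rewrite is behavior-preserving.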
34 changes: 17 additions & 17 deletions go/http/api.go
@@ -723,7 +723,7 @@ func (this *HttpAPI) LocateErrantGTID(params martini.Params, r render.Render, re
 		Respond(r, &APIResponse{Code: ERROR, Message: err.Error()})
 		return
 	}
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("located errant GTID"), Details: errantBinlogs})
+	Respond(r, &APIResponse{Code: OK, Message: "located errant GTID", Details: errantBinlogs})
 }
 
 // ErrantGTIDResetMaster removes errant transactions on a server by way of RESET MASTER
@@ -2805,7 +2805,7 @@ func (this *HttpAPI) Health(params martini.Params, r render.Render, req *http.Re
 		return
 	}
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Application node is healthy"), Details: health})
+	Respond(r, &APIResponse{Code: OK, Message: "Application node is healthy", Details: health})
 
 }
 
@@ -2839,7 +2839,7 @@ func (this *HttpAPI) StatusCheck(params martini.Params, r render.Render, req *ht
 		r.JSON(500, &APIResponse{Code: ERROR, Message: fmt.Sprintf("Application node is unhealthy %+v", err), Details: health})
 		return
 	}
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Application node is healthy"), Details: health})
+	Respond(r, &APIResponse{Code: OK, Message: "Application node is healthy", Details: health})
 }
 
 // GrabElection forcibly grabs leadership. Use with care!!
@@ -2854,7 +2854,7 @@ func (this *HttpAPI) GrabElection(params martini.Params, r render.Render, req *h
 		return
 	}
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Node elected as leader")})
+	Respond(r, &APIResponse{Code: OK, Message: "Node elected as leader"})
 }
 
 // Reelect causes re-elections for an active node
@@ -2869,7 +2869,7 @@ func (this *HttpAPI) Reelect(params martini.Params, r render.Render, req *http.R
 		return
 	}
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Set re-elections")})
+	Respond(r, &APIResponse{Code: OK, Message: "Set re-elections"})
 }
 
 // RaftAddPeer adds a new node to the raft cluster
@@ -2923,7 +2923,7 @@ func (this *HttpAPI) RaftYield(params martini.Params, r render.Render, req *http
 		return
 	}
 	orcraft.PublishYield(params["node"])
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Asynchronously yielded")})
+	Respond(r, &APIResponse{Code: OK, Message: "Asynchronously yielded"})
 }
 
 // RaftYieldHint yields to a host whose name contains given hint (e.g. DC)
@@ -3064,7 +3064,7 @@ func (this *HttpAPI) ReloadConfiguration(params martini.Params, r render.Render,
 	config.Reload(extraConfigFile)
 	inst.AuditOperation("reload-configuration", nil, "Triggered via API")
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Config reloaded"), Details: extraConfigFile})
+	Respond(r, &APIResponse{Code: OK, Message: "Config reloaded", Details: extraConfigFile})
 }
 
 // ReplicationAnalysis retuens list of issues
@@ -3085,7 +3085,7 @@ func (this *HttpAPI) replicationAnalysis(clusterName string, instanceKey *inst.I
 		analysis = filtered
 	}
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Analysis"), Details: analysis})
+	Respond(r, &APIResponse{Code: OK, Message: "Analysis", Details: analysis})
 }
 
 // ReplicationAnalysis retuens list of issues
@@ -3296,7 +3296,7 @@ func (this *HttpAPI) AutomatedRecoveryFilters(params martini.Params, r render.Re
 	automatedRecoveryMap["RecoverIntermediateMasterClusterFilters"] = config.Config.RecoverIntermediateMasterClusterFilters
 	automatedRecoveryMap["RecoveryIgnoreHostnameFilters"] = config.Config.RecoveryIgnoreHostnameFilters
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Automated recovery configuration details"), Details: automatedRecoveryMap})
+	Respond(r, &APIResponse{Code: OK, Message: "Automated recovery configuration details", Details: automatedRecoveryMap})
 }
 
 // AuditFailureDetection provides list of topology_failure_detection entries
@@ -3439,7 +3439,7 @@ func (this *HttpAPI) AcknowledgeClusterRecoveries(params martini.Params, r rende
 
 	comment := strings.TrimSpace(req.URL.Query().Get("comment"))
 	if comment == "" {
-		Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("No acknowledge comment given")})
+		Respond(r, &APIResponse{Code: ERROR, Message: "No acknowledge comment given"})
 		return
 	}
 	userId := getUserId(req, user)
@@ -3458,7 +3458,7 @@ func (this *HttpAPI) AcknowledgeClusterRecoveries(params martini.Params, r rende
 		return
 	}
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Acknowledged cluster recoveries"), Details: clusterName})
+	Respond(r, &APIResponse{Code: OK, Message: "Acknowledged cluster recoveries", Details: clusterName})
 }
 
 // ClusterInfo provides details of a given cluster
@@ -3476,7 +3476,7 @@ func (this *HttpAPI) AcknowledgeInstanceRecoveries(params martini.Params, r rend
 
 	comment := strings.TrimSpace(req.URL.Query().Get("comment"))
 	if comment == "" {
-		Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("No acknowledge comment given")})
+		Respond(r, &APIResponse{Code: ERROR, Message: "No acknowledge comment given"})
 		return
 	}
 	userId := getUserId(req, user)
@@ -3495,7 +3495,7 @@ func (this *HttpAPI) AcknowledgeInstanceRecoveries(params martini.Params, r rend
 		return
 	}
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Acknowledged instance recoveries"), Details: instanceKey})
+	Respond(r, &APIResponse{Code: OK, Message: "Acknowledged instance recoveries", Details: instanceKey})
 }
 
 // ClusterInfo provides details of a given cluster
@@ -3522,7 +3522,7 @@ func (this *HttpAPI) AcknowledgeRecovery(params martini.Params, r render.Render,
 	}
 	comment := strings.TrimSpace(req.URL.Query().Get("comment"))
 	if comment == "" {
-		Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("No acknowledge comment given")})
+		Respond(r, &APIResponse{Code: ERROR, Message: "No acknowledge comment given"})
 		return
 	}
 	userId := getUserId(req, user)
@@ -3547,7 +3547,7 @@ func (this *HttpAPI) AcknowledgeRecovery(params martini.Params, r render.Render,
 		return
 	}
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Acknowledged recovery"), Details: idParam})
+	Respond(r, &APIResponse{Code: OK, Message: "Acknowledged recovery", Details: idParam})
 }
 
 // ClusterInfo provides details of a given cluster
@@ -3559,7 +3559,7 @@ func (this *HttpAPI) AcknowledgeAllRecoveries(params martini.Params, r render.Re
 
 	comment := strings.TrimSpace(req.URL.Query().Get("comment"))
 	if comment == "" {
-		Respond(r, &APIResponse{Code: ERROR, Message: fmt.Sprintf("No acknowledge comment given")})
+		Respond(r, &APIResponse{Code: ERROR, Message: "No acknowledge comment given"})
 		return
 	}
 	userId := getUserId(req, user)
@@ -3579,7 +3579,7 @@ func (this *HttpAPI) AcknowledgeAllRecoveries(params martini.Params, r render.Re
 		return
 	}
 
-	Respond(r, &APIResponse{Code: OK, Message: fmt.Sprintf("Acknowledged all recoveries"), Details: comment})
+	Respond(r, &APIResponse{Code: OK, Message: "Acknowledged all recoveries", Details: comment})
 }
 
 // BlockedRecoveries reads list of currently blocked recoveries, optionally filtered by cluster name
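Note that the StatusCheck hunk above deliberately keeps one fmt.Sprintf: its format string contains a %+v verb, so the call performs real interpolation. A short sketch of the distinction, using an illustrative error value:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("connection refused")

	// Kept in the diff: the %+v verb means Sprintf does real work here.
	fmt.Println(fmt.Sprintf("Application node is unhealthy %+v", err))

	// Removed in the diff: a verb-free format string, so the literal suffices.
	fmt.Println("Application node is healthy")
}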
6 changes: 2 additions & 4 deletions go/inst/candidate_database_instance_dao.go
@@ -17,8 +17,6 @@
 package inst
 
 import (
-	"fmt"
-
 	"github.com/openark/golib/log"
 	"github.com/openark/golib/sqlutils"
 
@@ -33,7 +31,7 @@ func RegisterCandidateInstance(candidate *CandidateDatabaseInstance) error {
 	}
 	args := sqlutils.Args(candidate.Hostname, candidate.Port, string(candidate.PromotionRule), candidate.LastSuggestedString)
 
-	query := fmt.Sprintf(`
+	query := `
 		insert into candidate_database_instance (
 				hostname,
 				port,
@@ -44,7 +42,7 @@ func RegisterCandidateInstance(candidate *CandidateDatabaseInstance) error {
 		) on duplicate key update
 			last_suggested=values(last_suggested),
 			promotion_rule=values(promotion_rule)
-		`)
+		`
 	writeFunc := func() error {
 		_, err := db.ExecOrchestrator(query, args...)
 		AuditOperation("register-candidate", candidate.Key(), string(candidate.PromotionRule))
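The dao change above is the same idea applied to a raw-string query: the backquoted SQL contains no format verbs (bind values flow through sqlutils.Args), so the fmt.Sprintf wrapper is dropped and the now-unused "fmt" import goes with it. A standalone sketch of the resulting shape — the table, columns, and exec helper here are illustrative, not the project's API:

package main

import "fmt"

// execQuery stands in for a helper like db.ExecOrchestrator; here it only
// prints its inputs so the example runs without a database.
func execQuery(query string, args ...interface{}) error {
	fmt.Println(query, args)
	return nil
}

func register(hostname string, port int) error {
	// A raw string literal needs no Sprintf when there is nothing to
	// interpolate; placeholders are bound by the driver instead.
	query := `
		insert into candidate_database_instance (
			hostname,
			port
		) values (?, ?)
	`
	return execQuery(query, hostname, port)
}

func main() {
	_ = register("db1.example.com", 3306)
}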
38 changes: 19 additions & 19 deletions go/logic/topology_recovery.go
@@ -553,17 +553,17 @@ func recoverDeadMaster(topologyRecovery *TopologyRecovery, candidateInstanceKey
 	switch topologyRecovery.RecoveryType {
 	case MasterRecoveryGTID:
 		{
-			AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: regrouping replicas via GTID"))
+			AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: regrouping replicas via GTID")
 			lostReplicas, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasGTID(failedInstanceKey, true, false, nil, &topologyRecovery.PostponedFunctionsContainer, promotedReplicaIsIdeal)
 		}
 	case MasterRecoveryPseudoGTID:
 		{
-			AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: regrouping replicas via Pseudo-GTID"))
+			AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: regrouping replicas via Pseudo-GTID")
 			lostReplicas, _, _, cannotReplicateReplicas, promotedReplica, err = inst.RegroupReplicasPseudoGTIDIncludingSubReplicasOfBinlogServers(failedInstanceKey, true, nil, &topologyRecovery.PostponedFunctionsContainer, promotedReplicaIsIdeal)
 		}
 	case MasterRecoveryBinlogServer:
 		{
-			AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadMaster: recovering via binlog servers"))
+			AuditTopologyRecovery(topologyRecovery, "RecoverDeadMaster: recovering via binlog servers")
 			promotedReplica, err = recoverDeadMasterInBinlogServerTopology(topologyRecovery)
 		}
 	}
@@ -644,9 +644,9 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 	// Maybe we promoted a "prefer_not"
 	// Maybe we promoted a server in a different DC than the master
 	// There's many options. We may wish to replace the server we promoted with a better one.
-	AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("checking if should replace promoted replica with a better candidate"))
+	AuditTopologyRecovery(topologyRecovery, "checking if should replace promoted replica with a better candidate")
 	if candidateInstanceKey == nil {
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ checking if promoted replica is the ideal candidate"))
+		AuditTopologyRecovery(topologyRecovery, "+ checking if promoted replica is the ideal candidate")
 		if deadInstance != nil {
 			for _, candidateReplica := range candidateReplicas {
 				if promotedReplica.Key.Equals(&candidateReplica.Key) &&
@@ -662,7 +662,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 	// We didn't pick the ideal candidate; let's see if we can replace with a candidate from same DC and ENV
 	if candidateInstanceKey == nil {
 		// Try a candidate replica that is in same DC & env as the dead instance
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for an ideal candidate"))
+		AuditTopologyRecovery(topologyRecovery, "+ searching for an ideal candidate")
 		if deadInstance != nil {
 			for _, candidateReplica := range candidateReplicas {
 				if canTakeOverPromotedServerAsMaster(candidateReplica, promotedReplica) &&
@@ -677,7 +677,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 	}
 	if candidateInstanceKey == nil {
 		// We cannot find a candidate in same DC and ENV as dead master
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ checking if promoted replica is an OK candidate"))
+		AuditTopologyRecovery(topologyRecovery, "+ checking if promoted replica is an OK candidate")
 		for _, candidateReplica := range candidateReplicas {
 			if promotedReplica.Key.Equals(&candidateReplica.Key) {
 				// Seems like we promoted a candidate replica (though not in same DC and ENV as dead master)
@@ -694,7 +694,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 	// Still nothing?
 	if candidateInstanceKey == nil {
 		// Try a candidate replica that is in same DC & env as the promoted replica (our promoted replica is not an "is_candidate")
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a candidate"))
+		AuditTopologyRecovery(topologyRecovery, "+ searching for a candidate")
 		for _, candidateReplica := range candidateReplicas {
 			if canTakeOverPromotedServerAsMaster(candidateReplica, promotedReplica) &&
 				promotedReplica.DataCenter == candidateReplica.DataCenter &&
@@ -708,7 +708,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 	// Still nothing?
 	if candidateInstanceKey == nil {
 		// Try a candidate replica (our promoted replica is not an "is_candidate")
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a candidate"))
+		AuditTopologyRecovery(topologyRecovery, "+ searching for a candidate")
 		for _, candidateReplica := range candidateReplicas {
 			if canTakeOverPromotedServerAsMaster(candidateReplica, promotedReplica) {
 				if satisfied, reason := MasterFailoverGeographicConstraintSatisfied(&topologyRecovery.AnalysisEntry, candidateReplica); satisfied {
@@ -735,7 +735,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 	if candidateInstanceKey == nil {
 		// Still nothing? Then we didn't find a replica marked as "candidate". OK, further down the stream we have:
 		// find neutral instance in same dv&env as dead master
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a neutral server to replace promoted server, in same DC and env as dead master"))
+		AuditTopologyRecovery(topologyRecovery, "+ searching for a neutral server to replace promoted server, in same DC and env as dead master")
 		for _, neutralReplica := range neutralReplicas {
 			if canTakeOverPromotedServerAsMaster(neutralReplica, promotedReplica) &&
 				deadInstance.DataCenter == neutralReplica.DataCenter &&
@@ -747,7 +747,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 	}
 	if candidateInstanceKey == nil {
 		// find neutral instance in same dv&env as promoted replica
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a neutral server to replace promoted server, in same DC and env as promoted replica"))
+		AuditTopologyRecovery(topologyRecovery, "+ searching for a neutral server to replace promoted server, in same DC and env as promoted replica")
 		for _, neutralReplica := range neutralReplicas {
 			if canTakeOverPromotedServerAsMaster(neutralReplica, promotedReplica) &&
 				promotedReplica.DataCenter == neutralReplica.DataCenter &&
@@ -758,7 +758,7 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 		}
 	}
 	if candidateInstanceKey == nil {
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ searching for a neutral server to replace a prefer_not"))
+		AuditTopologyRecovery(topologyRecovery, "+ searching for a neutral server to replace a prefer_not")
 		for _, neutralReplica := range neutralReplicas {
 			if canTakeOverPromotedServerAsMaster(neutralReplica, promotedReplica) {
 				if satisfied, reason := MasterFailoverGeographicConstraintSatisfied(&topologyRecovery.AnalysisEntry, neutralReplica); satisfied {
@@ -776,12 +776,12 @@ func SuggestReplacementForPromotedReplica(topologyRecovery *TopologyRecovery, de
 	// So do we have a candidate?
 	if candidateInstanceKey == nil {
 		// Found nothing. Stick with promoted replica
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ found no server to promote on top promoted replica"))
+		AuditTopologyRecovery(topologyRecovery, "+ found no server to promote on top promoted replica")
 		return promotedReplica, false, nil
 	}
 	if promotedReplica.Key.Equals(candidateInstanceKey) {
 		// Sanity. It IS the candidate, nothing to promote...
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("+ sanity check: found our very own server to promote; doing nothing"))
+		AuditTopologyRecovery(topologyRecovery, "+ sanity check: found our very own server to promote; doing nothing")
 		return promotedReplica, false, nil
 	}
 	replacement, _, err = inst.ReadInstance(candidateInstanceKey)
@@ -905,7 +905,7 @@ func checkAndRecoverDeadMaster(analysisEntry inst.ReplicationAnalysis, candidate
 
 	if config.Config.ApplyMySQLPromotionAfterMasterFailover || analysisEntry.CommandHint == inst.GracefulMasterTakeoverCommandHint {
 		// on GracefulMasterTakeoverCommandHint it makes utter sense to RESET SLAVE ALL and read_only=0, and there is no sense in not doing so.
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: will apply MySQL changes to promoted master"))
+		AuditTopologyRecovery(topologyRecovery, "- RecoverDeadMaster: will apply MySQL changes to promoted master")
 		{
 			_, err := inst.ResetReplicationOperation(&promotedReplica.Key)
 			if err != nil {
@@ -949,7 +949,7 @@
 	}
 	if config.Config.MasterFailoverDetachReplicaMasterHost {
 		postponedFunction := func() error {
-			AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: detaching master host on promoted master"))
+			AuditTopologyRecovery(topologyRecovery, "- RecoverDeadMaster: detaching master host on promoted master")
 			inst.DetachReplicaMasterHost(&promotedReplica.Key)
 			return nil
 		}
@@ -1167,7 +1167,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce
 		relocateReplicasToCandidateSibling()
 	}
 	if !recoveryResolved {
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadIntermediateMaster: will next attempt regrouping of replicas"))
+		AuditTopologyRecovery(topologyRecovery, "- RecoverDeadIntermediateMaster: will next attempt regrouping of replicas")
 		// Plan B: regroup (we wish to reduce cross-DC replication streams)
 		lostReplicas, _, _, _, regroupPromotedReplica, regroupError := inst.RegroupReplicas(failedInstanceKey, true, nil, nil)
 		if regroupError != nil {
@@ -1185,7 +1185,7 @@ func RecoverDeadIntermediateMaster(topologyRecovery *TopologyRecovery, skipProce
 		}
 		// Plan C: try replacement intermediate master in other DC...
 		if candidateSiblingOfIntermediateMaster != nil && candidateSiblingOfIntermediateMaster.DataCenter != intermediateMasterInstance.DataCenter {
-			AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadIntermediateMaster: will next attempt relocating to another DC server"))
+			AuditTopologyRecovery(topologyRecovery, "- RecoverDeadIntermediateMaster: will next attempt relocating to another DC server")
 			relocateReplicasToCandidateSibling()
 		}
 	}
@@ -1438,7 +1438,7 @@ func checkAndRecoverDeadCoMaster(analysisEntry inst.ReplicationAnalysis, candida
 	recoverDeadCoMasterSuccessCounter.Inc(1)
 
 	if config.Config.ApplyMySQLPromotionAfterMasterFailover {
-		AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("- RecoverDeadMaster: will apply MySQL changes to promoted master"))
+		AuditTopologyRecovery(topologyRecovery, "- RecoverDeadMaster: will apply MySQL changes to promoted master")
 		inst.SetReadOnly(&promotedReplica.Key, false)
 	}
 	if !skipProcesses {