Fix support for multiple pvc for pd (#3820)
dragonly committed Mar 10, 2021
1 parent bdff17a commit 52e1f7f
Showing 24 changed files with 295 additions and 159 deletions.
12 changes: 12 additions & 0 deletions docs/api-references/docs.md
@@ -7799,6 +7799,16 @@ k8s.io/apimachinery/pkg/types.UID
</tr>
<tr>
<td>
<code>pvcUIDSet</code></br>
<em>
map[k8s.io/apimachinery/pkg/types.UID]struct{}
</em>
</td>
<td>
</td>
</tr>
<tr>
<td>
<code>memberDeleted</code></br>
<em>
bool
@@ -9073,6 +9083,7 @@ map[string]github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDMember
</em>
</td>
<td>
<p>Members contains PDs in current TidbCluster</p>
</td>
</tr>
<tr>
@@ -9085,6 +9096,7 @@ map[string]github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1.PDMember
</em>
</td>
<td>
<p>PeerMembers contains PDs NOT in current TidbCluster</p>
</td>
</tr>
<tr>
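The pvcUIDSet field documented above is a Go set: a map whose keys are PVC UIDs and whose values are empty structs, so one PD failure member can track several PVCs. A minimal, standalone sketch of that idiom (the UID values below are made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// A set of PVC UIDs: the keys carry the membership information and the
	// empty struct values occupy no space.
	pvcUIDSet := map[types.UID]struct{}{}

	// Record two PVCs that belong to the same PD member.
	pvcUIDSet[types.UID("11111111-1111-1111-1111-111111111111")] = struct{}{}
	pvcUIDSet[types.UID("22222222-2222-2222-2222-222222222222")] = struct{}{}

	// Membership test: is this PVC already tracked?
	if _, ok := pvcUIDSet[types.UID("11111111-1111-1111-1111-111111111111")]; ok {
		fmt.Println("PVC is tracked for this failure member")
	}
}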
6 changes: 3 additions & 3 deletions pkg/apis/pingcap/v1alpha1/tidbcluster.go
@@ -334,13 +334,13 @@ func (tc *TidbCluster) PDAutoFailovering() bool {
}

func (tc *TidbCluster) GetPDDeletedFailureReplicas() int32 {
var failureReplicas int32 = 0
var deteledReplicas int32 = 0
for _, failureMember := range tc.Status.PD.FailureMembers {
if failureMember.MemberDeleted {
failureReplicas++
deteledReplicas++
}
}
return failureReplicas
return deteledReplicas
}

func (tc *TidbCluster) PDStsDesiredReplicas() int32 {
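(The body of PDStsDesiredReplicas is truncated in this hunk.) The counting logic is unchanged; the local variable is only renamed to say what is actually counted: failure members whose PD member has already been deleted. A small usage sketch with made-up member names:

package main

import (
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
)

func main() {
	tc := &v1alpha1.TidbCluster{}
	tc.Status.PD.FailureMembers = map[string]v1alpha1.PDFailureMember{
		// Already removed from the PD cluster, so it is counted.
		"basic-pd-1": {PodName: "basic-pd-1", MemberDeleted: true},
		// Failure recorded but the member is not deleted yet, so it is not counted.
		"basic-pd-2": {PodName: "basic-pd-2", MemberDeleted: false},
	}

	fmt.Println(tc.GetPDDeletedFailureReplicas()) // prints 1
}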
21 changes: 12 additions & 9 deletions pkg/apis/pingcap/v1alpha1/types.go
@@ -940,10 +940,12 @@ type Service struct {

// PDStatus is PD status
type PDStatus struct {
Synced bool `json:"synced,omitempty"`
Phase MemberPhase `json:"phase,omitempty"`
StatefulSet *apps.StatefulSetStatus `json:"statefulSet,omitempty"`
Members map[string]PDMember `json:"members,omitempty"`
Synced bool `json:"synced,omitempty"`
Phase MemberPhase `json:"phase,omitempty"`
StatefulSet *apps.StatefulSetStatus `json:"statefulSet,omitempty"`
// Members contains PDs in current TidbCluster
Members map[string]PDMember `json:"members,omitempty"`
// PeerMembers contains PDs NOT in current TidbCluster
PeerMembers map[string]PDMember `json:"peerMembers,omitempty"`
Leader PDMember `json:"leader,omitempty"`
FailureMembers map[string]PDFailureMember `json:"failureMembers,omitempty"`
@@ -965,11 +967,12 @@ type PDMember struct {

// PDFailureMember is the pd failure member information
type PDFailureMember struct {
PodName string `json:"podName,omitempty"`
MemberID string `json:"memberID,omitempty"`
PVCUID types.UID `json:"pvcUID,omitempty"`
MemberDeleted bool `json:"memberDeleted,omitempty"`
CreatedAt metav1.Time `json:"createdAt,omitempty"`
PodName string `json:"podName,omitempty"`
MemberID string `json:"memberID,omitempty"`
PVCUID types.UID `json:"pvcUID,omitempty"`
PVCUIDSet map[types.UID]struct{} `json:"pvcUIDSet,omitempty"`
MemberDeleted bool `json:"memberDeleted,omitempty"`
CreatedAt metav1.Time `json:"createdAt,omitempty"`
}

// UnjoinedMember is the pd unjoin cluster member information
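With the new PVCUIDSet field, a failure record can reference every PVC that belonged to the failed PD pod instead of the single UID held by the older PVCUID field, which stays in the struct. A sketch of building such a record; the pod name, member ID, and PVC UIDs are made up:

package main

import (
	"fmt"

	"github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// UIDs of the PVCs bound to one failed PD pod (illustrative values only).
	pvcUIDs := []types.UID{
		"11111111-1111-1111-1111-111111111111",
		"22222222-2222-2222-2222-222222222222",
	}

	fm := v1alpha1.PDFailureMember{
		PodName:   "basic-pd-0",
		MemberID:  "6518262300741659982",
		PVCUIDSet: map[types.UID]struct{}{},
		CreatedAt: metav1.Now(),
	}
	for _, uid := range pvcUIDs {
		fm.PVCUIDSet[uid] = struct{}{}
	}

	fmt.Printf("failure member %s tracks %d PVCs\n", fm.PodName, len(fm.PVCUIDSet))
}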
8 changes: 8 additions & 0 deletions pkg/apis/pingcap/v1alpha1/zz_generated.deepcopy.go

This file is generated; its diff is not rendered here.

1 change: 1 addition & 0 deletions pkg/controller/tidbcluster/tidb_cluster_control.go
@@ -143,6 +143,7 @@ func (c *defaultTidbClusterControl) updateTidbCluster(tc *v1alpha1.TidbCluster)
}

// cleaning all orphan pods(pd, tikv or tiflash which don't have a related PVC) managed by operator
// this could be useful when failover run into an undesired situation as described in PD failover function
skipReasons, err := c.orphanPodsCleaner.Clean(tc)
if err != nil {
return err
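The comment above describes what an orphan pod is: a pd, tikv, or tiflash pod whose PVC no longer exists. The cleaner's implementation is not part of this diff; the following is only a conceptual sketch of that check, using a hypothetical isOrphanPod helper and a fake clientset:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// isOrphanPod reports whether any PVC referenced by the pod's volumes is missing.
func isOrphanPod(ctx context.Context, cs kubernetes.Interface, pod *corev1.Pod) (bool, error) {
	for _, vol := range pod.Spec.Volumes {
		if vol.PersistentVolumeClaim == nil {
			continue
		}
		_, err := cs.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, vol.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		if err != nil {
			return false, err
		}
	}
	return false, nil
}

func main() {
	cs := fake.NewSimpleClientset() // the fake cluster contains no PVCs
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "basic-pd-0", Namespace: "default"},
		Spec: corev1.PodSpec{Volumes: []corev1.Volume{{
			Name: "pd",
			VolumeSource: corev1.VolumeSource{
				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: "pd-basic-pd-0"},
			},
		}}},
	}
	orphan, _ := isOrphanPod(context.TODO(), cs, pod)
	fmt.Println("orphan:", orphan) // true: the referenced PVC does not exist
}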
2 changes: 1 addition & 1 deletion pkg/manager/member/dm_master_member_manager.go
@@ -351,7 +351,7 @@ func (m *masterMemberManager) syncDMClusterStatus(dc *v1alpha1.DMCluster, set *a
dc.Status.Master.Members = masterStatus
dc.Status.Master.Leader = dc.Status.Master.Members[leader.Name]
dc.Status.Master.Image = ""
c := filterContainer(set, "dm-master")
c := findContainerByName(set, "dm-master")
if c != nil {
dc.Status.Master.Image = c.Image
}
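Only the helper's name changes here (filterContainer becomes findContainerByName); its body is not shown in this diff. The sketch below is a plausible shape for such a lookup, not the operator's actual implementation, and the image tag is made up:

package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// findContainerByNameSketch returns the container with the given name from a
// StatefulSet's pod template, or nil if no container matches.
func findContainerByNameSketch(set *apps.StatefulSet, name string) *corev1.Container {
	for i := range set.Spec.Template.Spec.Containers {
		if set.Spec.Template.Spec.Containers[i].Name == name {
			return &set.Spec.Template.Spec.Containers[i]
		}
	}
	return nil
}

func main() {
	set := &apps.StatefulSet{}
	set.Spec.Template.Spec.Containers = []corev1.Container{
		{Name: "dm-master", Image: "pingcap/dm:v2.0.1"},
	}
	if c := findContainerByNameSketch(set, "dm-master"); c != nil {
		fmt.Println("image:", c.Image)
	}
}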
2 changes: 1 addition & 1 deletion pkg/manager/member/dm_master_scaler.go
@@ -61,7 +61,7 @@ func (s *masterScaler) ScaleOut(meta metav1.Object, oldSet *apps.StatefulSet, ne
dcName := dc.GetName()

klog.Infof("scaling out dm-master statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
_, err := s.deleteDeferDeletingPVC(dc, oldSet.GetName(), v1alpha1.DMMasterMemberType, ordinal)
_, err := s.deleteDeferDeletingPVC(dc, v1alpha1.DMMasterMemberType, ordinal)
if err != nil {
return err
}
2 changes: 1 addition & 1 deletion pkg/manager/member/dm_worker_member_manager.go
@@ -280,7 +280,7 @@ func (m *workerMemberManager) syncDMClusterStatus(dc *v1alpha1.DMCluster, set *a
dc.Status.Worker.Synced = true
dc.Status.Worker.Members = workerStatus
dc.Status.Worker.Image = ""
c := filterContainer(set, "dm-worker")
c := findContainerByName(set, "dm-worker")
if c != nil {
dc.Status.Worker.Image = c.Image
}
2 changes: 1 addition & 1 deletion pkg/manager/member/dm_worker_scaler.go
@@ -61,7 +61,7 @@ func (s *workerScaler) ScaleOut(meta metav1.Object, oldSet *apps.StatefulSet, ne
dcName := dc.GetName()

klog.Infof("scaling out dm-worker statefulset %s/%s, ordinal: %d (replicas: %d, delete slots: %v)", oldSet.Namespace, oldSet.Name, ordinal, replicas, deleteSlots.List())
_, err := s.deleteDeferDeletingPVC(dc, oldSet.GetName(), v1alpha1.DMWorkerMemberType, ordinal)
_, err := s.deleteDeferDeletingPVC(dc, v1alpha1.DMWorkerMemberType, ordinal)
if err != nil {
return err
}
3 changes: 3 additions & 0 deletions pkg/manager/member/failover.go
@@ -15,6 +15,9 @@ package member

import "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"

// TODO: move this to a centralized place
// Since the "Unhealthy" is a very universal event reason string, which could apply to all the TiDB/DM cluster components,
// we should make a global event module, and put event related constants there.
const (
unHealthEventReason = "Unhealthy"
unHealthEventMsgPattern = "%s pod[%s] is unhealthy, msg:%s"
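These constants are shared by the failover implementations that report unhealthy pods. A sketch of how they are typically fed to an event recorder; the fake recorder, pod name, and message are illustrative, not the call site in this commit:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
)

const (
	unHealthEventReason     = "Unhealthy"
	unHealthEventMsgPattern = "%s pod[%s] is unhealthy, msg:%s"
)

func main() {
	// A fake recorder stands in for the controller's real event recorder.
	recorder := record.NewFakeRecorder(10)

	// The object the event is attached to; a bare Pod is enough for the sketch.
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "basic-pd-0", Namespace: "default"}}

	// Emit a warning event using the shared reason and message pattern.
	recorder.Eventf(pod, corev1.EventTypeWarning, unHealthEventReason, unHealthEventMsgPattern,
		"pd", pod.Name, "failed to access the member health endpoint")

	fmt.Println(<-recorder.Events)
}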
