Skip to content

Commit

Permalink
Expose PVReclaimPolicy for TidbMonitor (pingcap#2379)
Browse files Browse the repository at this point in the history
  • Loading branch information
Yisaer authored and sre-bot committed May 9, 2020
1 parent 1349b40 commit 4ce2af8
Show file tree
Hide file tree
Showing 9 changed files with 94 additions and 8 deletions.
26 changes: 26 additions & 0 deletions docs/api-references/docs.md
Original file line number Diff line number Diff line change
Expand Up @@ -1424,6 +1424,19 @@ InitializerSpec
</tr>
<tr>
<td>
<code>pvReclaimPolicy</code></br>
<em>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#persistentvolumereclaimpolicy-v1-core">
Kubernetes core/v1.PersistentVolumeReclaimPolicy
</a>
</em>
</td>
<td>
<p>Persistent volume reclaim policy applied to the PVs that are consumed by the TiDB cluster</p>
</td>
</tr>
<tr>
<td>
<code>imagePullPolicy</code></br>
<em>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#pullpolicy-v1-core">
Expand Down Expand Up @@ -14276,6 +14289,19 @@ InitializerSpec
</tr>
<tr>
<td>
<code>pvReclaimPolicy</code></br>
<em>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#persistentvolumereclaimpolicy-v1-core">
Kubernetes core/v1.PersistentVolumeReclaimPolicy
</a>
</em>
</td>
<td>
<p>Persistent volume reclaim policy applied to the PVs that are consumed by the TiDB cluster</p>
</td>
</tr>
<tr>
<td>
<code>imagePullPolicy</code></br>
<em>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#pullpolicy-v1-core">
Expand Down
2 changes: 2 additions & 0 deletions manifests/crd.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5509,6 +5509,8 @@ spec:
persistent:
type: boolean
prometheus: {}
pvReclaimPolicy:
type: string
reloader: {}
storage:
type: string
Expand Down
7 changes: 7 additions & 0 deletions pkg/apis/pingcap/v1alpha1/openapi_generated.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions pkg/apis/pingcap/v1alpha1/tidbmonitor_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,10 @@ type TidbMonitorSpec struct {
Reloader ReloaderSpec `json:"reloader"`
Initializer InitializerSpec `json:"initializer"`

// Persistent volume reclaim policy applied to the PVs that are consumed by the TiDB cluster
// +kubebuilder:default=Recycle
PVReclaimPolicy corev1.PersistentVolumeReclaimPolicy `json:"pvReclaimPolicy,omitempty"`

ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// +optional
Persistent bool `json:"persistent,omitempty"`
Expand Down
2 changes: 1 addition & 1 deletion pkg/controller/tidbmonitor/tidb_monitor_control.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func (tmc *defaultTidbMonitorControl) ReconcileTidbMonitor(tm *v1alpha1.TidbMoni

func (tmc *defaultTidbMonitorControl) reconcileTidbMonitor(tm *v1alpha1.TidbMonitor) error {

return tmc.monitorManager.Sync(tm)
return tmc.monitorManager.SyncMonitor(tm)
}

var _ ControlInterface = &defaultTidbMonitorControl{}
Expand Down
29 changes: 24 additions & 5 deletions pkg/manager/meta/reclaim_policy_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,9 @@ import (
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/label"
"github.com/pingcap/tidb-operator/pkg/manager"
"github.com/pingcap/tidb-operator/pkg/monitor"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
corelisters "k8s.io/client-go/listers/core/v1"
)

Expand All @@ -38,10 +41,26 @@ func NewReclaimPolicyManager(pvcLister corelisters.PersistentVolumeClaimLister,
}
}

// NewReclaimPolicyMonitorManager builds a monitor.MonitorManager backed by a
// *reclaimPolicyManager, which reconciles the reclaim policy of the PVs that
// belong to a TidbMonitor.
func NewReclaimPolicyMonitorManager(pvcLister corelisters.PersistentVolumeClaimLister,
	pvLister corelisters.PersistentVolumeLister,
	pvControl controller.PVControlInterface) monitor.MonitorManager {
	m := &reclaimPolicyManager{
		pvcLister,
		pvLister,
		pvControl,
	}
	return m
}

func (rpm *reclaimPolicyManager) Sync(tc *v1alpha1.TidbCluster) error {
ns := tc.GetNamespace()
instanceName := tc.GetInstanceName()
return rpm.sync(tc.GetNamespace(), tc.GetInstanceName(), tc.IsPVReclaimEnabled(), tc.Spec.PVReclaimPolicy, tc)
}

// SyncMonitor reconciles the reclaim policy of the PVs owned by the given
// TidbMonitor. The PV-reclaim feature flag is passed as false here (monitors
// do not use deferred PV reclaiming, unlike TidbCluster's Sync path).
func (rpm *reclaimPolicyManager) SyncMonitor(tm *v1alpha1.TidbMonitor) error {
	ns := tm.GetNamespace()
	name := tm.GetName()
	return rpm.sync(ns, name, false, tm.Spec.PVReclaimPolicy, tm)
}

func (rpm *reclaimPolicyManager) sync(ns, instanceName string, isPVReclaimEnabled bool, policy corev1.PersistentVolumeReclaimPolicy, obj runtime.Object) error {
l, err := label.New().Instance(instanceName).Selector()
if err != nil {
return err
Expand All @@ -55,7 +74,7 @@ func (rpm *reclaimPolicyManager) Sync(tc *v1alpha1.TidbCluster) error {
if pvc.Spec.VolumeName == "" {
continue
}
if tc.IsPVReclaimEnabled() && len(pvc.Annotations[label.AnnPVCDeferDeleting]) != 0 {
if isPVReclaimEnabled && len(pvc.Annotations[label.AnnPVCDeferDeleting]) != 0 {
// If the pv reclaim function is turned on, and when pv is the candidate pv to be reclaimed, skip patch this pv.
continue
}
Expand All @@ -64,11 +83,11 @@ func (rpm *reclaimPolicyManager) Sync(tc *v1alpha1.TidbCluster) error {
return err
}

if pv.Spec.PersistentVolumeReclaimPolicy == tc.Spec.PVReclaimPolicy {
if pv.Spec.PersistentVolumeReclaimPolicy == policy {
continue
}

err = rpm.pvControl.PatchPVReclaimPolicy(tc, pv, tc.Spec.PVReclaimPolicy)
err = rpm.pvControl.PatchPVReclaimPolicy(obj, pv, policy)
if err != nil {
return err
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/monitor/monitor.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,5 +16,5 @@ package monitor
import "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"

type MonitorManager interface {
Sync(monitor *v1alpha1.TidbMonitor) error
SyncMonitor(monitor *v1alpha1.TidbMonitor) error
}
16 changes: 15 additions & 1 deletion pkg/monitor/monitor/monitor_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ import (
informers "github.com/pingcap/tidb-operator/pkg/client/informers/externalversions"
v1alpha1listers "github.com/pingcap/tidb-operator/pkg/client/listers/pingcap/v1alpha1"
"github.com/pingcap/tidb-operator/pkg/controller"
"github.com/pingcap/tidb-operator/pkg/manager/meta"
"github.com/pingcap/tidb-operator/pkg/monitor"
utildiscovery "github.com/pingcap/tidb-operator/pkg/util/discovery"
corev1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
Expand All @@ -36,6 +38,7 @@ import (
)

type MonitorManager struct {
pvManager monitor.MonitorManager
discoveryInterface discovery.CachedDiscoveryInterface
typedControl controller.TypedControlInterface
deploymentLister appslisters.DeploymentLister
Expand All @@ -59,7 +62,12 @@ func NewMonitorManager(
recorder record.EventRecorder) *MonitorManager {
pvcLister := kubeInformerFactory.Core().V1().PersistentVolumeClaims().Lister()
pvLister := kubeInformerFactory.Core().V1().PersistentVolumes().Lister()
pvControl := controller.NewRealPVControl(kubeCli, pvcLister, pvLister, recorder)
return &MonitorManager{
pvManager: meta.NewReclaimPolicyMonitorManager(
pvcLister,
pvLister,
pvControl),
discoveryInterface: discoverycachedmemory.NewMemCacheClient(kubeCli.Discovery()),
typedControl: typedControl,
deploymentLister: kubeInformerFactory.Apps().V1().Deployments().Lister(),
Expand All @@ -71,7 +79,7 @@ func NewMonitorManager(
}
}

func (mm *MonitorManager) Sync(monitor *v1alpha1.TidbMonitor) error {
func (mm *MonitorManager) SyncMonitor(monitor *v1alpha1.TidbMonitor) error {

if monitor.DeletionTimestamp != nil {
return nil
Expand All @@ -95,6 +103,12 @@ func (mm *MonitorManager) Sync(monitor *v1alpha1.TidbMonitor) error {
return err
}
klog.V(4).Infof("tm[%s/%s]'s pvc synced", monitor.Namespace, monitor.Name)

// syncing all PVs managed by this tidbmonitor
if err := mm.pvManager.SyncMonitor(monitor); err != nil {
return err
}
klog.V(4).Infof("tm[%s/%s]'s pv synced", monitor.Namespace, monitor.Name)
}

// Sync Deployment
Expand Down
14 changes: 14 additions & 0 deletions tests/e2e/tidbcluster/tidbcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -890,6 +890,7 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
framework.ExpectNoError(err, "Expected get tidbcluster")

tm := fixture.NewTidbMonitor("e2e-monitor", tc.Namespace, tc, true, true)
tm.Spec.PVReclaimPolicy = corev1.PersistentVolumeReclaimDelete
_, err = cli.PingcapV1alpha1().TidbMonitors(tc.Namespace).Create(tm)
framework.ExpectNoError(err, "Expected tidbmonitor deployed success")
err = tests.CheckTidbMonitor(tm, c, fw)
Expand Down Expand Up @@ -919,6 +920,9 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
if !existed || value != label.TiDBOperator {
return false, nil
}
if pv.Spec.PersistentVolumeReclaimPolicy != corev1.PersistentVolumeReclaimDelete {
return false, fmt.Errorf("pv[%s] 's policy is not Delete", pv.Name)
}
return true, nil
})
framework.ExpectNoError(err, "monitor pv label error")
Expand All @@ -927,6 +931,7 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Get(tm.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "fetch latest tidbmonitor error")
tm.Spec.Prometheus.Service.Type = corev1.ServiceTypeNodePort
tm.Spec.PVReclaimPolicy = corev1.PersistentVolumeReclaimRetain
tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Update(tm)
framework.ExpectNoError(err, "update tidbmonitor service type error")

Expand Down Expand Up @@ -954,6 +959,12 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
tm, err = cli.PingcapV1alpha1().TidbMonitors(ns).Update(tm)
framework.ExpectNoError(err, "update tidbmonitor service portName error")

pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get("e2e-monitor-monitor", metav1.GetOptions{})
framework.ExpectNoError(err, "Expected fetch tidbmonitor pvc success")
pvName = pvc.Spec.VolumeName
pv, err = c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
framework.ExpectNoError(err, "Expected fetch tidbmonitor pv success")

err = wait.Poll(5*time.Second, 5*time.Minute, func() (done bool, err error) {
prometheusSvc, err := c.CoreV1().Services(ns).Get(fmt.Sprintf("%s-prometheus", tm.Name), metav1.GetOptions{})
if err != nil {
Expand All @@ -971,6 +982,9 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
if prometheusSvc.Spec.Ports[0].NodePort != targetPort {
return false, nil
}
if pv.Spec.PersistentVolumeReclaimPolicy != corev1.PersistentVolumeReclaimRetain {
return false, fmt.Errorf("pv[%s] 's policy is not Retain", pv.Name)
}
return true, nil
})
framework.ExpectNoError(err, "second update tidbmonitor service error")
Expand Down

0 comments on commit 4ce2af8

Please sign in to comment.