Fix static errors
Longhorn 6984

Signed-off-by: Derek Su <derek.su@suse.com>
derekbit authored and David Ko committed Dec 24, 2023
1 parent 95f6a86 commit 90f4ec7
Showing 12 changed files with 29 additions and 37 deletions.
4 changes: 2 additions & 2 deletions app/migrate.go
@@ -189,13 +189,13 @@ func migratePVAndPVCForPre070Volume(kubeClient *kubeclientset.Clientset, lhClien
}

newPV := datastore.NewPVManifestForVolume(v, oldPV.Name, staticStorageClass.Value, oldPV.Spec.CSI.FSType)
if newPV, err = kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), newPV, metav1.CreateOptions{}); err != nil {
if _, err = kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), newPV, metav1.CreateOptions{}); err != nil {
return err
}

if pvcRecreationRequired {
pvc := datastore.NewPVCManifestForVolume(v, oldPV.Name, namespace, pvcName, staticStorageClass.Value)
if pvc, err = kubeClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil {
if _, err = kubeClient.CoreV1().PersistentVolumeClaims(namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil {
return err
}
}
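
The change above (and the matching ones in manager/kubernetes.go and engineapi/enginesim_test.go below) replaces an assignment whose result is never read with the blank identifier, which is what Go static analysis reports as an unused or ineffectual assignment. A minimal standalone sketch of the pattern, using a hypothetical creator interface instead of the real Kubernetes client:

package example

import "fmt"

// creator is a hypothetical stand-in for a typed client whose Create call
// returns the created object along with an error.
type creator interface {
    Create(name string) (string, error)
}

// createOnly mirrors the fix: the returned object is not used afterwards,
// so it is assigned to the blank identifier and only the error is kept.
func createOnly(c creator, name string) error {
    if _, err := c.Create(name); err != nil {
        return fmt.Errorf("failed to create %s: %w", name, err)
    }
    return nil
}
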
5 changes: 4 additions & 1 deletion controller/controller_test.go
@@ -629,11 +629,14 @@ func (s *TestSuite) TestIsSameGuaranteedCPURequirement(c *C) {
c.Assert(IsSameGuaranteedCPURequirement(a, b), Equals, true)

b.Requests[corev1.ResourceCPU], err = resource.ParseQuantity("250m")
c.Assert(err, IsNil)
a = &corev1.ResourceRequirements{}
c.Assert(IsSameGuaranteedCPURequirement(a, b), Equals, false)

b.Requests[corev1.ResourceCPU], err = resource.ParseQuantity("250m")
c.Assert(err, IsNil)
a.Requests = corev1.ResourceList{}
a.Requests[corev1.ResourceCPU], err = resource.ParseQuantity("0.25")
a.Requests[corev1.ResourceCPU], _ = resource.ParseQuantity("0.25")
c.Assert(err, IsNil)
c.Assert(IsSameGuaranteedCPURequirement(a, b), Equals, true)
}
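
The test change above asserts the error returned by resource.ParseQuantity instead of leaving it unchecked. A minimal standalone sketch of the same idea, using the standard testing package rather than gocheck:

package example

import (
    "testing"

    "k8s.io/apimachinery/pkg/api/resource"
)

// TestParseQuantityChecked asserts the ParseQuantity error instead of
// silently discarding it, the same idea as the c.Assert(err, IsNil) lines
// added above.
func TestParseQuantityChecked(t *testing.T) {
    q, err := resource.ParseQuantity("250m")
    if err != nil {
        t.Fatalf("ParseQuantity: %v", err)
    }
    if q.MilliValue() != 250 {
        t.Fatalf("unexpected quantity: %s", q.String())
    }
}
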
1 change: 0 additions & 1 deletion controller/snapshot_controller.go
@@ -304,7 +304,6 @@ func (sc *SnapshotController) handleErr(err error, key interface{}) {
log := sc.logger.WithField("Snapshot", key)
handleReconcileErrorLogging(log, err, "Failed to sync Longhorn snapshot")
sc.queue.AddRateLimited(key)
return
}

func (sc *SnapshotController) syncHandler(key string) (err error) {
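
The change above (and the matching one in webhook/server/handler.go at the end of this commit) drops a bare return that was the last statement of a function with no result values; static checkers flag it as redundant control flow. A minimal sketch:

package example

import "fmt"

// logAndRequeue illustrates the simplification: with no result values and
// nothing after the last statement, a trailing bare "return" adds nothing.
func logAndRequeue(key string) {
    fmt.Printf("requeueing %s\n", key)
    // (previously: return)
}
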
6 changes: 3 additions & 3 deletions csi/deployment.go
@@ -285,7 +285,7 @@ func NewPluginDeployment(namespace, serviceAccount, nodeDriverRegistrarImage, li
},
},
SecurityContext: &corev1.SecurityContext{
Privileged: pointer.BoolPtr(true),
Privileged: pointer.Bool(true),
},
Args: []string{
"--v=2",
@@ -328,13 +328,13 @@ func NewPluginDeployment(namespace, serviceAccount, nodeDriverRegistrarImage, li
{
Name: types.CSIPluginName,
SecurityContext: &corev1.SecurityContext{
Privileged: pointer.BoolPtr(true),
Privileged: pointer.Bool(true),
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{
"SYS_ADMIN",
},
},
AllowPrivilegeEscalation: pointer.BoolPtr(true),
AllowPrivilegeEscalation: pointer.Bool(true),
},
Image: managerImage,
ImagePullPolicy: imagePullPolicy,
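
The change above swaps pointer.BoolPtr for pointer.Bool. In k8s.io/utils/pointer the *Ptr names are deprecated aliases of the shorter ones, so behavior is unchanged and only the deprecation warning goes away. A minimal sketch of the replacement:

package example

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/utils/pointer"
)

// privilegedSecurityContext builds the same SecurityContext as above using
// pointer.Bool; pointer.BoolPtr returns an identical *bool but carries a
// deprecation notice.
func privilegedSecurityContext() *corev1.SecurityContext {
    return &corev1.SecurityContext{
        Privileged:               pointer.Bool(true),
        AllowPrivilegeEscalation: pointer.Bool(true),
    }
}
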
6 changes: 3 additions & 3 deletions datastore/longhorn.go
@@ -325,15 +325,15 @@ func (s *DataStore) ValidateSetting(name, value string) (err error) {
return errors.Wrapf(err, "failed to list volumes before modifying toleration setting")
}
if !volumesDetached {
return &types.ErrorInvalidState{Reason: fmt.Sprintf("cannot modify toleration setting before all volumes are detached")}
return &types.ErrorInvalidState{Reason: "cannot modify toleration setting before all volumes are detached"}
}
case types.SettingNameSystemManagedComponentsNodeSelector:
volumesDetached, err := s.AreAllVolumesDetachedState()
if err != nil {
return errors.Wrapf(err, "failed to list volumes before modifying node selector for managed components setting")
}
if !volumesDetached {
return &types.ErrorInvalidState{Reason: fmt.Sprintf("cannot modify node selector for managed components setting before all volumes are detached")}
return &types.ErrorInvalidState{Reason: "cannot modify node selector for managed components setting before all volumes are detached"}
}
case types.SettingNamePriorityClass:
if value != "" {
@@ -346,7 +346,7 @@ func (s *DataStore) ValidateSetting(name, value string) (err error) {
return errors.Wrapf(err, "failed to list volumes before modifying priority class setting")
}
if !volumesDetached {
return &types.ErrorInvalidState{Reason: fmt.Sprintf("cannot modify priority class setting before all volumes are detached")}
return &types.ErrorInvalidState{Reason: "cannot modify priority class setting before all volumes are detached"}
}
case types.SettingNameGuaranteedInstanceManagerCPU:
guaranteedInstanceManagerCPU, err := s.GetSettingWithAutoFillingRO(types.SettingNameGuaranteedInstanceManagerCPU)
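
The change above (repeated in webhook/resources/volumeattachment/validator.go below) removes fmt.Sprintf calls whose only argument is a constant string with no formatting verbs; the call is a no-op that static analysis flags, so the literal is used directly. A minimal sketch:

package example

// reason shows the simplification: a constant message needs no Sprintf.
func reason() string {
    // before: return fmt.Sprintf("cannot modify toleration setting before all volumes are detached")
    return "cannot modify toleration setting before all volumes are detached"
}
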
4 changes: 1 addition & 3 deletions engineapi/backup_monitor.go
@@ -39,8 +39,6 @@ type BackupMonitor struct {
engine *longhorn.Engine
engineClientProxy EngineClientProxy

compressionMethod longhorn.BackupCompressionMethod

backupStatus longhorn.BackupStatus
backupStatusLock sync.RWMutex

@@ -251,7 +249,7 @@ func (m *BackupMonitor) exponentialBackOffTimer() bool {
}

// Set to Error state
err = fmt.Errorf("Max retry period %s reached in exponential backoff timer", BackupMonitorMaxRetryPeriod)
err = fmt.Errorf("max retry period %s reached in exponential backoff timer", BackupMonitorMaxRetryPeriod)
m.logger.Error(err)

currentBackupStatus.State = longhorn.BackupStateError
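
The changes above remove a struct field that appears to be unused and lowercase the first word of an error message; Go style expects error strings to start with a lowercase letter because they are usually wrapped into longer messages. A minimal sketch of the error-string fix:

package example

import (
    "fmt"
    "time"
)

// maxRetryError keeps the message lowercase so it reads naturally when
// wrapped, e.g. "failed to monitor backup: max retry period 24h0m0s ...".
func maxRetryError(period time.Duration) error {
    return fmt.Errorf("max retry period %s reached in exponential backoff timer", period)
}
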
6 changes: 3 additions & 3 deletions engineapi/enginesim_test.go
@@ -28,7 +28,7 @@ func (s *TestSuite) TestBasic(c *C) {
var err error
coll := NewEngineSimulatorCollection()

sim, err := coll.GetEngineSimulator(VolumeName)
_, err = coll.GetEngineSimulator(VolumeName)
c.Assert(err, NotNil)

req := &EngineSimulatorRequest{
@@ -46,7 +46,7 @@ func (s *TestSuite) TestBasic(c *C) {
err = coll.CreateEngineSimulator(req)
c.Assert(err, ErrorMatches, "duplicate simulator.*")

sim, err = coll.GetEngineSimulator(VolumeName)
sim, err := coll.GetEngineSimulator(VolumeName)
c.Assert(err, IsNil)

e := &longhorn.Engine{}
@@ -64,7 +64,7 @@ func (s *TestSuite) TestBasic(c *C) {
c.Assert(replicas, HasLen, 1)
c.Assert(replicas[Replica1Addr].Mode, Equals, longhorn.ReplicaModeRW)

err = sim.ReplicaAdd(e, "", Replica3Addr, false, false, 30)
_ = sim.ReplicaAdd(e, "", Replica3Addr, false, false, 30)
replicas, err = sim.ReplicaList(e)
c.Assert(err, IsNil)
c.Assert(replicas, HasLen, 2)
4 changes: 2 additions & 2 deletions manager/kubernetes.go
@@ -72,7 +72,7 @@ func (m *VolumeManager) PVCreate(name, pvName, fsType, secretNamespace, secretNa
pv.Spec.CSI.NodePublishSecretRef = secretRef
}

pv, err = m.ds.CreatePersistentVolume(pv)
_, err = m.ds.CreatePersistentVolume(pv)
if err != nil {
return nil, err
}
@@ -131,7 +131,7 @@ func (m *VolumeManager) PVCCreate(name, namespace, pvcName string) (v *longhorn.
}

pvc := datastore.NewPVCManifestForVolume(v, ks.PVName, namespace, pvcName, pv.Spec.StorageClassName)
pvc, err = m.ds.CreatePersistentVolumeClaim(namespace, pvc)
_, err = m.ds.CreatePersistentVolumeClaim(namespace, pvc)
if err != nil {
return nil, err
}
21 changes: 9 additions & 12 deletions webhook/common/common.go
@@ -56,25 +56,22 @@ func GetLonghornLabelsPatchOp(obj runtime.Object, requiredLabels, removingLabels
labels = map[string]string{}
}

if removingLabels != nil {
for k := range removingLabels {
delete(labels, k)
}
for k := range removingLabels {
delete(labels, k)
}
if requiredLabels != nil {
for k, v := range requiredLabels {
labels[k] = v
}

for k, v := range requiredLabels {
labels[k] = v
}

volumeName := ""
switch obj.(type) {
switch o := obj.(type) {
case *longhorn.Volume:
volumeName = obj.(*longhorn.Volume).Name
volumeName = o.Name
case *longhorn.Engine:
volumeName = obj.(*longhorn.Engine).Spec.VolumeName
volumeName = o.Spec.VolumeName
case *longhorn.Replica:
volumeName = obj.(*longhorn.Replica).Spec.VolumeName
volumeName = o.Spec.VolumeName
}

if volumeName != "" {
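
The change above binds the switched value (switch o := obj.(type)) so each case can use o directly instead of repeating the obj.(*T) type assertion, and it drops the nil checks around the label loops, since ranging over a nil map in Go simply iterates zero times. A minimal sketch with hypothetical stand-in types:

package example

// Hypothetical stand-ins for the Longhorn CR types, for illustration only.
type Volume struct{ Name string }
type Engine struct{ VolumeName string }
type Replica struct{ VolumeName string }

// volumeNameOf uses the bound type-switch variable instead of re-asserting
// the concrete type inside every case.
func volumeNameOf(obj interface{}) string {
    switch o := obj.(type) {
    case *Volume:
        return o.Name
    case *Engine:
        return o.VolumeName
    case *Replica:
        return o.VolumeName
    default:
        return ""
    }
}
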
4 changes: 0 additions & 4 deletions webhook/resources/snapshot/mutator.go
@@ -18,10 +18,6 @@ import (
admissionregv1 "k8s.io/api/admissionregistration/v1"
)

var (
longhornFinalizerKey = longhorn.SchemeGroupVersion.Group
)

type snapShotMutator struct {
admission.DefaultMutator
ds *datastore.DataStore
4 changes: 2 additions & 2 deletions webhook/resources/volumeattachment/validator.go
@@ -50,11 +50,11 @@ func (v *volumeAttachmentValidator) Update(request *admission.Request, oldObj ru
newVA := newObj.(*longhorn.VolumeAttachment)

if newVA.Spec.Volume != oldVA.Spec.Volume {
return werror.NewInvalidError(fmt.Sprintf("spec.volume field is immutable"), "spec.volume")
return werror.NewInvalidError("spec.volume field is immutable", "spec.volume")
}

if len(oldVA.OwnerReferences) != 0 && !reflect.DeepEqual(newVA.OwnerReferences, oldVA.OwnerReferences) {
return werror.NewInvalidError(fmt.Sprintf("VolumeAttachment's OwnerReferences field is immutable"), "metadata.ownerReferences")
return werror.NewInvalidError("VolumeAttachment's OwnerReferences field is immutable", "metadata.ownerReferences")
}

if _, ok := oldVA.Labels[types.LonghornLabelVolume]; ok && newVA.Labels[types.LonghornLabelVolume] != oldVA.Labels[types.LonghornLabelVolume] {
1 change: 0 additions & 1 deletion webhook/server/handler.go
@@ -26,5 +26,4 @@ func newhealthzHandler() *healthzHandler {

func (h *healthzHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(http.StatusOK)
return
}
