167 changes: 167 additions & 0 deletions pkg/library/status/check.go
@@ -0,0 +1,167 @@
//
// Copyright (c) 2019-2022 Red Hat, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package status

import (
	"context"
	"fmt"
	"strings"

	"github.com/devfile/devworkspace-operator/pkg/common"
	"github.com/devfile/devworkspace-operator/pkg/config"
	"github.com/devfile/devworkspace-operator/pkg/infrastructure"
	"github.com/devfile/devworkspace-operator/pkg/provision/sync"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
)

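// containerFailureStateReasons lists container waiting/terminated reasons that are
// treated as unrecoverable failures for a workspace pod.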
var containerFailureStateReasons = []string{
	"CrashLoopBackOff",
	"ImagePullBackOff",
	"CreateContainerError",
	"RunContainerError",
}

// unrecoverablePodEventReasons contains Kubernetes events that should fail workspace startup
// if they occur related to a workspace pod. Events are stored as a map from event reason to
// the number of times the event must be seen before it is considered unrecoverable.
var unrecoverablePodEventReasons = map[string]int32{
	"FailedPostStartHook":   1,
	"FailedMount":           3,
	"FailedScheduling":      1,
	"FailedCreate":          1,
	"ReplicaSetCreateError": 1,
}

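// unrecoverableDeploymentConditionReasons lists deployment condition reasons that
// indicate the deployment cannot become ready.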
var unrecoverableDeploymentConditionReasons = []string{
	"FailedCreate",
}

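// CheckDeploymentStatus returns true if the deployment has at least one ready replica.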
func CheckDeploymentStatus(deployment *appsv1.Deployment) (ready bool) {
	return deployment.Status.ReadyReplicas > 0
}

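// CheckDeploymentConditions checks a deployment's conditions for unrecoverable reasons
// (see unrecoverableDeploymentConditionReasons) and returns an error message if one is found.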
func CheckDeploymentConditions(deployment *appsv1.Deployment) (healthy bool, errorMsg string) {
	conditions := deployment.Status.Conditions
	for _, condition := range conditions {
		for _, unrecoverableReason := range unrecoverableDeploymentConditionReasons {
			if condition.Reason == unrecoverableReason {
				return false, fmt.Sprintf("Detected unrecoverable deployment condition: %s %s", condition.Reason, condition.Message)
			}
		}
	}
	return true, ""
}

// CheckPodsState checks if workspace-related pods are in an unrecoverable state. A pod is considered unrecoverable
// if it has a container in one of the containerFailureStateReasons states, or if an unrecoverable event (with reason
// matching unrecoverablePodEventReasons) has the pod as its involved object.
// Returns an optional message describing the detected unrecoverable state, and an error if one occurs during the check.
func CheckPodsState(workspaceID string, namespace string, labelSelector k8sclient.MatchingLabels,
	clusterAPI sync.ClusterAPI) (stateMsg string, checkFailure error) {
	podList := &corev1.PodList{}
	if err := clusterAPI.Client.List(context.TODO(), podList, k8sclient.InNamespace(namespace), labelSelector); err != nil {
		return "", err
	}

	for _, pod := range podList.Items {
		for _, containerStatus := range pod.Status.ContainerStatuses {
			ok, reason := CheckContainerStatusForFailure(&containerStatus)
			if !ok {
				return fmt.Sprintf("Container %s has state %s", containerStatus.Name, reason), nil
			}
		}
		for _, initContainerStatus := range pod.Status.InitContainerStatuses {
			ok, reason := CheckContainerStatusForFailure(&initContainerStatus)
			if !ok {
				return fmt.Sprintf("Init Container %s has state %s", initContainerStatus.Name, reason), nil
			}
		}
		if msg, err := CheckPodEvents(&pod, workspaceID, clusterAPI); err != nil || msg != "" {
			return msg, err
		}
	}
	return "", nil
}

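// CheckPodEvents lists events whose involved object is the given pod and returns a message
// if an unrecoverable event (per unrecoverablePodEventReasons) has reached its threshold count.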
func CheckPodEvents(pod *corev1.Pod, workspaceID string, clusterAPI sync.ClusterAPI) (msg string, err error) {
	evs := &corev1.EventList{}
	selector, err := fields.ParseSelector(fmt.Sprintf("involvedObject.name=%s", pod.Name))
	if err != nil {
		return "", fmt.Errorf("failed to parse field selector: %s", err)
	}
	if err := clusterAPI.Client.List(clusterAPI.Ctx, evs, k8sclient.InNamespace(pod.Namespace), k8sclient.MatchingFieldsSelector{Selector: selector}); err != nil {
		return "", fmt.Errorf("failed to list events in namespace %s: %w", pod.Namespace, err)
	}
	for _, ev := range evs.Items {
		if ev.InvolvedObject.Kind != "Pod" {
			continue
		}

		// On OpenShift, it's possible to see "FailedMount" events when using a routingClass that depends on the service-ca
		// operator. To avoid this, we always ignore FailedMount events if the message refers to the DWO-provisioned volume
		if infrastructure.IsOpenShift() &&
			ev.Reason == "FailedMount" &&
			strings.Contains(ev.Message, common.ServingCertVolumeName(common.ServiceName(workspaceID))) {
			continue
		}

		if maxCount, isUnrecoverableEvent := unrecoverablePodEventReasons[ev.Reason]; isUnrecoverableEvent {
			if !checkIfUnrecoverableEventIgnored(ev.Reason) && ev.Count >= maxCount {
				var msg string
				if ev.Count > 1 {
					msg = fmt.Sprintf("Detected unrecoverable event %s %d times: %s.", ev.Reason, ev.Count, ev.Message)
				} else {
					msg = fmt.Sprintf("Detected unrecoverable event %s: %s.", ev.Reason, ev.Message)
				}
				return msg, nil
			}
		}
	}
	return "", nil
}

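// CheckContainerStatusForFailure checks whether a container is waiting or terminated with
// one of the containerFailureStateReasons. It returns ok=false plus the reason on failure;
// reasons configured as ignored (see checkIfUnrecoverableEventIgnored) still return ok=true.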
func CheckContainerStatusForFailure(containerStatus *corev1.ContainerStatus) (ok bool, reason string) {
	if containerStatus.State.Waiting != nil {
		for _, failureReason := range containerFailureStateReasons {
			if containerStatus.State.Waiting.Reason == failureReason {
				return checkIfUnrecoverableEventIgnored(containerStatus.State.Waiting.Reason), containerStatus.State.Waiting.Reason
			}
		}
	}

	if containerStatus.State.Terminated != nil {
		for _, failureReason := range containerFailureStateReasons {
			if containerStatus.State.Terminated.Reason == failureReason {
				return checkIfUnrecoverableEventIgnored(containerStatus.State.Terminated.Reason), containerStatus.State.Terminated.Reason
			}
		}
	}
	return true, ""
}

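// checkIfUnrecoverableEventIgnored reports whether the given event reason is listed in
// config.Workspace.IgnoredUnrecoverableEvents and should therefore not fail startup.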
func checkIfUnrecoverableEventIgnored(reason string) (ignored bool) {
	for _, ignoredReason := range config.Workspace.IgnoredUnrecoverableEvents {
		if ignoredReason == reason {
			return true
		}
	}
	return false
}
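For context, a minimal sketch of how a reconciler might consume CheckPodsState. The workspace variable, the failWorkspace helper, and the exact label selector here are illustrative assumptions, not part of this change:

// Illustrative sketch only: fail workspace startup early when its pods are in
// an unrecoverable state. Assumes a controller-runtime reconcile loop with
// `workspace` and `clusterAPI` in scope; r.failWorkspace is hypothetical.
msg, err := status.CheckPodsState(
	workspace.Status.DevWorkspaceId,
	workspace.Namespace,
	k8sclient.MatchingLabels{constants.DevWorkspaceIDLabel: workspace.Status.DevWorkspaceId},
	clusterAPI,
)
if err != nil {
	return reconcile.Result{}, err // listing pods or their events failed
}
if msg != "" {
	return r.failWorkspace(workspace, msg) // e.g. set the Failed phase with msg
}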
24 changes: 23 additions & 1 deletion pkg/provision/storage/cleanup.go
@@ -21,6 +21,7 @@ import (
"time"

dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
"github.com/devfile/devworkspace-operator/pkg/library/status"
nsconfig "github.com/devfile/devworkspace-operator/pkg/provision/config"
"github.com/devfile/devworkspace-operator/pkg/provision/sync"
batchv1 "k8s.io/api/batch/v1"
@@ -29,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

"github.com/devfile/devworkspace-operator/internal/images"
@@ -91,6 +93,21 @@ func runCommonPVCCleanupJob(workspace *dw.DevWorkspace, clusterAPI sync.ClusterA
			}
		}
	}

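	// Fail fast if the cleanup job's pods are stuck in an unrecoverable state
	// (e.g. ImagePullBackOff) instead of waiting for the job's backoff limit.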
	msg, err := status.CheckPodsState(workspace.Status.DevWorkspaceId, clusterJob.Namespace, k8sclient.MatchingLabels{"job-name": common.PVCCleanupJobName(workspace.Status.DevWorkspaceId)}, clusterAPI)
	if err != nil {
		return &ProvisioningError{
			Err: err,
		}
	}

	if msg != "" {
		errMsg := fmt.Sprintf("DevWorkspace common PVC cleanup job failed: see logs for job %q for details. Additional information: %s", clusterJob.Name, msg)
		return &ProvisioningError{
			Message: errMsg,
		}
	}

	// Requeue at least every 10 seconds to check whether the PVC has been removed by someone else
	return &NotReadyError{
		Message: "Cleanup job is not in completed state",
@@ -110,7 +127,9 @@ func getSpecCommonPVCCleanupJob(workspace *dw.DevWorkspace, clusterAPI sync.Clus
	}

	jobLabels := map[string]string{
		constants.DevWorkspaceIDLabel:      workspaceId,
		constants.DevWorkspaceNameLabel:    workspace.Name,
		constants.DevWorkspaceCreatorLabel: workspace.Labels[constants.DevWorkspaceCreatorLabel],
	}
	if restrictedAccess, needsRestrictedAccess := workspace.Annotations[constants.DevWorkspaceRestrictedAccessAnnotation]; needsRestrictedAccess {
		jobLabels[constants.DevWorkspaceRestrictedAccessAnnotation] = restrictedAccess
@@ -126,6 +145,9 @@ func getSpecCommonPVCCleanupJob(workspace *dw.DevWorkspace, clusterAPI sync.Clus
			Completions:  &cleanupJobCompletions,
			BackoffLimit: &cleanupJobBackoffLimit,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: jobLabels,
				},
				Spec: corev1.PodSpec{
					RestartPolicy:   "Never",
					SecurityContext: wsprovision.GetDevWorkspaceSecurityContext(),
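Note on the second change above: the cleanup job's pod template previously carried no DevWorkspace labels, so propagating jobLabels onto it lets the cleanup pods be attributed to their workspace. The CheckPodsState call added earlier matches these pods through the job-name label, which the Kubernetes Job controller sets on every pod a job creates.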