From 3d3d17790e8cbcd6edb29c827447264e3bff4553 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Fri, 9 Nov 2018 15:55:14 -0500 Subject: [PATCH 01/14] Remove builder interface, linearize pod creation and watching --- cmd/controller/main.go | 5 +- pkg/builder/cluster/builder.go | 350 ------------ pkg/builder/cluster/builder_test.go | 705 ------------------------ pkg/builder/interface.go | 116 ---- pkg/builder/nop/builder.go | 140 ----- pkg/builder/nop/builder_test.go | 152 ----- pkg/controller/controller.go | 17 - pkg/reconciler/build/build.go | 341 +++++++++--- pkg/reconciler/build/build_test.go | 489 ++++++++-------- pkg/reconciler/build/resources/pod.go | 3 +- pkg/reconciler/build/validate_build.go | 6 +- pkg/reconciler/build/validation_test.go | 2 - 12 files changed, 491 insertions(+), 1835 deletions(-) delete mode 100644 pkg/builder/cluster/builder.go delete mode 100644 pkg/builder/cluster/builder_test.go delete mode 100644 pkg/builder/interface.go delete mode 100644 pkg/builder/nop/builder.go delete mode 100644 pkg/builder/nop/builder_test.go diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 2ed50f3c..e60af01c 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -37,7 +37,6 @@ import ( // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - onclusterbuilder "github.com/knative/build/pkg/builder/cluster" buildclientset "github.com/knative/build/pkg/client/clientset/versioned" informers "github.com/knative/build/pkg/client/informers/externalversions" "github.com/knative/build/pkg/controller" @@ -104,11 +103,9 @@ func main() { clusterBuildTemplateInformer := buildInformerFactory.Build().V1alpha1().ClusterBuildTemplates() imageInformer := cachingInformerFactory.Caching().V1alpha1().Images() - bldr := onclusterbuilder.NewBuilder(kubeClient, kubeInformerFactory, logger) - // Build all of our controllers, with the clients constructed above. controllers := []controller.Interface{ - build.NewController(logger, kubeClient, buildClient, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer, bldr), + build.NewController(logger, kubeClient, kubeInformerFactory, buildClient, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer), clusterbuildtemplate.NewController(logger, kubeClient, buildClient, cachingClient, clusterBuildTemplateInformer, imageInformer), buildtemplate.NewController(logger, kubeClient, buildClient, diff --git a/pkg/builder/cluster/builder.go b/pkg/builder/cluster/builder.go deleted file mode 100644 index 78ed679f..00000000 --- a/pkg/builder/cluster/builder.go +++ /dev/null @@ -1,350 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package cluster provides a Builder that runs workloads on-cluster. -package cluster - -import ( - "fmt" - "sync" - "time" - - "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - - v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - buildercommon "github.com/knative/build/pkg/builder" - "github.com/knative/build/pkg/reconciler/build/resources" - duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" -) - -type operation struct { - builder *builder - namespace string - name string - startTime *metav1.Time - statuses []corev1.ContainerStatus -} - -func (op *operation) Name() string { - return op.name -} - -func (op *operation) Checkpoint(build *v1alpha1.Build, status *v1alpha1.BuildStatus) error { - status.Builder = v1alpha1.ClusterBuildProvider - if status.Cluster == nil { - status.Cluster = &v1alpha1.ClusterSpec{} - } - status.Cluster.Namespace = op.namespace - status.Cluster.PodName = op.Name() - status.StartTime = op.startTime - status.StepStates = nil - status.StepsCompleted = nil - - // Always ignore the first pod status, which is creds-init. - skip := 1 - if build.Spec.Source != nil { - // If the build specifies source, skip another container status, which - // is the source-fetching container. 
- skip++ - } - if skip > len(op.statuses) { - skip = 0 - } - - for _, s := range op.statuses[skip:] { - if s.State.Terminated != nil { - status.StepsCompleted = append(status.StepsCompleted, s.Name) - } - status.StepStates = append(status.StepStates, s.State) - } - status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Building", - }) - return nil -} - -func (op *operation) Terminate() error { - if err := op.builder.kubeclient.CoreV1().Pods(op.namespace).Delete(op.name, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { - return err - } - return nil -} - -func (op *operation) Wait() (*v1alpha1.BuildStatus, error) { - podCh := make(chan *corev1.Pod) - defer close(podCh) - - // Ask the builder's watch loop to send a message on our channel when it sees our Pod complete. - if err := op.builder.registerDoneCallback(op.namespace, op.name, podCh); err != nil { - return nil, err - } - - op.builder.logger.Infof("Waiting for %q", op.Name()) - pod := <-podCh - op.statuses = pod.Status.InitContainerStatuses - - states := []corev1.ContainerState{} - stepsCompleted := []string{} - for _, status := range pod.Status.InitContainerStatuses { - if status.State.Terminated != nil { - stepsCompleted = append(stepsCompleted, status.Name) - } - states = append(states, status.State) - } - - bs := &v1alpha1.BuildStatus{ - Builder: v1alpha1.ClusterBuildProvider, - Cluster: &v1alpha1.ClusterSpec{ - Namespace: op.namespace, - PodName: op.Name(), - }, - StartTime: op.startTime, - CompletionTime: &metav1.Time{time.Now()}, - StepStates: states, - StepsCompleted: stepsCompleted, - } - - if pod.Status.Phase == corev1.PodFailed { - msg := getFailureMessage(pod) - bs.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionFalse, - Message: msg, - }) - } else if pod.Status.Phase == corev1.PodPending { - msg := getWaitingMessage(pod) - bs.SetCondition(&duckv1alpha1.Condition{ - 
Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: msg, - }) - } else { - bs.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionTrue, - }) - } - return bs, nil -} - -type build struct { - builder *builder - body *corev1.Pod -} - -func (b *build) Execute() (buildercommon.Operation, error) { - pod, err := b.builder.kubeclient.CoreV1().Pods(b.body.Namespace).Create(b.body) - if err != nil { - return nil, err - } - return &operation{ - builder: b.builder, - namespace: pod.Namespace, - name: pod.Name, - startTime: &metav1.Time{time.Now()}, - statuses: pod.Status.InitContainerStatuses, - }, nil -} - -// NewBuilder constructs an on-cluster builder.Interface for executing Build custom resources. -func NewBuilder(kubeclient kubernetes.Interface, kubeinformers kubeinformers.SharedInformerFactory, logger *zap.SugaredLogger) buildercommon.Interface { - b := &builder{ - kubeclient: kubeclient, - callbacks: make(map[string]chan *corev1.Pod), - logger: logger, - } - - podInformer := kubeinformers.Core().V1().Pods() - podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: b.addPodEvent, - UpdateFunc: b.updatePodEvent, - DeleteFunc: b.deletePodEvent, - }) - - return b -} - -type builder struct { - kubeclient kubernetes.Interface - - // mux guards modifications to callbacks - mux sync.Mutex - // callbacks is keyed by Pod names and stores the channel on which to - // send a completion notification when we see that Pod complete. - // On success, an empty string is sent. - // On failure, the Message of the failure PodCondition is sent. 
- callbacks map[string]chan *corev1.Pod - logger *zap.SugaredLogger -} - -func (b *builder) Builder() v1alpha1.BuildProvider { - return v1alpha1.ClusterBuildProvider -} - -func (b *builder) Validate(u *v1alpha1.Build) error { - _, err := resources.MakePod(u, b.kubeclient) - return err -} - -func (b *builder) BuildFromSpec(u *v1alpha1.Build) (buildercommon.Build, error) { - bld, err := resources.MakePod(u, b.kubeclient) - if err != nil { - return nil, err - } - return &build{ - builder: b, - body: bld, - }, nil -} - -func (b *builder) OperationFromStatus(status *v1alpha1.BuildStatus) (buildercommon.Operation, error) { - if status.Builder != v1alpha1.ClusterBuildProvider { - return nil, fmt.Errorf("not a 'Cluster' builder: %v", status.Builder) - } - if status.Cluster == nil { - return nil, fmt.Errorf("status.cluster cannot be empty: %v", status) - } - var statuses []corev1.ContainerStatus - for _, state := range status.StepStates { - statuses = append(statuses, corev1.ContainerStatus{State: state}) - } - return &operation{ - builder: b, - namespace: status.Cluster.Namespace, - name: status.Cluster.PodName, - startTime: status.StartTime, - statuses: statuses, - }, nil -} - -func getKey(namespace, name string) string { - return fmt.Sprintf("%s/%s", namespace, name) -} - -// registerDoneCallback directs the builders to send a completion notification on podCh -// when the named Pod completes. An empty message is sent on successful completion. -func (b *builder) registerDoneCallback(namespace, name string, podCh chan *corev1.Pod) error { - b.mux.Lock() - defer b.mux.Unlock() - k := getKey(namespace, name) - if _, ok := b.callbacks[k]; ok { - return fmt.Errorf("another process is already waiting on %q", k) - } - b.callbacks[k] = podCh - return nil -} - -// addPodEvent handles the informer's AddFunc event for Pods. 
-func (b *builder) addPodEvent(obj interface{}) { - pod := obj.(*corev1.Pod) - ownerRef := metav1.GetControllerOf(pod) - - // If this object is not owned by a Build, we should not do anything more with it. - if ownerRef == nil || ownerRef.Kind != "Build" { - return - } - - // Once we have a Pod to act on, take the lock and see if anyone's watching. - b.mux.Lock() - defer b.mux.Unlock() - key := getKey(pod.Namespace, pod.Name) - - if ch, ok := b.callbacks[key]; ok { - // Send the person listening the message. - ch <- pod - delete(b.callbacks, key) - } else { - b.logger.Errorf("Saw %q update, but nothing was watching for it!", key) - } - - //Remove this callback from our map - if isDone(pod) { - b.logger.Debugf("Build finished, deleting the key %q", key) - delete(b.callbacks, key) - } -} - -// updatePodEvent handles the informer's UpdateFunc event for Pods. -func (b *builder) updatePodEvent(old, new interface{}) { - // Same as addPodEvent(new) - b.addPodEvent(new) -} - -// deletePodEvent handles the informer's DeleteFunc event for Pods. -func (b *builder) deletePodEvent(obj interface{}) { - // TODO(mattmoor): If a pod gets deleted and someone's watching, we should propagate our - // own error message so that we don't leak a go routine waiting forever. - b.logger.Errorf("NYI: delete event for: %v", obj) -} - -func isDone(pod *corev1.Pod) bool { - return pod.Status.Phase == corev1.PodSucceeded || - pod.Status.Phase == corev1.PodFailed -} - -func getWaitingMessage(pod *corev1.Pod) string { - // First, try to surface reason for pending/unknown about the actual build step. 
- for _, status := range pod.Status.InitContainerStatuses { - wait := status.State.Waiting - if wait != nil && wait.Message != "" { - return fmt.Sprintf("build step %q is pending with reason %q", - status.Name, wait.Message) - } - } - // Try to surface underlying reason by inspecting pod's recent status if condition is not true - for i, podStatus := range pod.Status.Conditions { - if podStatus.Status != corev1.ConditionTrue { - return fmt.Sprintf("pod status %q:%q; message: %q", - pod.Status.Conditions[i].Type, - pod.Status.Conditions[i].Status, - pod.Status.Conditions[i].Message) - } - } - // Next, return the Pod's status message if it has one. - if pod.Status.Message != "" { - return pod.Status.Message - } - - // Lastly fall back on a generic pending message. - return "Pending" -} - -func getFailureMessage(pod *corev1.Pod) string { - // First, try to surface an error about the actual build step that failed. - for _, status := range pod.Status.InitContainerStatuses { - term := status.State.Terminated - if term != nil && term.ExitCode != 0 { - return fmt.Sprintf("build step %q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s", - status.Name, term.ExitCode, status.ImageID, - pod.Namespace, pod.Name, status.Name) - } - } - // Next, return the Pod's status message if it has one. - if pod.Status.Message != "" { - return pod.Status.Message - } - // Lastly fall back on a generic error message. - return "build failed for unspecified reasons." -} diff --git a/pkg/builder/cluster/builder_test.go b/pkg/builder/cluster/builder_test.go deleted file mode 100644 index 88e46535..00000000 --- a/pkg/builder/cluster/builder_test.go +++ /dev/null @@ -1,705 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "strings" - "time" - - v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - "github.com/knative/pkg/apis" - "go.uber.org/zap" - - "github.com/google/go-cmp/cmp" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - fakek8s "k8s.io/client-go/kubernetes/fake" - - buildercommon "github.com/knative/build/pkg/builder" - "github.com/knative/build/pkg/buildtest" - duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" - - "testing" -) - -const ( - namespace = "" - expectedErrorMessage = "stuff broke" - expectedErrorReason = "it was bad" - expectedPendingMsg = "build step \"\" is pending with reason \"stuff broke\"" -) - -var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { - return true -}) - -func newBuilder(cs kubernetes.Interface) *builder { - kif := kubeinformers.NewSharedInformerFactory(cs, time.Second*30) - logger := zap.NewExample().Sugar() - return NewBuilder(cs, kif, logger).(*builder) -} - -func TestBasicFlow(t *testing.T) { - cs := fakek8s.NewSimpleClientset(&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default"}}) - builder := newBuilder(cs) - b, err := builder.BuildFromSpec(&v1alpha1.Build{}) - if err != nil { - t.Fatalf("Unexpected error creating builder.Build from Spec: %v", err) - } - op, err := b.Execute() - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - - build := &v1alpha1.Build{ - Status: v1alpha1.BuildStatus{}, - } - if err := 
op.Checkpoint(build, &build.Status); err != nil { - t.Fatalf("Unexpected error executing op.Checkpoint: %v", err) - } - if buildercommon.IsDone(&build.Status) { - t.Errorf("IsDone(%v); wanted not done, got done.", build.Status) - } - if !build.Status.CompletionTime.IsZero() { - t.Errorf("build.Status.CompletionTime; want zero, got %v", build.Status.CompletionTime) - } - if build.Status.StartTime.IsZero() { - t.Errorf("build.Status.StartTime; want non-zero, got %v", build.Status.StartTime) - } - op, err = builder.OperationFromStatus(&build.Status) - if err != nil { - t.Fatalf("Unexpected error executing OperationFromStatus: %v", err) - } - - checksComplete := buildtest.NewWait() - readyForUpdate := buildtest.NewWait() - go func() { - // Wait sufficiently long for Wait() to have been called and then - // signal to the main test thread that it should perform the update. - readyForUpdate.In(1 * time.Second) - - defer checksComplete.Done() - status, err := op.Wait() - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - - // Check that status came out how we expect. - if !buildercommon.IsDone(status) { - t.Errorf("IsDone(%v); wanted true, got false", status) - } - if status.Cluster.PodName != op.Name() { - t.Errorf("status.Cluster.PodName; wanted %q, got %q", op.Name(), status.Cluster.PodName) - } - if msg, failed := buildercommon.ErrorMessage(status); failed { - t.Errorf("ErrorMessage(%v); wanted not failed, got %q", status, msg) - } - if status.CompletionTime.IsZero() { - t.Errorf("status.CompletionTime; want non-zero, got %v", status.CompletionTime) - } - if status.StartTime.IsZero() { - t.Errorf("status.StartTime; want non-zero, got %v", status.StartTime) - } - }() - // Wait until the test thread is ready for us to update things. - readyForUpdate.Wait() - - // We should be able to fetch the Pod that b.Execute() created in our fake client. 
- podsclient := cs.CoreV1().Pods(namespace) - pod, err := podsclient.Get(op.Name(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Unexpected error fetching Pod: %v", err) - } - pod.Status.StartTime = &metav1.Time{Time: time.Now()} - - // Now modify it to look done. - pod.Status.Phase = corev1.PodSucceeded - pod, err = podsclient.Update(pod) - if err != nil { - t.Fatalf("Unexpected error updating Pod: %v", err) - } - - // The informer doesn't seem to properly pick up this update via the fake, - // so trigger the update event manually. - builder.updatePodEvent(nil, pod) - - checksComplete.WaitUntil(5*time.Second, buildtest.WaitNop, func() { - t.Fatal("timed out in op.Wait()") - }) - - // Trigger termination of pod - err = op.Terminate() - if err != nil { - t.Errorf("Expected no error while terminating operation") - } - // Verify pod is not available - if _, err = podsclient.Get(op.Name(), metav1.GetOptions{}); err == nil { - t.Fatalf("Expected 'not found' error while fetching Pod") - } -} - -func TestNonFinalUpdateFlow(t *testing.T) { - cs := fakek8s.NewSimpleClientset(&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default"}}) - builder := newBuilder(cs) - b, err := builder.BuildFromSpec(&v1alpha1.Build{}) - if err != nil { - t.Fatalf("Unexpected error creating builder.Build from Spec: %v", err) - } - op, err := b.Execute() - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - - build := &v1alpha1.Build{ - Status: v1alpha1.BuildStatus{}, - } - if err := op.Checkpoint(build, &build.Status); err != nil { - t.Fatalf("Unexpected error executing op.Checkpoint: %v", err) - } - if buildercommon.IsDone(&build.Status) { - t.Errorf("IsDone(%v); wanted not done, got done.", build.Status) - } - if !build.Status.CompletionTime.IsZero() { - t.Errorf("build.Status.CompletionTime; want zero, got %v", build.Status.CompletionTime) - } - if build.Status.StartTime.IsZero() { - t.Errorf("build.Status.StartTime; want non-zero, got %v", 
build.Status.StartTime) - } - op, err = builder.OperationFromStatus(&build.Status) - if err != nil { - t.Fatalf("Unexpected error executing OperationFromStatus: %v", err) - } - - checksComplete := buildtest.NewWait() - readyForUpdate := buildtest.NewWait() - go func() { - // Wait sufficiently long for Wait() to have been called and then - // signal to the main test thread that it should perform the update. - readyForUpdate.In(1 * time.Second) - - defer checksComplete.Done() - status, err := op.Wait() - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - if status.CompletionTime.IsZero() { - t.Errorf("status.CompletionTime; want non-zero, got %v", status.CompletionTime) - } - }() - // Wait until the test thread is ready for us to update things. - readyForUpdate.Wait() - - // We should be able to fetch the Pod that b.Execute() created in our fake client. - podsclient := cs.CoreV1().Pods(namespace) - pod, err := podsclient.Get(op.Name(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Unexpected error fetching Pod: %v", err) - } - // Make a non-terminal modification - pod.Status.Phase = corev1.PodRunning - pod.Status.StartTime = &metav1.Time{Time: time.Now()} - - pod, err = podsclient.Update(pod) - if err != nil { - t.Fatalf("Unexpected error updating Pod: %v", err) - } - - // The informer doesn't seem to properly pick up this update via the fake, - // so trigger the update event manually. - builder.updatePodEvent(nil, pod) - - // If we do not get a message from our Wait(), then we ignored the - // benign update. If we still haven't heard anything after 5 seconds, then - // throw an error. - checksComplete.WaitUntil(5*time.Second, buildtest.WaitNop, func() { - t.Fatal("timed out in op.Wait()") - }) - - // Now make it look done. 
- pod.Status.Phase = corev1.PodSucceeded - pod, err = podsclient.Update(pod) - if err != nil { - t.Fatalf("Unexpected error updating Pod: %v", err) - } - - // The informer doesn't seem to properly pick up this update via the fake, - // so trigger the update event manually. - builder.updatePodEvent(nil, pod) - - checksComplete.WaitUntil(5*time.Second, buildtest.WaitNop, func() { - t.Fatal("timed out in op.Wait()") - }) -} - -func TestFailureFlow(t *testing.T) { - cs := fakek8s.NewSimpleClientset(&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default"}}) - builder := newBuilder(cs) - b, err := builder.BuildFromSpec(&v1alpha1.Build{}) - if err != nil { - t.Fatalf("Unexpected error creating builder.Build from Spec: %v", err) - } - op, err := b.Execute() - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - - build := &v1alpha1.Build{ - Status: v1alpha1.BuildStatus{}, - } - if err := op.Checkpoint(build, &build.Status); err != nil { - t.Fatalf("Unexpected error executing op.Checkpoint: %v", err) - } - if buildercommon.IsDone(&build.Status) { - t.Errorf("IsDone(%v); wanted not done, got done.", build.Status) - } - if !build.Status.CompletionTime.IsZero() { - t.Errorf("build.Status.CompletionTime; want zero, got %v", build.Status.CompletionTime) - } - if build.Status.StartTime.IsZero() { - t.Errorf("build.Status.StartTime; want non-zero, got %v", build.Status.StartTime) - } - op, err = builder.OperationFromStatus(&build.Status) - if err != nil { - t.Fatalf("Unexpected error executing OperationFromStatus: %v", err) - } - - checksComplete := buildtest.NewWait() - readyForUpdate := buildtest.NewWait() - go func() { - // Wait sufficiently long for Wait() to have been called and then - // signal to the main test thread that it should perform the update. 
- readyForUpdate.In(1 * time.Second) - - defer checksComplete.Done() - status, err := op.Wait() - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - - // Check that status came out how we expect. - if !buildercommon.IsDone(status) { - t.Errorf("IsDone(%v); wanted true, got false", status) - } - if status.Cluster.PodName != op.Name() { - t.Errorf("status.Cluster.PodName; wanted %q, got %q", op.Name(), status.Cluster.PodName) - } - if msg, failed := buildercommon.ErrorMessage(status); !failed || msg != expectedErrorMessage { - t.Errorf("ErrorMessage(%v); wanted %q, got %q", status, expectedErrorMessage, msg) - } - if status.CompletionTime.IsZero() { - t.Errorf("status.CompletionTime; want non-zero, got %v", status.CompletionTime) - } - if status.StartTime.IsZero() { - t.Errorf("status.StartTime; want non-zero, got %v", status.StartTime) - } - if len(status.StepStates) != 1 { - t.Errorf("StepStates contained %d states, want 1: %+v", len(status.StepStates), status.StepStates) - } else if status.StepStates[0].Terminated.Reason != expectedErrorReason { - t.Errorf("StepStates[0] reason got %q, want %q", status.StepStates[0].Terminated.Reason, expectedErrorReason) - } - }() - // Wait until the test thread is ready for us to update things. - readyForUpdate.Wait() - - // We should be able to fetch the Pod that b.Execute() created in our fake client. - podsclient := cs.CoreV1().Pods(namespace) - pod, err := podsclient.Get(op.Name(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Unexpected error fetching Pod: %v", err) - } - // Now modify it to look done. 
- pod.Status.Phase = corev1.PodFailed - pod.Status.Message = expectedErrorMessage - pod.Status.InitContainerStatuses = []corev1.ContainerStatus{{ - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - Reason: expectedErrorReason, - }, - }, - }} - pod.Status.StartTime = &metav1.Time{Time: time.Now()} - pod, err = podsclient.Update(pod) - if err != nil { - t.Fatalf("Unexpected error updating Pod: %v", err) - } - - // The informer doesn't seem to properly pick up this update via the fake, - // so trigger the update event manually. - builder.updatePodEvent(nil, pod) - - checksComplete.WaitUntil(5*time.Second, buildtest.WaitNop, func() { - t.Fatal("timed out in op.Wait()") - }) -} - -func TestPodPendingFlow(t *testing.T) { - cs := fakek8s.NewSimpleClientset(&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default"}}) - builder := newBuilder(cs) - b, err := builder.BuildFromSpec(&v1alpha1.Build{}) - if err != nil { - t.Fatalf("Unexpected error creating builder.Build from Spec: %v", err) - } - op, err := b.Execute() - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - - build := &v1alpha1.Build{ - Status: v1alpha1.BuildStatus{}, - } - if err := op.Checkpoint(build, &build.Status); err != nil { - t.Fatalf("Unexpected error executing op.Checkpoint: %v", err) - } - if buildercommon.IsDone(&build.Status) { - t.Errorf("IsDone(%v); wanted not done, got done.", build.Status) - } - if !build.Status.CompletionTime.IsZero() { - t.Errorf("build.Status.CompletionTime; want zero, got %v", build.Status.CompletionTime) - } - if build.Status.StartTime.IsZero() { - t.Errorf("build.Status.StartTime; want non-zero, got %v", build.Status.StartTime) - } - op, err = builder.OperationFromStatus(&build.Status) - if err != nil { - t.Fatalf("Unexpected error executing OperationFromStatus: %v", err) - } - - checksComplete := buildtest.NewWait() - readyForUpdate := buildtest.NewWait() - go func() { - // Wait sufficiently long 
for Wait() to have been called and then - // signal to the main test thread that it should perform the update. - readyForUpdate.In(1 * time.Second) - - defer checksComplete.Done() - status, err := op.Wait() - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - - // Check that status came out how we expect. - if buildercommon.IsDone(status) { - t.Errorf("IsDone(%v); wanted false, got true", status) - } - if status.Cluster.PodName != op.Name() { - t.Errorf("status.Cluster.PodName; wanted %q, got %q", op.Name(), status.Cluster.PodName) - } - if msg := statusMessage(status); msg != expectedPendingMsg { - t.Errorf("ErrorMessage(%v); wanted %q, got %q", status, expectedPendingMsg, msg) - } - if status.StartTime.IsZero() { - t.Errorf("status.StartTime; want non-zero, got %v", status.StartTime) - } - if status.CompletionTime.IsZero() { - t.Errorf("status.CompletionTime; want non-zero, got %v", status.CompletionTime) - } - if len(status.StepStates) != 1 { - t.Errorf("StepStates contained %d states, want 1: %+v", len(status.StepStates), status.StepStates) - } else if status.StepStates[0].Waiting.Reason != expectedErrorReason { - t.Errorf("StepStates[0] reason got %q, want %q", status.StepStates[0].Waiting.Reason, expectedErrorReason) - } - }() - // Wait until the test thread is ready for us to update things. - readyForUpdate.Wait() - - // We should be able to fetch the Pod that b.Execute() created in our fake client. 
- podsclient := cs.CoreV1().Pods(namespace) - pod, err := podsclient.Get(op.Name(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Unexpected error fetching Pod: %v", err) - } - - pod.Status.Phase = corev1.PodPending - pod.Status.Message = expectedErrorMessage - pod.Status.InitContainerStatuses = []corev1.ContainerStatus{{ - State: corev1.ContainerState{ - Waiting: &corev1.ContainerStateWaiting{ - Message: expectedErrorMessage, - Reason: expectedErrorReason, - }, - }, - }} - pod.Status.StartTime = &metav1.Time{Time: time.Now()} - pod, err = podsclient.Update(pod) - if err != nil { - t.Fatalf("Unexpected error updating Pod: %v", err) - } - - // The informer doesn't seem to properly pick up this update via the fake, - // so trigger the update event manually. - builder.updatePodEvent(nil, pod) - - checksComplete.WaitUntil(5*time.Second, buildtest.WaitNop, func() { - t.Fatal("timed out in op.Wait()") - }) -} - -func TestStepFailureFlow(t *testing.T) { - cs := fakek8s.NewSimpleClientset(&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default"}}) - builder := newBuilder(cs) - b, err := builder.BuildFromSpec(&v1alpha1.Build{ - Spec: v1alpha1.BuildSpec{ - Steps: []corev1.Container{{ - Name: "step-name", - Image: "ubuntu:latest", - Command: []string{"false"}, - }}, - }, - }) - if err != nil { - t.Fatalf("Unexpected error creating builder.Build from Spec: %v", err) - } - op, err := b.Execute() - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - - build := &v1alpha1.Build{ - Status: v1alpha1.BuildStatus{}, - } - if err := op.Checkpoint(build, &build.Status); err != nil { - t.Fatalf("Unexpected error executing op.Checkpoint: %v", err) - } - if buildercommon.IsDone(&build.Status) { - t.Errorf("IsDone(%v); wanted not done, got done.", build.Status) - } - if build.Status.StartTime.IsZero() { - t.Errorf("build.Status.StartTime; want non-zero, got %v", build.Status.StartTime) - } - if !build.Status.CompletionTime.IsZero() { - 
t.Errorf("build.Status.CompletionTime; want zero, got %v", build.Status.CompletionTime) - } - op, err = builder.OperationFromStatus(&build.Status) - if err != nil { - t.Fatalf("Unexpected error executing OperationFromStatus: %v", err) - } - - checksComplete := buildtest.NewWait() - readyForUpdate := buildtest.NewWait() - go func() { - // Wait sufficiently long for Wait() to have been called and then - // signal to the main test thread that it should perform the update. - readyForUpdate.In(1 * time.Second) - - defer checksComplete.Done() - status, err := op.Wait() - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - - // Check that status came out how we expect. - if !buildercommon.IsDone(status) { - t.Errorf("IsDone(%v); got false, want true", status) - } - if status.Cluster.PodName != op.Name() { - t.Errorf("status.Cluster.PodName; got %q, want %q", status.Cluster.PodName, op.Name()) - } - if msg, failed := buildercommon.ErrorMessage(status); !failed || - // We expect the error to contain the step name and exit code. - !strings.Contains(msg, `"step-name"`) || !strings.Contains(msg, "128") { - t.Errorf("ErrorMessage(%v); got %q, want %q", status, msg, expectedErrorMessage) - } - if status.StartTime.IsZero() { - t.Errorf("status.StartTime; got %v, want non-zero", status.StartTime) - } - if status.CompletionTime.IsZero() { - t.Errorf("status.CompletionTime; got %v, want non-zero", status.CompletionTime) - } - }() - // Wait until the test thread is ready for us to update things. - readyForUpdate.Wait() - - // We should be able to fetch the Pod that b.Execute() created in our fake client. - podsclient := cs.CoreV1().Pods(namespace) - pod, err := podsclient.Get(op.Name(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Unexpected error fetching Pod: %v", err) - } - // Now modify it to look done. 
- pod.Status.Phase = corev1.PodFailed - pod.Status.InitContainerStatuses = []corev1.ContainerStatus{{ - Name: "step-name", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 128, - }, - }, - ImageID: "docker-pullable://ubuntu@sha256:deadbeef", - }} - pod.Status.Message = "don't expect this!" - pod.Status.StartTime = &metav1.Time{Time: time.Now()} - - pod, err = podsclient.Update(pod) - if err != nil { - t.Fatalf("Unexpected error updating Pod: %v", err) - } - - // The informer doesn't seem to properly pick up this update via the fake, - // so trigger the update event manually. - builder.updatePodEvent(nil, pod) - - checksComplete.WaitUntil(5*time.Second, buildtest.WaitNop, func() { - t.Fatal("timed out in op.Wait()") - }) -} - -func TestBasicFlowWithCredentials(t *testing.T) { - name := "my-secret-identity" - cs := fakek8s.NewSimpleClientset( - &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - }, - Secrets: []corev1.ObjectReference{{ - Name: name, - }, { - Name: "not-annotated", - }}, - }, - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Annotations: map[string]string{ - "build.knative.dev/docker-0": "https://gcr.io", - }, - }, - Type: corev1.SecretTypeBasicAuth, - Data: map[string][]byte{ - corev1.BasicAuthUsernameKey: []byte("user1"), - corev1.BasicAuthPasswordKey: []byte("password"), - }, - }, - &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "not-annotated", - }, - Type: corev1.SecretTypeBasicAuth, - Data: map[string][]byte{ - corev1.BasicAuthUsernameKey: []byte("user2"), - corev1.BasicAuthPasswordKey: []byte("password"), - }, - }) - builder := newBuilder(cs) - b, err := builder.BuildFromSpec(&v1alpha1.Build{}) - if err != nil { - t.Fatalf("Unexpected error creating builder.Build from Spec: %v", err) - } - op, err := b.Execute() - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - - // We should be able to fetch the Pod that 
b.Execute() created in our fake client. - podsclient := cs.CoreV1().Pods(namespace) - pod, err := podsclient.Get(op.Name(), metav1.GetOptions{}) - if err != nil { - t.Fatalf("Unexpected error fetching Pod: %v", err) - } - - credInit := pod.Spec.InitContainers[0] - if got, want := len(credInit.Args), 1; got != want { - t.Errorf("len(CredInit.Args); got %v, want %v", got, want) - } - if !strings.Contains(credInit.Args[0], name) { - t.Errorf("arg[0]; got: %v, wanted string containing %q", credInit.Args[0], name) - } -} - -func statusMessage(status *v1alpha1.BuildStatus) string { - for _, cond := range status.Conditions { - if cond.Type == v1alpha1.BuildSucceeded && cond.Status == corev1.ConditionUnknown { - return cond.Message - } - } - return "" -} - -func TestStripStepStates(t *testing.T) { - for _, c := range []struct { - desc string - statuses []corev1.ContainerStatus - build *v1alpha1.Build - }{{ - desc: "only creds-init", - statuses: []corev1.ContainerStatus{{ - State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "creds-init: should be stripped"}}, - }, { - State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "real step: should be retained"}}, - }}, - build: &v1alpha1.Build{ - // No source. - Status: v1alpha1.BuildStatus{}, - }, - }, { - desc: "has source", - statuses: []corev1.ContainerStatus{{ - Name: "creds-init", - State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "creds-init: should be stripped"}}, - }, { - Name: "git-init", - State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "git-init: should be stripped"}}, - }, { - State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "real step: should be retained"}}, - }}, - build: &v1alpha1.Build{ - // No source. 
- Spec: v1alpha1.BuildSpec{ - Source: &v1alpha1.SourceSpec{ - Git: &v1alpha1.GitSourceSpec{}, - }, - }, - Status: v1alpha1.BuildStatus{}, - }, - }} { - t.Run(c.desc, func(t *testing.T) { - status := &v1alpha1.BuildStatus{} - op := &operation{ - statuses: c.statuses, - builder: &builder{}, - namespace: "namespace", - name: "podname", - } - if err := op.Checkpoint(c.build, status); err != nil { - t.Fatalf("Checkpoint: %v", err) - } - - // In both cases, we want the same status, stripped of implicit - // steps' states. - wantStatus := &v1alpha1.BuildStatus{ - Builder: v1alpha1.ClusterBuildProvider, - Cluster: &v1alpha1.ClusterSpec{ - Namespace: "namespace", - PodName: "podname", - }, - StepStates: []corev1.ContainerState{{ - Terminated: &corev1.ContainerStateTerminated{Reason: "real step: should be retained"}, - }}, - StepsCompleted: []string{""}, - Conditions: duckv1alpha1.Conditions{{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Building", - }}, - } - if d := cmp.Diff(wantStatus, status, ignoreVolatileTime); d != "" { - t.Errorf("Diff:\n%s", d) - } - }) - } -} diff --git a/pkg/builder/interface.go b/pkg/builder/interface.go deleted file mode 100644 index 96555d09..00000000 --- a/pkg/builder/interface.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package builder - -import ( - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" -) - -// Operation defines the interface for interacting with an Operation of a particular BuildProvider. -type Operation interface { - // Name provides the unique name for this operation, see OperationFromStatus. - Name() string - - // Checkpoint augments the provided BuildStatus with sufficient state to be - // restored by OperationFromStatus on an appropriate BuildProvider. - // - // This takes into account necessary information about the provided Build. - Checkpoint(*v1alpha1.Build, *v1alpha1.BuildStatus) error - - // Wait blocks until the Operation completes, returning either a status for the build or an error. - // TODO(mattmoor): This probably shouldn't be BuildStatus, but some sort of smaller-scope thing. - Wait() (*v1alpha1.BuildStatus, error) - - // Terminate cleans up this particular operation and returns an error if it fails - Terminate() error -} - -// Build defines the interface for launching a build and getting an Operation by which to track it to completion. -type Build interface { - // Execute launches this particular build and returns an Operation to track it's progress. - Execute() (Operation, error) -} - -// Interface defines the set of operations that all builders must implement. -type Interface interface { - // Which builder are we? - Builder() v1alpha1.BuildProvider - - // Validate a Build for this flavor of builder. - Validate(*v1alpha1.Build) error - - // Construct a Build for this flavor of builder from our CRD specification. - BuildFromSpec(*v1alpha1.Build) (Build, error) - - // Construct an Operation for this flavor of builder from a BuildStatus. - OperationFromStatus(*v1alpha1.BuildStatus) (Operation, error) -} - -// IsDone returns true if the build's status indicates the build is done. 
-func IsDone(status *v1alpha1.BuildStatus) bool { - if status == nil || len(status.Conditions) == 0 { - return false - } - for _, cond := range status.Conditions { - if cond.Type == v1alpha1.BuildSucceeded { - return cond.Status != corev1.ConditionUnknown - } - } - return false -} - -// IsTimeout returns true if the build's execution time is greater than -// specified build spec timeout. -func IsTimeout(status *v1alpha1.BuildStatus, buildTimeout *metav1.Duration) bool { - var timeout time.Duration - var defaultTimeout = 10 * time.Minute - - if status == nil { - return false - } - - if buildTimeout == nil { - // Set default timeout to 10 minute if build timeout is not set - timeout = defaultTimeout - } else { - timeout = buildTimeout.Duration - } - - // If build has not started timeout, startTime should be zero. - if status.StartTime.Time.IsZero() { - return false - } - return time.Since(status.StartTime.Time).Seconds() > timeout.Seconds() -} - -// ErrorMessage returns the error message from the status. -func ErrorMessage(status *v1alpha1.BuildStatus) (string, bool) { - if status == nil || len(status.Conditions) == 0 { - return "", false - } - for _, cond := range status.Conditions { - if cond.Type == v1alpha1.BuildSucceeded && cond.Status == corev1.ConditionFalse { - return cond.Message, true - } - } - return "", false -} diff --git a/pkg/builder/nop/builder.go b/pkg/builder/nop/builder.go deleted file mode 100644 index fc23688d..00000000 --- a/pkg/builder/nop/builder.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package nop provides a no-op builder implementation. -package nop - -import ( - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - buildercommon "github.com/knative/build/pkg/builder" - duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" -) - -const operationName = "nop" - -var ( - startTime = metav1.NewTime(time.Unix(0, 0)) - completionTime = metav1.NewTime(time.Unix(30, 0)) -) - -type operation struct { - builder *Builder -} - -func (nb *operation) Name() string { return operationName } - -func (nb *operation) Checkpoint(_ *v1alpha1.Build, status *v1alpha1.BuildStatus) error { - // Masquerade as the Google builder. - status.Builder = v1alpha1.GoogleBuildProvider - if status.Google == nil { - status.Google = &v1alpha1.GoogleSpec{} - } - status.Google.Operation = nb.Name() - status.StartTime = &startTime - status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Building", - }) - return nil -} - -func (nb *operation) Terminate() error { - return nb.builder.OpErr -} - -func (nb *operation) Wait() (*v1alpha1.BuildStatus, error) { - bs := &v1alpha1.BuildStatus{ - // Masquerade as the Google builder. 
- Builder: v1alpha1.GoogleBuildProvider, - Google: &v1alpha1.GoogleSpec{ - Operation: nb.Name(), - }, - StartTime: &startTime, - CompletionTime: &completionTime, - } - - if nb.builder.ErrorMessage != "" { - bs.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionFalse, - Reason: "NopFailed", - Message: nb.builder.ErrorMessage, - }) - } else { - bs.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionTrue, - }) - } - - return bs, nil -} - -type build struct { - builder *Builder - err error -} - -func (nb *build) Execute() (buildercommon.Operation, error) { - if nb.err != nil { - return nil, nb.err - } - return &operation{builder: nb.builder}, nil -} - -// Builder is a no-op Builder implementation. -type Builder struct { - // ErrorMessage is the error message that should be returned by builds - // executed by this builder. - ErrorMessage string - - // Err is the error that should be returned from calls to this builder. - Err error - - // Operation error - OpErr error -} - -func (nb *Builder) Builder() v1alpha1.BuildProvider { - // Masquerade as the Google builder. - return v1alpha1.GoogleBuildProvider -} - -// Validate does nothing. -func (nb *Builder) Validate(u *v1alpha1.Build) error { return nil } - -// BuildFromSpec returns the converted build, or the builder's predefined error. -func (nb *Builder) BuildFromSpec(*v1alpha1.Build) (buildercommon.Build, error) { - b := &build{builder: nb} - if nb.Err != nil { - b.err = nb.Err - } - return b, nil -} - -// OperationFromStatus returns the no-op operation. 
-func (nb *Builder) OperationFromStatus(*v1alpha1.BuildStatus) (buildercommon.Operation, error) { - if nb.Err != nil { - return nil, nb.Err - } - return &operation{builder: nb}, nil -} diff --git a/pkg/builder/nop/builder_test.go b/pkg/builder/nop/builder_test.go deleted file mode 100644 index 103b6736..00000000 --- a/pkg/builder/nop/builder_test.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nop - -import ( - "errors" - - v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - buildercommon "github.com/knative/build/pkg/builder" - - "testing" -) - -func TestBasicFlow(t *testing.T) { - builder := Builder{} - b, err := builder.BuildFromSpec(&v1alpha1.Build{}) - if err != nil { - t.Fatalf("Unexpected error creating builder.Build from Spec: %v", err) - } - op, err := b.Execute() - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - - build := &v1alpha1.Build{ - Status: v1alpha1.BuildStatus{}, - } - if err := op.Checkpoint(build, &build.Status); err != nil { - t.Fatalf("Unexpected error executing op.Checkpoint: %v", err) - } - if buildercommon.IsDone(&build.Status) { - t.Errorf("IsDone(%v); wanted not done, got done.", build.Status) - } - op, err = builder.OperationFromStatus(&build.Status) - if err != nil { - t.Fatalf("Unexpected error executing OperationFromStatus: %v", err) - } - if build.Status.StartTime.IsZero() { - t.Errorf("build.Status.StartTime; want 
non-zero, got %v", build.Status.StartTime) - } - if !build.Status.CompletionTime.IsZero() { - t.Errorf("build.Status.CompletionTime; want zero, got %v", build.Status.CompletionTime) - } - - status, err := op.Wait() - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - - // Check that status came out how we expect. - if !buildercommon.IsDone(status) { - t.Errorf("IsDone(%v); wanted true, got false", status) - } - if status.Google.Operation != op.Name() { - t.Errorf("status.Google.Operation; wanted %q, got %q", op.Name(), status.Google.Operation) - } - if msg, failed := buildercommon.ErrorMessage(status); failed { - t.Errorf("ErrorMessage(%v); wanted not failed, got %q", status, msg) - } - if status.StartTime.IsZero() { - t.Errorf("status.StartTime; want non-zero, got %v", status.StartTime) - } - if status.CompletionTime.IsZero() { - t.Errorf("status.CompletionTime; want non-zero, got %v", status.CompletionTime) - } -} - -func TestBasicFlowWithError(t *testing.T) { - expectedMsg := "Boom!" - builder := Builder{ErrorMessage: expectedMsg} - b, err := builder.BuildFromSpec(&v1alpha1.Build{}) - if err != nil { - t.Fatalf("Unexpected error creating builder.Build from Spec: %v", err) - } - op, err := b.Execute() - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - status, err := op.Wait() - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - - // Check that status came out how we expect. 
- if !buildercommon.IsDone(status) { - t.Errorf("IsDone(%v); wanted true, got false", status) - } - if status.Google.Operation != op.Name() { - t.Errorf("status.Google.Operation; wanted %q, got %q", op.Name(), status.Google.Operation) - } - if msg, failed := buildercommon.ErrorMessage(status); !failed || msg != expectedMsg { - t.Errorf("ErrorMessage(%v); wanted %q, got %q", status, expectedMsg, msg) - } -} - -func TestOperationFromStatus(t *testing.T) { - builder := Builder{} - op, err := builder.OperationFromStatus(&v1alpha1.BuildStatus{ - Google: &v1alpha1.GoogleSpec{ - Operation: operationName, - }, - }) - if err != nil { - t.Fatalf("Unexpected error executing builder.Build: %v", err) - } - status, err := op.Wait() - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - - // Check that status came out how we expect. - if !buildercommon.IsDone(status) { - t.Errorf("IsDone(%v); wanted true, got false", status) - } - if status.Google.Operation != op.Name() { - t.Errorf("status.Google.Operation; wanted %q, got %q", op.Name(), status.Google.Operation) - } - if msg, failed := buildercommon.ErrorMessage(status); failed { - t.Errorf("ErrorMessage(%v); wanted not failed, got %q", status, msg) - } -} - -func TestTerminate(t *testing.T) { - builder := Builder{ - OpErr: errors.New("testerr"), - } - op, err := builder.OperationFromStatus(&v1alpha1.BuildStatus{ - Google: &v1alpha1.GoogleSpec{ - Operation: operationName, - }, - }) - if err != nil { - t.Fatalf("Unexpected error waiting for builder.Operation: %v", err) - } - if op.Terminate() == nil { - t.Errorf("Expected error from operation.Terminate") - } -} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index e86aac87..4c920ed6 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -17,16 +17,9 @@ limitations under the License. 
package controller import ( - "go.uber.org/zap" - kubeinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" - "github.com/knative/build/pkg/builder" - - clientset "github.com/knative/build/pkg/client/clientset/versioned" buildscheme "github.com/knative/build/pkg/client/clientset/versioned/scheme" - informers "github.com/knative/build/pkg/client/informers/externalversions" ) func init() { @@ -39,13 +32,3 @@ func init() { type Interface interface { Run(threadiness int, stopCh <-chan struct{}) error } - -// Constructor defines the method signature for a controller constructor. -type Constructor func( - builder.Interface, - kubernetes.Interface, - clientset.Interface, - kubeinformers.SharedInformerFactory, - informers.SharedInformerFactory, - *zap.SugaredLogger, -) Interface diff --git a/pkg/reconciler/build/build.go b/pkg/reconciler/build/build.go index ae65d241..f42bd006 100644 --- a/pkg/reconciler/build/build.go +++ b/pkg/reconciler/build/build.go @@ -22,12 +22,12 @@ import ( "time" v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - "github.com/knative/build/pkg/builder" clientset "github.com/knative/build/pkg/client/clientset/versioned" buildscheme "github.com/knative/build/pkg/client/clientset/versioned/scheme" informers "github.com/knative/build/pkg/client/informers/externalversions/build/v1alpha1" listers "github.com/knative/build/pkg/client/listers/build/v1alpha1" "github.com/knative/build/pkg/reconciler" + "github.com/knative/build/pkg/reconciler/build/resources" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" "github.com/knative/pkg/controller" "github.com/knative/pkg/logging" @@ -37,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" + kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/cache" @@ -55,8 +56,6 @@ type 
Reconciler struct { buildTemplatesLister listers.BuildTemplateLister clusterBuildTemplatesLister listers.ClusterBuildTemplateLister - builder builder.Interface - // Sugared logger is easier to use but is not as performant as the // raw logger. In performance critical paths, call logger.Desugar() // and use the returned raw logger instead. In addition to the @@ -78,11 +77,11 @@ func init() { func NewController( logger *zap.SugaredLogger, kubeclientset kubernetes.Interface, + kubeinformers kubeinformers.SharedInformerFactory, buildclientset clientset.Interface, buildInformer informers.BuildInformer, buildTemplateInformer informers.BuildTemplateInformer, clusterBuildTemplateInformer informers.ClusterBuildTemplateInformer, - builder builder.Interface, ) *controller.Impl { // Enrich the logs with controller name @@ -94,7 +93,6 @@ func NewController( buildsLister: buildInformer.Lister(), buildTemplatesLister: buildTemplateInformer.Lister(), clusterBuildTemplatesLister: clusterBuildTemplateInformer.Lister(), - builder: builder, Logger: logger, } impl := controller.NewImpl(r, logger, "Builds", @@ -107,8 +105,13 @@ func NewController( UpdateFunc: controller.PassNew(impl.Enqueue), }) - // TODO(mattmoor): Set up a Pod informer, so that Pod updates - // trigger Build reconciliations. + // Set up a Pod informer, so that Pod updates trigger Build + // reconciliations. + kubeinformers.Core().V1().Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: r.addPodEvent, + UpdateFunc: r.updatePodEvent, + DeleteFunc: r.deletePodEvent, + }) return impl } @@ -144,56 +147,103 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { } // If the build's done, then ignore it. - if builder.IsDone(&build.Status) { + if isDone(&build.Status) { return nil } - // If the build is not done, but is in progress (has an operation), then asynchronously wait for it. - // TODO(mattmoor): Check whether the Builder matches the kind of our c.builder. 
- if build.Status.Builder != "" { - op, err := c.builder.OperationFromStatus(&build.Status) - if err != nil { + // If the build is ongoing, check if it's timed out. + if build.Status.Cluster != nil && build.Status.Cluster.PodName != "" { + // Check if build has timed out + return c.checkTimeout(build) + } + + // If the build hasn't started yet, create a Pod for it and record that + // pod's name in the build status. + p, err := c.startPodForBuild(build) + if err != nil { + build.Status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Reason: "BuildExecuteFailed", + Message: err.Error(), + }) + if err := c.updateStatus(build); err != nil { return err } + return err + } - // Check if build has timed out - if builder.IsTimeout(&build.Status, build.Spec.Timeout) { - //cleanup operation and update status - timeoutMsg := fmt.Sprintf("Build %q failed to finish within %q", build.Name, build.Spec.Timeout.Duration.String()) + // If Pod creation was successful, update the Build's status. 
+ bs := buildStatusFromPod(p, build) + bs.StartTime = &metav1.Time{time.Now()} + build.Status = bs + return c.updateStatus(build) +} - if err := op.Terminate(); err != nil { - c.Logger.Errorf("Failed to terminate pod: %v", err) - return err - } +func (c *Reconciler) updateStatus(u *v1alpha1.Build) error { + newb, err := c.buildclientset.BuildV1alpha1().Builds(u.Namespace).Get(u.Name, metav1.GetOptions{}) + if err != nil { + return err + } + newb.Status = u.Status - build.Status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionFalse, - Reason: "BuildTimeout", - Message: timeoutMsg, - }) - // update build completed time - build.Status.CompletionTime = &metav1.Time{time.Now()} - - if _, err := c.updateStatus(build); err != nil { - c.Logger.Errorf("Failed to update status for pod: %v", err) - return err - } + // Until #38113 is merged, we must use Update instead of UpdateStatus to + // update the Status block of the Build resource. UpdateStatus will not + // allow changes to the Spec of the resource, which is ideal for ensuring + // nothing other than resource status has been updated. + _, err = c.buildclientset.BuildV1alpha1().Builds(u.Namespace).Update(newb) + return err +} - c.Logger.Errorf("Timeout: %v", timeoutMsg) - return nil - } +// addPodEvent handles the informer's AddFunc event for Pods. +func (c *Reconciler) addPodEvent(obj interface{}) { + p, ok := obj.(*corev1.Pod) + if !ok { + return + } + ownerRef := metav1.GetControllerOf(p) - // if not timed out then wait async - go c.waitForOperation(build, op) - return nil + // If this object is not owned by a Build, we should not do anything + // more with it. + if ownerRef == nil || ownerRef.Kind != "Build" { + return + } + + // Get the build for this pod. 
+ buildName := ownerRef.Name + build, err := c.buildsLister.Builds(p.Namespace).Get(buildName) + if err != nil { + c.Logger.Errorf("Error getting build %q for pod %q in namespace %q", buildName, p.Name, p.Namespace) + return + } + + // Update the build's status from the pod's status. + build = build.DeepCopy() + build.Status = buildStatusFromPod(p, build) + if err := c.updateStatus(build); err != nil { + c.Logger.Errorf("Error updating build %q in response to pod event: %v", buildName, err) } +} + +// updatePodEvent handles the informer's UpdateFunc event for Pods. +func (c *Reconciler) updatePodEvent(old, new interface{}) { + c.addPodEvent(new) +} - // If the build hasn't even started, then start it and record the operation in our status. - // Note that by recording our status, we will trigger a reconciliation, so the wait above - // will kick in. - build.Status.Builder = c.builder.Builder() +// deletePodEvent handles the informer's DeleteFunc event for Pods. +func (c *Reconciler) deletePodEvent(obj interface{}) { + // TODO(mattmoor): If a pod gets deleted and someone's watching, we should propagate our + // own error message so that we don't leak a go routine waiting forever. + c.Logger.Errorf("NYI: delete event for: %v", obj) +} + +// startPodForBuild starts a new Pod to execute the build. +// +// This applies any build template that's specified, and creates the pod. 
+func (c *Reconciler) startPodForBuild(build *v1alpha1.Build) (*corev1.Pod, error) { + namespace := build.Namespace var tmpl v1alpha1.BuildTemplateInterface + var err error if build.Spec.Template != nil { if build.Spec.Template.Kind == v1alpha1.ClusterBuildTemplateKind { tmpl, err = c.clusterBuildTemplatesLister.Get(build.Spec.Template.Name) @@ -202,7 +252,7 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { if errors.IsNotFound(err) { runtime.HandleError(fmt.Errorf("cluster build template %q does not exist", build.Spec.Template.Name)) } - return err + return nil, err } } else { tmpl, err = c.buildTemplatesLister.BuildTemplates(namespace).Get(build.Spec.Template.Name) @@ -211,68 +261,193 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { if errors.IsNotFound(err) { runtime.HandleError(fmt.Errorf("build template %q in namespace %q does not exist", build.Spec.Template.Name, namespace)) } - return err + return nil, err } } } build, err = ApplyTemplate(build, tmpl) if err != nil { - return err + return nil, err } - // TODO: Validate build except steps+template - b, err := c.builder.BuildFromSpec(build) + + p, err := resources.MakePod(build, c.kubeclientset) if err != nil { + return nil, err + } + return c.kubeclientset.CoreV1().Pods(p.Namespace).Create(p) +} + +func (c *Reconciler) terminatePod(namespace, name string) error { + if err := c.kubeclientset.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { return err } - op, err := b.Execute() - if err != nil { - build.Status.SetCondition(&duckv1alpha1.Condition{ + return nil +} + +// TODO: move to pod.go +func buildStatusFromPod(p *corev1.Pod, build *v1alpha1.Build) v1alpha1.BuildStatus { + status := build.Status.DeepCopy() + status.Builder = v1alpha1.ClusterBuildProvider + status.Cluster = &v1alpha1.ClusterSpec{ + Namespace: p.Namespace, + PodName: p.Name, + } + status.StepStates = nil + status.StepsCompleted = nil + + 
status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Building", + }) + + // Always ignore the first pod status, which is creds-init. + skip := 1 + if build.Spec.Source != nil { + // If the build specifies source, skip another container status, which + // is the source-fetching container. + skip++ + } + // TODO: Handle multiple sources here. + if skip <= len(p.Status.InitContainerStatuses) { + for _, s := range p.Status.InitContainerStatuses[skip:] { + if s.State.Terminated != nil { + status.StepsCompleted = append(status.StepsCompleted, s.Name) + } + status.StepStates = append(status.StepStates, s.State) + } + } + + switch p.Status.Phase { + case corev1.PodRunning: + status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Building", + }) + case corev1.PodFailed: + msg := getFailureMessage(p) + status.SetCondition(&duckv1alpha1.Condition{ Type: v1alpha1.BuildSucceeded, Status: corev1.ConditionFalse, - Reason: "BuildExecuteFailed", - Message: err.Error(), + Message: msg, }) + case corev1.PodPending: + msg := getWaitingMessage(p) + status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: msg, + }) + case corev1.PodSucceeded: + status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionTrue, + }) + } + return *status +} - if _, err := c.updateStatus(build); err != nil { - return err +func getWaitingMessage(pod *corev1.Pod) string { + // First, try to surface reason for pending/unknown about the actual build step. 
+ for _, status := range pod.Status.InitContainerStatuses { + wait := status.State.Waiting + if wait != nil && wait.Message != "" { + return fmt.Sprintf("build step %q is pending with reason %q", + status.Name, wait.Message) } - return err } - if err := op.Checkpoint(build, &build.Status); err != nil { - return err + // Try to surface underlying reason by inspecting pod's recent status if condition is not true + for i, podStatus := range pod.Status.Conditions { + if podStatus.Status != corev1.ConditionTrue { + return fmt.Sprintf("pod status %q:%q; message: %q", + pod.Status.Conditions[i].Type, + pod.Status.Conditions[i].Status, + pod.Status.Conditions[i].Message) + } } - build, err = c.updateStatus(build) - if err != nil { - return err + // Next, return the Pod's status message if it has one. + if pod.Status.Message != "" { + return pod.Status.Message } - return nil + + // Lastly fall back on a generic pending message. + return "Pending" } -func (c *Reconciler) waitForOperation(build *v1alpha1.Build, op builder.Operation) error { - status, err := op.Wait() - if err != nil { - c.Logger.Errorf("Error while waiting for operation: %v", err) - return err +func getFailureMessage(pod *corev1.Pod) string { + // First, try to surface an error about the actual build step that failed. + for _, status := range pod.Status.InitContainerStatuses { + term := status.State.Terminated + if term != nil && term.ExitCode != 0 { + return fmt.Sprintf("build step %q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s", + status.Name, term.ExitCode, status.ImageID, + pod.Namespace, pod.Name, status.Name) + } } - build.Status = *status - if _, err := c.updateStatus(build); err != nil { - c.Logger.Errorf("Error updating build status: %v", err) - return err + // Next, return the Pod's status message if it has one. + if pod.Status.Message != "" { + return pod.Status.Message + } + // Lastly fall back on a generic error message. 
+ return "build failed for unspecified reasons." +} + +// IsDone returns true if the build's status indicates the build is done. +func isDone(status *v1alpha1.BuildStatus) bool { + cond := status.GetCondition(v1alpha1.BuildSucceeded) + return cond != nil && cond.Status != corev1.ConditionUnknown +} + +func (c *Reconciler) checkTimeout(build *v1alpha1.Build) error { + namespace := build.Namespace + if isTimeout(&build.Status, build.Spec.Timeout) { + if err := c.kubeclientset.CoreV1().Pods(namespace).Delete(build.Status.Cluster.PodName, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { + c.Logger.Errorf("Failed to terminate pod: %v", err) + return err + } + + timeoutMsg := fmt.Sprintf("Build %q failed to finish within %q", build.Name, build.Spec.Timeout.Duration.String()) + build.Status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Reason: "BuildTimeout", + Message: timeoutMsg, + }) + // update build completed time + build.Status.CompletionTime = &metav1.Time{time.Now()} + + if err := c.updateStatus(build); err != nil { + c.Logger.Errorf("Failed to update status for pod: %v", err) + return err + } + + c.Logger.Errorf("Timeout: %v", timeoutMsg) } return nil } -func (c *Reconciler) updateStatus(u *v1alpha1.Build) (*v1alpha1.Build, error) { - buildClient := c.buildclientset.BuildV1alpha1().Builds(u.Namespace) - newu, err := buildClient.Get(u.Name, metav1.GetOptions{}) - if err != nil { - return nil, err +// IsTimeout returns true if the build's execution time is greater than +// specified build spec timeout. +func isTimeout(status *v1alpha1.BuildStatus, buildTimeout *metav1.Duration) bool { + var timeout time.Duration + var defaultTimeout = 10 * time.Minute + + if status == nil { + return false } - newu.Status = u.Status - // Until #38113 is merged, we must use Update instead of UpdateStatus to - // update the Status block of the Build resource. 
UpdateStatus will not
-	// allow changes to the Spec of the resource, which is ideal for ensuring
-	// nothing other than resource status has been updated.
-	return buildClient.Update(newu)
+	if buildTimeout == nil {
+		// Default the timeout to 10 minutes if the build timeout is not set.
+		timeout = defaultTimeout
+	} else {
+		timeout = buildTimeout.Duration
+	}
+
+	// If the build has not started, startTime should be zero.
+	if status.StartTime == nil {
+		return false
+	}
+	return time.Since(status.StartTime.Time).Seconds() > timeout.Seconds()
 }
diff --git a/pkg/reconciler/build/build_test.go b/pkg/reconciler/build/build_test.go
index ac420aff..edbae91d 100644
--- a/pkg/reconciler/build/build_test.go
+++ b/pkg/reconciler/build/build_test.go
@@ -18,13 +18,12 @@ package build
 import (
 	"context"
-	"errors"
 	"fmt"
 	"testing"
 	"time"
 
 	"github.com/google/go-cmp/cmp"
-	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/knative/pkg/apis"
 	duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1"
 	"github.com/knative/pkg/controller"
 	"go.uber.org/zap"
@@ -38,19 +37,18 @@ import (
 	"k8s.io/client-go/tools/cache"
 
 	v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1"
-	"github.com/knative/build/pkg/builder"
-	"github.com/knative/build/pkg/builder/nop"
 	"github.com/knative/build/pkg/client/clientset/versioned/fake"
 	informers "github.com/knative/build/pkg/client/informers/externalversions"
 )
 
-const (
-	noErrorMessage = ""
-)
+// TODO(jasonhall): Test pod creation fails
+// TODO(jasonhall): Test build update fails
 
-const (
-	noResyncPeriod time.Duration = 0
-)
+const noResyncPeriod time.Duration = 0
+
+var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool {
+	return true
+})
 
 type fixture struct {
 	t *testing.T
@@ -85,15 +83,15 @@ func (f *fixture) createServceAccount() {
 	}
 }
 
-func (f *fixture) newController(b builder.Interface) (*controller.Impl, informers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
+func (f *fixture) newReconciler()
(controller.Reconciler, informers.SharedInformerFactory, kubeinformers.SharedInformerFactory) { k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriod) logger := zap.NewExample().Sugar() i := informers.NewSharedInformerFactory(f.client, noResyncPeriod) buildInformer := i.Build().V1alpha1().Builds() buildTemplateInformer := i.Build().V1alpha1().BuildTemplates() clusterBuildTemplateInformer := i.Build().V1alpha1().ClusterBuildTemplates() - c := NewController(logger, f.kubeclient, f.client, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer, b) - return c, i, k8sI + c := NewController(logger, f.kubeclient, k8sI, f.client, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer) + return c.Reconciler, i, k8sI } func (f *fixture) updateIndex(i informers.SharedInformerFactory, bl []*v1alpha1.Build) { @@ -112,8 +110,6 @@ func getKey(build *v1alpha1.Build, t *testing.T) string { } func TestBuildNotFoundFlow(t *testing.T) { - bldr := &nop.Builder{} - build := newBuild("test") f := &fixture{ t: t, @@ -136,35 +132,30 @@ func TestBuildNotFoundFlow(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - c, i, k8sI := f.newController(bldr) + r, i, k8sI := f.newReconciler() f.updateIndex(i, []*v1alpha1.Build{build}) i.Start(stopCh) k8sI.Start(stopCh) - if err := c.Reconciler.Reconcile(context.Background(), getKey(build, t)); err == nil { + if err := r.Reconcile(context.Background(), getKey(build, t)); err == nil { t.Errorf("Expect error syncing build") } } func TestBuildWithBadKey(t *testing.T) { - bldr := &nop.Builder{} - f := &fixture{ t: t, kubeclient: k8sfake.NewSimpleClientset(), } f.createServceAccount() - c, _, _ := f.newController(bldr) - - if err := c.Reconciler.Reconcile(context.Background(), "bad/worse/worst"); err != nil { + r, _, _ := f.newReconciler() + if err := r.Reconcile(context.Background(), "bad/worse/worst"); err != nil { t.Errorf("Unexpected error while syncing build: %s", err.Error()) } } func 
TestBuildNotFoundError(t *testing.T) { - bldr := &nop.Builder{} - build := newBuild("test") f := &fixture{ t: t, @@ -177,12 +168,12 @@ func TestBuildNotFoundError(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - c, i, k8sI := f.newController(bldr) + r, i, k8sI := f.newReconciler() // Don't update build informers with test build object i.Start(stopCh) k8sI.Start(stopCh) - if err := c.Reconciler.Reconcile(context.Background(), getKey(build, t)); err != nil { + if err := r.Reconcile(context.Background(), getKey(build, t)); err != nil { t.Errorf("Unexpected error while syncing build: %s", err.Error()) } } @@ -208,18 +199,19 @@ func TestBuildWithNonExistentTemplates(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - c, i, k8sI := f.newController(&nop.Builder{}) + r, i, k8sI := f.newReconciler() f.updateIndex(i, []*v1alpha1.Build{build}) i.Start(stopCh) k8sI.Start(stopCh) - if err := c.Reconciler.Reconcile(context.Background(), getKey(build, t)); err == nil { + if err := r.Reconcile(context.Background(), getKey(build, t)); err == nil { t.Errorf("Expect error syncing build") } else if !kuberrors.IsNotFound(err) { t.Errorf("Expect error to be not found err: %s", err.Error()) } } } + func TestBuildWithTemplate(t *testing.T) { tmpl := &v1alpha1.BuildTemplate{ TypeMeta: metav1.TypeMeta{APIVersion: v1alpha1.SchemeGroupVersion.String()}, @@ -250,7 +242,7 @@ func TestBuildWithTemplate(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - c, i, k8sI := f.newController(&nop.Builder{}) + r, i, k8sI := f.newReconciler() err := i.Build().V1alpha1().BuildTemplates().Informer().GetIndexer().Add(tmpl) if err != nil { @@ -261,7 +253,7 @@ func TestBuildWithTemplate(t *testing.T) { i.Start(stopCh) k8sI.Start(stopCh) - if err = c.Reconciler.Reconcile(context.Background(), getKey(build, t)); err != nil { + if err = r.Reconcile(context.Background(), getKey(build, t)); err != nil { t.Errorf("unexpected expecting error while syncing build: %s", 
err.Error()) } @@ -274,126 +266,205 @@ func TestBuildWithTemplate(t *testing.T) { t.Errorf("error matching build template spec: expected %#v; got %#v; diff %v", buildTemplateSpec, b.Spec.Template, d) } } + func TestBasicFlows(t *testing.T) { tests := []struct { - bldr builder.Interface - setup func() - expectedErrorMessage string + desc string + podStatus corev1.PodStatus + wantCondition *duckv1alpha1.Condition }{{ - bldr: &nop.Builder{}, - expectedErrorMessage: noErrorMessage, + desc: "success", + podStatus: corev1.PodStatus{Phase: corev1.PodSucceeded}, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionTrue, + }, + }, { + desc: "running", + podStatus: corev1.PodStatus{Phase: corev1.PodRunning}, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Building", + }, + }, { + desc: "failure-terminated", + podStatus: corev1.PodStatus{ + Phase: corev1.PodFailed, + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init status; ignored + }, { + Name: "status-name", + ImageID: "image-id", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }, + }}, + }, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Message: `build step "status-name" exited with code 123 (image: "image-id"); for logs run: kubectl -n default logs pod-for-failure-terminated -c status-name`, + }, + }, { + desc: "failure-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodFailed, + Message: "boom", + }, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Message: "boom", + }, + }, { + desc: "failure-unspecified", + podStatus: corev1.PodStatus{Phase: corev1.PodFailed}, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Message: "build failed for 
unspecified reasons.", + }, }, { - bldr: &nop.Builder{ErrorMessage: "boom"}, - expectedErrorMessage: "boom", + desc: "pending-waiting-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init status; ignored + }, { + Name: "status-name", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Message: "i'm pending", + }, + }, + }}, + }, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: `build step "status-name" is pending with reason "i'm pending"`, + }, + }, { + desc: "pending-pod-condition", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + Conditions: []corev1.PodCondition{{ + Status: corev1.ConditionUnknown, + Type: "the type", + Message: "the message", + }}, + }, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: `pod status "the type":"Unknown"; message: "the message"`, + }, + }, { + desc: "pending-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + Message: "pod status message", + }, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: "pod status message", + }, + }, { + desc: "pending-no-message", + podStatus: corev1.PodStatus{Phase: corev1.PodPending}, + wantCondition: &duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: "Pending", + }, }} - for idx, test := range tests { - build := newBuild("test") - f := &fixture{ - t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), - kubeclient: k8sfake.NewSimpleClientset(), - } - f.createServceAccount() - - stopCh := make(chan struct{}) - defer close(stopCh) - - c, i, k8sI := f.newController(test.bldr) - f.updateIndex(i, 
[]*v1alpha1.Build{build}) - i.Start(stopCh) - k8sI.Start(stopCh) - - // Run a single iteration of the syncHandler. - ctx := context.Background() - if err := c.Reconciler.Reconcile(ctx, getKey(build, t)); err != nil { - t.Errorf("error syncing build: %v", err) - } - - buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) - first, err := buildClient.Get(build.Name, metav1.GetOptions{}) - if err != nil { - t.Errorf("error fetching build: %v", err) - } - // Update status to current time - first.Status.StartTime = &metav1.Time{time.Now()} - - if builder.IsDone(&first.Status) { - t.Errorf("First IsDone(%d); wanted not done, got done.", idx) - } - if msg, failed := builder.ErrorMessage(&first.Status); failed { - t.Errorf("First ErrorMessage(%d); wanted not failed, got %q.", idx, msg) - } - - // We have to manually update the index, or the controller won't see the update. - f.updateIndex(i, []*v1alpha1.Build{first}) - - // Run a second iteration of the syncHandler. - if err := c.Reconciler.Reconcile(ctx, getKey(build, t)); err != nil { - t.Errorf("error syncing build: %v", err) - } - // A second reconciliation will trigger an asynchronous "Wait()", which - // should immediately return and trigger an update. Sleep to ensure that - // is all done before further checks. 
- time.Sleep(1 * time.Second) - - second, err := buildClient.Get(build.Name, metav1.GetOptions{}) - if err != nil { - t.Errorf("error fetching build: %v", err) - } - - if !builder.IsDone(&second.Status) { - t.Errorf("Second IsDone(%d, %v); wanted done, got not done.", idx, second.Status) - } - if msg, _ := builder.ErrorMessage(&second.Status); test.expectedErrorMessage != msg { - t.Errorf("Second ErrorMessage(%d); wanted %q, got %q.", idx, test.expectedErrorMessage, msg) - } - } -} - -func TestErrFlows(t *testing.T) { - bldrErr := errors.New("not okay") - bldr := &nop.Builder{Err: bldrErr} - - build := newBuild("test-err") - f := &fixture{ - t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), - kubeclient: k8sfake.NewSimpleClientset(), - } - f.createServceAccount() - - stopCh := make(chan struct{}) - defer close(stopCh) - - c, i, k8sI := f.newController(bldr) - f.updateIndex(i, []*v1alpha1.Build{build}) - i.Start(stopCh) - k8sI.Start(stopCh) - - if err := c.Reconciler.Reconcile(context.Background(), getKey(build, t)); err == nil { - t.Errorf("Expect error syncing build") - } - - // Fetch the build object and check the status - buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) - b, err := buildClient.Get(build.Name, metav1.GetOptions{}) - if err != nil { - t.Errorf("error fetching build: %v", err) - } - - if !builder.IsDone(&b.Status) { - t.Error("Builder IsDone(); wanted done, got not done.") - } - if msg, _ := builder.ErrorMessage(&b.Status); bldrErr.Error() != msg { - t.Errorf("Builder ErrorMessage(); wanted %q, got %q.", bldrErr.Error(), msg) + for _, c := range tests { + t.Run(c.desc, func(t *testing.T) { + build := newBuild(c.desc) + f := &fixture{ + t: t, + objects: []runtime.Object{build}, + client: fake.NewSimpleClientset(build), + kubeclient: k8sfake.NewSimpleClientset(), + } + f.createServceAccount() + + stopCh := make(chan struct{}) + defer close(stopCh) + + r, i, k8sI := f.newReconciler() + f.updateIndex(i, 
[]*v1alpha1.Build{build}) + i.Start(stopCh) + k8sI.Start(stopCh) + + // Reconcile to pick up changes. + ctx := context.Background() + if err := r.Reconcile(ctx, getKey(build, t)); err != nil { + t.Errorf("error syncing build: %v", err) + } + + buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) + b, err := buildClient.Get(build.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("error fetching build: %v", err) + } + + // Check that build is Building. + if d := cmp.Diff(b.Status.GetCondition(duckv1alpha1.ConditionSucceeded), &duckv1alpha1.Condition{ + Type: duckv1alpha1.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Building", + }, ignoreVolatileTime); d != "" { + t.Errorf("build status was not building: %+v", b.Status) + } + + // Update the underlying pod's status. + b, err = buildClient.Get(build.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error getting build: %v", err) + } + if b.Status.Cluster == nil || b.Status.Cluster.PodName == "" { + t.Fatalf("build status did not specify podName: %v", b.Status.Cluster) + } + + podName := b.Status.Cluster.PodName + p, err := f.kubeclient.CoreV1().Pods(metav1.NamespaceDefault).Get(podName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error getting pod %q: %v", podName, err) + } + p.Status = c.podStatus + if _, err := f.kubeclient.CoreV1().Pods(metav1.NamespaceDefault).Update(p); err != nil { + t.Fatalf("error updating pod %q: %v", podName, err) + } + + // Sleep to give Reconciler time to pick up the Pod event. + time.Sleep(500 * time.Millisecond) + + b, err = buildClient.Get(build.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("error fetching build: %v", err) + } + + // Check that build has the expected status. 
+ gotCondition := b.Status.GetCondition(duckv1alpha1.ConditionSucceeded) + if d := cmp.Diff(gotCondition, c.wantCondition, ignoreVolatileTime); d != "" { + t.Errorf("Unexpected build status %s", d) + } + }) } } func TestTimeoutFlows(t *testing.T) { - build := newBuild("test") + build := newBuild("timeout") buffer := 1 * time.Minute build.Spec.Timeout = &metav1.Duration{Duration: 1 * time.Second} @@ -409,152 +480,46 @@ func TestTimeoutFlows(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) - c, i, k8sI := f.newController(&nop.Builder{}) + r, i, k8sI := f.newReconciler() f.updateIndex(i, []*v1alpha1.Build{build}) i.Start(stopCh) k8sI.Start(stopCh) ctx := context.Background() - if err := c.Reconciler.Reconcile(ctx, getKey(build, t)); err != nil { + if err := r.Reconcile(ctx, getKey(build, t)); err != nil { t.Errorf("Not Expect error when syncing build") } buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) - first, err := buildClient.Get(build.Name, metav1.GetOptions{}) + b, err := buildClient.Get(build.Name, metav1.GetOptions{}) if err != nil { t.Errorf("error fetching build: %v", err) } - // Update status to past time by substracting buffer time - first.Status.StartTime.Time = metav1.Now().Time.Add(-buffer) + // Update build status to falsely indicate the build started 1m ago, + // which is longer than the build's timeout. + b.Status.StartTime.Time = metav1.Now().Time.Add(-buffer) - if builder.IsDone(&first.Status) { - t.Error("First IsDone; wanted not done, got done.") - } - - // We have to manually update the index, or the controller won't see the update. - f.updateIndex(i, []*v1alpha1.Build{first}) - - // Run a second iteration of the syncHandler. - if err := c.Reconciler.Reconcile(ctx, getKey(build, t)); err != nil { + // Reconcile to pick up changes. + // We have to manually update the index, or Reconcile won't see the update. 
+ f.updateIndex(i, []*v1alpha1.Build{b}) + if err := r.Reconcile(ctx, getKey(build, t)); err != nil { t.Errorf("Unexpected error while syncing build: %v", err) } - second, err := buildClient.Get(build.Name, metav1.GetOptions{}) + b, err = buildClient.Get(build.Name, metav1.GetOptions{}) if err != nil { t.Errorf("error fetching build: %v", err) } - // Ignore last transition time for comparing status objects - var ignoreLastTransitionTime = cmpopts.IgnoreTypes(duckv1alpha1.Condition{}.LastTransitionTime.Inner.Time) - - buildStatusMsg := fmt.Sprintf("Build %q failed to finish within \"1s\"", second.Name) - - buildStatus := second.Status.GetCondition(duckv1alpha1.ConditionSucceeded) - expectedStatus := &duckv1alpha1.Condition{ + // Check that the build has the expected timeout status. + if d := cmp.Diff(b.Status.GetCondition(duckv1alpha1.ConditionSucceeded), &duckv1alpha1.Condition{ Type: duckv1alpha1.ConditionSucceeded, Status: corev1.ConditionFalse, Reason: "BuildTimeout", - Message: buildStatusMsg, - } - - if d := cmp.Diff(buildStatus, expectedStatus, ignoreLastTransitionTime); d != "" { - t.Errorf("Mismatch of build status: expected %#v ; got %#v; diff %s", expectedStatus, buildStatus, d) - } -} - -func TestTimeoutFlowWithFailedOperation(t *testing.T) { - oppErr := errors.New("test-err") - bldr := &nop.Builder{ - OpErr: oppErr, // Include error while terminating build - } - - build := newBuild("test") - buffer := 10 * time.Minute - - build.Spec.Timeout = &metav1.Duration{Duration: 1 * time.Second} - - f := &fixture{ - t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), - kubeclient: k8sfake.NewSimpleClientset(), - } - f.createServceAccount() - - stopCh := make(chan struct{}) - defer close(stopCh) - - c, i, k8sI := f.newController(bldr) - - f.updateIndex(i, []*v1alpha1.Build{build}) - i.Start(stopCh) - k8sI.Start(stopCh) - - ctx := context.Background() - if err := c.Reconciler.Reconcile(ctx, getKey(build, t)); err != nil { - 
t.Errorf("Not Expect error when syncing build: %s", err.Error()) - } - - buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) - first, err := buildClient.Get(build.Name, metav1.GetOptions{}) - if err != nil { - t.Errorf("error fetching build: %v", err) - } - - // Update status to past time by substracting buffer time - first.Status.StartTime.Time = metav1.Now().Time.Add(-buffer) - - // We have to manually update the index, or the controller won't see the update. - f.updateIndex(i, []*v1alpha1.Build{first}) - - // Run a second iteration of the syncHandler to receive error from operation. - if err = c.Reconciler.Reconcile(ctx, getKey(build, t)); err != oppErr { - t.Errorf("Expect error %#v when syncing build", oppErr) - } -} - -func TestRunController(t *testing.T) { - build := newBuild("test-run") - - f := &fixture{ - t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), - kubeclient: k8sfake.NewSimpleClientset(), - } - - stopCh := make(chan struct{}) - errChan := make(chan error, 1) - - defer close(errChan) - - c, i, _ := f.newController(&nop.Builder{}) - - i.Start(stopCh) - - go func() { - errChan <- c.Run(2, stopCh) - }() - - // Shut down the controller after 2 second timeout - go func() { - time.Sleep(2 * time.Second) - close(stopCh) - }() - - buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) - b, err := buildClient.Get(build.Name, metav1.GetOptions{}) - if err != nil { - t.Errorf("error creating build: %v", err) - } - - if d := cmp.Diff(b, build); d != "" { - t.Errorf("Build mismatch; diff: %s; got %v; wanted: %v", d, b, build) - } - - if errRun := <-errChan; errRun != nil { - t.Errorf("Unexpected error from Run(): %#v", errRun) + Message: fmt.Sprintf("Build %q failed to finish within \"1s\"", b.Name), + }, ignoreVolatileTime); d != "" { + t.Errorf("Unexpected build status %s", b.Status) } } diff --git a/pkg/reconciler/build/resources/pod.go b/pkg/reconciler/build/resources/pod.go index af69b661..ed085cbe 
100644 --- a/pkg/reconciler/build/resources/pod.go +++ b/pkg/reconciler/build/resources/pod.go @@ -329,8 +329,7 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po // We execute the build's pod in the same namespace as where the build was // created so that it can access colocated resources. Namespace: build.Namespace, - // Ensure our Pod gets a unique name. - GenerateName: fmt.Sprintf("%s-", build.Name), + Name: fmt.Sprintf("pod-for-%s", build.Name), // If our parent Build is deleted, then we should be as well. OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(build, schema.GroupVersionKind{ diff --git a/pkg/reconciler/build/validate_build.go b/pkg/reconciler/build/validate_build.go index 46c9061a..fce1eda0 100644 --- a/pkg/reconciler/build/validate_build.go +++ b/pkg/reconciler/build/validate_build.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/knative/build/pkg/apis/build/v1alpha1" + "github.com/knative/build/pkg/reconciler/build/resources" ) func (ac *Reconciler) validateBuild(b *v1alpha1.Build) error { @@ -63,8 +64,9 @@ func (ac *Reconciler) validateBuild(b *v1alpha1.Build) error { } } - // Do builder-implementation-specific validation. - return ac.builder.Validate(b) + // Ensure the build can be translated to a Pod. 
+ _, err = resources.MakePod(b, ac.kubeclientset) + return err } // validateSecrets checks that if the Build specifies a ServiceAccount, that it diff --git a/pkg/reconciler/build/validation_test.go b/pkg/reconciler/build/validation_test.go index 79a71755..39ca9b4e 100644 --- a/pkg/reconciler/build/validation_test.go +++ b/pkg/reconciler/build/validation_test.go @@ -19,7 +19,6 @@ import ( "testing" "github.com/knative/build/pkg/apis/build/v1alpha1" - "github.com/knative/build/pkg/builder/nop" fakebuildclientset "github.com/knative/build/pkg/client/clientset/versioned/fake" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -329,7 +328,6 @@ func TestValidateBuild(t *testing.T) { testLogger := zap.NewNop().Sugar() ac := &Reconciler{ - builder: &nop.Builder{}, kubeclientset: client, buildclientset: buildClient, Logger: testLogger, From f223a1cf5f020a9499dd52cf01f5d2834e3ed0e5 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Fri, 9 Nov 2018 22:10:24 -0500 Subject: [PATCH 02/14] Remove pkg/buildtest/wait.go --- pkg/buildtest/wait.go | 68 ------------------------------------------- 1 file changed, 68 deletions(-) delete mode 100644 pkg/buildtest/wait.go diff --git a/pkg/buildtest/wait.go b/pkg/buildtest/wait.go deleted file mode 100644 index 636d537a..00000000 --- a/pkg/buildtest/wait.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package buildtest - -import ( - "sync" - "time" -) - -// Wait extends WaitGroup with a few useful methods to avoid tests having a -// failure mode of hanging. -type Wait struct { - sync.WaitGroup -} - -// Add should be overridden by implementors. -func (w *Wait) Add(delta int) { - panic("Don't call Add(int) on wait!") -} - -// In calls Done() after the specified duration. -func (w *Wait) In(d time.Duration) { - go func() { - time.Sleep(d) - w.Done() - }() -} - -// WaitNop is a convenience for passing into WaitUntil. -func WaitNop() {} - -// WaitUntil waits until Done() has been called or the specified duration has -// elapsed. If Done() is called, then onSuccess is called. If the duration -// elapses without Done() being called, then onTimeout is called. -func (w *Wait) WaitUntil(d time.Duration, onSuccess func(), onTimeout func()) { - ch := make(chan struct{}) - go func() { - w.Wait() - close(ch) - }() - select { - case <-ch: - onSuccess() - case <-time.After(d): - onTimeout() - } -} - -// NewWait is a convenience for creating a Wait object. 
-func NewWait() *Wait { - w := Wait{} - w.WaitGroup.Add(1) - return &w -} From 07e746c54d9e9548e44d9c90410b2a8c2beaae74 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Fri, 9 Nov 2018 22:15:17 -0500 Subject: [PATCH 03/14] Create pods like -pod --- pkg/reconciler/build/build_test.go | 9 ++++----- pkg/reconciler/build/resources/pod.go | 11 +++++------ 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/pkg/reconciler/build/build_test.go b/pkg/reconciler/build/build_test.go index edbae91d..0f4818cb 100644 --- a/pkg/reconciler/build/build_test.go +++ b/pkg/reconciler/build/build_test.go @@ -23,6 +23,9 @@ import ( "time" "github.com/google/go-cmp/cmp" + v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" + "github.com/knative/build/pkg/client/clientset/versioned/fake" + informers "github.com/knative/build/pkg/client/informers/externalversions" "github.com/knative/pkg/apis" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" "github.com/knative/pkg/controller" @@ -35,10 +38,6 @@ import ( k8sfake "k8s.io/client-go/kubernetes/fake" clientgotesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" - - v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - "github.com/knative/build/pkg/client/clientset/versioned/fake" - informers "github.com/knative/build/pkg/client/informers/externalversions" ) // TODO(jasonhall): Test pod creation fails @@ -306,7 +305,7 @@ func TestBasicFlows(t *testing.T) { wantCondition: &duckv1alpha1.Condition{ Type: v1alpha1.BuildSucceeded, Status: corev1.ConditionFalse, - Message: `build step "status-name" exited with code 123 (image: "image-id"); for logs run: kubectl -n default logs pod-for-failure-terminated -c status-name`, + Message: `build step "status-name" exited with code 123 (image: "image-id"); for logs run: kubectl -n default logs failure-terminated-pod -c status-name`, }, }, { desc: "failure-message", diff --git a/pkg/reconciler/build/resources/pod.go b/pkg/reconciler/build/resources/pod.go index 
ed085cbe..802b79e0 100644 --- a/pkg/reconciler/build/resources/pod.go +++ b/pkg/reconciler/build/resources/pod.go @@ -24,16 +24,15 @@ import ( "path/filepath" "strconv" + v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" + "github.com/knative/build/pkg/credentials" + "github.com/knative/build/pkg/credentials/dockercreds" + "github.com/knative/build/pkg/credentials/gitcreds" "github.com/knative/pkg/apis" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes" - - v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - "github.com/knative/build/pkg/credentials" - "github.com/knative/build/pkg/credentials/dockercreds" - "github.com/knative/build/pkg/credentials/gitcreds" ) const workspaceDir = "/workspace" @@ -329,7 +328,7 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po // We execute the build's pod in the same namespace as where the build was // created so that it can access colocated resources. Namespace: build.Namespace, - Name: fmt.Sprintf("pod-for-%s", build.Name), + Name: fmt.Sprintf("%s-pod", build.Name), // If our parent Build is deleted, then we should be as well. 
OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(build, schema.GroupVersionKind{ From 9cd59488c9e17d9f21fc4fcedde3f58fe44119b2 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Fri, 9 Nov 2018 22:42:06 -0500 Subject: [PATCH 04/14] Remove pkg/controller --- cmd/controller/main.go | 4 ++-- pkg/controller/controller.go | 34 ---------------------------------- 2 files changed, 2 insertions(+), 36 deletions(-) delete mode 100644 pkg/controller/controller.go diff --git a/cmd/controller/main.go b/cmd/controller/main.go index e60af01c..ea73fca9 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -24,6 +24,7 @@ import ( cachingclientset "github.com/knative/caching/pkg/client/clientset/versioned" cachinginformers "github.com/knative/caching/pkg/client/informers/externalversions" "github.com/knative/pkg/configmap" + "github.com/knative/pkg/controller" "github.com/knative/pkg/logging" "github.com/knative/pkg/logging/logkey" "github.com/knative/pkg/signals" @@ -39,7 +40,6 @@ import ( buildclientset "github.com/knative/build/pkg/client/clientset/versioned" informers "github.com/knative/build/pkg/client/informers/externalversions" - "github.com/knative/build/pkg/controller" "github.com/knative/build/pkg/reconciler/build" "github.com/knative/build/pkg/reconciler/buildtemplate" "github.com/knative/build/pkg/reconciler/clusterbuildtemplate" @@ -104,7 +104,7 @@ func main() { imageInformer := cachingInformerFactory.Caching().V1alpha1().Images() // Build all of our controllers, with the clients constructed above. 
- controllers := []controller.Interface{ + controllers := []*controller.Impl{ build.NewController(logger, kubeClient, kubeInformerFactory, buildClient, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer), clusterbuildtemplate.NewController(logger, kubeClient, buildClient, cachingClient, clusterBuildTemplateInformer, imageInformer), diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go deleted file mode 100644 index 4c920ed6..00000000 --- a/pkg/controller/controller.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2018 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "k8s.io/client-go/kubernetes/scheme" - - buildscheme "github.com/knative/build/pkg/client/clientset/versioned/scheme" -) - -func init() { - // Add build types to the default Kubernetes Scheme so Events can be - // logged for build types. - buildscheme.AddToScheme(scheme.Scheme) -} - -// Interface is the interface of a controller. 
-type Interface interface { - Run(threadiness int, stopCh <-chan struct{}) error -} From 4b7ed5fe93b78536a4344c5b0218cd57d84c26c9 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Sat, 10 Nov 2018 14:38:50 -0500 Subject: [PATCH 05/14] Improve TestLowTimeout, still needs work --- test/e2e/e2e.go | 27 +++++++++++++++++-------- test/e2e/simple_test.go | 44 +++++++++++++++++++++++++---------------- 2 files changed, 46 insertions(+), 25 deletions(-) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 0d48f598..da3d9f19 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -22,18 +22,17 @@ import ( "errors" "fmt" + "github.com/knative/build/pkg/apis/build/v1alpha1" + buildversioned "github.com/knative/build/pkg/client/clientset/versioned" + buildtyped "github.com/knative/build/pkg/client/clientset/versioned/typed/build/v1alpha1" "github.com/knative/pkg/test" "github.com/knative/pkg/test/logging" corev1 "k8s.io/api/core/v1" + kuberrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/tools/clientcmd" - - "github.com/knative/build/pkg/apis/build/v1alpha1" - buildversioned "github.com/knative/build/pkg/client/clientset/versioned" - buildtyped "github.com/knative/build/pkg/client/clientset/versioned/typed/build/v1alpha1" - kuberrors "k8s.io/apimachinery/pkg/api/errors" ) type clients struct { @@ -43,6 +42,14 @@ type clients struct { const buildTestNamespace = "build-tests" +var ( + // Sentinel error from watchBuild when the build failed. 
+ errBuildFailed = errors.New("build failed") + + // Sentinel error from watchBuild when watch timed out before build finished + errWatchTimeout = errors.New("watch ended before build finished") +) + func teardownNamespace(clients *clients, logger *logging.BaseLogger) { if clients != nil && clients.kubeClient != nil { logger.Infof("Deleting namespace %q", buildTestNamespace) @@ -144,7 +151,9 @@ type buildClient struct { func (c *buildClient) watchBuild(name string) (*v1alpha1.Build, error) { ls := metav1.SingleObject(metav1.ObjectMeta{Name: name}) // TODO: Update watchBuild function to take this as parameter depending on test requirements - // Set build timeout to 120 seconds. This will trigger watch timeout error + + // Any build that takes longer than this timeout will result in + // errWatchTimeout. var timeout int64 = 120 ls.TimeoutSeconds = &timeout @@ -152,6 +161,7 @@ func (c *buildClient) watchBuild(name string) (*v1alpha1.Build, error) { if err != nil { return nil, err } + var latest *v1alpha1.Build for evt := range w.ResultChan() { switch evt.Type { case watch.Deleted: @@ -164,6 +174,7 @@ func (c *buildClient) watchBuild(name string) (*v1alpha1.Build, error) { if !ok { return nil, fmt.Errorf("object was not a Build: %v", err) } + latest = b for _, cond := range b.Status.Conditions { if cond.Type == v1alpha1.BuildSucceeded { @@ -171,12 +182,12 @@ func (c *buildClient) watchBuild(name string) (*v1alpha1.Build, error) { case corev1.ConditionTrue: return b, nil case corev1.ConditionFalse: - return b, errors.New("build failed") + return b, errBuildFailed case corev1.ConditionUnknown: continue } } } } - return nil, errors.New("watch ended before build completion") + return latest, errWatchTimeout } diff --git a/test/e2e/simple_test.go b/test/e2e/simple_test.go index 4ce35c4a..6c9d16b9 100644 --- a/test/e2e/simple_test.go +++ b/test/e2e/simple_test.go @@ -21,20 +21,27 @@ package e2e import ( "context", "flag" + "fmt" "os" "testing" "time" + 
"github.com/google/go-cmp/cmp" + "github.com/knative/build/pkg/apis/build/v1alpha1" + "github.com/knative/pkg/apis" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" "github.com/knative/pkg/test" "github.com/knative/pkg/test/logging" "go.opencensus.io/trace" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/knative/build/pkg/apis/build/v1alpha1" ) +var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { + return true +}) + // TestMain is called by the test binary generated by "go test", and is // responsible for setting up and tearing down the testing environment, namely // the test namespace. @@ -240,12 +247,17 @@ func TestBuildLowTimeout(t *testing.T) { logger := logging.GetContextLogger("TestBuildLowTimeout") clients := buildClients(logger) - buildName := "build-low-timeout" + // TODO(jasonhall): builds created in quick succession, e.g., by `go + // test --count=N`, can sometimes produce confusing behavior around + // statuses, as the Build controller sees updates for a previous + // iteration of the identically-named build. Generate a unique name + // here to avoid this behavior. We need a better general solution. 
+ buildName := fmt.Sprintf("build-low-timeout-%d", time.Now().Unix()) test.CleanupOnInterrupt(func() { teardownBuild(clients, logger, buildName) }, logger) defer teardownBuild(clients, logger, buildName) - buildTimeout := 50 * time.Second + buildTimeout := 10 * time.Second if _, err := clients.buildClient.builds.Create(&v1alpha1.Build{ ObjectMeta: metav1.ObjectMeta{ @@ -255,7 +267,6 @@ func TestBuildLowTimeout(t *testing.T) { Spec: v1alpha1.BuildSpec{ Timeout: &metav1.Duration{Duration: buildTimeout}, Steps: []corev1.Container{{ - Name: "lowtimeoutstep", Image: "ubuntu", Command: []string{"/bin/bash"}, Args: []string{"-c", "sleep 2000"}, @@ -266,24 +277,23 @@ func TestBuildLowTimeout(t *testing.T) { } b, err := clients.buildClient.watchBuild(buildName) - if err == nil { - t.Fatalf("watchBuild did not return expected BuildTimeout error") + if err != errBuildFailed { + t.Fatalf("watchBuild got %v, want %v (build status: %+v)", err, errBuildFailed, b.Status) } - if &b.Status == nil { - t.Fatalf("wanted build status to be set; got nil") + if d := cmp.Diff(b.Status.GetCondition(duckv1alpha1.ConditionSucceeded), &duckv1alpha1.Condition{ + Type: duckv1alpha1.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: "BuildTimeout", + Message: fmt.Sprintf("Build %q failed to finish within %q", b.Name, buildTimeout), + }, ignoreVolatileTime); d != "" { + t.Errorf("Unexpected build status %s", b.Status) } - successCondition := b.Status.GetCondition(v1alpha1.BuildSucceeded) - - if successCondition == nil { - t.Fatalf("wanted build status to be set; got %q", b.Status) + if b.Status.CompletionTime == nil || b.Status.StartTime == nil { + t.Fatalf("missing start time (%v) or completion time (%v)", b.Status.StartTime, b.Status.CompletionTime) } - // verify reason for build failure is timeout - if successCondition.Reason != "BuildTimeout" { - t.Fatalf("wanted BuildTimeout; got %q", successCondition.Reason) - } buildDuration := 
b.Status.CompletionTime.Time.Sub(b.Status.StartTime.Time).Seconds() higherEnd := buildTimeout + 30*time.Second + 10*time.Second // build timeout + 30 sec poll time + 10 sec From e57ea6663db84c68de27e3a6b66ac1ca9eaa27b0 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Sat, 10 Nov 2018 14:46:06 -0500 Subject: [PATCH 06/14] Move BuildStatusFromPod to resources pkg --- .gitignore | 4 + pkg/reconciler/build/build.go | 114 +------------------------- pkg/reconciler/build/build_test.go | 9 -- pkg/reconciler/build/resources/pod.go | 104 +++++++++++++++++++++++ 4 files changed, 110 insertions(+), 121 deletions(-) diff --git a/.gitignore b/.gitignore index a0c1e5dd..08d88546 100644 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,7 @@ # JetBrains IDE config .idea + +# e2e test cruft +kubernetes.tar.gz +kubernetes/ diff --git a/pkg/reconciler/build/build.go b/pkg/reconciler/build/build.go index f42bd006..35d5da89 100644 --- a/pkg/reconciler/build/build.go +++ b/pkg/reconciler/build/build.go @@ -174,7 +174,7 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { } // If Pod creation was successful, update the Build's status. - bs := buildStatusFromPod(p, build) + bs := resources.BuildStatusFromPod(p, build.Spec) bs.StartTime = &metav1.Time{time.Now()} build.Status = bs return c.updateStatus(build) @@ -219,7 +219,7 @@ func (c *Reconciler) addPodEvent(obj interface{}) { // Update the build's status from the pod's status. 
build = build.DeepCopy() - build.Status = buildStatusFromPod(p, build) + build.Status = resources.BuildStatusFromPod(p, build.Spec) if err := c.updateStatus(build); err != nil { c.Logger.Errorf("Error updating build %q in response to pod event: %v", buildName, err) } @@ -284,116 +284,6 @@ func (c *Reconciler) terminatePod(namespace, name string) error { return nil } -// TODO: move to pod.go -func buildStatusFromPod(p *corev1.Pod, build *v1alpha1.Build) v1alpha1.BuildStatus { - status := build.Status.DeepCopy() - status.Builder = v1alpha1.ClusterBuildProvider - status.Cluster = &v1alpha1.ClusterSpec{ - Namespace: p.Namespace, - PodName: p.Name, - } - status.StepStates = nil - status.StepsCompleted = nil - - status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Building", - }) - - // Always ignore the first pod status, which is creds-init. - skip := 1 - if build.Spec.Source != nil { - // If the build specifies source, skip another container status, which - // is the source-fetching container. - skip++ - } - // TODO: Handle multiple sources here. 
- if skip <= len(p.Status.InitContainerStatuses) { - for _, s := range p.Status.InitContainerStatuses[skip:] { - if s.State.Terminated != nil { - status.StepsCompleted = append(status.StepsCompleted, s.Name) - } - status.StepStates = append(status.StepStates, s.State) - } - } - - switch p.Status.Phase { - case corev1.PodRunning: - status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Building", - }) - case corev1.PodFailed: - msg := getFailureMessage(p) - status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionFalse, - Message: msg, - }) - case corev1.PodPending: - msg := getWaitingMessage(p) - status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: msg, - }) - case corev1.PodSucceeded: - status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionTrue, - }) - } - return *status -} - -func getWaitingMessage(pod *corev1.Pod) string { - // First, try to surface reason for pending/unknown about the actual build step. - for _, status := range pod.Status.InitContainerStatuses { - wait := status.State.Waiting - if wait != nil && wait.Message != "" { - return fmt.Sprintf("build step %q is pending with reason %q", - status.Name, wait.Message) - } - } - // Try to surface underlying reason by inspecting pod's recent status if condition is not true - for i, podStatus := range pod.Status.Conditions { - if podStatus.Status != corev1.ConditionTrue { - return fmt.Sprintf("pod status %q:%q; message: %q", - pod.Status.Conditions[i].Type, - pod.Status.Conditions[i].Status, - pod.Status.Conditions[i].Message) - } - } - // Next, return the Pod's status message if it has one. - if pod.Status.Message != "" { - return pod.Status.Message - } - - // Lastly fall back on a generic pending message. 
- return "Pending" -} - -func getFailureMessage(pod *corev1.Pod) string { - // First, try to surface an error about the actual build step that failed. - for _, status := range pod.Status.InitContainerStatuses { - term := status.State.Terminated - if term != nil && term.ExitCode != 0 { - return fmt.Sprintf("build step %q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s", - status.Name, term.ExitCode, status.ImageID, - pod.Namespace, pod.Name, status.Name) - } - } - // Next, return the Pod's status message if it has one. - if pod.Status.Message != "" { - return pod.Status.Message - } - // Lastly fall back on a generic error message. - return "build failed for unspecified reasons." -} - // IsDone returns true if the build's status indicates the build is done. func isDone(status *v1alpha1.BuildStatus) bool { cond := status.GetCondition(v1alpha1.BuildSucceeded) diff --git a/pkg/reconciler/build/build_test.go b/pkg/reconciler/build/build_test.go index 0f4818cb..7b8fcc08 100644 --- a/pkg/reconciler/build/build_test.go +++ b/pkg/reconciler/build/build_test.go @@ -417,15 +417,6 @@ func TestBasicFlows(t *testing.T) { t.Errorf("error fetching build: %v", err) } - // Check that build is Building. - if d := cmp.Diff(b.Status.GetCondition(duckv1alpha1.ConditionSucceeded), &duckv1alpha1.Condition{ - Type: duckv1alpha1.ConditionSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Building", - }, ignoreVolatileTime); d != "" { - t.Errorf("build status was not building: %+v", b.Status) - } - // Update the underlying pod's status. 
b, err = buildClient.Get(build.Name, metav1.GetOptions{}) if err != nil { diff --git a/pkg/reconciler/build/resources/pod.go b/pkg/reconciler/build/resources/pod.go index 802b79e0..2fb7d8bd 100644 --- a/pkg/reconciler/build/resources/pod.go +++ b/pkg/reconciler/build/resources/pod.go @@ -29,6 +29,7 @@ import ( "github.com/knative/build/pkg/credentials/dockercreds" "github.com/knative/build/pkg/credentials/gitcreds" "github.com/knative/pkg/apis" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -359,3 +360,106 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po }, }, nil } + +// BuildStatusFromPod returns a BuildStatus based on the Pod and the original BuildSpec. +func BuildStatusFromPod(p *corev1.Pod, buildSpec v1alpha1.BuildSpec) v1alpha1.BuildStatus { + status := v1alpha1.BuildStatus{ + Builder: v1alpha1.ClusterBuildProvider, + Cluster: &v1alpha1.ClusterSpec{ + Namespace: p.Namespace, + PodName: p.Name, + }, + } + + // Always ignore the first pod status, which is creds-init. + skip := 1 + if buildSpec.Source != nil { + // If the build specifies source, skip another container status, which + // is the source-fetching container. + skip++ + } + // TODO: Handle multiple sources here. 
+ if skip <= len(p.Status.InitContainerStatuses) { + for _, s := range p.Status.InitContainerStatuses[skip:] { + if s.State.Terminated != nil { + status.StepsCompleted = append(status.StepsCompleted, s.Name) + } + status.StepStates = append(status.StepStates, s.State) + } + } + + switch p.Status.Phase { + case corev1.PodRunning: + status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Building", + }) + case corev1.PodFailed: + msg := getFailureMessage(p) + status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Message: msg, + }) + case corev1.PodPending: + msg := getWaitingMessage(p) + status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: msg, + }) + case corev1.PodSucceeded: + status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionTrue, + }) + } + return status +} + +func getWaitingMessage(pod *corev1.Pod) string { + // First, try to surface reason for pending/unknown about the actual build step. + for _, status := range pod.Status.InitContainerStatuses { + wait := status.State.Waiting + if wait != nil && wait.Message != "" { + return fmt.Sprintf("build step %q is pending with reason %q", + status.Name, wait.Message) + } + } + // Try to surface underlying reason by inspecting pod's recent status if condition is not true + for i, podStatus := range pod.Status.Conditions { + if podStatus.Status != corev1.ConditionTrue { + return fmt.Sprintf("pod status %q:%q; message: %q", + pod.Status.Conditions[i].Type, + pod.Status.Conditions[i].Status, + pod.Status.Conditions[i].Message) + } + } + // Next, return the Pod's status message if it has one. + if pod.Status.Message != "" { + return pod.Status.Message + } + + // Lastly fall back on a generic pending message. 
+ return "Pending" + } + +func getFailureMessage(pod *corev1.Pod) string { + // First, try to surface an error about the actual build step that failed. + for _, status := range pod.Status.InitContainerStatuses { + term := status.State.Terminated + if term != nil && term.ExitCode != 0 { + return fmt.Sprintf("build step %q exited with code %d (image: %q); for logs run: kubectl -n %s logs %s -c %s", + status.Name, term.ExitCode, status.ImageID, + pod.Namespace, pod.Name, status.Name) + } + } + // Next, return the Pod's status message if it has one. + if pod.Status.Message != "" { + return pod.Status.Message + } + // Lastly fall back on a generic error message. + return "build failed for unspecified reasons." +} From eb02e12190db461d1de57addd9e4b93c1da0fe45 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Sat, 10 Nov 2018 14:59:20 -0500 Subject: [PATCH 07/14] Skip statuses for multiple sources; add tests for BuildStatusFromPod --- pkg/reconciler/build/resources/pod.go | 3 +- pkg/reconciler/build/resources/pod_test.go | 136 ++++++++++++++++++++- 2 files changed, 136 insertions(+), 3 deletions(-) diff --git a/pkg/reconciler/build/resources/pod.go b/pkg/reconciler/build/resources/pod.go index 2fb7d8bd..e5fb886c 100644 --- a/pkg/reconciler/build/resources/pod.go +++ b/pkg/reconciler/build/resources/pod.go @@ -378,7 +378,8 @@ func BuildStatusFromPod(p *corev1.Pod, buildSpec v1alpha1.BuildSpec) v1alpha1.Bu // is the source-fetching container. skip++ } - // TODO: Handle multiple sources here. + // Also skip multiple sources specified by the build. 
+ skip += len(buildSpec.Sources) if skip <= len(p.Status.InitContainerStatuses) { for _, s := range p.Status.InitContainerStatuses[skip:] { if s.State.Terminated != nil { diff --git a/pkg/reconciler/build/resources/pod_test.go b/pkg/reconciler/build/resources/pod_test.go index 82f9b1d3..e26185e3 100644 --- a/pkg/reconciler/build/resources/pod_test.go +++ b/pkg/reconciler/build/resources/pod_test.go @@ -21,12 +21,12 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" + "github.com/knative/build/pkg/system" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakek8s "k8s.io/client-go/kubernetes/fake" - - v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" ) var ignorePrivateResourceFields = cmpopts.IgnoreUnexported(resource.Quantity{}) @@ -462,3 +462,135 @@ func TestMakePod(t *testing.T) { }) } } + +func TestBuildStatusFromPod(t *testing.T) { + for _, c := range []struct { + desc string + podStatus corev1.PodStatus + buildSpec v1alpha1.BuildSpec + want v1alpha1.BuildStatus + }{{ + desc: "empty", + podStatus: corev1.PodStatus{}, + buildSpec: v1alpha1.BuildSpec{}, + want: v1alpha1.BuildStatus{}, + }, { + desc: "ignore-creds-init", + podStatus: corev1.PodStatus{ + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init; ignored + }, { + Name: "state-name", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }, + }}, + }, + buildSpec: v1alpha1.BuildSpec{ + // no sources. + }, + want: v1alpha1.BuildStatus{ + StepsCompleted: []string{"state-name"}, + StepStates: []corev1.ContainerState{{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }}, + }, + }, { + desc: "ignore-creds-init-and-source", + podStatus: corev1.PodStatus{ + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init; ignored. + }, { + // git-init; ignored. 
+ }, { + Name: "state-name", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }, + }}, + }, + buildSpec: v1alpha1.BuildSpec{ + Source: &v1alpha1.SourceSpec{ + Git: &v1alpha1.GitSourceSpec{ + Url: "example.com", + Revision: "master", + }, + }, + }, + want: v1alpha1.BuildStatus{ + StepsCompleted: []string{"state-name"}, + StepStates: []corev1.ContainerState{{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }}, + }, + }, { + desc: "ignore-creds-init-and-multiple-sources", + podStatus: corev1.PodStatus{ + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init; ignored. + }, { + // first git-init; ignored. + }, { + // second git-init; ignored. + }, { + Name: "state-name", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }, + }}, + }, + buildSpec: v1alpha1.BuildSpec{ + Sources: []v1alpha1.SourceSpec{{ + Git: &v1alpha1.GitSourceSpec{ + Url: "example.com", + Revision: "master", + }, + }, { + Git: &v1alpha1.GitSourceSpec{ + Url: "yahoo.com", + Revision: "so cool", + }, + }}, + }, + want: v1alpha1.BuildStatus{ + StepsCompleted: []string{"state-name"}, + StepStates: []corev1.ContainerState{{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }}, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + p := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: system.Namespace, + }, + Status: c.podStatus, + } + got := BuildStatusFromPod(p, c.buildSpec) + + // Common traits, set for test case brevity. 
+ c.want.Cluster = &v1alpha1.ClusterSpec{ + PodName: "pod", + Namespace: system.Namespace, + } + c.want.Builder = v1alpha1.ClusterBuildProvider + + if d := cmp.Diff(got, c.want); d != "" { + t.Errorf("Diff:\n%s", d) + } + }) + } +} From afd2d71bd33560b0c0557dc8bea1acea391225da Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Mon, 12 Nov 2018 17:10:32 -0500 Subject: [PATCH 08/14] Lower timeout, log more --- pkg/reconciler/build/build.go | 14 +++++++++----- .../build-with-timeout/build-with-low-timeout.yaml | 4 ++-- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pkg/reconciler/build/build.go b/pkg/reconciler/build/build.go index 35d5da89..254d0b89 100644 --- a/pkg/reconciler/build/build.go +++ b/pkg/reconciler/build/build.go @@ -292,7 +292,8 @@ func isDone(status *v1alpha1.BuildStatus) bool { func (c *Reconciler) checkTimeout(build *v1alpha1.Build) error { namespace := build.Namespace - if isTimeout(&build.Status, build.Spec.Timeout) { + if c.isTimeout(&build.Status, build.Spec.Timeout) { + c.Logger.Infof("Build %q is timeout", build.Name) if err := c.kubeclientset.CoreV1().Pods(namespace).Delete(build.Status.Cluster.PodName, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { c.Logger.Errorf("Failed to terminate pod: %v", err) return err @@ -312,15 +313,13 @@ func (c *Reconciler) checkTimeout(build *v1alpha1.Build) error { c.Logger.Errorf("Failed to update status for pod: %v", err) return err } - - c.Logger.Errorf("Timeout: %v", timeoutMsg) } return nil } // IsTimeout returns true if the build's execution time is greater than // specified build spec timeout. 
-func isTimeout(status *v1alpha1.BuildStatus, buildTimeout *metav1.Duration) bool { +func (c *Reconciler) isTimeout(status *v1alpha1.BuildStatus, buildTimeout *metav1.Duration) bool { var timeout time.Duration var defaultTimeout = 10 * time.Minute @@ -339,5 +338,10 @@ func isTimeout(status *v1alpha1.BuildStatus, buildTimeout *metav1.Duration) bool if status.StartTime == nil { return false } - return time.Since(status.StartTime.Time).Seconds() > timeout.Seconds() + over := time.Since(status.StartTime.Time) > timeout + if over { + c.Logger.Infof("Build has timed out!") + } + c.Logger.Infof("Build timeout=%s, runtime=%s", timeout, time.Since(status.StartTime.Time)) + return over } diff --git a/test/build-with-timeout/build-with-low-timeout.yaml b/test/build-with-timeout/build-with-low-timeout.yaml index 76a30176..e642ac31 100644 --- a/test/build-with-timeout/build-with-low-timeout.yaml +++ b/test/build-with-timeout/build-with-low-timeout.yaml @@ -18,9 +18,9 @@ metadata: labels: expect: failed spec: - timeout: 50s + timeout: 5s steps: - name: step1 image: ubuntu command: ["/bin/bash"] - args: ["-c", "sleep 2000"] + args: ["-c", "sleep 600"] From d70091c2483471b2b0c831dd13bbbcf73967041e Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Mon, 12 Nov 2018 17:33:20 -0500 Subject: [PATCH 09/14] pass build to isTimeout --- pkg/reconciler/build/build.go | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/pkg/reconciler/build/build.go b/pkg/reconciler/build/build.go index 254d0b89..17779e63 100644 --- a/pkg/reconciler/build/build.go +++ b/pkg/reconciler/build/build.go @@ -43,7 +43,10 @@ import ( "k8s.io/client-go/tools/cache" ) -const controllerAgentName = "build-controller" +const ( + controllerAgentName = "build-controller" + defaultTimeout = 10 * time.Minute +) // Reconciler is the controller.Reconciler implementation for Builds resources type Reconciler struct { @@ -292,7 +295,7 @@ func isDone(status *v1alpha1.BuildStatus) bool { 
func (c *Reconciler) checkTimeout(build *v1alpha1.Build) error { namespace := build.Namespace - if c.isTimeout(&build.Status, build.Spec.Timeout) { + if c.isTimeout(build) { c.Logger.Infof("Build %q is timeout", build.Name) if err := c.kubeclientset.CoreV1().Pods(namespace).Delete(build.Status.Cluster.PodName, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { c.Logger.Errorf("Failed to terminate pod: %v", err) @@ -319,29 +322,27 @@ func (c *Reconciler) checkTimeout(build *v1alpha1.Build) error { // IsTimeout returns true if the build's execution time is greater than // specified build spec timeout. -func (c *Reconciler) isTimeout(status *v1alpha1.BuildStatus, buildTimeout *metav1.Duration) bool { - var timeout time.Duration - var defaultTimeout = 10 * time.Minute +func (c *Reconciler) isTimeout(build *v1alpha1.Build) bool { - if status == nil { + // If build has not started timeout, startTime should be zero. + if build.Status.StartTime == nil { + c.Logger.Infof("Build has not started") return false } - if buildTimeout == nil { + var timeout time.Duration + if build.Spec.Timeout == nil { // Set default timeout to 10 minute if build timeout is not set timeout = defaultTimeout } else { - timeout = buildTimeout.Duration + timeout = build.Spec.Timeout.Duration } - // If build has not started timeout, startTime should be zero. 
- if status.StartTime == nil { - return false - } - over := time.Since(status.StartTime.Time) > timeout + c.Logger.Infof("Build has not yet timed out (timeout=%s, elapsed=%q)", build.Spec.Timeout, time.Since(build.Status.StartTime.Time)) + over := time.Since(build.Status.StartTime.Time) > timeout if over { c.Logger.Infof("Build has timed out!") } - c.Logger.Infof("Build timeout=%s, runtime=%s", timeout, time.Since(status.StartTime.Time)) + c.Logger.Infof("Build timeout=%s, runtime=%s", timeout, time.Since(build.Status.StartTime.Time)) return over } From 3ef77785f7e3cd6919ce7fd063e8e9c35ab8d82a Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Tue, 13 Nov 2018 10:53:55 -0500 Subject: [PATCH 10/14] Generate unique names for each pod, otherwise conflict; move tests to pod_test.go --- pkg/reconciler/build/build_test.go | 66 -------- pkg/reconciler/build/resources/pod.go | 21 ++- pkg/reconciler/build/resources/pod_test.go | 180 ++++++++++++++++++++- 3 files changed, 193 insertions(+), 74 deletions(-) diff --git a/pkg/reconciler/build/build_test.go b/pkg/reconciler/build/build_test.go index 7b8fcc08..0162a810 100644 --- a/pkg/reconciler/build/build_test.go +++ b/pkg/reconciler/build/build_test.go @@ -286,27 +286,6 @@ func TestBasicFlows(t *testing.T) { Status: corev1.ConditionUnknown, Reason: "Building", }, - }, { - desc: "failure-terminated", - podStatus: corev1.PodStatus{ - Phase: corev1.PodFailed, - InitContainerStatuses: []corev1.ContainerStatus{{ - // creds-init status; ignored - }, { - Name: "status-name", - ImageID: "image-id", - State: corev1.ContainerState{ - Terminated: &corev1.ContainerStateTerminated{ - ExitCode: 123, - }, - }, - }}, - }, - wantCondition: &duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionFalse, - Message: `build step "status-name" exited with code 123 (image: "image-id"); for logs run: kubectl -n default logs failure-terminated-pod -c status-name`, - }, }, { desc: "failure-message", podStatus: corev1.PodStatus{ 
@@ -318,14 +297,6 @@ func TestBasicFlows(t *testing.T) { Status: corev1.ConditionFalse, Message: "boom", }, - }, { - desc: "failure-unspecified", - podStatus: corev1.PodStatus{Phase: corev1.PodFailed}, - wantCondition: &duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionFalse, - Message: "build failed for unspecified reasons.", - }, }, { desc: "pending-waiting-message", podStatus: corev1.PodStatus{ @@ -347,43 +318,6 @@ func TestBasicFlows(t *testing.T) { Reason: "Pending", Message: `build step "status-name" is pending with reason "i'm pending"`, }, - }, { - desc: "pending-pod-condition", - podStatus: corev1.PodStatus{ - Phase: corev1.PodPending, - Conditions: []corev1.PodCondition{{ - Status: corev1.ConditionUnknown, - Type: "the type", - Message: "the message", - }}, - }, - wantCondition: &duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: `pod status "the type":"Unknown"; message: "the message"`, - }, - }, { - desc: "pending-message", - podStatus: corev1.PodStatus{ - Phase: corev1.PodPending, - Message: "pod status message", - }, - wantCondition: &duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: "pod status message", - }, - }, { - desc: "pending-no-message", - podStatus: corev1.PodStatus{Phase: corev1.PodPending}, - wantCondition: &duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionUnknown, - Reason: "Pending", - Message: "Pending", - }, }} for _, c := range tests { diff --git a/pkg/reconciler/build/resources/pod.go b/pkg/reconciler/build/resources/pod.go index e5fb886c..239f6683 100644 --- a/pkg/reconciler/build/resources/pod.go +++ b/pkg/reconciler/build/resources/pod.go @@ -19,8 +19,12 @@ limitations under the License. 
package resources import ( + "crypto/rand" + "encoding/hex" "flag" "fmt" + "io" + "io/ioutil" "path/filepath" "strconv" @@ -62,6 +66,10 @@ var ( Name: "home", VolumeSource: emptyVolumeSource, }} + + // Random byte reader used for pod name generation. + // var for testing. + randReader = rand.Reader ) const ( @@ -324,12 +332,23 @@ func MakePod(build *v1alpha1.Build, kubeclient kubernetes.Interface) (*corev1.Po return nil, err } + // Generate a short random hex string. + b, err := ioutil.ReadAll(io.LimitReader(randReader, 3)) + if err != nil { + return nil, err + } + gibberish := hex.EncodeToString(b) + return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ // We execute the build's pod in the same namespace as where the build was // created so that it can access colocated resources. Namespace: build.Namespace, - Name: fmt.Sprintf("%s-pod", build.Name), + // Generate a unique name based on the build's name. + // Add a unique suffix to avoid confusion when a build + // is deleted and re-created with the same name. + // We don't use GenerateName here because k8s fakes don't support it. + Name: fmt.Sprintf("%s-pod-%s", build.Name, gibberish), // If our parent Build is deleted, then we should be as well. OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(build, schema.GroupVersionKind{ diff --git a/pkg/reconciler/build/resources/pod_test.go b/pkg/reconciler/build/resources/pod_test.go index e26185e3..6bfffa98 100644 --- a/pkg/reconciler/build/resources/pod_test.go +++ b/pkg/reconciler/build/resources/pod_test.go @@ -17,23 +17,32 @@ limitations under the License. 
package resources import ( + "crypto/rand" + "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" "github.com/knative/build/pkg/system" + "github.com/knative/pkg/apis" + duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakek8s "k8s.io/client-go/kubernetes/fake" ) -var ignorePrivateResourceFields = cmpopts.IgnoreUnexported(resource.Quantity{}) -var nopContainer = corev1.Container{ - Name: "nop", - Image: *nopImage, -} +var ( + ignorePrivateResourceFields = cmpopts.IgnoreUnexported(resource.Quantity{}) + nopContainer = corev1.Container{ + Name: "nop", + Image: *nopImage, + } + ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { + return true + }) +) func TestMakePod(t *testing.T) { subPath := "subpath" @@ -59,6 +68,9 @@ func TestMakePod(t *testing.T) { VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "multi-creds"}}, }) + randReader = strings.NewReader(strings.Repeat("a", 10000)) + defer func() { randReader = rand.Reader }() + for _, c := range []struct { desc string b v1alpha1.BuildSpec @@ -451,11 +463,23 @@ func TestMakePod(t *testing.T) { }, }, ) - got, err := MakePod(&v1alpha1.Build{Spec: c.b}, cs) + b := &v1alpha1.Build{ + ObjectMeta: metav1.ObjectMeta{ + Name: "build-name", + }, + Spec: c.b, + } + got, err := MakePod(b, cs) if err != c.wantErr { t.Fatalf("MakePod: %v", err) } + // Generated name from hexlifying a stream of 'a's. 
+ wantName := "build-name-pod-616161" + if got.Name != wantName { + t.Errorf("Pod name got %q, want %q", got.Name, wantName) + } + if d := cmp.Diff(&got.Spec, c.want, ignorePrivateResourceFields); d != "" { t.Errorf("Diff:\n%s", d) } @@ -570,6 +594,148 @@ func TestBuildStatusFromPod(t *testing.T) { }, }}, }, + }, { + desc: "success", + podStatus: corev1.PodStatus{Phase: corev1.PodSucceeded}, + want: v1alpha1.BuildStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionTrue, + }}, + }, + }, { + desc: "running", + podStatus: corev1.PodStatus{Phase: corev1.PodRunning}, + want: v1alpha1.BuildStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Building", + }}, + }, + }, { + desc: "failure-terminated", + podStatus: corev1.PodStatus{ + Phase: corev1.PodFailed, + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init status; ignored + }, { + Name: "status-name", + ImageID: "image-id", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }, + }}, + }, + want: v1alpha1.BuildStatus{ + StepsCompleted: []string{"status-name"}, + StepStates: []corev1.ContainerState{{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 123, + }, + }}, + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Message: `build step "status-name" exited with code 123 (image: "image-id"); for logs run: kubectl -n knative-build logs pod -c status-name`, + }}, + }, + }, { + desc: "failure-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodFailed, + Message: "boom", + }, + want: v1alpha1.BuildStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Message: "boom", + }}, + }, + }, { + desc: "failure-unspecified", + podStatus: corev1.PodStatus{Phase: corev1.PodFailed}, + want: 
v1alpha1.BuildStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Message: "build failed for unspecified reasons.", + }}, + }, + }, { + desc: "pending-waiting-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + InitContainerStatuses: []corev1.ContainerStatus{{ + // creds-init status; ignored + }, { + Name: "status-name", + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Message: "i'm pending", + }, + }, + }}, + }, + want: v1alpha1.BuildStatus{ + StepStates: []corev1.ContainerState{{ + Waiting: &corev1.ContainerStateWaiting{ + Message: "i'm pending", + }, + }}, + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: `build step "status-name" is pending with reason "i'm pending"`, + }}, + }, + }, { + desc: "pending-pod-condition", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + Conditions: []corev1.PodCondition{{ + Status: corev1.ConditionUnknown, + Type: "the type", + Message: "the message", + }}, + }, + want: v1alpha1.BuildStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: `pod status "the type":"Unknown"; message: "the message"`, + }}, + }, + }, { + desc: "pending-message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodPending, + Message: "pod status message", + }, + want: v1alpha1.BuildStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: "pod status message", + }}, + }, + }, { + desc: "pending-no-message", + podStatus: corev1.PodStatus{Phase: corev1.PodPending}, + want: v1alpha1.BuildStatus{ + Conditions: []duckv1alpha1.Condition{{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionUnknown, + Reason: "Pending", + Message: "Pending", + }}, + }, }} { 
t.Run(c.desc, func(t *testing.T) { p := &corev1.Pod{ @@ -588,7 +754,7 @@ func TestBuildStatusFromPod(t *testing.T) { } c.want.Builder = v1alpha1.ClusterBuildProvider - if d := cmp.Diff(got, c.want); d != "" { + if d := cmp.Diff(got, c.want, ignoreVolatileTime); d != "" { t.Errorf("Diff:\n%s", d) } }) From cd5d4db007e9c902dcc7fa92c194cfb355f5fcd5 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Tue, 13 Nov 2018 16:02:29 -0500 Subject: [PATCH 11/14] Sync pod lister before starting controller --- cmd/controller/main.go | 19 +++++++++---------- pkg/reconciler/build/build.go | 9 ++++++--- pkg/reconciler/build/build_test.go | 3 ++- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index ea73fca9..9af97828 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -21,6 +21,11 @@ import ( "log" "time" + buildclientset "github.com/knative/build/pkg/client/clientset/versioned" + informers "github.com/knative/build/pkg/client/informers/externalversions" + "github.com/knative/build/pkg/reconciler/build" + "github.com/knative/build/pkg/reconciler/buildtemplate" + "github.com/knative/build/pkg/reconciler/clusterbuildtemplate" cachingclientset "github.com/knative/caching/pkg/client/clientset/versioned" cachinginformers "github.com/knative/caching/pkg/client/informers/externalversions" "github.com/knative/pkg/configmap" @@ -33,16 +38,8 @@ import ( kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - - // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). + "k8s.io/client-go/tools/clientcmd" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - - buildclientset "github.com/knative/build/pkg/client/clientset/versioned" - informers "github.com/knative/build/pkg/client/informers/externalversions" - "github.com/knative/build/pkg/reconciler/build" - "github.com/knative/build/pkg/reconciler/buildtemplate" - "github.com/knative/build/pkg/reconciler/clusterbuildtemplate" ) const ( @@ -102,10 +99,11 @@ func main() { buildTemplateInformer := buildInformerFactory.Build().V1alpha1().BuildTemplates() clusterBuildTemplateInformer := buildInformerFactory.Build().V1alpha1().ClusterBuildTemplates() imageInformer := cachingInformerFactory.Caching().V1alpha1().Images() + podInformer := kubeInformerFactory.Core().V1().Pods() // Build all of our controllers, with the clients constructed above. controllers := []*controller.Impl{ - build.NewController(logger, kubeClient, kubeInformerFactory, buildClient, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer), + build.NewController(logger, kubeClient, podInformer, buildClient, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer), clusterbuildtemplate.NewController(logger, kubeClient, buildClient, cachingClient, clusterBuildTemplateInformer, imageInformer), buildtemplate.NewController(logger, kubeClient, buildClient, @@ -121,6 +119,7 @@ func main() { buildTemplateInformer.Informer().HasSynced, clusterBuildTemplateInformer.Informer().HasSynced, imageInformer.Informer().HasSynced, + podInformer.Informer().HasSynced, } { if ok := cache.WaitForCacheSync(stopCh, synced); !ok { logger.Fatalf("failed to wait for cache at index %v to sync", i) diff --git a/pkg/reconciler/build/build.go b/pkg/reconciler/build/build.go index 17779e63..9a8f6ec1 100644 --- a/pkg/reconciler/build/build.go +++ b/pkg/reconciler/build/build.go @@ -37,9 +37,10 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" - kubeinformers 
"k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" ) @@ -58,6 +59,7 @@ type Reconciler struct { buildsLister listers.BuildLister buildTemplatesLister listers.BuildTemplateLister clusterBuildTemplatesLister listers.ClusterBuildTemplateLister + podsLister corelisters.PodLister // Sugared logger is easier to use but is not as performant as the // raw logger. In performance critical paths, call logger.Desugar() @@ -80,7 +82,7 @@ func init() { func NewController( logger *zap.SugaredLogger, kubeclientset kubernetes.Interface, - kubeinformers kubeinformers.SharedInformerFactory, + podInformer coreinformers.PodInformer, buildclientset clientset.Interface, buildInformer informers.BuildInformer, buildTemplateInformer informers.BuildTemplateInformer, @@ -96,6 +98,7 @@ func NewController( buildsLister: buildInformer.Lister(), buildTemplatesLister: buildTemplateInformer.Lister(), clusterBuildTemplatesLister: clusterBuildTemplateInformer.Lister(), + podsLister: podInformer.Lister(), Logger: logger, } impl := controller.NewImpl(r, logger, "Builds", @@ -110,7 +113,7 @@ func NewController( // Set up a Pod informer, so that Pod updates trigger Build // reconciliations. 
- kubeinformers.Core().V1().Pods().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: r.addPodEvent, UpdateFunc: r.updatePodEvent, DeleteFunc: r.deletePodEvent, diff --git a/pkg/reconciler/build/build_test.go b/pkg/reconciler/build/build_test.go index 0162a810..4aec4206 100644 --- a/pkg/reconciler/build/build_test.go +++ b/pkg/reconciler/build/build_test.go @@ -89,7 +89,8 @@ func (f *fixture) newReconciler() (controller.Reconciler, informers.SharedInform buildInformer := i.Build().V1alpha1().Builds() buildTemplateInformer := i.Build().V1alpha1().BuildTemplates() clusterBuildTemplateInformer := i.Build().V1alpha1().ClusterBuildTemplates() - c := NewController(logger, f.kubeclient, k8sI, f.client, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer) + podInformer := k8sI.Core().V1().Pods() + c := NewController(logger, f.kubeclient, podInformer, f.client, buildInformer, buildTemplateInformer, clusterBuildTemplateInformer) return c.Reconciler, i, k8sI } From 0170839f141d09e1f6d34dc9443547ac57fd04fd Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Tue, 13 Nov 2018 16:12:14 -0500 Subject: [PATCH 12/14] update deps --- Gopkg.lock | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Gopkg.lock b/Gopkg.lock index c6d3d09a..7e0370b8 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -831,10 +831,12 @@ "k8s.io/client-go/discovery", "k8s.io/client-go/discovery/fake", "k8s.io/client-go/informers", + "k8s.io/client-go/informers/core/v1", "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/fake", "k8s.io/client-go/kubernetes/scheme", "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/listers/core/v1", "k8s.io/client-go/plugin/pkg/client/auth", "k8s.io/client-go/plugin/pkg/client/auth/gcp", "k8s.io/client-go/rest", From 9061535d75a9b1b153f5deb6e91cb578a3f1c3bb Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Tue, 20 Nov 2018 11:38:00 -0500 Subject: [PATCH 13/14] 
Reconcile in response to pod events --- pkg/reconciler/build/build.go | 174 +++++++-------------- pkg/reconciler/build/build_test.go | 174 +++++++++++---------- pkg/reconciler/build/resources/pod.go | 1 + pkg/reconciler/build/resources/pod_test.go | 12 +- 4 files changed, 153 insertions(+), 208 deletions(-) diff --git a/pkg/reconciler/build/build.go b/pkg/reconciler/build/build.go index 9a8f6ec1..dc2ea8e0 100644 --- a/pkg/reconciler/build/build.go +++ b/pkg/reconciler/build/build.go @@ -113,10 +113,12 @@ func NewController( // Set up a Pod informer, so that Pod updates trigger Build // reconciliations. - podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: r.addPodEvent, - UpdateFunc: r.updatePodEvent, - DeleteFunc: r.deletePodEvent, + podInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("Build")), + Handler: cache.ResourceEventHandlerFuncs{ + AddFunc: impl.EnqueueControllerOf, + UpdateFunc: controller.PassNew(impl.EnqueueControllerOf), + }, }) return impl @@ -146,43 +148,51 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { // Don't mutate the informer's copy of our object. build = build.DeepCopy() - // Validate build - if err = c.validateBuild(build); err != nil { - c.Logger.Errorf("Failed to validate build: %v", err) - return err - } - // If the build's done, then ignore it. if isDone(&build.Status) { return nil } - // If the build is ongoing, check if it's timed out. - if build.Status.Cluster != nil && build.Status.Cluster.PodName != "" { - // Check if build has timed out - return c.checkTimeout(build) - } - - // If the build hasn't started yet, create a Pod for it and record that - // pod's name in the build status. 
- p, err := c.startPodForBuild(build) - if err != nil { - build.Status.SetCondition(&duckv1alpha1.Condition{ - Type: v1alpha1.BuildSucceeded, - Status: corev1.ConditionFalse, - Reason: "BuildExecuteFailed", - Message: err.Error(), - }) - if err := c.updateStatus(build); err != nil { + // If the build hasn't started yet, validate it and create a Pod for it + // and record that pod's name in the build status. + var p *corev1.Pod + if build.Status.Cluster == nil || build.Status.Cluster.PodName == "" { + if err = c.validateBuild(build); err != nil { + logger.Errorf("Failed to validate build: %v", err) + return err + } + p, err = c.startPodForBuild(build) + if err != nil { + build.Status.SetCondition(&duckv1alpha1.Condition{ + Type: v1alpha1.BuildSucceeded, + Status: corev1.ConditionFalse, + Reason: "BuildExecuteFailed", + Message: err.Error(), + }) + if err := c.updateStatus(build); err != nil { + return err + } + return err + } + } else { + // If the build is ongoing, update its status based on its pod, and + // check if it's timed out. + p, err = c.podsLister.Pods(build.Namespace).Get(build.Status.Cluster.PodName) + if err != nil { + // TODO: What if the pod is deleted out from under us? return err } + } + + // Update the build's status based on the pod's status. + build.Status = resources.BuildStatusFromPod(p, build.Spec) + + // Check if build has timed out; if it is, this will set its status + // accordingly. + if err := c.checkTimeout(build); err != nil { return err } - // If Pod creation was successful, update the Build's status. - bs := resources.BuildStatusFromPod(p, build.Spec) - bs.StartTime = &metav1.Time{time.Now()} - build.Status = bs return c.updateStatus(build) } @@ -201,48 +211,6 @@ func (c *Reconciler) updateStatus(u *v1alpha1.Build) error { return err } -// addPodEvent handles the informer's AddFunc event for Pods. 
-func (c *Reconciler) addPodEvent(obj interface{}) { - p, ok := obj.(*corev1.Pod) - if !ok { - return - } - ownerRef := metav1.GetControllerOf(p) - - // If this object is not owned by a Build, we should not do anything - // more with it. - if ownerRef == nil || ownerRef.Kind != "Build" { - return - } - - // Get the build for this pod. - buildName := ownerRef.Name - build, err := c.buildsLister.Builds(p.Namespace).Get(buildName) - if err != nil { - c.Logger.Errorf("Error getting build %q for pod %q in namespace %q", buildName, p.Name, p.Namespace) - return - } - - // Update the build's status from the pod's status. - build = build.DeepCopy() - build.Status = resources.BuildStatusFromPod(p, build.Spec) - if err := c.updateStatus(build); err != nil { - c.Logger.Errorf("Error updating build %q in response to pod event: %v", buildName, err) - } -} - -// updatePodEvent handles the informer's UpdateFunc event for Pods. -func (c *Reconciler) updatePodEvent(old, new interface{}) { - c.addPodEvent(new) -} - -// deletePodEvent handles the informer's DeleteFunc event for Pods. -func (c *Reconciler) deletePodEvent(obj interface{}) { - // TODO(mattmoor): If a pod gets deleted and someone's watching, we should propagate our - // own error message so that we don't leak a go routine waiting forever. - c.Logger.Errorf("NYI: delete event for: %v", obj) -} - // startPodForBuild starts a new Pod to execute the build. // // This applies any build template that's specified, and creates the pod. 
@@ -280,16 +248,10 @@ func (c *Reconciler) startPodForBuild(build *v1alpha1.Build) (*corev1.Pod, error if err != nil { return nil, err } + c.Logger.Infof("Creating pod %q in namespace %q for build %q", p.Name, p.Namespace, build.Name) return c.kubeclientset.CoreV1().Pods(p.Namespace).Create(p) } -func (c *Reconciler) terminatePod(namespace, name string) error { - if err := c.kubeclientset.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { - return err - } - return nil -} - // IsDone returns true if the build's status indicates the build is done. func isDone(status *v1alpha1.BuildStatus) bool { cond := status.GetCondition(v1alpha1.BuildSucceeded) @@ -297,15 +259,25 @@ func isDone(status *v1alpha1.BuildStatus) bool { } func (c *Reconciler) checkTimeout(build *v1alpha1.Build) error { - namespace := build.Namespace - if c.isTimeout(build) { - c.Logger.Infof("Build %q is timeout", build.Name) - if err := c.kubeclientset.CoreV1().Pods(namespace).Delete(build.Status.Cluster.PodName, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { + // If build has not started timeout, startTime should be zero. + if build.Status.StartTime.IsZero() { + return nil + } + + // Use default timeout to 10 minute if build timeout is not set. 
+ timeout := defaultTimeout + if build.Spec.Timeout != nil { + timeout = build.Spec.Timeout.Duration + } + runtime := time.Since(build.Status.StartTime.Time) + if runtime > timeout { + c.Logger.Infof("Build %q is timeout (runtime %s over %s), deleting pod", build.Name, runtime, timeout) + if err := c.kubeclientset.CoreV1().Pods(build.Namespace).Delete(build.Status.Cluster.PodName, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { c.Logger.Errorf("Failed to terminate pod: %v", err) return err } - timeoutMsg := fmt.Sprintf("Build %q failed to finish within %q", build.Name, build.Spec.Timeout.Duration.String()) + timeoutMsg := fmt.Sprintf("Build %q failed to finish within %q", build.Name, timeout.String()) build.Status.SetCondition(&duckv1alpha1.Condition{ Type: v1alpha1.BuildSucceeded, Status: corev1.ConditionFalse, @@ -314,38 +286,6 @@ func (c *Reconciler) checkTimeout(build *v1alpha1.Build) error { }) // update build completed time build.Status.CompletionTime = &metav1.Time{time.Now()} - - if err := c.updateStatus(build); err != nil { - c.Logger.Errorf("Failed to update status for pod: %v", err) - return err - } } return nil } - -// IsTimeout returns true if the build's execution time is greater than -// specified build spec timeout. -func (c *Reconciler) isTimeout(build *v1alpha1.Build) bool { - - // If build has not started timeout, startTime should be zero. 
- if build.Status.StartTime == nil { - c.Logger.Infof("Build has not started") - return false - } - - var timeout time.Duration - if build.Spec.Timeout == nil { - // Set default timeout to 10 minute if build timeout is not set - timeout = defaultTimeout - } else { - timeout = build.Spec.Timeout.Duration - } - - c.Logger.Infof("Build has not yet timed out (timeout=%s, elapsed=%q)", build.Spec.Timeout, time.Since(build.Status.StartTime.Time)) - over := time.Since(build.Status.StartTime.Time) > timeout - if over { - c.Logger.Infof("Build has timed out!") - } - c.Logger.Infof("Build timeout=%s, runtime=%s", timeout, time.Since(build.Status.StartTime.Time)) - return over -} diff --git a/pkg/reconciler/build/build_test.go b/pkg/reconciler/build/build_test.go index 4aec4206..ae619910 100644 --- a/pkg/reconciler/build/build_test.go +++ b/pkg/reconciler/build/build_test.go @@ -45,9 +45,7 @@ import ( const noResyncPeriod time.Duration = 0 -var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { - return true -}) +var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { return true }) type fixture struct { t *testing.T @@ -94,30 +92,31 @@ func (f *fixture) newReconciler() (controller.Reconciler, informers.SharedInform return c.Reconciler, i, k8sI } -func (f *fixture) updateIndex(i informers.SharedInformerFactory, bl []*v1alpha1.Build) { - for _, f := range bl { - i.Build().V1alpha1().Builds().Informer().GetIndexer().Add(f) - } +func (f *fixture) updateIndex(i informers.SharedInformerFactory, b *v1alpha1.Build) { + i.Build().V1alpha1().Builds().Informer().GetIndexer().Add(b) +} + +func (f *fixture) updatePodIndex(i kubeinformers.SharedInformerFactory, p *corev1.Pod) { + i.Core().V1().Pods().Informer().GetIndexer().Add(p) } -func getKey(build *v1alpha1.Build, t *testing.T) string { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(build) +func getKey(b *v1alpha1.Build, t *testing.T) string { + key, err := 
cache.DeletionHandlingMetaNamespaceKeyFunc(b) if err != nil { - t.Errorf("unexpected error getting key for build %v: %v", build.Name, err) + t.Errorf("unexpected error getting key for build %v: %v", b.Name, err) return "" } return key } func TestBuildNotFoundFlow(t *testing.T) { - build := newBuild("test") + b := newBuild("test") f := &fixture{ t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), + objects: []runtime.Object{b}, + client: fake.NewSimpleClientset(b), kubeclient: k8sfake.NewSimpleClientset(), } - f.createServceAccount() // induce failure when fetching build information in controller @@ -129,15 +128,14 @@ func TestBuildNotFoundFlow(t *testing.T) { } f.client.PrependReactor("*", "*", reactor) + r, i, k8sI := f.newReconciler() + f.updateIndex(i, b) stopCh := make(chan struct{}) defer close(stopCh) - - r, i, k8sI := f.newReconciler() - f.updateIndex(i, []*v1alpha1.Build{build}) i.Start(stopCh) k8sI.Start(stopCh) - if err := r.Reconcile(context.Background(), getKey(build, t)); err == nil { + if err := r.Reconcile(context.Background(), getKey(b, t)); err == nil { t.Errorf("Expect error syncing build") } } @@ -156,33 +154,32 @@ func TestBuildWithBadKey(t *testing.T) { } func TestBuildNotFoundError(t *testing.T) { - build := newBuild("test") + b := newBuild("test") f := &fixture{ t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), + objects: []runtime.Object{b}, + client: fake.NewSimpleClientset(b), kubeclient: k8sfake.NewSimpleClientset(), } f.createServceAccount() - stopCh := make(chan struct{}) - defer close(stopCh) - r, i, k8sI := f.newReconciler() // Don't update build informers with test build object + stopCh := make(chan struct{}) + defer close(stopCh) i.Start(stopCh) k8sI.Start(stopCh) - if err := r.Reconcile(context.Background(), getKey(build, t)); err != nil { + if err := r.Reconcile(context.Background(), getKey(b, t)); err != nil { t.Errorf("Unexpected error while syncing build: %s", 
err.Error()) } } func TestBuildWithNonExistentTemplates(t *testing.T) { for _, kind := range []v1alpha1.TemplateKind{v1alpha1.BuildTemplateKind, v1alpha1.ClusterBuildTemplateKind} { - build := newBuild("test-buildtemplate") + b := newBuild("test-buildtemplate") - build.Spec = v1alpha1.BuildSpec{ + b.Spec = v1alpha1.BuildSpec{ Template: &v1alpha1.TemplateInstantiationSpec{ Kind: kind, Name: "not-existent-template", @@ -190,21 +187,20 @@ func TestBuildWithNonExistentTemplates(t *testing.T) { } f := &fixture{ t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), + objects: []runtime.Object{b}, + client: fake.NewSimpleClientset(b), kubeclient: k8sfake.NewSimpleClientset(), } f.createServceAccount() + r, i, k8sI := f.newReconciler() + f.updateIndex(i, b) stopCh := make(chan struct{}) defer close(stopCh) - - r, i, k8sI := f.newReconciler() - f.updateIndex(i, []*v1alpha1.Build{build}) i.Start(stopCh) k8sI.Start(stopCh) - if err := r.Reconcile(context.Background(), getKey(build, t)); err == nil { + if err := r.Reconcile(context.Background(), getKey(b, t)); err == nil { t.Errorf("Expect error syncing build") } else if !kuberrors.IsNotFound(err) { t.Errorf("Expect error to be not found err: %s", err.Error()) @@ -226,39 +222,41 @@ func TestBuildWithTemplate(t *testing.T) { Env: []corev1.EnvVar{{Value: "testvalue", Name: "testkey"}}, } - build := newBuild("test-buildtemplate") - build.Spec = v1alpha1.BuildSpec{ + b := newBuild("test-buildtemplate") + b.Spec = v1alpha1.BuildSpec{ Template: buildTemplateSpec, } f := &fixture{ t: t, - objects: []runtime.Object{build, tmpl}, - client: fake.NewSimpleClientset(build, tmpl), + objects: []runtime.Object{b, tmpl}, + client: fake.NewSimpleClientset(b, tmpl), kubeclient: k8sfake.NewSimpleClientset(), } f.createServceAccount() + r, i, k8sI := f.newReconciler() + f.updateIndex(i, b) stopCh := make(chan struct{}) defer close(stopCh) - - r, i, k8sI := f.newReconciler() + i.Start(stopCh) + k8sI.Start(stopCh) err 
:= i.Build().V1alpha1().BuildTemplates().Informer().GetIndexer().Add(tmpl) if err != nil { t.Errorf("Unexpected error when adding cluster build template to build informer: %s", err.Error()) } - f.updateIndex(i, []*v1alpha1.Build{build}) + f.updateIndex(i, b) i.Start(stopCh) k8sI.Start(stopCh) - if err = r.Reconcile(context.Background(), getKey(build, t)); err != nil { + if err = r.Reconcile(context.Background(), getKey(b, t)); err != nil { t.Errorf("unexpected expecting error while syncing build: %s", err.Error()) } - buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) - b, err := buildClient.Get(build.Name, metav1.GetOptions{}) + buildClient := f.client.BuildV1alpha1().Builds(b.Namespace) + b, err = buildClient.Get(b.Name, metav1.GetOptions{}) if err != nil { t.Errorf("error fetching build: %v", err) } @@ -268,7 +266,7 @@ func TestBuildWithTemplate(t *testing.T) { } func TestBasicFlows(t *testing.T) { - tests := []struct { + for _, c := range []struct { desc string podStatus corev1.PodStatus wantCondition *duckv1alpha1.Condition @@ -319,41 +317,38 @@ func TestBasicFlows(t *testing.T) { Reason: "Pending", Message: `build step "status-name" is pending with reason "i'm pending"`, }, - }} - - for _, c := range tests { + }} { t.Run(c.desc, func(t *testing.T) { - build := newBuild(c.desc) + b := newBuild(c.desc) f := &fixture{ t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), + objects: []runtime.Object{b}, + client: fake.NewSimpleClientset(b), kubeclient: k8sfake.NewSimpleClientset(), } f.createServceAccount() + r, i, k8sI := f.newReconciler() + f.updateIndex(i, b) stopCh := make(chan struct{}) defer close(stopCh) - - r, i, k8sI := f.newReconciler() - f.updateIndex(i, []*v1alpha1.Build{build}) i.Start(stopCh) k8sI.Start(stopCh) // Reconcile to pick up changes. 
ctx := context.Background() - if err := r.Reconcile(ctx, getKey(build, t)); err != nil { + if err := r.Reconcile(ctx, getKey(b, t)); err != nil { t.Errorf("error syncing build: %v", err) } - buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) - b, err := buildClient.Get(build.Name, metav1.GetOptions{}) + buildClient := f.client.BuildV1alpha1().Builds(b.Namespace) + b, err := buildClient.Get(b.Name, metav1.GetOptions{}) if err != nil { t.Errorf("error fetching build: %v", err) } // Update the underlying pod's status. - b, err = buildClient.Get(build.Name, metav1.GetOptions{}) + b, err = buildClient.Get(b.Name, metav1.GetOptions{}) if err != nil { t.Fatalf("error getting build: %v", err) } @@ -371,10 +366,14 @@ func TestBasicFlows(t *testing.T) { t.Fatalf("error updating pod %q: %v", podName, err) } - // Sleep to give Reconciler time to pick up the Pod event. - time.Sleep(500 * time.Millisecond) + // Reconcile to pick up pod changes. + f.updatePodIndex(k8sI, p) + f.updateIndex(i, b) + if err := r.Reconcile(ctx, getKey(b, t)); err != nil { + t.Errorf("error syncing build: %v", err) + } - b, err = buildClient.Get(build.Name, metav1.GetOptions{}) + b, err = buildClient.Get(b.Name, metav1.GetOptions{}) if err != nil { t.Errorf("error fetching build: %v", err) } @@ -388,63 +387,66 @@ func TestBasicFlows(t *testing.T) { } } -func TestTimeoutFlows(t *testing.T) { - build := newBuild("timeout") - buffer := 1 * time.Minute - - build.Spec.Timeout = &metav1.Duration{Duration: 1 * time.Second} +func TestTimeoutFlow(t *testing.T) { + b := newBuild("timeout") + b.Spec.Timeout = &metav1.Duration{Duration: 1 * time.Second} f := &fixture{ t: t, - objects: []runtime.Object{build}, - client: fake.NewSimpleClientset(build), + objects: []runtime.Object{b}, + client: fake.NewSimpleClientset(b), kubeclient: k8sfake.NewSimpleClientset(), } f.createServceAccount() + r, i, k8sI := f.newReconciler() + f.updateIndex(i, b) stopCh := make(chan struct{}) defer close(stopCh) - - r, i, 
k8sI := f.newReconciler() - - f.updateIndex(i, []*v1alpha1.Build{build}) i.Start(stopCh) k8sI.Start(stopCh) ctx := context.Background() - if err := r.Reconcile(ctx, getKey(build, t)); err != nil { + if err := r.Reconcile(ctx, getKey(b, t)); err != nil { t.Errorf("Not Expect error when syncing build") } - buildClient := f.client.BuildV1alpha1().Builds(build.Namespace) - b, err := buildClient.Get(build.Name, metav1.GetOptions{}) + buildClient := f.client.BuildV1alpha1().Builds(b.Namespace) + b, err := buildClient.Get(b.Name, metav1.GetOptions{}) if err != nil { t.Errorf("error fetching build: %v", err) } - // Update build status to falsely indicate the build started 1m ago, - // which is longer than the build's timeout. - b.Status.StartTime.Time = metav1.Now().Time.Add(-buffer) + // Update the pod to indicate it was created 10m ago, which is + // longer than the build's timeout. + podName := b.Status.Cluster.PodName + p, err := f.kubeclient.CoreV1().Pods(metav1.NamespaceDefault).Get(podName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error getting pod %q: %v", podName, err) + } + p.CreationTimestamp.Time = metav1.Now().Time.Add(-10 * time.Minute) + if _, err := f.kubeclient.CoreV1().Pods(metav1.NamespaceDefault).Update(p); err != nil { + t.Fatalf("error updating pod %q: %v", podName, err) + } - // Reconcile to pick up changes. - // We have to manually update the index, or Reconcile won't see the update. - f.updateIndex(i, []*v1alpha1.Build{b}) - if err := r.Reconcile(ctx, getKey(build, t)); err != nil { - t.Errorf("Unexpected error while syncing build: %v", err) + // Reconcile to pick up pod changes. + f.updatePodIndex(k8sI, p) + f.updateIndex(i, b) + if err := r.Reconcile(ctx, getKey(b, t)); err != nil { + t.Errorf("error syncing build: %v", err) } - b, err = buildClient.Get(build.Name, metav1.GetOptions{}) + // Check that the build has the expected timeout status. 
+ b, err = buildClient.Get(b.Name, metav1.GetOptions{}) if err != nil { t.Errorf("error fetching build: %v", err) } - - // Check that the build has the expected timeout status. if d := cmp.Diff(b.Status.GetCondition(duckv1alpha1.ConditionSucceeded), &duckv1alpha1.Condition{ Type: duckv1alpha1.ConditionSucceeded, Status: corev1.ConditionFalse, Reason: "BuildTimeout", Message: fmt.Sprintf("Build %q failed to finish within \"1s\"", b.Name), }, ignoreVolatileTime); d != "" { - t.Errorf("Unexpected build status %s", b.Status) + t.Errorf("Unexpected build status %s", d) } } diff --git a/pkg/reconciler/build/resources/pod.go b/pkg/reconciler/build/resources/pod.go index 239f6683..73781747 100644 --- a/pkg/reconciler/build/resources/pod.go +++ b/pkg/reconciler/build/resources/pod.go @@ -388,6 +388,7 @@ func BuildStatusFromPod(p *corev1.Pod, buildSpec v1alpha1.BuildSpec) v1alpha1.Bu Namespace: p.Namespace, PodName: p.Name, }, + StartTime: &p.CreationTimestamp, } // Always ignore the first pod status, which is creds-init. 
diff --git a/pkg/reconciler/build/resources/pod_test.go b/pkg/reconciler/build/resources/pod_test.go index 6bfffa98..40343bf4 100644 --- a/pkg/reconciler/build/resources/pod_test.go +++ b/pkg/reconciler/build/resources/pod_test.go @@ -35,13 +35,12 @@ import ( var ( ignorePrivateResourceFields = cmpopts.IgnoreUnexported(resource.Quantity{}) + ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { return true }) + ignoreVolatileTimePtr = cmp.Comparer(func(_, _ *apis.VolatileTime) bool { return true }) nopContainer = corev1.Container{ Name: "nop", Image: *nopImage, } - ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { - return true - }) ) func TestMakePod(t *testing.T) { @@ -738,10 +737,12 @@ func TestBuildStatusFromPod(t *testing.T) { }, }} { t.Run(c.desc, func(t *testing.T) { + now := metav1.Now() p := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ - Name: "pod", - Namespace: system.Namespace, + Name: "pod", + Namespace: system.Namespace, + CreationTimestamp: now, }, Status: c.podStatus, } @@ -753,6 +754,7 @@ func TestBuildStatusFromPod(t *testing.T) { Namespace: system.Namespace, } c.want.Builder = v1alpha1.ClusterBuildProvider + c.want.StartTime = &now if d := cmp.Diff(got, c.want, ignoreVolatileTime); d != "" { t.Errorf("Diff:\n%s", d) From 1093277f3a2651287f1cff662565b83dabb08712 Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Tue, 20 Nov 2018 11:42:54 -0500 Subject: [PATCH 14/14] Remove commented import --- cmd/controller/main.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 9af97828..b75c46fc 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -38,8 +38,7 @@ import ( kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
- // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + "k8s.io/client-go/tools/clientcmd" ) const (