diff --git a/pkg/component/component.go b/pkg/component/component.go index 4ed2c72000c..999ad4de171 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -181,7 +181,7 @@ func GetComponentPorts(client *occlient.Client, componentName string, applicatio componentLabels := componentlabels.GetLabels(componentName, applicationName, false) componentSelector := util.ConvertLabelsToSelector(componentLabels) - dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector) + dc, err := client.GetDeploymentConfigFromSelector(componentSelector) if err != nil { return nil, errors.Wrapf(err, "unable to fetch deployment configs for the selector %v", componentSelector) } @@ -200,7 +200,7 @@ func GetComponentLinkedSecretNames(client *occlient.Client, componentName string componentLabels := componentlabels.GetLabels(componentName, applicationName, false) componentSelector := util.ConvertLabelsToSelector(componentLabels) - dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector) + dc, err := client.GetDeploymentConfigFromSelector(componentSelector) if err != nil { return nil, errors.Wrapf(err, "unable to fetch deployment configs for the selector %v", componentSelector) } @@ -283,7 +283,7 @@ func CreateFromPath(client *occlient.Client, params occlient.CreateArgs) error { } podSelector := fmt.Sprintf("deploymentconfig=%s", selectorLabels) - _, err = client.WaitAndGetPod(podSelector, corev1.PodRunning, "Waiting for component to start") + _, err = client.GetKubeClient().WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, "Waiting for component to start") if err != nil { return err } @@ -664,7 +664,7 @@ func PushLocal(client *occlient.Client, componentName string, applicationName st // Find DeploymentConfig for component componentLabels := componentlabels.GetLabels(componentName, applicationName, false) componentSelector := util.ConvertLabelsToSelector(componentLabels) - dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector) + dc, err := client.GetDeploymentConfigFromSelector(componentSelector) if err != nil { return errors.Wrap(err, "unable to get deployment for component") } @@ -672,7 +672,7 @@ func PushLocal(client *occlient.Client, componentName string, applicationName st podSelector := fmt.Sprintf("deploymentconfig=%s", dc.Name) // Wait for Pod to be in running state otherwise we can't sync data to it. 
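 // Note: the wait now goes through the kclient wrapper (see pkg/kclient/pods.go below), which also surfaces repeated warning events while waiting.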
- pod, err := client.WaitAndGetPod(podSelector, corev1.PodRunning, "Waiting for component to start") + pod, err := client.GetKubeClient().WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, "Waiting for component to start") if err != nil { return errors.Wrapf(err, "error while waiting for pod %s", podSelector) } @@ -899,7 +899,7 @@ func List(client *occlient.Client, applicationName string, localConfigInfo *conf if deploymentConfigSupported && client != nil { // retrieve all the deployment configs that are associated with this application - dcList, err := client.GetDeploymentConfigsFromSelector(applicationSelector) + dcList, err := client.ListDeploymentConfigs(applicationSelector) if err != nil { return ComponentList{}, errors.Wrapf(err, "unable to list components") } @@ -1503,13 +1503,13 @@ func getRemoteComponentMetadata(client *occlient.Client, componentName string, a linkedComponents := make(map[string][]string) linkedSecretNames := fromCluster.GetLinkedSecretNames() for _, secretName := range linkedSecretNames { - secret, err := client.GetSecret(secretName, projectName) + secret, err := client.GetKubeClient().GetSecret(secretName, projectName) if err != nil { return Component{}, errors.Wrapf(err, "unable to get info about secret %s", secretName) } componentName, containsComponentLabel := secret.Labels[componentlabels.ComponentLabel] if containsComponentLabel { - if port, ok := secret.Annotations[occlient.ComponentPortAnnotationName]; ok { + if port, ok := secret.Annotations[kclient.ComponentPortAnnotationName]; ok { linkedComponents[componentName] = append(linkedComponents[componentName], port) } } else { @@ -1537,7 +1537,7 @@ func GetLogs(client *occlient.Client, componentName string, applicationName stri } // Retrieve the logs - err = client.DisplayDeploymentConfigLog(namespacedOpenShiftObject, follow, stdout) + err = client.DisplayDeploymentConfigLog(namespacedOpenShiftObject, follow) if err != nil { return err } diff --git a/pkg/component/component_full_description.go b/pkg/component/component_full_description.go index dde79e9569d..926a121a9d5 100644 --- a/pkg/component/component_full_description.go +++ b/pkg/component/component_full_description.go @@ -267,7 +267,7 @@ func (cfd *ComponentFullDescription) Print(client *occlient.Client) error { for _, linkedService := range cfd.Status.LinkedServices { // Let's also get the secrets / environment variables that are being passed in.. 
(if there are any) - secrets, err := client.GetSecret(linkedService, cfd.GetNamespace()) + secrets, err := client.GetKubeClient().GetSecret(linkedService, cfd.GetNamespace()) if err != nil { return err } diff --git a/pkg/component/pushed_component.go b/pkg/component/pushed_component.go index 1bc7561c649..c683f5eedaf 100644 --- a/pkg/component/pushed_component.go +++ b/pkg/component/pushed_component.go @@ -234,7 +234,7 @@ func getType(component provider) (string, error) { func GetPushedComponents(c *occlient.Client, applicationName string) (map[string]PushedComponent, error) { applicationSelector := fmt.Sprintf("%s=%s", applabels.ApplicationLabel, applicationName) - dcList, err := c.GetDeploymentConfigsFromSelector(applicationSelector) + dcList, err := c.ListDeploymentConfigs(applicationSelector) if err != nil { if !isIgnorableError(err) { return nil, err diff --git a/pkg/debug/portforward.go b/pkg/debug/portforward.go index 3c20d558569..ed4b95bd5dd 100644 --- a/pkg/debug/portforward.go +++ b/pkg/debug/portforward.go @@ -62,7 +62,7 @@ func (f *DefaultPortForwarder) ForwardPorts(portPair string, stopChan, readyChan return err } - pod, err = f.client.GetPodUsingComponentName(f.componentName, f.appName) + pod, err = f.client.GetPodUsingDeploymentConfig(f.componentName, f.appName) if err != nil { return err } @@ -77,12 +77,7 @@ func (f *DefaultPortForwarder) ForwardPorts(portPair string, stopChan, readyChan return err } - var req *rest.Request - if f.kClient != nil && isDevfile { - req = f.kClient.GeneratePortForwardReq(pod.Name) - } else { - req = f.client.BuildPortForwardReq(pod.Name) - } + req := f.kClient.GeneratePortForwardReq(pod.Name) dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) fw, err := portforward.New(dialer, []string{portPair}, stopChan, readyChan, f.Out, f.ErrOut) diff --git a/pkg/kclient/events.go b/pkg/kclient/events.go new file mode 100644 index 00000000000..2ca257d3bbf --- /dev/null +++ b/pkg/kclient/events.go @@ -0,0 +1,66 @@ +package kclient + +import ( + "fmt" + "github.com/openshift/odo/pkg/log" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + "sync" +) + +// We use a mutex here in order to make 100% sure that functions such as CollectEvents +// so that there are no race conditions +var mu sync.Mutex + +const ( + failedEventCount = 5 +) + +// CollectEvents collects events in a Goroutine by manipulating a spinner. +// We don't care about the error (it's usually ran in a go routine), so erroring out is not needed. +func (c *Client) CollectEvents(selector string, events map[string]corev1.Event, spinner *log.Status, quit <-chan int) { + + // Secondly, we will start a go routine for watching for events related to the pod and update our pod status accordingly. + eventWatcher, err := c.KubeClient.CoreV1().Events(c.Namespace).Watch(metav1.ListOptions{}) + if err != nil { + log.Warningf("Unable to watch for events: %s", err) + return + } + defer eventWatcher.Stop() + + // Create an endless loop for collecting + for { + select { + case <-quit: + klog.V(3).Info("Quitting collect events") + return + case val, ok := <-eventWatcher.ResultChan(): + mu.Lock() + if !ok { + log.Warning("Watch channel was closed") + return + } + if e, ok := val.Object.(*corev1.Event); ok { + + // If there are many warning events happening during deployment, let's log them. 
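+ // Only Warning events that have fired at least failedEventCount times are recorded in the map and shown on the spinner.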
+ if e.Type == "Warning" { + + if e.Count >= failedEventCount { + newEvent := e + (events)[e.Name] = *newEvent + klog.V(3).Infof("Warning Event: Count: %d, Reason: %s, Message: %s", e.Count, e.Reason, e.Message) + // Change the spinner message to show the warning + spinner.WarningStatus(fmt.Sprintf("WARNING x%d: %s", e.Count, e.Reason)) + } + + } + + } else { + log.Warning("Unable to convert object to event") + return + } + mu.Unlock() + } + } +} diff --git a/pkg/kclient/events_test.go b/pkg/kclient/events_test.go new file mode 100644 index 00000000000..1b8a13d8ab3 --- /dev/null +++ b/pkg/kclient/events_test.go @@ -0,0 +1,88 @@ +package kclient + +import ( + "fmt" + "github.com/openshift/odo/pkg/log" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + ktesting "k8s.io/client-go/testing" + "strings" + "testing" + time "time" +) + +func fakeEventStatus(podName string, eventWarningMessage string, count int32) *corev1.Event { + return &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + }, + Type: "Warning", + Count: count, + Reason: eventWarningMessage, + Message: "Foobar", + } +} + +func TestCollectEvents(t *testing.T) { + tests := []struct { + name string + podName string + eventWarningMessage string + }{ + { + name: "Case 1: Collect an arbitrary amount of events", + podName: "ruby", + eventWarningMessage: "Fake event warning message", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + // Create a fake client + fakeClient, fakeClientSet := FakeNew() + fakeEventWatch := watch.NewRaceFreeFake() + podSelector := fmt.Sprintf("deploymentconfig=%s", tt.podName) + + // Create a fake event status / watch reactor for faking the events we are collecting + fakeEvent := fakeEventStatus(tt.podName, tt.eventWarningMessage, 10) + go func(event *corev1.Event) { + fakeEventWatch.Add(event) + }(fakeEvent) + + fakeClientSet.Kubernetes.PrependWatchReactor("events", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { + return true, fakeEventWatch, nil + }) + + // Create a test spinner / variables / quit variable for the go channel + spinner := log.Spinner("Test spinner") + events := make(map[string]corev1.Event) + quit := make(chan int) + go fakeClient.CollectEvents(podSelector, events, spinner, quit) + + // Sleep in order to make sure we actually collect some events + time.Sleep(2 * time.Second) + close(quit) + + // We make sure to lock in order to prevent race conditions when retrieving the events (since they are a pointer + // by default since we pass in a map) + mu.Lock() + if len(events) == 0 { + t.Errorf("Expected events, got none") + } + mu.Unlock() + + // Collect the first event in the map + var firstEvent corev1.Event + for _, val := range events { + firstEvent = val + } + + if !strings.Contains(firstEvent.Reason, tt.eventWarningMessage) { + t.Errorf("expected warning message: '%s' in event message: '%+v'", tt.eventWarningMessage, firstEvent.Reason) + } + + }) + } +} diff --git a/pkg/kclient/pods.go b/pkg/kclient/pods.go index 654675c8948..ac9083a98ce 100644 --- a/pkg/kclient/pods.go +++ b/pkg/kclient/pods.go @@ -4,7 +4,10 @@ import ( "bytes" "encoding/json" "fmt" + "github.com/olekukonko/tablewriter" + "github.com/openshift/odo/pkg/preference" "io" + "strconv" "strings" "time" @@ -95,6 +98,120 @@ func (c *Client) WaitAndGetPod(watchOptions metav1.ListOptions, desiredPhase cor } } +// WaitAndGetPod block and waits until pod matching selector is in in Running state +// 
desiredPhase cannot be PodFailed or PodUnknown +func (c *Client) WaitAndGetPodWithEvents(selector string, desiredPhase corev1.PodPhase, waitMessage string) (*corev1.Pod, error) { + + // Try to grab the preference in order to set a timeout.. but if not, we'll use the default. + pushTimeout := preference.DefaultPushTimeout * time.Second + cfg, configReadErr := preference.New() + if configReadErr != nil { + klog.V(3).Info(errors.Wrap(configReadErr, "unable to read config file")) + } else { + pushTimeout = time.Duration(cfg.GetPushTimeout()) * time.Second + } + + klog.V(3).Infof("Waiting for %s pod", selector) + spinner := log.Spinner(waitMessage) + defer spinner.End(false) + + w, err := c.KubeClient.CoreV1().Pods(c.Namespace).Watch(metav1.ListOptions{ + LabelSelector: selector, + }) + if err != nil { + return nil, errors.Wrapf(err, "unable to watch pod") + } + defer w.Stop() + + // Here we are going to start a loop watching for the pod status + podChannel := make(chan *corev1.Pod) + watchErrorChannel := make(chan error) + go func(spinny *log.Status) { + loop: + for { + val, ok := <-w.ResultChan() + if !ok { + watchErrorChannel <- errors.New("watch channel was closed") + break loop + } + if e, ok := val.Object.(*corev1.Pod); ok { + klog.V(3).Infof("Status of %s pod is %s", e.Name, e.Status.Phase) + for _, cond := range e.Status.Conditions { + // using this just for debugging message, so ignoring error on purpose + jsonCond, _ := json.Marshal(cond) + klog.V(3).Infof("Pod Conditions: %s", string(jsonCond)) + } + for _, status := range e.Status.ContainerStatuses { + // using this just for debugging message, so ignoring error on purpose + jsonStatus, _ := json.Marshal(status) + klog.V(3).Infof("Container Status: %s", string(jsonStatus)) + } + switch e.Status.Phase { + case desiredPhase: + klog.V(3).Infof("Pod %s is %v", e.Name, desiredPhase) + podChannel <- e + break loop + case corev1.PodFailed, corev1.PodUnknown: + watchErrorChannel <- errors.Errorf("pod %s status %s", e.Name, e.Status.Phase) + break loop + } + } else { + watchErrorChannel <- errors.New("unable to convert event object to Pod") + break loop + } + } + close(podChannel) + close(watchErrorChannel) + }(spinner) + + // Collect all the events in a separate go routine + failedEvents := make(map[string]corev1.Event) + quit := make(chan int) + go c.CollectEvents(selector, failedEvents, spinner, quit) + defer close(quit) + + select { + case val := <-podChannel: + spinner.End(true) + return val, nil + case err := <-watchErrorChannel: + return nil, err + case <-time.After(pushTimeout): + + // Create a useful error if there are any failed events + errorMessage := fmt.Sprintf(`waited %s but couldn't find running pod matching selector: '%s'`, pushTimeout, selector) + + if len(failedEvents) != 0 { + + // Create an output table + tableString := &strings.Builder{} + table := tablewriter.NewWriter(tableString) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + table.SetRowSeparator("") + + // Header + table.SetHeader([]string{"Name", "Count", "Reason", "Message"}) + + // List of events + for name, event := range failedEvents { + table.Append([]string{name, strconv.Itoa(int(event.Count)), event.Reason, event.Message}) + } + + // Here we render the table as well as a helpful error message + table.Render() + errorMessage = fmt.Sprintf(`waited %s but was unable to find a running pod matching selector: '%s' +For more information to help determine 
the cause of the error, re-run with '-v'. +See below for a list of failed events that occured more than %d times during deployment: +%s`, pushTimeout, selector, failedEventCount, tableString) + } + + return nil, errors.Errorf(errorMessage) + } +} + // ExecCMDInContainer execute command in the container of a pod, pass an empty string for containerName to execute in the first container of the pod func (c *Client) ExecCMDInContainer(compInfo common.ComponentInfo, cmd []string, stdout io.Writer, stderr io.Writer, stdin io.Reader, tty bool) error { podExecOptions := corev1.PodExecOptions{ diff --git a/pkg/kclient/pods_test.go b/pkg/kclient/pods_test.go index 203b28f5e82..8f006df5fc5 100644 --- a/pkg/kclient/pods_test.go +++ b/pkg/kclient/pods_test.go @@ -14,6 +14,17 @@ import ( ktesting "k8s.io/client-go/testing" ) +func fakePodStatus(status corev1.PodPhase, podName string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + }, + Status: corev1.PodStatus{ + Phase: status, + }, + } +} + func TestWaitAndGetPod(t *testing.T) { tests := []struct { @@ -85,6 +96,76 @@ func TestWaitAndGetPod(t *testing.T) { } } +// NOTE: We do *not* collection the amount of actions taken in this function as there could be any number of fake +// 'event' actions that are happening in the background. +func TestWaitAndGetPodWithEvents(t *testing.T) { + tests := []struct { + name string + podName string + status corev1.PodPhase + wantEventWarning bool + wantErr bool + eventWarningMessage string + }{ + { + name: "Case 1: Pod running", + podName: "ruby", + status: corev1.PodRunning, + wantEventWarning: false, + wantErr: false, + }, + { + name: "Case 2: Pod failed", + podName: "ruby", + status: corev1.PodFailed, + wantEventWarning: false, + wantErr: true, + }, + { + name: "Case 3: Pod unknown", + podName: "ruby", + status: corev1.PodUnknown, + wantEventWarning: false, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + fakeClient, fakeClientSet := FakeNew() + fakePodWatch := watch.NewRaceFreeFake() + + // Watch for Pods + fakePod := fakePodStatus(tt.status, tt.podName) + go func(pod *corev1.Pod) { + fakePodWatch.Modify(pod) + }(fakePod) + + // Prepend watch reactor (beginning of the chain) + fakeClientSet.Kubernetes.PrependWatchReactor("pods", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { + return true, fakePodWatch, nil + }) + + podSelector := fmt.Sprintf("deploymentconfig=%s", tt.podName) + + pod, err := fakeClient.WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, "Waiting for component to start") + + if !tt.wantErr == (err != nil) { + t.Errorf("client.WaitAndGetPod(string) unexpected error %v, wantErr %v", err, tt.wantErr) + return + } + + if err == nil { + if pod.Name != tt.podName { + t.Errorf("pod name is not matching to expected name, expected: %s, got %s", tt.podName, pod.Name) + } + } + + }) + } +} + func TestGetOnePodFromSelector(t *testing.T) { fakePod := FakePodStatus(corev1.PodRunning, "nodejs") fakePod.Labels["component"] = "nodejs" diff --git a/pkg/kclient/secrets.go b/pkg/kclient/secrets.go index f6afe8de885..eede5a1860f 100644 --- a/pkg/kclient/secrets.go +++ b/pkg/kclient/secrets.go @@ -8,7 +8,10 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/klog" "math/big" + "strings" "time" "github.com/pkg/errors" @@ -16,6 +19,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// ComponentPortAnnotationName annotation is used on the secrets 
that are created for each exposed port of the component +const ComponentPortAnnotationName = "component-port" + // CreateTLSSecret creates a TLS Secret with the given certificate and private key // serviceName is the name of the service for the target reference // ingressDomain is the ingress domain to use for the ingress @@ -89,3 +95,111 @@ func GenerateSelfSignedCertificate(host string) (SelfSignedCertificate, error) { return SelfSignedCertificate{CertPem: certPemByteArr, KeyPem: keyPemByteArr}, nil } + +// GetSecret returns the Secret object in the given namespace +func (c *Client) GetSecret(name, namespace string) (*corev1.Secret, error) { + secret, err := c.KubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, errors.Wrapf(err, "unable to get the secret %s", secret) + } + return secret, nil +} + +// CreateSecret generates and creates the secret +// commonObjectMeta is the ObjectMeta for the service +func (c *Client) CreateSecret(objectMeta metav1.ObjectMeta, data map[string]string, ownerReference metav1.OwnerReference) error { + + secret := corev1.Secret{ + ObjectMeta: objectMeta, + Type: corev1.SecretTypeOpaque, + StringData: data, + } + secret.SetOwnerReferences(append(secret.GetOwnerReferences(), ownerReference)) + _, err := c.KubeClient.CoreV1().Secrets(c.Namespace).Create(&secret) + if err != nil { + return errors.Wrapf(err, "unable to create secret for %s", objectMeta.Name) + } + return nil +} + +// Create a secret for each port, containing the host and port of the component +// This is done so other components can later inject the secret into the environment +// and have the "coordinates" to communicate with this component +func (c *Client) CreateSecrets(componentName string, commonObjectMeta metav1.ObjectMeta, svc *corev1.Service, ownerReference metav1.OwnerReference) error { + originalName := commonObjectMeta.Name + for _, svcPort := range svc.Spec.Ports { + portAsString := fmt.Sprintf("%v", svcPort.Port) + + // we need to create multiple secrets, so each one has to contain the port in it's name + // so we change the name of each secret by adding the port number + commonObjectMeta.Name = fmt.Sprintf("%v-%v", originalName, portAsString) + + // we also add the port as an annotation to the secret + // this comes in handy when we need to "query" for the appropriate secret + // of a component based on the port + commonObjectMeta.Annotations[ComponentPortAnnotationName] = portAsString + + err := c.CreateSecret( + commonObjectMeta, + map[string]string{ + secretKeyName(componentName, "host"): svc.Name, + secretKeyName(componentName, "port"): portAsString, + }, + ownerReference) + + if err != nil { + return errors.Wrapf(err, "unable to create Secret for %s", commonObjectMeta.Name) + } + } + + // restore the original values of the fields we changed + commonObjectMeta.Name = originalName + delete(commonObjectMeta.Annotations, ComponentPortAnnotationName) + + return nil +} + +// ListSecrets lists all the secrets based on the given label selector +func (c *Client) ListSecrets(labelSelector string) ([]corev1.Secret, error) { + listOptions := metav1.ListOptions{} + if len(labelSelector) > 0 { + listOptions = metav1.ListOptions{ + LabelSelector: labelSelector, + } + } + + secretList, err := c.KubeClient.CoreV1().Secrets(c.Namespace).List(listOptions) + if err != nil { + return nil, errors.Wrap(err, "unable to get secret list") + } + + return secretList.Items, nil +} + +// WaitAndGetSecret blocks and waits until the secret is available +func (c 
*Client) WaitAndGetSecret(name string, namespace string) (*corev1.Secret, error) { + klog.V(3).Infof("Waiting for secret %s to become available", name) + + w, err := c.KubeClient.CoreV1().Secrets(namespace).Watch(metav1.ListOptions{ + FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), + }) + if err != nil { + return nil, errors.Wrapf(err, "unable to watch secret") + } + defer w.Stop() + for { + val, ok := <-w.ResultChan() + if !ok { + break + } + if e, ok := val.Object.(*corev1.Secret); ok { + klog.V(3).Infof("Secret %s now exists", e.Name) + return e, nil + } + } + return nil, errors.Errorf("unknown error while waiting for secret '%s'", name) +} + +func secretKeyName(componentName, baseKeyName string) string { + return fmt.Sprintf("COMPONENT_%v_%v", strings.Replace(strings.ToUpper(componentName), "-", "_", -1), strings.ToUpper(baseKeyName)) +} diff --git a/pkg/kclient/secrets_test.go b/pkg/kclient/secrets_test.go index 83279114742..7520b1b1c6b 100644 --- a/pkg/kclient/secrets_test.go +++ b/pkg/kclient/secrets_test.go @@ -1,6 +1,9 @@ package kclient import ( + "fmt" + "k8s.io/apimachinery/pkg/watch" + "reflect" "testing" corev1 "k8s.io/api/core/v1" @@ -110,3 +113,191 @@ func TestGenerateSelfSignedCertificate(t *testing.T) { }) } } + +func TestGetSecret(t *testing.T) { + tests := []struct { + name string + secretNS string + secretName string + wantErr bool + want *corev1.Secret + }{ + { + name: "Case: Valid request for retrieving a secret", + secretNS: "", + secretName: "foo", + want: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }, + wantErr: false, + }, + { + name: "Case: Invalid request for retrieving a secret", + secretNS: "", + secretName: "foo2", + want: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient, fakeClientSet := FakeNew() + + // Fake getting Secret + fakeClientSet.Kubernetes.PrependReactor("get", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { + if tt.want.Name != tt.secretName { + return true, nil, fmt.Errorf("'get' called with a different secret name") + } + return true, tt.want, nil + }) + + returnValue, err := fakeClient.GetSecret(tt.secretName, tt.secretNS) + + // Check for validating return value + if err == nil && returnValue != tt.want { + t.Errorf("error in return value got: %v, expected %v", returnValue, tt.want) + } + + if !tt.wantErr == (err != nil) { + t.Errorf("\nclient.GetSecret(secretNS, secretName) unexpected error %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestListSecrets(t *testing.T) { + + tests := []struct { + name string + secretList corev1.SecretList + output []corev1.Secret + wantErr bool + }{ + { + name: "Case 1: Ensure secrets are properly listed", + secretList: corev1.SecretList{ + Items: []corev1.Secret{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "secret2", + }, + }, + }, + }, + output: []corev1.Secret{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "secret1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "secret2", + }, + }, + }, + + wantErr: false, + }, + } + + for _, tt := range tests { + client, fakeClientSet := FakeNew() + + fakeClientSet.Kubernetes.PrependReactor("list", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, &tt.secretList, nil + }) + + secretsList, err := client.ListSecrets("") + + if 
!reflect.DeepEqual(tt.output, secretsList) { + t.Errorf("expected output: %#v,got: %#v", tt.secretList, secretsList) + } + + if err == nil && !tt.wantErr { + if len(fakeClientSet.Kubernetes.Actions()) != 1 { + t.Errorf("expected 1 action in ListSecrets got: %v", fakeClientSet.Kubernetes.Actions()) + } + } else if err == nil && tt.wantErr { + t.Error("test failed, expected: false, got true") + } else if err != nil && !tt.wantErr { + t.Errorf("test failed, expected: no error, got error: %s", err.Error()) + } + } +} + +func TestWaitAndGetSecret(t *testing.T) { + + tests := []struct { + name string + secretName string + namespace string + wantErr bool + }{ + { + name: "Case 1: no error expected", + secretName: "ruby", + namespace: "dummy", + wantErr: false, + }, + + { + name: "Case 2: error expected", + secretName: "", + namespace: "dummy", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + fkclient, fkclientset := FakeNew() + fkWatch := watch.NewFake() + + // Change the status + go func() { + fkWatch.Modify(&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.secretName, + }, + }) + }() + + fkclientset.Kubernetes.PrependWatchReactor("secrets", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { + if len(tt.secretName) == 0 { + return true, nil, fmt.Errorf("error watching secret") + } + return true, fkWatch, nil + }) + + pod, err := fkclient.WaitAndGetSecret(tt.secretName, tt.namespace) + + if !tt.wantErr == (err != nil) { + t.Errorf(" client.WaitAndGetSecret(string, string) unexpected error %v, wantErr %v", err, tt.wantErr) + return + } + + if len(fkclientset.Kubernetes.Actions()) != 1 { + t.Errorf("expected 1 action in WaitAndGetSecret got: %v", fkclientset.Kubernetes.Actions()) + } + + if err == nil { + if pod.Name != tt.secretName { + t.Errorf("secret name is not matching to expected name, expected: %s, got %s", tt.secretName, pod.Name) + } + } + }) + } +} diff --git a/pkg/kclient/services.go b/pkg/kclient/services.go index 8180bd743d0..122b08f7042 100644 --- a/pkg/kclient/services.go +++ b/pkg/kclient/services.go @@ -34,3 +34,15 @@ func (c *Client) UpdateService(commonObjectMeta metav1.ObjectMeta, svcSpec corev } return service, err } + +// ListServices returns an array of Service resources which match the +// given selector +func (c *Client) ListServices(selector string) ([]corev1.Service, error) { + serviceList, err := c.KubeClient.CoreV1().Services(c.Namespace).List(metav1.ListOptions{ + LabelSelector: selector, + }) + if err != nil { + return nil, errors.Wrap(err, "unable to list Services") + } + return serviceList.Items, nil +} diff --git a/pkg/kclient/services_test.go b/pkg/kclient/services_test.go index 02d77ad8fa1..64a2a2ad1dd 100644 --- a/pkg/kclient/services_test.go +++ b/pkg/kclient/services_test.go @@ -1,6 +1,7 @@ package kclient import ( + "reflect" "testing" devfilev1 "github.com/devfile/api/pkg/apis/workspaces/v1alpha2" @@ -168,3 +169,57 @@ func TestUpdateService(t *testing.T) { }) } } + +func TestListServices(t *testing.T) { + type args struct { + selector string + } + tests := []struct { + name string + args args + returnedServices corev1.ServiceList + want []corev1.Service + wantErr bool + }{ + { + name: "case 1: returned 3 services", + args: args{ + selector: "component-name=nodejs", + }, + returnedServices: corev1.ServiceList{ + Items: testingutil.FakeKubeServices("nodejs"), + }, + want: testingutil.FakeKubeServices("nodejs"), + }, + { + name: "case 2: no service retuned", + args: args{ + 
selector: "component-name=nodejs", + }, + returnedServices: corev1.ServiceList{ + Items: nil, + }, + want: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // initialising the fakeclient + fkclient, fkclientset := FakeNew() + fkclient.Namespace = "default" + + fkclientset.Kubernetes.PrependReactor("list", "services", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, &tt.returnedServices, nil + }) + + got, err := fkclient.ListServices(tt.args.selector) + if (err != nil) != tt.wantErr { + t.Errorf("ListServices() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ListServices() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/occlient/deploymentConfigs.go b/pkg/occlient/deploymentConfigs.go new file mode 100644 index 00000000000..00bd0528ac0 --- /dev/null +++ b/pkg/occlient/deploymentConfigs.go @@ -0,0 +1,219 @@ +package occlient + +import ( + "encoding/json" + "fmt" + appsv1 "github.com/openshift/api/apps/v1" + appsschema "github.com/openshift/client-go/apps/clientset/versioned/scheme" + "github.com/openshift/odo/pkg/util" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/klog" + "sort" + "time" +) + +// IsDeploymentConfigSupported checks if DeploymentConfig type is present on the cluster +func (c *Client) IsDeploymentConfigSupported() (bool, error) { + const Group = "apps.openshift.io" + const Version = "v1" + + return c.isResourceSupported(Group, Version, "deploymentconfigs") +} + +// GetDeploymentConfigFromName returns the Deployment Config resource given +// the Deployment Config name +func (c *Client) GetDeploymentConfigFromName(name string) (*appsv1.DeploymentConfig, error) { + klog.V(3).Infof("Getting DeploymentConfig: %s", name) + deploymentConfig, err := c.appsClient.DeploymentConfigs(c.Namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return deploymentConfig, nil +} + +// GetDeploymentConfigFromSelector returns the Deployment Config object associated +// with the given selector. +// An error is thrown when exactly one Deployment Config is not found for the +// selector. +func (c *Client) GetDeploymentConfigFromSelector(selector string) (*appsv1.DeploymentConfig, error) { + deploymentConfigs, err := c.ListDeploymentConfigs(selector) + if err != nil { + return nil, errors.Wrapf(err, "unable to get DeploymentConfigs for the selector: %v", selector) + } + + numDC := len(deploymentConfigs) + if numDC == 0 { + return nil, fmt.Errorf("no Deployment Config was found for the selector: %v", selector) + } else if numDC > 1 { + return nil, fmt.Errorf("multiple Deployment Configs exist for the selector: %v. 
Only one must be present", selector) + } + + return &deploymentConfigs[0], nil +} + +// ListDeploymentConfigs returns an array of Deployment Config +// resources which match the given selector +func (c *Client) ListDeploymentConfigs(selector string) ([]appsv1.DeploymentConfig, error) { + var dcList *appsv1.DeploymentConfigList + var err error + + if selector != "" { + dcList, err = c.appsClient.DeploymentConfigs(c.Namespace).List(metav1.ListOptions{ + LabelSelector: selector, + }) + } else { + dcList, err = c.appsClient.DeploymentConfigs(c.Namespace).List(metav1.ListOptions{ + FieldSelector: fields.Set{"metadata.namespace": c.Namespace}.AsSelector().String(), + }) + } + if err != nil { + return nil, errors.Wrap(err, "unable to list DeploymentConfigs") + } + return dcList.Items, nil +} + +// WaitAndGetDC block and waits until the DeploymentConfig has updated it's annotation +// Parameters: +// name: Name of DC +// timeout: Interval of time.Duration to wait for before timing out waiting for its rollout +// waitCond: Function indicating when to consider dc rolled out +// Returns: +// Updated DC and errors if any +func (c *Client) WaitAndGetDC(name string, desiredRevision int64, timeout time.Duration, waitCond func(*appsv1.DeploymentConfig, int64) bool) (*appsv1.DeploymentConfig, error) { + + w, err := c.appsClient.DeploymentConfigs(c.Namespace).Watch(metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", name), + }) + defer w.Stop() + + if err != nil { + return nil, errors.Wrapf(err, "unable to watch dc") + } + + timeoutChannel := time.After(timeout) + // Keep trying until we're timed out or got a result or got an error + for { + select { + + // Timout after X amount of seconds + case <-timeoutChannel: + return nil, errors.New("Timed out waiting for annotation to update") + + // Each loop we check the result + case val, ok := <-w.ResultChan(): + + if !ok { + break + } + if e, ok := val.Object.(*appsv1.DeploymentConfig); ok { + for _, cond := range e.Status.Conditions { + // using this just for debugging message, so ignoring error on purpose + jsonCond, _ := json.Marshal(cond) + klog.V(3).Infof("DeploymentConfig Condition: %s", string(jsonCond)) + } + // If the annotation has been updated, let's exit + if waitCond(e, desiredRevision) { + return e, nil + } + + } + } + } +} + +// GetDeploymentConfigLabelValues get label values of given label from objects in project that are matching selector +// returns slice of unique label values +func (c *Client) GetDeploymentConfigLabelValues(label string, selector string) ([]string, error) { + + // List DeploymentConfig according to selectors + dcList, err := c.appsClient.DeploymentConfigs(c.Namespace).List(metav1.ListOptions{LabelSelector: selector}) + if err != nil { + return nil, errors.Wrap(err, "unable to list DeploymentConfigs") + } + + // Grab all the matched strings + var values []string + for _, elem := range dcList.Items { + for key, val := range elem.Labels { + if key == label { + values = append(values, val) + } + } + } + + // Sort alphabetically + sort.Strings(values) + + return values, nil +} + +// DisplayDeploymentConfigLog logs the deployment config to stdout +func (c *Client) DisplayDeploymentConfigLog(deploymentConfigName string, followLog bool) error { + + // Set standard log options + deploymentLogOptions := appsv1.DeploymentLogOptions{Follow: false, NoWait: true} + + // If the log is being followed, set it to follow / don't wait + if followLog { + // TODO: https://github.com/kubernetes/kubernetes/pull/60696 + // Unable to 
set to 0, until openshift/client-go updates their Kubernetes vendoring to 1.11.0 + // Set to 1 for now. + tailLines := int64(1) + deploymentLogOptions = appsv1.DeploymentLogOptions{Follow: true, NoWait: false, Previous: false, TailLines: &tailLines} + } + + // RESTClient call to OpenShift + rd, err := c.appsClient.RESTClient().Get(). + Namespace(c.Namespace). + Name(deploymentConfigName). + Resource("deploymentconfigs"). + SubResource("log"). + VersionedParams(&deploymentLogOptions, appsschema.ParameterCodec). + Stream() + if err != nil { + return errors.Wrapf(err, "unable get deploymentconfigs log %s", deploymentConfigName) + } + if rd == nil { + return errors.New("unable to retrieve DeploymentConfig from OpenShift, does your component exist?") + } + + return util.DisplayLog(followLog, rd, deploymentConfigName) +} + +// StartDeployment instantiates a given deployment +// deploymentName is the name of the deployment to instantiate +func (c *Client) StartDeployment(deploymentName string) (string, error) { + if deploymentName == "" { + return "", errors.Errorf("deployment name is empty") + } + klog.V(3).Infof("Deployment %s started.", deploymentName) + deploymentRequest := appsv1.DeploymentRequest{ + Name: deploymentName, + // latest is set to true to prevent image name resolution issue + // inspired from https://github.com/openshift/origin/blob/882ed02142fbf7ba16da9f8efeb31dab8cfa8889/pkg/oc/cli/rollout/latest.go#L194 + Latest: true, + Force: true, + } + result, err := c.appsClient.DeploymentConfigs(c.Namespace).Instantiate(deploymentName, &deploymentRequest) + if err != nil { + return "", errors.Wrapf(err, "unable to instantiate Deployment for %s", deploymentName) + } + klog.V(3).Infof("Deployment %s for DeploymentConfig %s triggered.", deploymentName, result.Name) + + return result.Name, nil +} + +// GetPodUsingDeploymentConfig gets the pod using deployment config name +func (c *Client) GetPodUsingDeploymentConfig(componentName, appName string) (*corev1.Pod, error) { + deploymentConfigName, err := util.NamespaceOpenShiftObject(componentName, appName) + if err != nil { + return nil, err + } + + // Find Pod for component + podSelector := fmt.Sprintf("deploymentconfig=%s", deploymentConfigName) + return c.GetKubeClient().GetOnePodFromSelector(podSelector) +} diff --git a/pkg/occlient/deploymentConfigs_test.go b/pkg/occlient/deploymentConfigs_test.go new file mode 100644 index 00000000000..852463941b1 --- /dev/null +++ b/pkg/occlient/deploymentConfigs_test.go @@ -0,0 +1,387 @@ +package occlient + +import ( + "fmt" + appsv1 "github.com/openshift/api/apps/v1" + applabels "github.com/openshift/odo/pkg/application/labels" + "github.com/openshift/odo/pkg/testingutil" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + ktesting "k8s.io/client-go/testing" + "reflect" + "testing" + "time" +) + +func TestGetDeploymentConfigLabelValues(t *testing.T) { + type args struct { + deploymentConfigList appsv1.DeploymentConfigList + expectedOutput []string + } + tests := []struct { + applicationName string + name string + args args + wantErr bool + actions int + }{ + { + name: "Case 1 - Retrieve list", + applicationName: "app", + args: args{ + expectedOutput: []string{"app", "app2"}, + deploymentConfigList: appsv1.DeploymentConfigList{ + Items: []appsv1.DeploymentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/part-of": "app", + }, + }, + }, + { + ObjectMeta: 
metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/part-of": "app2", + }, + }, + }, + }, + }, + }, + wantErr: false, + actions: 1, + }, + { + name: "Case 1 - Retrieve list, different order", + applicationName: "app", + args: args{ + expectedOutput: []string{"app", "app2"}, + deploymentConfigList: appsv1.DeploymentConfigList{ + Items: []appsv1.DeploymentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/part-of": "app2", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/part-of": "app", + }, + }, + }, + }, + }, + }, + wantErr: false, + actions: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + fakeClient, fakeClientSet := FakeNew() + + fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) { + return true, &tt.args.deploymentConfigList, nil + }) + + // Run function GetServiceInstanceLabelValues + list, err := fakeClient.GetDeploymentConfigLabelValues(applabels.ApplicationLabel, applabels.ApplicationLabel) + + if err == nil && !tt.wantErr { + + // Compare arrays + if !reflect.DeepEqual(list, tt.args.expectedOutput) { + t.Errorf("expected %s output, got %s", tt.args.expectedOutput, list) + } + + if (len(fakeClientSet.AppsClientset.Actions()) != tt.actions) && !tt.wantErr { + t.Errorf("expected %v action(s) in GetServiceInstanceLabelValues got %v: %v", tt.actions, len(fakeClientSet.AppsClientset.Actions()), fakeClientSet.AppsClientset.Actions()) + } + + } else if err == nil && tt.wantErr { + t.Error("test failed, expected: false, got true") + } else if err != nil && !tt.wantErr { + t.Errorf("test failed, expected: no error, got error: %s", err.Error()) + } + + }) + } +} + +func TestListDeploymentConfigs(t *testing.T) { + tests := []struct { + name string + selector string + label map[string]string + wantErr bool + }{ + { + name: "true case", + selector: "app.kubernetes.io/part-of=app", + label: map[string]string{ + "app.kubernetes.io/part-of": "app", + }, + wantErr: false, + }, + { + name: "true case", + selector: "app.kubernetes.io/part-of=app1", + label: map[string]string{ + "app.kubernetes.io/part-of": "app", + }, + wantErr: false, + }, + } + + listOfDC := appsv1.DeploymentConfigList{ + Items: []appsv1.DeploymentConfig{ + { + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/part-of": "app", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient, fakeClientSet := FakeNew() + + fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) { + if !reflect.DeepEqual(action.(ktesting.ListAction).GetListRestrictions().Labels.String(), tt.selector) { + return true, nil, fmt.Errorf("labels not matching with expected values, expected:%s, got:%s", tt.selector, action.(ktesting.ListAction).GetListRestrictions()) + } + return true, &listOfDC, nil + }) + dc, err := fakeClient.ListDeploymentConfigs(tt.selector) + + if len(fakeClientSet.AppsClientset.Actions()) != 1 { + t.Errorf("expected 1 AppsClientset.Actions() in ListDeploymentConfigs, got: %v", fakeClientSet.AppsClientset.Actions()) + } + + if tt.wantErr == false && err != nil { + t.Errorf("test failed, %#v", dc[0].Labels) + } + + for _, dc1 := range dc { + if !reflect.DeepEqual(dc1.Labels, tt.label) { + t.Errorf("labels are not matching with expected labels, expected: %s, got %s", 
tt.label, dc1.Labels) + } + } + + }) + } +} + +func TestWaitAndGetDC(t *testing.T) { + type args struct { + name string + annotation string + value string + dc appsv1.DeploymentConfig + timeout time.Duration + } + tests := []struct { + name string + args args + wantErr bool + actions int + }{ + { + name: "Case 1 - Check that the function actually works", + args: args{ + name: "foo", + annotation: "app.kubernetes.io/component-source-type", + value: "git", + dc: *fakeDeploymentConfig("foo", "bar", + []corev1.EnvVar{}, []corev1.EnvFromSource{}, t), + timeout: 3 * time.Second, + }, + wantErr: false, + actions: 1, + }, + { + name: "Case 2 - Purposefully timeout / error", + args: args{ + name: "foo", + annotation: "app.kubernetes.io/component-source-type", + value: "foobar", + dc: *fakeDeploymentConfig("foo", "bar", + []corev1.EnvVar{}, []corev1.EnvFromSource{}, t), + timeout: 3 * time.Second, + }, + wantErr: true, + actions: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient, fakeClientSet := FakeNew() + fkWatch := watch.NewFake() + go func() { + fkWatch.Modify(&tt.args.dc) + }() + fakeClientSet.AppsClientset.PrependWatchReactor("deploymentconfigs", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { + return true, fkWatch, nil + }) + // Run function WaitAndGetDC + _, err := fakeClient.WaitAndGetDC(tt.args.name, 0, tt.args.timeout, func(*appsv1.DeploymentConfig, int64) bool { + return !tt.wantErr + }) + // Error checking WaitAndGetDC + if !tt.wantErr == (err != nil) { + t.Errorf(" client.WaitAndGetDC() unexpected error %v, wantErr %v", err, tt.wantErr) + } + if err == nil && !tt.wantErr { + // Check to see how many actions are being ran + if (len(fakeClientSet.AppsClientset.Actions()) != tt.actions) && !tt.wantErr { + t.Errorf("expected %v action(s) in WaitAndGetDC got %v: %v", tt.actions, len(fakeClientSet.AppsClientset.Actions()), fakeClientSet.AppsClientset.Actions()) + } + } else if err == nil && tt.wantErr { + t.Error("test failed, expected: false, got true") + } else if err != nil && !tt.wantErr { + t.Errorf("test failed, expected: no error, got error: %s", err.Error()) + } + }) + } +} + +func TestStartDeployment(t *testing.T) { + tests := []struct { + name string + deploymentName string + wantErr bool + }{ + { + name: "Case 1: Testing valid name", + deploymentName: "ruby", + wantErr: false, + }, + { + name: "Case 2: Testing invalid name", + deploymentName: "", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + fkclient, fkclientset := FakeNew() + + fkclientset.AppsClientset.PrependReactor("create", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) { + deploymentConfig := appsv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.deploymentName, + }, + } + return true, &deploymentConfig, nil + }) + + _, err := fkclient.StartDeployment(tt.deploymentName) + if !tt.wantErr == (err != nil) { + t.Errorf(" client.StartDeployment(string) unexpected error %v, wantErr %v", err, tt.wantErr) + } + + if err == nil { + + if len(fkclientset.AppsClientset.Actions()) != 1 { + t.Errorf("expected 1 action in StartDeployment got: %v", fkclientset.AppsClientset.Actions()) + } else { + startedDeployment := fkclientset.AppsClientset.Actions()[0].(ktesting.CreateAction).GetObject().(*appsv1.DeploymentRequest) + + if startedDeployment.Name != tt.deploymentName { + t.Errorf("deployment name is not matching to expected name, expected: %s, got %s", 
tt.deploymentName, startedDeployment.Name) + } + + if startedDeployment.Latest == false { + t.Errorf("deployment is not set to latest") + } + } + } + }) + } +} + +func TestGetDeploymentConfigFromSelector(t *testing.T) { + type args struct { + selector string + } + tests := []struct { + name string + args args + returnedDCList *appsv1.DeploymentConfigList + want *appsv1.DeploymentConfig + wantErr bool + }{ + { + name: "case 1: only one dc returned", + args: args{ + "app=app", + }, + returnedDCList: &appsv1.DeploymentConfigList{ + Items: []appsv1.DeploymentConfig{ + *testingutil.OneFakeDeploymentConfigWithMounts("comp-0", "nodejs", "app", nil), + }, + }, + want: testingutil.OneFakeDeploymentConfigWithMounts("comp-0", "nodejs", "app", nil), + }, + { + name: "case 2: no dc returned", + args: args{ + "app=app", + }, + returnedDCList: &appsv1.DeploymentConfigList{ + Items: []appsv1.DeploymentConfig{}, + }, + wantErr: true, + }, + { + name: "case 3: two dc returned", + args: args{ + "app=app", + }, + returnedDCList: &appsv1.DeploymentConfigList{ + Items: []appsv1.DeploymentConfig{ + *testingutil.OneFakeDeploymentConfigWithMounts("comp-0", "nodejs", "app", nil), + *testingutil.OneFakeDeploymentConfigWithMounts("comp-1", "nodejs", "app", nil), + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient, fakeClientSet := FakeNew() + + fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) { + if !reflect.DeepEqual(action.(ktesting.ListAction).GetListRestrictions().Labels.String(), tt.args.selector) { + return true, nil, fmt.Errorf("labels not matching with expected values, expected:%s, got:%s", tt.args.selector, action.(ktesting.ListAction).GetListRestrictions()) + } + return true, tt.returnedDCList, nil + }) + + got, err := fakeClient.GetDeploymentConfigFromSelector(tt.args.selector) + if (err != nil) != tt.wantErr { + t.Errorf("GetDeploymentConfigFromSelector() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wantErr && err != nil { + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetDeploymentConfigFromSelector() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/occlient/occlient.go b/pkg/occlient/occlient.go index 534dd54d298..a19b09846cc 100644 --- a/pkg/occlient/occlient.go +++ b/pkg/occlient/occlient.go @@ -11,10 +11,8 @@ import ( "sort" "strconv" "strings" - "sync" "time" - "github.com/olekukonko/tablewriter" "github.com/pkg/errors" "github.com/openshift/odo/pkg/config" @@ -26,7 +24,6 @@ import ( // api clientsets servicecatalogclienset "github.com/kubernetes-sigs/service-catalog/pkg/client/clientset_generated/clientset/typed/servicecatalog/v1beta1" - appsschema "github.com/openshift/client-go/apps/clientset/versioned/scheme" appsclientset "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1" buildschema "github.com/openshift/client-go/build/clientset/versioned/scheme" buildclientset "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" @@ -49,12 +46,10 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/version" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/remotecommand" "k8s.io/klog" @@ -65,10 +60,6 @@ var ( 
DEPLOYMENT_CONFIG_NOT_FOUND error = fmt.Errorf("Requested deployment config does not exist") ) -// We use a mutex here in order to make 100% sure that functions such as CollectEvents -// so that there are no race conditions -var mu sync.Mutex - // CreateArgs is a container of attributes of component create action type CreateArgs struct { Name string @@ -88,7 +79,6 @@ type CreateArgs struct { } const ( - failedEventCount = 5 OcUpdateTimeout = 5 * time.Minute OpenShiftNameSpace = "openshift" waitForComponentDeletionTimeout = 120 * time.Second @@ -99,9 +89,6 @@ const ( // The length of the string to be generated for names of resources nameLength = 5 - // ComponentPortAnnotationName annotation is used on the secrets that are created for each exposed port of the component - ComponentPortAnnotationName = "component-port" - // EnvS2IScriptsURL is an env var exposed to https://github.com/openshift/odo-init-image/blob/master/assemble-and-restart to indicate location of s2i scripts in this case assemble script EnvS2IScriptsURL = "ODO_S2I_SCRIPTS_URL" @@ -629,15 +616,6 @@ func (c *Client) GetImageStream(imageNS string, imageName string, imageTag strin return imageStream, nil } -// GetSecret returns the Secret object in the given namespace -func (c *Client) GetSecret(name, namespace string) (*corev1.Secret, error) { - secret, err := c.kubeClient.KubeClient.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "unable to get the secret %s", secret) - } - return secret, nil -} - // GetImageStreamImage returns image and error if any, corresponding to the passed imagestream and image tag func (c *Client) GetImageStreamImage(imageStream *imagev1.ImageStream, imageTag string) (*imagev1.ImageStreamImage, error) { imageNS := imageStream.ObjectMeta.Namespace @@ -785,58 +763,18 @@ func (c *Client) NewAppS2I(params CreateArgs, commonObjectMeta metav1.ObjectMeta } // Create a service - svc, err := c.CreateService(commonObjectMeta, dc.Spec.Template.Spec.Containers[0].Ports, ownerReference) + commonObjectMeta.SetOwnerReferences(append(commonObjectMeta.GetOwnerReferences(), ownerReference)) + svc, err := c.GetKubeClient().CreateService(commonObjectMeta, generateServiceSpec(commonObjectMeta, dc.Spec.Template.Spec.Containers[0].Ports)) if err != nil { return errors.Wrapf(err, "unable to create Service for %s", commonObjectMeta.Name) } // Create secret(s) - err = c.createSecrets(params.Name, commonObjectMeta, svc, ownerReference) + err = c.GetKubeClient().CreateSecrets(params.Name, commonObjectMeta, svc, ownerReference) return err } -// Create a secret for each port, containing the host and port of the component -// This is done so other components can later inject the secret into the environment -// and have the "coordinates" to communicate with this component -func (c *Client) createSecrets(componentName string, commonObjectMeta metav1.ObjectMeta, svc *corev1.Service, ownerReference metav1.OwnerReference) error { - originalName := commonObjectMeta.Name - for _, svcPort := range svc.Spec.Ports { - portAsString := fmt.Sprintf("%v", svcPort.Port) - - // we need to create multiple secrets, so each one has to contain the port in it's name - // so we change the name of each secret by adding the port number - commonObjectMeta.Name = fmt.Sprintf("%v-%v", originalName, portAsString) - - // we also add the port as an annotation to the secret - // this comes in handy when we need to "query" for the appropriate secret - // of a component based on the port - 
commonObjectMeta.Annotations[ComponentPortAnnotationName] = portAsString - - err := c.CreateSecret( - commonObjectMeta, - map[string]string{ - secretKeyName(componentName, "host"): svc.Name, - secretKeyName(componentName, "port"): portAsString, - }, - ownerReference) - - if err != nil { - return errors.Wrapf(err, "unable to create Secret for %s", commonObjectMeta.Name) - } - } - - // restore the original values of the fields we changed - commonObjectMeta.Name = originalName - delete(commonObjectMeta.Annotations, ComponentPortAnnotationName) - - return nil -} - -func secretKeyName(componentName, baseKeyName string) string { - return fmt.Sprintf("COMPONENT_%v_%v", strings.Replace(strings.ToUpper(componentName), "-", "_", -1), strings.ToUpper(baseKeyName)) -} - // getS2ILabelValue returns the requested S2I label value from the passed set of labels attached to builder image // and the hard coded possible list(the labels are not uniform across different builder images) of expected labels func getS2ILabelValue(labels map[string]string, expectedLabelsSet []string) string { @@ -1100,12 +1038,15 @@ func (c *Client) BootstrapSupervisoredS2I(params CreateArgs, commonObjectMeta me } } - svc, err := c.CreateService(commonObjectMeta, dc.Spec.Template.Spec.Containers[0].Ports, ownerReference) + // Create a service + commonObjectMeta.SetOwnerReferences(append(commonObjectMeta.GetOwnerReferences(), ownerReference)) + + svc, err := c.GetKubeClient().CreateService(commonObjectMeta, generateServiceSpec(commonObjectMeta, dc.Spec.Template.Spec.Containers[0].Ports)) if err != nil { return errors.Wrapf(err, "unable to create Service for %s", commonObjectMeta.Name) } - err = c.createSecrets(params.Name, commonObjectMeta, svc, ownerReference) + err = c.GetKubeClient().CreateSecrets(params.Name, commonObjectMeta, svc, ownerReference) if err != nil { return err } @@ -1119,57 +1060,6 @@ func (c *Client) BootstrapSupervisoredS2I(params CreateArgs, commonObjectMeta me return nil } -// CreateService generates and creates the service -// commonObjectMeta is the ObjectMeta for the service -// dc is the deploymentConfig to get the container ports -func (c *Client) CreateService(commonObjectMeta metav1.ObjectMeta, containerPorts []corev1.ContainerPort, ownerReference metav1.OwnerReference) (*corev1.Service, error) { - // generate and create Service - var svcPorts []corev1.ServicePort - for _, containerPort := range containerPorts { - svcPort := corev1.ServicePort{ - - Name: containerPort.Name, - Port: containerPort.ContainerPort, - Protocol: containerPort.Protocol, - TargetPort: intstr.FromInt(int(containerPort.ContainerPort)), - } - svcPorts = append(svcPorts, svcPort) - } - svc := corev1.Service{ - ObjectMeta: commonObjectMeta, - Spec: corev1.ServiceSpec{ - Ports: svcPorts, - Selector: map[string]string{ - "deploymentconfig": commonObjectMeta.Name, - }, - }, - } - svc.SetOwnerReferences(append(svc.GetOwnerReferences(), ownerReference)) - - createdSvc, err := c.kubeClient.KubeClient.CoreV1().Services(c.Namespace).Create(&svc) - if err != nil { - return nil, errors.Wrapf(err, "unable to create Service for %s", commonObjectMeta.Name) - } - return createdSvc, err -} - -// CreateSecret generates and creates the secret -// commonObjectMeta is the ObjectMeta for the service -func (c *Client) CreateSecret(objectMeta metav1.ObjectMeta, data map[string]string, ownerReference metav1.OwnerReference) error { - - secret := corev1.Secret{ - ObjectMeta: objectMeta, - Type: corev1.SecretTypeOpaque, - StringData: data, - } - 
secret.SetOwnerReferences(append(secret.GetOwnerReferences(), ownerReference)) - _, err := c.kubeClient.KubeClient.CoreV1().Secrets(c.Namespace).Create(&secret) - if err != nil { - return errors.Wrapf(err, "unable to create secret for %s", objectMeta.Name) - } - return nil -} - // updateEnvVar updates the environmental variables to the container in the DC // dc is the deployment config to be updated // envVars is the array containing the corev1.EnvVar values @@ -1655,241 +1545,6 @@ func (c *Client) WaitForBuildToFinish(buildName string, stdout io.Writer, buildT } } -// WaitAndGetDC block and waits until the DeploymentConfig has updated it's annotation -// Parameters: -// name: Name of DC -// timeout: Interval of time.Duration to wait for before timing out waiting for its rollout -// waitCond: Function indicating when to consider dc rolled out -// Returns: -// Updated DC and errors if any -func (c *Client) WaitAndGetDC(name string, desiredRevision int64, timeout time.Duration, waitCond func(*appsv1.DeploymentConfig, int64) bool) (*appsv1.DeploymentConfig, error) { - - w, err := c.appsClient.DeploymentConfigs(c.Namespace).Watch(metav1.ListOptions{ - FieldSelector: fmt.Sprintf("metadata.name=%s", name), - }) - defer w.Stop() - - if err != nil { - return nil, errors.Wrapf(err, "unable to watch dc") - } - - timeoutChannel := time.After(timeout) - // Keep trying until we're timed out or got a result or got an error - for { - select { - - // Timout after X amount of seconds - case <-timeoutChannel: - return nil, errors.New("Timed out waiting for annotation to update") - - // Each loop we check the result - case val, ok := <-w.ResultChan(): - - if !ok { - break - } - if e, ok := val.Object.(*appsv1.DeploymentConfig); ok { - for _, cond := range e.Status.Conditions { - // using this just for debugging message, so ignoring error on purpose - jsonCond, _ := json.Marshal(cond) - klog.V(3).Infof("DeploymentConfig Condition: %s", string(jsonCond)) - } - // If the annotation has been updated, let's exit - if waitCond(e, desiredRevision) { - return e, nil - } - - } - } - } -} - -// CollectEvents collects events in a Goroutine by manipulating a spinner. -// We don't care about the error (it's usually ran in a go routine), so erroring out is not needed. -func (c *Client) CollectEvents(selector string, events map[string]corev1.Event, spinner *log.Status, quit <-chan int) { - - // Secondly, we will start a go routine for watching for events related to the pod and update our pod status accordingly. - eventWatcher, err := c.kubeClient.KubeClient.CoreV1().Events(c.Namespace).Watch(metav1.ListOptions{}) - if err != nil { - log.Warningf("Unable to watch for events: %s", err) - return - } - defer eventWatcher.Stop() - - // Create an endless loop for collecting - for { - select { - case <-quit: - klog.V(3).Info("Quitting collect events") - return - case val, ok := <-eventWatcher.ResultChan(): - mu.Lock() - if !ok { - log.Warning("Watch channel was closed") - return - } - if e, ok := val.Object.(*corev1.Event); ok { - - // If there are many warning events happening during deployment, let's log them. 
- if e.Type == "Warning" { - - if e.Count >= failedEventCount { - newEvent := e - (events)[e.Name] = *newEvent - klog.V(3).Infof("Warning Event: Count: %d, Reason: %s, Message: %s", e.Count, e.Reason, e.Message) - // Change the spinner message to show the warning - spinner.WarningStatus(fmt.Sprintf("WARNING x%d: %s", e.Count, e.Reason)) - } - - } - - } else { - log.Warning("Unable to convert object to event") - return - } - mu.Unlock() - } - } -} - -// WaitAndGetPod block and waits until pod matching selector is in in Running state -// desiredPhase cannot be PodFailed or PodUnknown -func (c *Client) WaitAndGetPod(selector string, desiredPhase corev1.PodPhase, waitMessage string) (*corev1.Pod, error) { - - // Try to grab the preference in order to set a timeout.. but if not, we'll use the default. - pushTimeout := preference.DefaultPushTimeout * time.Second - cfg, configReadErr := preference.New() - if configReadErr != nil { - klog.V(3).Info(errors.Wrap(configReadErr, "unable to read config file")) - } else { - pushTimeout = time.Duration(cfg.GetPushTimeout()) * time.Second - } - - klog.V(3).Infof("Waiting for %s pod", selector) - spinner := log.Spinner(waitMessage) - defer spinner.End(false) - - w, err := c.kubeClient.KubeClient.CoreV1().Pods(c.Namespace).Watch(metav1.ListOptions{ - LabelSelector: selector, - }) - if err != nil { - return nil, errors.Wrapf(err, "unable to watch pod") - } - defer w.Stop() - - // Here we are going to start a loop watching for the pod status - podChannel := make(chan *corev1.Pod) - watchErrorChannel := make(chan error) - go func(spinny *log.Status) { - loop: - for { - val, ok := <-w.ResultChan() - if !ok { - watchErrorChannel <- errors.New("watch channel was closed") - break loop - } - if e, ok := val.Object.(*corev1.Pod); ok { - klog.V(3).Infof("Status of %s pod is %s", e.Name, e.Status.Phase) - for _, cond := range e.Status.Conditions { - // using this just for debugging message, so ignoring error on purpose - jsonCond, _ := json.Marshal(cond) - klog.V(3).Infof("Pod Conditions: %s", string(jsonCond)) - } - for _, status := range e.Status.ContainerStatuses { - // using this just for debugging message, so ignoring error on purpose - jsonStatus, _ := json.Marshal(status) - klog.V(3).Infof("Container Status: %s", string(jsonStatus)) - } - switch e.Status.Phase { - case desiredPhase: - klog.V(3).Infof("Pod %s is %v", e.Name, desiredPhase) - podChannel <- e - break loop - case corev1.PodFailed, corev1.PodUnknown: - watchErrorChannel <- errors.Errorf("pod %s status %s", e.Name, e.Status.Phase) - break loop - } - } else { - watchErrorChannel <- errors.New("unable to convert event object to Pod") - break loop - } - } - close(podChannel) - close(watchErrorChannel) - }(spinner) - - // Collect all the events in a separate go routine - failedEvents := make(map[string]corev1.Event) - quit := make(chan int) - go c.CollectEvents(selector, failedEvents, spinner, quit) - defer close(quit) - - select { - case val := <-podChannel: - spinner.End(true) - return val, nil - case err := <-watchErrorChannel: - return nil, err - case <-time.After(pushTimeout): - - // Create a useful error if there are any failed events - errorMessage := fmt.Sprintf(`waited %s but couldn't find running pod matching selector: '%s'`, pushTimeout, selector) - - if len(failedEvents) != 0 { - - // Create an output table - tableString := &strings.Builder{} - table := tablewriter.NewWriter(tableString) - table.SetAlignment(tablewriter.ALIGN_LEFT) - table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) - 
table.SetCenterSeparator("") - table.SetColumnSeparator("") - table.SetRowSeparator("") - - // Header - table.SetHeader([]string{"Name", "Count", "Reason", "Message"}) - - // List of events - for name, event := range failedEvents { - table.Append([]string{name, strconv.Itoa(int(event.Count)), event.Reason, event.Message}) - } - - // Here we render the table as well as a helpful error message - table.Render() - errorMessage = fmt.Sprintf(`waited %s but was unable to find a running pod matching selector: '%s' -For more information to help determine the cause of the error, re-run with '-v'. -See below for a list of failed events that occured more than %d times during deployment: -%s`, pushTimeout, selector, failedEventCount, tableString) - } - - return nil, errors.Errorf(errorMessage) - } -} - -// WaitAndGetSecret blocks and waits until the secret is available -func (c *Client) WaitAndGetSecret(name string, namespace string) (*corev1.Secret, error) { - klog.V(3).Infof("Waiting for secret %s to become available", name) - - w, err := c.kubeClient.KubeClient.CoreV1().Secrets(namespace).Watch(metav1.ListOptions{ - FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), - }) - if err != nil { - return nil, errors.Wrapf(err, "unable to watch secret") - } - defer w.Stop() - for { - val, ok := <-w.ResultChan() - if !ok { - break - } - if e, ok := val.Object.(*corev1.Secret); ok { - klog.V(3).Infof("Secret %s now exists", e.Name) - return e, nil - } - } - return nil, errors.Errorf("unknown error while waiting for secret '%s'", name) -} - // FollowBuildLog stream build log to stdout func (c *Client) FollowBuildLog(buildName string, stdout io.Writer, buildTimeout time.Duration) error { buildLogOptions := buildv1.BuildLogOptions{ @@ -1918,39 +1573,6 @@ func (c *Client) FollowBuildLog(buildName string, stdout io.Writer, buildTimeout return nil } -// DisplayDeploymentConfigLog logs the deployment config to stdout -func (c *Client) DisplayDeploymentConfigLog(deploymentConfigName string, followLog bool, stdout io.Writer) error { - - // Set standard log options - deploymentLogOptions := appsv1.DeploymentLogOptions{Follow: false, NoWait: true} - - // If the log is being followed, set it to follow / don't wait - if followLog { - // TODO: https://github.com/kubernetes/kubernetes/pull/60696 - // Unable to set to 0, until openshift/client-go updates their Kubernetes vendoring to 1.11.0 - // Set to 1 for now. - tailLines := int64(1) - deploymentLogOptions = appsv1.DeploymentLogOptions{Follow: true, NoWait: false, Previous: false, TailLines: &tailLines} - } - - // RESTClient call to OpenShift - rd, err := c.appsClient.RESTClient().Get(). - Namespace(c.Namespace). - Name(deploymentConfigName). - Resource("deploymentconfigs"). - SubResource("log"). - VersionedParams(&deploymentLogOptions, appsschema.ParameterCodec). 
- Stream() - if err != nil { - return errors.Wrapf(err, "unable get deploymentconfigs log %s", deploymentConfigName) - } - if rd == nil { - return errors.New("unable to retrieve DeploymentConfig from OpenShift, does your component exist?") - } - - return util.DisplayLog(followLog, rd, deploymentConfigName) -} - // Delete takes labels as a input and based on it, deletes respective resource func (c *Client) Delete(labels map[string]string, wait bool) error { @@ -2067,32 +1689,6 @@ func (c *Client) DeleteServiceInstance(labels map[string]string) error { return nil } -// GetDeploymentConfigLabelValues get label values of given label from objects in project that are matching selector -// returns slice of unique label values -func (c *Client) GetDeploymentConfigLabelValues(label string, selector string) ([]string, error) { - - // List DeploymentConfig according to selectors - dcList, err := c.appsClient.DeploymentConfigs(c.Namespace).List(metav1.ListOptions{LabelSelector: selector}) - if err != nil { - return nil, errors.Wrap(err, "unable to list DeploymentConfigs") - } - - // Grab all the matched strings - var values []string - for _, elem := range dcList.Items { - for key, val := range elem.Labels { - if key == label { - values = append(values, val) - } - } - } - - // Sort alphabetically - sort.Strings(values) - - return values, nil -} - // GetServiceInstanceLabelValues get label values of given label from objects in project that match the selector func (c *Client) GetServiceInstanceLabelValues(label string, selector string) ([]string, error) { @@ -2381,23 +1977,6 @@ func (c *Client) GetAllClusterServicePlans() ([]scv1beta1.ClusterServicePlan, er return planList.Items, nil } -// ListSecrets lists all the secrets based on the given label selector -func (c *Client) ListSecrets(labelSelector string) ([]corev1.Secret, error) { - listOptions := metav1.ListOptions{} - if len(labelSelector) > 0 { - listOptions = metav1.ListOptions{ - LabelSelector: labelSelector, - } - } - - secretList, err := c.kubeClient.KubeClient.CoreV1().Secrets(c.Namespace).List(listOptions) - if err != nil { - return nil, errors.Wrap(err, "unable to get secret list") - } - - return secretList.Items, nil -} - // DeleteBuildConfig deletes the given BuildConfig by name using CommonObjectMeta.. 
func (c *Client) DeleteBuildConfig(commonObjectMeta metav1.ObjectMeta) error { @@ -2410,109 +1989,6 @@ func (c *Client) DeleteBuildConfig(commonObjectMeta metav1.ObjectMeta) error { return c.buildClient.BuildConfigs(c.Namespace).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector}) } -// GetDeploymentConfigsFromSelector returns an array of Deployment Config -// resources which match the given selector -func (c *Client) GetDeploymentConfigsFromSelector(selector string) ([]appsv1.DeploymentConfig, error) { - var dcList *appsv1.DeploymentConfigList - var err error - - if selector != "" { - dcList, err = c.appsClient.DeploymentConfigs(c.Namespace).List(metav1.ListOptions{ - LabelSelector: selector, - }) - } else { - dcList, err = c.appsClient.DeploymentConfigs(c.Namespace).List(metav1.ListOptions{ - FieldSelector: fields.Set{"metadata.namespace": c.Namespace}.AsSelector().String(), - }) - } - if err != nil { - return nil, errors.Wrap(err, "unable to list DeploymentConfigs") - } - return dcList.Items, nil -} - -// GetServicesFromSelector returns an array of Service resources which match the -// given selector -func (c *Client) GetServicesFromSelector(selector string) ([]corev1.Service, error) { - serviceList, err := c.kubeClient.KubeClient.CoreV1().Services(c.Namespace).List(metav1.ListOptions{ - LabelSelector: selector, - }) - if err != nil { - return nil, errors.Wrap(err, "unable to list Services") - } - return serviceList.Items, nil -} - -// GetDeploymentConfigFromName returns the Deployment Config resource given -// the Deployment Config name -func (c *Client) GetDeploymentConfigFromName(name string) (*appsv1.DeploymentConfig, error) { - klog.V(3).Infof("Getting DeploymentConfig: %s", name) - deploymentConfig, err := c.appsClient.DeploymentConfigs(c.Namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return deploymentConfig, nil -} - -// GetOneDeploymentConfigFromSelector returns the Deployment Config object associated -// with the given selector. -// An error is thrown when exactly one Deployment Config is not found for the -// selector. -func (c *Client) GetOneDeploymentConfigFromSelector(selector string) (*appsv1.DeploymentConfig, error) { - deploymentConfigs, err := c.GetDeploymentConfigsFromSelector(selector) - if err != nil { - return nil, errors.Wrapf(err, "unable to get DeploymentConfigs for the selector: %v", selector) - } - - numDC := len(deploymentConfigs) - if numDC == 0 { - return nil, fmt.Errorf("no Deployment Config was found for the selector: %v", selector) - } else if numDC > 1 { - return nil, fmt.Errorf("multiple Deployment Configs exist for the selector: %v. Only one must be present", selector) - } - - return &deploymentConfigs[0], nil -} - -// GetOnePodFromSelector returns the Pod object associated with the given selector. -// An error is thrown when exactly one Pod is not found. -func (c *Client) GetOnePodFromSelector(selector string) (*corev1.Pod, error) { - - pods, err := c.kubeClient.KubeClient.CoreV1().Pods(c.Namespace).List(metav1.ListOptions{ - LabelSelector: selector, - }) - if err != nil { - return nil, errors.Wrapf(err, "unable to get Pod for the selector: %v", selector) - } - numPods := len(pods.Items) - if numPods == 0 { - return nil, fmt.Errorf("no Pod was found for the selector: %v", selector) - } else if numPods > 1 { - return nil, fmt.Errorf("multiple Pods exist for the selector: %v. 
Only one must be present", selector) - } - - return &pods.Items[0], nil -} - -// GetOneServiceFromSelector returns the Service object associated with the -// given selector. -// An error is thrown when exactly one Service is not found for the selector -func (c *Client) GetOneServiceFromSelector(selector string) (*corev1.Service, error) { - services, err := c.GetServicesFromSelector(selector) - if err != nil { - return nil, errors.Wrapf(err, "unable to get services for the selector: %v", selector) - } - - numServices := len(services) - if numServices == 0 { - return nil, fmt.Errorf("no Service was found for the selector: %v", selector) - } else if numServices > 1 { - return nil, fmt.Errorf("multiple Services exist for the selector: %v. Only one must be present", selector) - } - - return &services[0], nil -} - // AddEnvironmentVariablesToDeploymentConfig adds the given environment // variables to the only container in the Deployment Config and updates in the // cluster @@ -2661,16 +2137,6 @@ func (c *Client) ExtractProjectToComponent(compInfo common.ComponentInfo, target return nil } -// BuildPortForwardReq builds a port forward request -func (c *Client) BuildPortForwardReq(podName string) *rest.Request { - return c.kubeClient.KubeClient.CoreV1().RESTClient(). - Post(). - Resource("pods"). - Namespace(c.Namespace). - Name(podName). - SubResource("portforward") -} - func (c *Client) GetKubeClient() *kclient.Client { return c.kubeClient } @@ -2833,29 +2299,6 @@ func (c *Client) PropagateDeletes(targetPodName string, delSrcRelPaths []string, return err } -// StartDeployment instantiates a given deployment -// deploymentName is the name of the deployment to instantiate -func (c *Client) StartDeployment(deploymentName string) (string, error) { - if deploymentName == "" { - return "", errors.Errorf("deployment name is empty") - } - klog.V(3).Infof("Deployment %s started.", deploymentName) - deploymentRequest := appsv1.DeploymentRequest{ - Name: deploymentName, - // latest is set to true to prevent image name resolution issue - // inspired from https://github.com/openshift/origin/blob/882ed02142fbf7ba16da9f8efeb31dab8cfa8889/pkg/oc/cli/rollout/latest.go#L194 - Latest: true, - Force: true, - } - result, err := c.appsClient.DeploymentConfigs(c.Namespace).Instantiate(deploymentName, &deploymentRequest) - if err != nil { - return "", errors.Wrapf(err, "unable to instantiate Deployment for %s", deploymentName) - } - klog.V(3).Infof("Deployment %s for DeploymentConfig %s triggered.", deploymentName, result.Name) - - return result.Name, nil -} - func injectS2IPaths(existingVars []corev1.EnvVar, s2iPaths S2IPaths) []corev1.EnvVar { return uniqueAppendOrOverwriteEnvVars( existingVars, @@ -2887,14 +2330,6 @@ func injectS2IPaths(existingVars []corev1.EnvVar, s2iPaths S2IPaths) []corev1.En } -// IsDeploymentConfigSupported checks if DeploymentConfig type is present on the cluster -func (c *Client) IsDeploymentConfigSupported() (bool, error) { - const Group = "apps.openshift.io" - const Version = "v1" - - return c.isResourceSupported(Group, Version, "deploymentconfigs") -} - func isSubDir(baseDir, otherDir string) bool { cleanedBaseDir := filepath.Clean(baseDir) cleanedOtherDir := filepath.Clean(otherDir) diff --git a/pkg/occlient/occlient_test.go b/pkg/occlient/occlient_test.go index fd27fec8fca..89d85747fad 100644 --- a/pkg/occlient/occlient_test.go +++ b/pkg/occlient/occlient_test.go @@ -21,14 +21,12 @@ import ( componentlabels "github.com/openshift/odo/pkg/component/labels" 
"github.com/openshift/odo/pkg/config" "github.com/openshift/odo/pkg/devfile/adapters/common" - "github.com/openshift/odo/pkg/log" "github.com/openshift/odo/pkg/testingutil" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/watch" ktesting "k8s.io/client-go/testing" ) @@ -205,29 +203,6 @@ func fakeBuildStatus(status buildv1.BuildPhase, buildName string) *buildv1.Build } } -func fakeEventStatus(podName string, eventWarningMessage string, count int32) *corev1.Event { - return &corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - }, - Type: "Warning", - Count: count, - Reason: eventWarningMessage, - Message: "Foobar", - } -} - -func fakePodStatus(status corev1.PodPhase, podName string) *corev1.Pod { - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - }, - Status: corev1.PodStatus{ - Phase: status, - }, - } -} - func fakeImageStreamImage(imageName string, ports []string, containerConfig string) *imagev1.ImageStreamImage { exposedPorts := make(map[string]struct{}) var s struct{} @@ -1172,63 +1147,6 @@ func TestGetExposedPorts(t *testing.T) { } } -func TestGetSecret(t *testing.T) { - tests := []struct { - name string - secretNS string - secretName string - wantErr bool - want *corev1.Secret - }{ - { - name: "Case: Valid request for retrieving a secret", - secretNS: "", - secretName: "foo", - want: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - }, - wantErr: false, - }, - { - name: "Case: Invalid request for retrieving a secret", - secretNS: "", - secretName: "foo2", - want: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fakeClient, fakeClientSet := FakeNew() - - // Fake getting Secret - fakeClientSet.Kubernetes.PrependReactor("get", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { - if tt.want.Name != tt.secretName { - return true, nil, fmt.Errorf("'get' called with a different secret name") - } - return true, tt.want, nil - }) - - returnValue, err := fakeClient.GetSecret(tt.secretName, tt.secretNS) - - // Check for validating return value - if err == nil && returnValue != tt.want { - t.Errorf("error in return value got: %v, expected %v", returnValue, tt.want) - } - - if !tt.wantErr == (err != nil) { - t.Errorf("\nclient.GetSecret(secretNS, secretName) unexpected error %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - func TestCreateServiceBinding(t *testing.T) { tests := []struct { name string @@ -1572,72 +1490,6 @@ func TestUnlinkSecret(t *testing.T) { } } -func TestListSecrets(t *testing.T) { - - tests := []struct { - name string - secretList corev1.SecretList - output []corev1.Secret - wantErr bool - }{ - { - name: "Case 1: Ensure secrets are properly listed", - secretList: corev1.SecretList{ - Items: []corev1.Secret{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "secret1", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "secret2", - }, - }, - }, - }, - output: []corev1.Secret{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "secret1", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "secret2", - }, - }, - }, - - wantErr: false, - }, - } - - for _, tt := range tests { - client, fakeClientSet := FakeNew() - - fakeClientSet.Kubernetes.PrependReactor("list", "secrets", func(action ktesting.Action) (bool, 
runtime.Object, error) { - return true, &tt.secretList, nil - }) - - secretsList, err := client.ListSecrets("") - - if !reflect.DeepEqual(tt.output, secretsList) { - t.Errorf("expected output: %#v,got: %#v", tt.secretList, secretsList) - } - - if err == nil && !tt.wantErr { - if len(fakeClientSet.Kubernetes.Actions()) != 1 { - t.Errorf("expected 1 action in ListSecrets got: %v", fakeClientSet.Kubernetes.Actions()) - } - } else if err == nil && tt.wantErr { - t.Error("test failed, expected: false, got true") - } else if err != nil && !tt.wantErr { - t.Errorf("test failed, expected: no error, got error: %s", err.Error()) - } - } -} - func TestGetImageStream(t *testing.T) { tests := []struct { name string @@ -1952,366 +1804,6 @@ func TestWaitForBuildToFinish(t *testing.T) { } -func TestCollectEvents(t *testing.T) { - tests := []struct { - name string - podName string - eventWarningMessage string - }{ - { - name: "Case 1: Collect an arbitrary amount of events", - podName: "ruby", - eventWarningMessage: "Fake event warning message", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - // Create a fake client - fakeClient, fakeClientSet := FakeNew() - fakeEventWatch := watch.NewRaceFreeFake() - podSelector := fmt.Sprintf("deploymentconfig=%s", tt.podName) - - // Create a fake event status / watch reactor for faking the events we are collecting - fakeEvent := fakeEventStatus(tt.podName, tt.eventWarningMessage, 10) - go func(event *corev1.Event) { - fakeEventWatch.Add(event) - }(fakeEvent) - - fakeClientSet.Kubernetes.PrependWatchReactor("events", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { - return true, fakeEventWatch, nil - }) - - // Create a test spinner / variables / quit variable for the go channel - spinner := log.Spinner("Test spinner") - events := make(map[string]corev1.Event) - quit := make(chan int) - go fakeClient.CollectEvents(podSelector, events, spinner, quit) - - // Sleep in order to make sure we actually collect some events - time.Sleep(2 * time.Second) - close(quit) - - // We make sure to lock in order to prevent race conditions when retrieving the events (since they are a pointer - // by default since we pass in a map) - mu.Lock() - if len(events) == 0 { - t.Errorf("Expected events, got none") - } - mu.Unlock() - - // Collect the first event in the map - var firstEvent corev1.Event - for _, val := range events { - firstEvent = val - } - - if !strings.Contains(firstEvent.Reason, tt.eventWarningMessage) { - t.Errorf("expected warning message: '%s' in event message: '%+v'", tt.eventWarningMessage, firstEvent.Reason) - } - - }) - } -} - -// NOTE: We do *not* collection the amount of actions taken in this function as there could be any number of fake -// 'event' actions that are happening in the background. 
-func TestWaitAndGetPod(t *testing.T) { - tests := []struct { - name string - podName string - status corev1.PodPhase - wantEventWarning bool - wantErr bool - eventWarningMessage string - }{ - { - name: "Case 1: Pod running", - podName: "ruby", - status: corev1.PodRunning, - wantEventWarning: false, - wantErr: false, - }, - { - name: "Case 2: Pod failed", - podName: "ruby", - status: corev1.PodFailed, - wantEventWarning: false, - wantErr: true, - }, - { - name: "Case 3: Pod unknown", - podName: "ruby", - status: corev1.PodUnknown, - wantEventWarning: false, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - fakeClient, fakeClientSet := FakeNew() - fakePodWatch := watch.NewRaceFreeFake() - - // Watch for Pods - fakePod := fakePodStatus(tt.status, tt.podName) - go func(pod *corev1.Pod) { - fakePodWatch.Modify(pod) - }(fakePod) - - // Prepend watch reactor (beginning of the chain) - fakeClientSet.Kubernetes.PrependWatchReactor("pods", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { - return true, fakePodWatch, nil - }) - - podSelector := fmt.Sprintf("deploymentconfig=%s", tt.podName) - - pod, err := fakeClient.WaitAndGetPod(podSelector, corev1.PodRunning, "Waiting for component to start") - - if !tt.wantErr == (err != nil) { - t.Errorf("client.WaitAndGetPod(string) unexpected error %v, wantErr %v", err, tt.wantErr) - return - } - - if err == nil { - if pod.Name != tt.podName { - t.Errorf("pod name is not matching to expected name, expected: %s, got %s", tt.podName, pod.Name) - } - } - - }) - } -} - -func TestWaitAndGetSecret(t *testing.T) { - - tests := []struct { - name string - secretName string - namespace string - wantErr bool - }{ - { - name: "Case 1: no error expected", - secretName: "ruby", - namespace: "dummy", - wantErr: false, - }, - - { - name: "Case 2: error expected", - secretName: "", - namespace: "dummy", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - fkclient, fkclientset := FakeNew() - fkWatch := watch.NewFake() - - // Change the status - go func() { - fkWatch.Modify(&corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: tt.secretName, - }, - }) - }() - - fkclientset.Kubernetes.PrependWatchReactor("secrets", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { - if len(tt.secretName) == 0 { - return true, nil, fmt.Errorf("error watching secret") - } - return true, fkWatch, nil - }) - - pod, err := fkclient.WaitAndGetSecret(tt.secretName, tt.namespace) - - if !tt.wantErr == (err != nil) { - t.Errorf(" client.WaitAndGetSecret(string, string) unexpected error %v, wantErr %v", err, tt.wantErr) - return - } - - if len(fkclientset.Kubernetes.Actions()) != 1 { - t.Errorf("expected 1 action in WaitAndGetSecret got: %v", fkclientset.Kubernetes.Actions()) - } - - if err == nil { - if pod.Name != tt.secretName { - t.Errorf("secret name is not matching to expected name, expected: %s, got %s", tt.secretName, pod.Name) - } - } - }) - } -} - -func TestCreateService(t *testing.T) { - tests := []struct { - name string - commonObjectMeta metav1.ObjectMeta - containerPorts []corev1.ContainerPort - wantErr bool - existingDC appsv1.DeploymentConfig - }{ - { - name: "Test case: with valid commonObjectName and containerPorts", - existingDC: *fakeDeploymentConfig("foo", "", nil, nil, t), - commonObjectMeta: metav1.ObjectMeta{ - Name: "nodejs", - Labels: map[string]string{ - "app": "apptmp", - "app.kubernetes.io/instance": "ruby", - 
"app.kubernetes.io/name": "ruby", - "app.kubernetes.io/part-of": "apptmp", - }, - Annotations: map[string]string{ - "app.openshift.io/vcs-uri": "https://github.com/openshift/ruby", - "app.kubernetes.io/component-source-type": "git", - }, - }, - containerPorts: []corev1.ContainerPort{ - { - Name: "8080-tcp", - ContainerPort: 8080, - Protocol: corev1.ProtocolTCP, - }, - { - Name: "9100-udp", - ContainerPort: 9100, - Protocol: corev1.ProtocolUDP, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fkclient, fkclientset := FakeNew() - - ownerReference := metav1.OwnerReference{ - APIVersion: "apps.openshift.io/v1", - Kind: "DeploymentConfig", - Name: tt.existingDC.Name, - UID: tt.existingDC.UID, - } - - _, err := fkclient.CreateService(tt.commonObjectMeta, tt.containerPorts, ownerReference) - - tt.commonObjectMeta.SetOwnerReferences(append(tt.commonObjectMeta.GetOwnerReferences(), ownerReference)) - - if err == nil && !tt.wantErr { - if len(fkclientset.Kubernetes.Actions()) != 1 { - t.Errorf("expected 1 Kubernetes.Actions() in CreateService, got: %v", fkclientset.ImageClientset.Actions()) - } - createdSvc := fkclientset.Kubernetes.Actions()[0].(ktesting.CreateAction).GetObject().(*corev1.Service) - if !reflect.DeepEqual(tt.commonObjectMeta, createdSvc.ObjectMeta) { - t.Errorf("ObjectMeta does not match the expected name, expected: %v, got: %v", tt.commonObjectMeta, createdSvc.ObjectMeta) - } - if !reflect.DeepEqual(tt.commonObjectMeta.Name, createdSvc.Spec.Selector["deploymentconfig"]) { - t.Errorf("selector value does not match the expected name, expected: %s, got: %s", tt.commonObjectMeta.Name, createdSvc.Spec.Selector["deploymentconfig"]) - } - for _, port := range tt.containerPorts { - found := false - for _, servicePort := range createdSvc.Spec.Ports { - if servicePort.Port == port.ContainerPort { - found = true - if servicePort.Protocol != port.Protocol { - t.Errorf("service protocol does not match the expected name, expected: %s, got: %s", port.Protocol, servicePort.Protocol) - } - if servicePort.Name != port.Name { - t.Errorf("service name does not match the expected name, expected: %s, got: %s", port.Name, servicePort.Name) - } - if servicePort.TargetPort != intstr.FromInt(int(port.ContainerPort)) { - t.Errorf("target port does not match the expected name, expected: %v, got: %v", intstr.FromInt(int(port.ContainerPort)), servicePort.TargetPort) - } - } - } - if found == false { - t.Errorf("expected service port %s not found in the created Service", tt.name) - break - } - } - } else if err == nil && tt.wantErr { - t.Error("error was expected, but no error was returned") - } else if err != nil && !tt.wantErr { - t.Errorf("test failed, no error was expected, but got unexpected error: %s", err) - } - }) - } -} - -func TestGetDeploymentConfigsFromSelector(t *testing.T) { - tests := []struct { - name string - selector string - label map[string]string - wantErr bool - }{ - { - name: "true case", - selector: "app.kubernetes.io/part-of=app", - label: map[string]string{ - "app.kubernetes.io/part-of": "app", - }, - wantErr: false, - }, - { - name: "true case", - selector: "app.kubernetes.io/part-of=app1", - label: map[string]string{ - "app.kubernetes.io/part-of": "app", - }, - wantErr: false, - }, - } - - listOfDC := appsv1.DeploymentConfigList{ - Items: []appsv1.DeploymentConfig{ - { - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app.kubernetes.io/part-of": "app", - }, - }, - }, - }, - } - - for _, tt := range tests { - 
t.Run(tt.name, func(t *testing.T) { - fakeClient, fakeClientSet := FakeNew() - - fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) { - if !reflect.DeepEqual(action.(ktesting.ListAction).GetListRestrictions().Labels.String(), tt.selector) { - return true, nil, fmt.Errorf("labels not matching with expected values, expected:%s, got:%s", tt.selector, action.(ktesting.ListAction).GetListRestrictions()) - } - return true, &listOfDC, nil - }) - dc, err := fakeClient.GetDeploymentConfigsFromSelector(tt.selector) - - if len(fakeClientSet.AppsClientset.Actions()) != 1 { - t.Errorf("expected 1 AppsClientset.Actions() in GetDeploymentConfigsFromSelector, got: %v", fakeClientSet.AppsClientset.Actions()) - } - - if tt.wantErr == false && err != nil { - t.Errorf("test failed, %#v", dc[0].Labels) - } - - for _, dc1 := range dc { - if !reflect.DeepEqual(dc1.Labels, tt.label) { - t.Errorf("labels are not matching with expected labels, expected: %s, got %s", tt.label, dc1.Labels) - } - } - - }) - } -} - func TestCreateServiceInstance(t *testing.T) { type args struct { serviceName string @@ -2672,106 +2164,6 @@ func TestGetClusterPlansFromServiceName(t *testing.T) { } } -func TestGetDeploymentConfigLabelValues(t *testing.T) { - type args struct { - deploymentConfigList appsv1.DeploymentConfigList - expectedOutput []string - } - tests := []struct { - applicationName string - name string - args args - wantErr bool - actions int - }{ - { - name: "Case 1 - Retrieve list", - applicationName: "app", - args: args{ - expectedOutput: []string{"app", "app2"}, - deploymentConfigList: appsv1.DeploymentConfigList{ - Items: []appsv1.DeploymentConfig{ - { - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app.kubernetes.io/part-of": "app", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app.kubernetes.io/part-of": "app2", - }, - }, - }, - }, - }, - }, - wantErr: false, - actions: 1, - }, - { - name: "Case 1 - Retrieve list, different order", - applicationName: "app", - args: args{ - expectedOutput: []string{"app", "app2"}, - deploymentConfigList: appsv1.DeploymentConfigList{ - Items: []appsv1.DeploymentConfig{ - { - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app.kubernetes.io/part-of": "app2", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app.kubernetes.io/part-of": "app", - }, - }, - }, - }, - }, - }, - wantErr: false, - actions: 1, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - fakeClient, fakeClientSet := FakeNew() - - fakeClientSet.AppsClientset.PrependReactor("list", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) { - return true, &tt.args.deploymentConfigList, nil - }) - - // Run function GetServiceInstanceLabelValues - list, err := fakeClient.GetDeploymentConfigLabelValues(applabels.ApplicationLabel, applabels.ApplicationLabel) - - if err == nil && !tt.wantErr { - - // Compare arrays - if !reflect.DeepEqual(list, tt.args.expectedOutput) { - t.Errorf("expected %s output, got %s", tt.args.expectedOutput, list) - } - - if (len(fakeClientSet.AppsClientset.Actions()) != tt.actions) && !tt.wantErr { - t.Errorf("expected %v action(s) in GetServiceInstanceLabelValues got %v: %v", tt.actions, len(fakeClientSet.AppsClientset.Actions()), fakeClientSet.AppsClientset.Actions()) - } - - } else if err == nil && tt.wantErr { - t.Error("test failed, expected: false, got true") - } 
else if err != nil && !tt.wantErr { - t.Errorf("test failed, expected: no error, got error: %s", err.Error()) - } - - }) - } -} - func TestGetServiceInstanceLabelValues(t *testing.T) { type args struct { serviceList scv1beta1.ServiceInstanceList @@ -5252,79 +4644,6 @@ func Test_findContainer(t *testing.T) { } } -func TestWaitAndGetDC(t *testing.T) { - type args struct { - name string - annotation string - value string - dc appsv1.DeploymentConfig - timeout time.Duration - } - tests := []struct { - name string - args args - wantErr bool - actions int - }{ - { - name: "Case 1 - Check that the function actually works", - args: args{ - name: "foo", - annotation: "app.kubernetes.io/component-source-type", - value: "git", - dc: *fakeDeploymentConfig("foo", "bar", - []corev1.EnvVar{}, []corev1.EnvFromSource{}, t), - timeout: 3 * time.Second, - }, - wantErr: false, - actions: 1, - }, - { - name: "Case 2 - Purposefully timeout / error", - args: args{ - name: "foo", - annotation: "app.kubernetes.io/component-source-type", - value: "foobar", - dc: *fakeDeploymentConfig("foo", "bar", - []corev1.EnvVar{}, []corev1.EnvFromSource{}, t), - timeout: 3 * time.Second, - }, - wantErr: true, - actions: 1, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fakeClient, fakeClientSet := FakeNew() - fkWatch := watch.NewFake() - go func() { - fkWatch.Modify(&tt.args.dc) - }() - fakeClientSet.AppsClientset.PrependWatchReactor("deploymentconfigs", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) { - return true, fkWatch, nil - }) - // Run function WaitAndGetDC - _, err := fakeClient.WaitAndGetDC(tt.args.name, 0, tt.args.timeout, func(*appsv1.DeploymentConfig, int64) bool { - return !tt.wantErr - }) - // Error checking WaitAndGetDC - if !tt.wantErr == (err != nil) { - t.Errorf(" client.WaitAndGetDC() unexpected error %v, wantErr %v", err, tt.wantErr) - } - if err == nil && !tt.wantErr { - // Check to see how many actions are being ran - if (len(fakeClientSet.AppsClientset.Actions()) != tt.actions) && !tt.wantErr { - t.Errorf("expected %v action(s) in WaitAndGetDC got %v: %v", tt.actions, len(fakeClientSet.AppsClientset.Actions()), fakeClientSet.AppsClientset.Actions()) - } - } else if err == nil && tt.wantErr { - t.Error("test failed, expected: false, got true") - } else if err != nil && !tt.wantErr { - t.Errorf("test failed, expected: no error, got error: %s", err.Error()) - } - }) - } -} - func TestCreateBuildConfig(t *testing.T) { type args struct { commonObjectMeta metav1.ObjectMeta @@ -5574,63 +4893,6 @@ func TestGetMatchingPlans(t *testing.T) { }) } -func TestStartDeployment(t *testing.T) { - tests := []struct { - name string - deploymentName string - wantErr bool - }{ - { - name: "Case 1: Testing valid name", - deploymentName: "ruby", - wantErr: false, - }, - { - name: "Case 2: Testing invalid name", - deploymentName: "", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - fkclient, fkclientset := FakeNew() - - fkclientset.AppsClientset.PrependReactor("create", "deploymentconfigs", func(action ktesting.Action) (bool, runtime.Object, error) { - deploymentConfig := appsv1.DeploymentConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: tt.deploymentName, - }, - } - return true, &deploymentConfig, nil - }) - - _, err := fkclient.StartDeployment(tt.deploymentName) - if !tt.wantErr == (err != nil) { - t.Errorf(" client.StartDeployment(string) unexpected error %v, wantErr %v", err, tt.wantErr) - } - - if err == nil { - - if 
len(fkclientset.AppsClientset.Actions()) != 1 { - t.Errorf("expected 1 action in StartDeployment got: %v", fkclientset.AppsClientset.Actions()) - } else { - startedDeployment := fkclientset.AppsClientset.Actions()[0].(ktesting.CreateAction).GetObject().(*appsv1.DeploymentRequest) - - if startedDeployment.Name != tt.deploymentName { - t.Errorf("deployment name is not matching to expected name, expected: %s, got %s", tt.deploymentName, startedDeployment.Name) - } - - if startedDeployment.Latest == false { - t.Errorf("deployment is not set to latest") - } - } - } - }) - } -} - func TestGetPortsFromBuilderImage(t *testing.T) { type args struct { diff --git a/pkg/occlient/pods.go b/pkg/occlient/pods.go deleted file mode 100644 index b5f7a689786..00000000000 --- a/pkg/occlient/pods.go +++ /dev/null @@ -1,22 +0,0 @@ -package occlient - -import ( - "fmt" - componentlabels "github.com/openshift/odo/pkg/component/labels" - "github.com/openshift/odo/pkg/util" - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" -) - -func (c *Client) GetPodUsingComponentName(componentName, appName string) (*corev1.Pod, error) { - componentLabels := componentlabels.GetLabels(componentName, appName, false) - componentSelector := util.ConvertLabelsToSelector(componentLabels) - dc, err := c.GetOneDeploymentConfigFromSelector(componentSelector) - if err != nil { - return nil, errors.Wrap(err, "unable to get deployment for component") - } - // Find Pod for component - podSelector := fmt.Sprintf("deploymentconfig=%s", dc.Name) - - return c.GetOnePodFromSelector(podSelector) -} diff --git a/pkg/occlient/utils.go b/pkg/occlient/utils.go index 9c6fcca420e..83c01eb330f 100644 --- a/pkg/occlient/utils.go +++ b/pkg/occlient/utils.go @@ -2,6 +2,8 @@ package occlient import ( "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" appsv1 "github.com/openshift/api/apps/v1" imagev1 "github.com/openshift/api/image/v1" @@ -128,3 +130,26 @@ func GetS2IEnvForDevfile(sourceType string, env config.EnvVarList, imageStreamIm return configEnvs, nil } + +// generateServiceSpec generates the service spec for s2i components +func generateServiceSpec(commonObjectMeta metav1.ObjectMeta, containerPorts []corev1.ContainerPort) corev1.ServiceSpec { + // generate the Service spec + var svcPorts []corev1.ServicePort + for _, containerPort := range containerPorts { + svcPort := corev1.ServicePort{ + + Name: containerPort.Name, + Port: containerPort.ContainerPort, + Protocol: containerPort.Protocol, + TargetPort: intstr.FromInt(int(containerPort.ContainerPort)), + } + svcPorts = append(svcPorts, svcPort) + } + + return corev1.ServiceSpec{ + Ports: svcPorts, + Selector: map[string]string{ + "deploymentconfig": commonObjectMeta.Name, + }, + } +} diff --git a/pkg/odo/cli/component/common_link.go b/pkg/odo/cli/component/common_link.go index 02599646ae9..6e6480333b9 100644 --- a/pkg/odo/cli/component/common_link.go +++ b/pkg/odo/cli/component/common_link.go @@ -259,11 +259,11 @@ func (o *commonLinkOptions) validate(wait bool) (err error) { // service is in running state. 
// This can take a long time to occur if the image of the service has yet to be downloaded log.Progressf("Waiting for secret of service %s to come up", o.secretName) - _, err = o.Client.WaitAndGetSecret(o.secretName, o.Project) + _, err = o.Client.GetKubeClient().WaitAndGetSecret(o.secretName, o.Project) } else { // we also need to check whether there is a secret with the same name as the service // the secret should have been created along with the service - _, err = o.Client.GetSecret(o.secretName, o.Project) + _, err = o.Client.GetKubeClient().GetSecret(o.secretName, o.Project) if err != nil { return fmt.Errorf("The service %s created by 'odo service create' is being provisioned. You may have to wait a few seconds until OpenShift fully provisions it before executing 'odo %s'.", o.secretName, o.operationName) } @@ -353,7 +353,7 @@ func (o *commonLinkOptions) run() (err error) { return fmt.Errorf("unknown operation %s", o.operationName) } - secret, err := o.Client.GetSecret(o.secretName, o.Project) + secret, err := o.Client.GetKubeClient().GetSecret(o.secretName, o.Project) if err != nil { return err } @@ -415,13 +415,13 @@ func (o *commonLinkOptions) waitForLinkToComplete() (err error) { // first wait for the pod to be pending (meaning that the deployment is being put into effect) // we need this intermediate wait because there is a chance that this point could be reached // without Openshift having had the time to launch the new deployment - _, err = o.Client.WaitAndGetPod(podSelector, corev1.PodPending, "Waiting for component to redeploy") + _, err = o.Client.GetKubeClient().WaitAndGetPodWithEvents(podSelector, corev1.PodPending, "Waiting for component to redeploy") if err != nil { return err } // now wait for the pod to be running - _, err = o.Client.WaitAndGetPod(podSelector, corev1.PodRunning, "Waiting for component to start") + _, err = o.Client.GetKubeClient().WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, "Waiting for component to start") return err } diff --git a/pkg/odo/cli/service/create.go b/pkg/odo/cli/service/create.go index bc54fe31585..9fd608fb38d 100644 --- a/pkg/odo/cli/service/create.go +++ b/pkg/odo/cli/service/create.go @@ -483,7 +483,7 @@ func (o *ServiceCreateOptions) Run() (err error) { if o.wait { s = log.Spinner("Waiting for service to come up") - _, err = o.Client.WaitAndGetSecret(o.ServiceName, o.Project) + _, err = o.Client.GetKubeClient().WaitAndGetSecret(o.ServiceName, o.Project) if err == nil { s.End(true) log.Successf(`Service '%s' is ready for use`, o.ServiceName) diff --git a/pkg/odo/util/cmdutils.go b/pkg/odo/util/cmdutils.go index 9bce194ecf2..635ce38b6b2 100644 --- a/pkg/odo/util/cmdutils.go +++ b/pkg/odo/util/cmdutils.go @@ -189,7 +189,7 @@ func PrintComponentInfo(client *occlient.Client, currentComponentName string, co for _, linkedService := range componentDesc.Status.LinkedServices { // Let's also get the secrets / environment variables that are being passed in.. 
(if there are any) - secrets, err := client.GetSecret(linkedService, project) + secrets, err := client.GetKubeClient().GetSecret(linkedService, project) LogErrorAndExit(err, "") if len(secrets.Data) > 0 { diff --git a/pkg/secret/secret.go b/pkg/secret/secret.go index 82af770585c..0c7c2d41420 100644 --- a/pkg/secret/secret.go +++ b/pkg/secret/secret.go @@ -2,6 +2,7 @@ package secret import ( "fmt" + "github.com/openshift/odo/pkg/kclient" "strings" @@ -17,7 +18,7 @@ import ( func DetermineSecretName(client *occlient.Client, componentName, applicationName, port string) (string, error) { labelSelector := fmt.Sprintf("%v=%v", applabels.ApplicationLabel, applicationName) + fmt.Sprintf(",%v=%v", componentlabels.ComponentLabel, componentName) - secrets, err := client.ListSecrets(labelSelector) + secrets, err := client.GetKubeClient().ListSecrets(labelSelector) if err != nil { return "", err } @@ -40,7 +41,7 @@ Please delete the component and recreate it using 'odo create'`, componentName) // search each secret to see which port is corresponds to for _, secret := range secrets { - if secret.Annotations[occlient.ComponentPortAnnotationName] == port { + if secret.Annotations[kclient.ComponentPortAnnotationName] == port { return secret.Name, nil } } @@ -52,7 +53,7 @@ Please delete the component and recreate it using 'odo create'`, componentName) func availablePorts(secrets []corev1.Secret) []string { ports := make([]string, 0, len(secrets)) for _, secret := range secrets { - ports = append(ports, secret.Annotations[occlient.ComponentPortAnnotationName]) + ports = append(ports, secret.Annotations[kclient.ComponentPortAnnotationName]) } return ports } diff --git a/pkg/secret/secret_test.go b/pkg/secret/secret_test.go index a117bcd3e72..75222bd1e45 100644 --- a/pkg/secret/secret_test.go +++ b/pkg/secret/secret_test.go @@ -1,6 +1,7 @@ package secret import ( + "github.com/openshift/odo/pkg/kclient" "testing" applabels "github.com/openshift/odo/pkg/application/labels" @@ -52,7 +53,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8080", + kclient.ComponentPortAnnotationName: "8080", }, }, }, @@ -77,7 +78,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8080", + kclient.ComponentPortAnnotationName: "8080", }, }, }, @@ -90,7 +91,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8080", + kclient.ComponentPortAnnotationName: "8080", }, }, }, @@ -115,7 +116,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8080", + kclient.ComponentPortAnnotationName: "8080", }, }, }, @@ -128,7 +129,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8443", + kclient.ComponentPortAnnotationName: "8443", }, }, }, @@ -153,7 +154,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8080", + kclient.ComponentPortAnnotationName: "8080", }, }, }, @@ -166,7 +167,7 @@ func TestGetServiceInstanceList(t *testing.T) { 
componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8443", + kclient.ComponentPortAnnotationName: "8443", }, }, }, @@ -191,7 +192,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8443", + kclient.ComponentPortAnnotationName: "8443", }, }, }, @@ -204,7 +205,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8080", + kclient.ComponentPortAnnotationName: "8080", }, }, }, @@ -217,7 +218,7 @@ func TestGetServiceInstanceList(t *testing.T) { componentlabels.ComponentTypeLabel: "java", }, Annotations: map[string]string{ - occlient.ComponentPortAnnotationName: "8779", + kclient.ComponentPortAnnotationName: "8779", }, }, }, diff --git a/pkg/service/service.go b/pkg/service/service.go index 293e4d63fd1..3edaeae69cb 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -117,7 +117,7 @@ func DeleteServiceAndUnlinkComponents(client *occlient.Client, serviceName strin // lookup all the components of the application applicationSelector := fmt.Sprintf("%s=%s", applabels.ApplicationLabel, applicationName) - componentsDCs, err := client.GetDeploymentConfigsFromSelector(applicationSelector) + componentsDCs, err := client.ListDeploymentConfigs(applicationSelector) if err != nil { return errors.Wrapf(err, "unable to list the components in order to check if they need to be unlinked") } @@ -252,7 +252,7 @@ func ListWithDetailedStatus(client *occlient.Client, applicationName string) (Se } // retrieve secrets in order to set status - secrets, err := client.ListSecrets("") + secrets, err := client.GetKubeClient().ListSecrets("") if err != nil { return ServiceList{}, errors.Wrapf(err, "unable to list secrets as part of the bindings check") } @@ -265,7 +265,7 @@ func ListWithDetailedStatus(client *occlient.Client, applicationName string) (Se applabels.ApplicationLabel: applicationName, } applicationSelector := util.ConvertLabelsToSelector(labels) - deploymentConfigs, err := client.GetDeploymentConfigsFromSelector(applicationSelector) + deploymentConfigs, err := client.ListDeploymentConfigs(applicationSelector) if err != nil { return ServiceList{}, err } diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index a2f1b1985f6..34e04693355 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -67,7 +67,7 @@ func Unmount(client *occlient.Client, storageName string, componentName string, // Get DeploymentConfig for the given component componentLabels := componentlabels.GetLabels(componentName, applicationName, false) componentSelector := util.ConvertLabelsToSelector(componentLabels) - dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector) + dc, err := client.GetDeploymentConfigFromSelector(componentSelector) if err != nil { return errors.Wrapf(err, "unable to get Deployment Config for component: %v in application: %v", componentName, applicationName) } @@ -120,7 +120,7 @@ func List(client *occlient.Client, componentName string, applicationName string) componentLabels := componentlabels.GetLabels(componentName, applicationName, false) componentSelector := util.ConvertLabelsToSelector(componentLabels) - dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector) + dc, err := client.GetDeploymentConfigFromSelector(componentSelector) if err != nil { return 
StorageList{}, errors.Wrapf(err, "unable to get Deployment Config associated with component %v", componentName) } @@ -349,7 +349,7 @@ func Mount(client *occlient.Client, path string, storageName string, componentNa // Get DeploymentConfig for the given component componentLabels := componentlabels.GetLabels(componentName, applicationName, false) componentSelector := util.ConvertLabelsToSelector(componentLabels) - dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector) + dc, err := client.GetDeploymentConfigFromSelector(componentSelector) if err != nil { return errors.Wrapf(err, "unable to get Deployment Config for component: %v in application: %v", componentName, applicationName) } diff --git a/pkg/testingutil/services.go b/pkg/testingutil/services.go index 74fb281c526..57160dc0dbe 100644 --- a/pkg/testingutil/services.go +++ b/pkg/testingutil/services.go @@ -7,6 +7,7 @@ import ( scv1beta1 "github.com/kubernetes-sigs/service-catalog/pkg/apis/servicecatalog/v1beta1" applabels "github.com/openshift/odo/pkg/application/labels" componentlabels "github.com/openshift/odo/pkg/component/labels" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -157,3 +158,22 @@ func FakeServiceClassInstance(serviceInstanceName string, serviceClassName strin } return service } + +func FakeKubeService(componentName, serviceName string) corev1.Service { + return corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Labels: map[string]string{ + "component-name": componentName, + }, + }, + } +} + +func FakeKubeServices(componentName string) []corev1.Service { + return []corev1.Service{ + FakeKubeService(componentName, "service-1"), + FakeKubeService(componentName, "service-2"), + FakeKubeService(componentName, "service-3"), + } +}
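
The hunks above move the plain-Kubernetes helpers (secrets, services, pods, events) out of occlient and behind GetKubeClient(), while occlient keeps only the OpenShift-specific DeploymentConfig helpers under their new names (GetDeploymentConfigFromSelector, ListDeploymentConfigs). As a rough caller-side sketch of the resulting call pattern — not part of the patch itself; the function linkedPortFromSecret and its parameters are invented for illustration and it only strings together calls that the diff introduces:

package example

import (
	"fmt"

	"github.com/openshift/odo/pkg/kclient"
	"github.com/openshift/odo/pkg/occlient"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
)

// linkedPortFromSecret is a hypothetical caller that shows the split introduced by
// this patch: DeploymentConfig lookups stay on *occlient.Client, while secrets and
// pod waiting go through the embedded *kclient.Client.
func linkedPortFromSecret(client *occlient.Client, componentSelector, secretName, namespace string) (string, error) {
	// OpenShift-specific lookup, kept on occlient under its renamed form.
	dc, err := client.GetDeploymentConfigFromSelector(componentSelector)
	if err != nil {
		return "", errors.Wrap(err, "unable to get deployment config")
	}

	// Pod waiting now goes through kclient; WaitAndGetPodWithEvents replaces the
	// removed occlient WaitAndGetPod and also surfaces warning events while waiting.
	podSelector := fmt.Sprintf("deploymentconfig=%s", dc.Name)
	if _, err := client.GetKubeClient().WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, "Waiting for component to start"); err != nil {
		return "", err
	}

	// Secrets are read through kclient as well; the port annotation constant moved
	// from occlient to kclient together with the secret helpers.
	secret, err := client.GetKubeClient().GetSecret(secretName, namespace)
	if err != nil {
		return "", errors.Wrapf(err, "unable to get secret %s", secretName)
	}
	return secret.Annotations[kclient.ComponentPortAnnotationName], nil
}

The updated call sites in pkg/component, pkg/secret, pkg/service and pkg/odo/cli follow the same pattern, which is why the occlient-level implementations and their tests could be deleted in this change.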