Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactors, cleans and moves some resources to the new files. #4250

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 9 additions & 9 deletions pkg/component/component.go
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ func GetComponentPorts(client *occlient.Client, componentName string, applicatio
componentLabels := componentlabels.GetLabels(componentName, applicationName, false)
componentSelector := util.ConvertLabelsToSelector(componentLabels)

dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector)
dc, err := client.GetDeploymentConfigFromSelector(componentSelector)
if err != nil {
return nil, errors.Wrapf(err, "unable to fetch deployment configs for the selector %v", componentSelector)
}
Expand All @@ -200,7 +200,7 @@ func GetComponentLinkedSecretNames(client *occlient.Client, componentName string
componentLabels := componentlabels.GetLabels(componentName, applicationName, false)
componentSelector := util.ConvertLabelsToSelector(componentLabels)

dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector)
dc, err := client.GetDeploymentConfigFromSelector(componentSelector)
if err != nil {
return nil, errors.Wrapf(err, "unable to fetch deployment configs for the selector %v", componentSelector)
}
Expand Down Expand Up @@ -283,7 +283,7 @@ func CreateFromPath(client *occlient.Client, params occlient.CreateArgs) error {
}

podSelector := fmt.Sprintf("deploymentconfig=%s", selectorLabels)
_, err = client.WaitAndGetPod(podSelector, corev1.PodRunning, "Waiting for component to start")
_, err = client.GetKubeClient().WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, "Waiting for component to start")
if err != nil {
return err
}
Expand Down Expand Up @@ -664,15 +664,15 @@ func PushLocal(client *occlient.Client, componentName string, applicationName st
// Find DeploymentConfig for component
componentLabels := componentlabels.GetLabels(componentName, applicationName, false)
componentSelector := util.ConvertLabelsToSelector(componentLabels)
dc, err := client.GetOneDeploymentConfigFromSelector(componentSelector)
dc, err := client.GetDeploymentConfigFromSelector(componentSelector)
if err != nil {
return errors.Wrap(err, "unable to get deployment for component")
}
// Find Pod for component
podSelector := fmt.Sprintf("deploymentconfig=%s", dc.Name)

// Wait for Pod to be in running state otherwise we can't sync data to it.
pod, err := client.WaitAndGetPod(podSelector, corev1.PodRunning, "Waiting for component to start")
pod, err := client.GetKubeClient().WaitAndGetPodWithEvents(podSelector, corev1.PodRunning, "Waiting for component to start")
if err != nil {
return errors.Wrapf(err, "error while waiting for pod %s", podSelector)
}
Expand Down Expand Up @@ -899,7 +899,7 @@ func List(client *occlient.Client, applicationName string, localConfigInfo *conf

if deploymentConfigSupported && client != nil {
// retrieve all the deployment configs that are associated with this application
dcList, err := client.GetDeploymentConfigsFromSelector(applicationSelector)
dcList, err := client.ListDeploymentConfigs(applicationSelector)
if err != nil {
return ComponentList{}, errors.Wrapf(err, "unable to list components")
}
Expand Down Expand Up @@ -1503,13 +1503,13 @@ func getRemoteComponentMetadata(client *occlient.Client, componentName string, a
linkedComponents := make(map[string][]string)
linkedSecretNames := fromCluster.GetLinkedSecretNames()
for _, secretName := range linkedSecretNames {
secret, err := client.GetSecret(secretName, projectName)
secret, err := client.GetKubeClient().GetSecret(secretName, projectName)
if err != nil {
return Component{}, errors.Wrapf(err, "unable to get info about secret %s", secretName)
}
componentName, containsComponentLabel := secret.Labels[componentlabels.ComponentLabel]
if containsComponentLabel {
if port, ok := secret.Annotations[occlient.ComponentPortAnnotationName]; ok {
if port, ok := secret.Annotations[kclient.ComponentPortAnnotationName]; ok {
linkedComponents[componentName] = append(linkedComponents[componentName], port)
}
} else {
Expand Down Expand Up @@ -1537,7 +1537,7 @@ func GetLogs(client *occlient.Client, componentName string, applicationName stri
}

// Retrieve the logs
err = client.DisplayDeploymentConfigLog(namespacedOpenShiftObject, follow, stdout)
err = client.DisplayDeploymentConfigLog(namespacedOpenShiftObject, follow)
if err != nil {
return err
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/component/component_full_description.go
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ func (cfd *ComponentFullDescription) Print(client *occlient.Client) error {
for _, linkedService := range cfd.Status.LinkedServices {

// Let's also get the secrets / environment variables that are being passed in.. (if there are any)
secrets, err := client.GetSecret(linkedService, cfd.GetNamespace())
secrets, err := client.GetKubeClient().GetSecret(linkedService, cfd.GetNamespace())
if err != nil {
return err
}
Expand Down
2 changes: 1 addition & 1 deletion pkg/component/pushed_component.go
Original file line number Diff line number Diff line change
Expand Up @@ -234,7 +234,7 @@ func getType(component provider) (string, error) {
func GetPushedComponents(c *occlient.Client, applicationName string) (map[string]PushedComponent, error) {
applicationSelector := fmt.Sprintf("%s=%s", applabels.ApplicationLabel, applicationName)

dcList, err := c.GetDeploymentConfigsFromSelector(applicationSelector)
dcList, err := c.ListDeploymentConfigs(applicationSelector)
if err != nil {
if !isIgnorableError(err) {
return nil, err
Expand Down
9 changes: 2 additions & 7 deletions pkg/debug/portforward.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ func (f *DefaultPortForwarder) ForwardPorts(portPair string, stopChan, readyChan
return err
}

pod, err = f.client.GetPodUsingComponentName(f.componentName, f.appName)
pod, err = f.client.GetPodUsingDeploymentConfig(f.componentName, f.appName)
if err != nil {
return err
}
Expand All @@ -77,12 +77,7 @@ func (f *DefaultPortForwarder) ForwardPorts(portPair string, stopChan, readyChan
return err
}

var req *rest.Request
if f.kClient != nil && isDevfile {
req = f.kClient.GeneratePortForwardReq(pod.Name)
} else {
req = f.client.BuildPortForwardReq(pod.Name)
}
req := f.kClient.GeneratePortForwardReq(pod.Name)

dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
fw, err := portforward.New(dialer, []string{portPair}, stopChan, readyChan, f.Out, f.ErrOut)
Expand Down
66 changes: 66 additions & 0 deletions pkg/kclient/events.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
package kclient

import (
"fmt"
"github.com/openshift/odo/pkg/log"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"sync"
)

// mu guards concurrent access to the shared events map that CollectEvents
// writes to while its callers (e.g. tests) read from it, so that there are
// no data races on the map.
var mu sync.Mutex

const (
	// failedEventCount is the minimum Count a Warning event must reach
	// before CollectEvents records it and surfaces it on the spinner.
	failedEventCount = 5
)

// CollectEvents watches Kubernetes events in the client's namespace and
// records recurring Warning events (Count >= failedEventCount) into the
// supplied events map, updating the spinner's status message as they arrive.
// It is meant to be run in its own goroutine, so errors are logged rather
// than returned. It exits when quit is closed (or receives a value) or when
// the watch channel is closed.
//
// NOTE(review): the selector parameter is currently unused — the watch
// covers all events in the namespace, not just those matching selector.
func (c *Client) CollectEvents(selector string, events map[string]corev1.Event, spinner *log.Status, quit <-chan int) {

	// Start watching for events related to the pod; event collection is
	// best-effort, so a failure to watch is only a warning.
	eventWatcher, err := c.KubeClient.CoreV1().Events(c.Namespace).Watch(metav1.ListOptions{})
	if err != nil {
		log.Warningf("Unable to watch for events: %s", err)
		return
	}
	defer eventWatcher.Stop()

	// Endless collection loop; every early return below must NOT hold mu
	// (the original implementation returned while holding the lock on the
	// closed-channel and bad-conversion paths, deadlocking later Lock calls).
	for {
		select {
		case <-quit:
			klog.V(3).Info("Quitting collect events")
			return
		case val, ok := <-eventWatcher.ResultChan():
			if !ok {
				log.Warning("Watch channel was closed")
				return
			}
			e, isEvent := val.Object.(*corev1.Event)
			if !isEvent {
				log.Warning("Unable to convert object to event")
				return
			}

			// If there are many warning events happening during deployment,
			// let's log them. One-off warnings are ignored.
			if e.Type == "Warning" && e.Count >= failedEventCount {
				// Only the shared-map write needs the lock.
				mu.Lock()
				events[e.Name] = *e
				mu.Unlock()
				klog.V(3).Infof("Warning Event: Count: %d, Reason: %s, Message: %s", e.Count, e.Reason, e.Message)
				// Change the spinner message to show the warning
				spinner.WarningStatus(fmt.Sprintf("WARNING x%d: %s", e.Count, e.Reason))
			}
		}
	}
}
88 changes: 88 additions & 0 deletions pkg/kclient/events_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
package kclient

import (
"fmt"
"github.com/openshift/odo/pkg/log"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
ktesting "k8s.io/client-go/testing"
"strings"
"testing"
time "time"
)

// fakeEventStatus builds a Warning-typed event with the given name, reason
// and occurrence count, for feeding into a fake watch during tests.
func fakeEventStatus(podName string, eventWarningMessage string, count int32) *corev1.Event {
	event := corev1.Event{
		Type:    "Warning",
		Count:   count,
		Reason:  eventWarningMessage,
		Message: "Foobar",
	}
	event.ObjectMeta = metav1.ObjectMeta{Name: podName}
	return &event
}

// TestCollectEvents feeds a fake Warning event through a fake watch and
// verifies that CollectEvents records it in the shared events map.
func TestCollectEvents(t *testing.T) {
	tests := []struct {
		name                string
		podName             string
		eventWarningMessage string
	}{
		{
			name:                "Case 1: Collect an arbitrary amount of events",
			podName:             "ruby",
			eventWarningMessage: "Fake event warning message",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {

			// Create a fake client
			fakeClient, fakeClientSet := FakeNew()
			fakeEventWatch := watch.NewRaceFreeFake()
			podSelector := fmt.Sprintf("deploymentconfig=%s", tt.podName)

			// Create a fake event status / watch reactor for faking the events we are collecting
			fakeEvent := fakeEventStatus(tt.podName, tt.eventWarningMessage, 10)
			go func(event *corev1.Event) {
				fakeEventWatch.Add(event)
			}(fakeEvent)

			fakeClientSet.Kubernetes.PrependWatchReactor("events", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) {
				return true, fakeEventWatch, nil
			})

			// Create a test spinner / variables / quit variable for the go channel
			spinner := log.Spinner("Test spinner")
			events := make(map[string]corev1.Event)
			quit := make(chan int)
			go fakeClient.CollectEvents(podSelector, events, spinner, quit)

			// Sleep in order to make sure we actually collect some events
			time.Sleep(2 * time.Second)
			close(quit)

			// Hold the mutex for the ENTIRE read of the shared events map:
			// the CollectEvents goroutine may still be writing to it, so
			// both the len() check and the range iteration must happen
			// under the lock to avoid a data race (the previous version
			// iterated the map after unlocking).
			mu.Lock()
			if len(events) == 0 {
				t.Errorf("Expected events, got none")
			}

			// Grab an arbitrary event from the map (map iteration order is
			// random; with a single fake event there is only one anyway).
			var collectedEvent corev1.Event
			for _, val := range events {
				collectedEvent = val
			}
			mu.Unlock()

			if !strings.Contains(collectedEvent.Reason, tt.eventWarningMessage) {
				t.Errorf("expected warning message: '%s' in event message: '%+v'", tt.eventWarningMessage, collectedEvent.Reason)
			}

		})
	}
}
Loading