Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

resync routes 3Scale #1145

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
14 changes: 14 additions & 0 deletions pkg/products/threescale/objects_test.go
Expand Up @@ -305,6 +305,19 @@ var threescaleRoute5 = &v1.Route{
},
}

// threescaleRoute6 is a fixture for the sixth zync-managed route (a backend
// route) so tests exercising routesExist see the full expected route count.
var threescaleRoute6 = &v1.Route{
ObjectMeta: metav1.ObjectMeta{
Name: "3scale-system-provider-route-6",
Namespace: "3scale",
Labels: map[string]string{
// Label zync applies to routes it reconciles; identifies the target service.
"zync.3scale.net/route-to": "backend",
},
},
Spec: v1.RouteSpec{
Host: "3scale-admin.backend",
},
}

var postgres = &crov1.Postgres{
ObjectMeta: metav1.ObjectMeta{
Name: "threescale-postgres-test-installation",
Expand Down Expand Up @@ -445,6 +458,7 @@ func getSuccessfullTestPreReqs(integreatlyOperatorNamespace, threeScaleInstallat
threescaleRoute3,
threescaleRoute4,
threescaleRoute5,
threescaleRoute6,
postgres,
postgresSec,
redis,
Expand Down
95 changes: 69 additions & 26 deletions pkg/products/threescale/reconciler.go
Expand Up @@ -341,31 +341,6 @@ func (r *Reconciler) backupSystemSecrets(ctx context.Context, serverClient k8scl
return integreatlyv1alpha1.PhaseCompleted, nil
}

// createResource renders the named template into a kubernetes resource and
// submits it to the cluster. A resource that already exists is not an error;
// the existing object is left untouched and returned as-is.
func (r *Reconciler) createResource(ctx context.Context, resourceName string, serverClient k8sclient.Client) (runtime.Object, error) {
	// Lazily initialise the shared template parameter map before populating
	// the keys every template expects.
	if r.extraParams == nil {
		r.extraParams = map[string]string{}
	}
	r.extraParams["MonitoringKey"] = r.Config.GetLabelSelector()
	r.extraParams["Namespace"] = r.Config.GetNamespace()

	helper := monitoring.NewTemplateHelper(r.extraParams)

	resource, err := helper.CreateResource(resourceName)
	if err != nil {
		return nil, fmt.Errorf("createResource failed: %w", err)
	}

	// AlreadyExists is tolerated: the object was created on a previous
	// reconcile loop.
	if err := serverClient.Create(ctx, resource); err != nil && !k8serr.IsAlreadyExists(err) {
		return nil, fmt.Errorf("error creating resource: %w", err)
	}

	return resource, nil
}

func (r *Reconciler) getOauthClientSecret(ctx context.Context, serverClient k8sclient.Client) (string, error) {
oauthClientSecrets := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Expand Down Expand Up @@ -462,6 +437,7 @@ func (r *Reconciler) reconcileSMTPCredentials(ctx context.Context, serverClient
}

func (r *Reconciler) reconcileComponents(ctx context.Context, serverClient k8sclient.Client) (integreatlyv1alpha1.StatusPhase, error) {

fss, err := r.getBlobStorageFileStorageSpec(ctx, serverClient)
if err != nil {
return integreatlyv1alpha1.PhaseFailed, err
Expand Down Expand Up @@ -564,15 +540,82 @@ func (r *Reconciler) reconcileComponents(ctx context.Context, serverClient k8scl
if err != nil {
return integreatlyv1alpha1.PhaseFailed, err
}
return integreatlyv1alpha1.PhaseCompleted, nil
} else if err != nil {
return integreatlyv1alpha1.PhaseFailed, err
}
// Its not enough to just check if the system-provider route exists. This can exist but system-master, for example, may not
exist, err := r.routesExist(ctx, serverClient)
if err != nil {
return integreatlyv1alpha1.PhaseFailed, err
}
if exist {
return integreatlyv1alpha1.PhaseCompleted, nil
} else {
// If the system-provider route does not exist at this point (i.e. when Deployments are ready)
// we can force a resync of routes. see below for more details on why this is required:
// https://access.redhat.com/documentation/en-us/red_hat_3scale_api_management/2.7/html/operating_3scale/backup-restore#creating_equivalent_zync_routes
// This scenario will manifest during a backup and restore and also if the product ns was accidentally deleted.
return r.resyncRoutes(ctx, serverClient)
}
}

return integreatlyv1alpha1.PhaseInProgress, nil
}

// routesExist reports whether the full set of routes that zync is expected to
// have created is present in the 3Scale product namespace.
func (r *Reconciler) routesExist(ctx context.Context, serverClient k8sclient.Client) (bool, error) {
	// Number of routes a healthy installation exposes (system-provider,
	// system-master, backend, etc.).
	const expectedRoutes = 6

	listOpts := k8sclient.ListOptions{
		Namespace: r.Config.GetNamespace(),
	}

	foundRoutes := routev1.RouteList{}
	if err := serverClient.List(ctx, &foundRoutes, &listOpts); err != nil {
		return false, err
	}

	return len(foundRoutes.Items) >= expectedRoutes, nil
}

// resyncRoutes forces zync to recreate the 3Scale routes by executing the
// "zync:resync:domains" rake task inside a running system-sidekiq pod.
// This is required after a backup/restore or accidental namespace deletion;
// see:
// https://access.redhat.com/documentation/en-us/red_hat_3scale_api_management/2.7/html/operating_3scale/backup-restore#creating_equivalent_zync_routes
//
// Returns PhaseInProgress while waiting for a sidekiq pod or after kicking
// off the resync (the next reconcile verifies the routes now exist), and
// PhaseFailed when the remote command reports an error.
func (r *Reconciler) resyncRoutes(ctx context.Context, client k8sclient.Client) (integreatlyv1alpha1.StatusPhase, error) {
	ns := r.Config.GetNamespace()

	pods := &corev1.PodList{}
	listOpts := []k8sclient.ListOption{
		k8sclient.InNamespace(ns),
		k8sclient.MatchingLabels(map[string]string{"deploymentConfig": "system-sidekiq"}),
	}
	// BUG FIX: the List error was previously assigned and never checked, so an
	// API failure was silently misreported as "waiting on pod" below.
	if err := client.List(ctx, pods, listOpts...); err != nil {
		return integreatlyv1alpha1.PhaseFailed, err
	}

	// Pick the first running sidekiq pod; the rake task can run in any of them.
	podname := ""
	for _, pod := range pods.Items {
		if pod.Status.Phase == corev1.PodRunning {
			podname = pod.ObjectMeta.Name
			break
		}
	}

	if podname == "" {
		logrus.Info("Waiting on system-sidekiq pod to start, 3Scale install in progress")
		return integreatlyv1alpha1.PhaseInProgress, nil
	}

	stdout, stderr, err := resources.ExecuteRemoteCommand(ns, podname, "bundle exec rake zync:resync:domains")
	if err != nil {
		logrus.Errorf("Failed to resync 3Scale routes %v", err)
		return integreatlyv1alpha1.PhaseFailed, nil
	}
	if stderr != "" {
		logrus.Errorf("Error attempting to resync 3Scale routes %s", stderr)
		return integreatlyv1alpha1.PhaseFailed, nil
	}

	logrus.Infof("Resync 3Scale routes result: %s", stdout)
	// Stay in progress so the next reconcile confirms the routes were created.
	return integreatlyv1alpha1.PhaseInProgress, nil
}

func (r *Reconciler) reconcileBlobStorage(ctx context.Context, serverClient k8sclient.Client) (integreatlyv1alpha1.StatusPhase, error) {
logrus.Info("Reconciling blob storage")
ns := r.installation.Namespace
Expand Down
77 changes: 77 additions & 0 deletions pkg/resources/podHelper.go
@@ -0,0 +1,77 @@
package resources

import (
"bytes"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
kube "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
)

// ExecuteRemoteCommand runs a command inside the named pod (via the pod
// "exec" subresource) and waits for it to finish, returning the command's
// stdout and stderr output.
//
// The command is wrapped in "/bin/bash -c", so shell syntax is permitted.
// The exec targets the pod's default container.
func ExecuteRemoteCommand(ns string, podName string, command string) (string, string, error) {
	cmd := []string{
		"/bin/bash",
		"-c",
		command,
	}

	kubeClient, restConfig, err := getClient()
	if err != nil {
		return "", "", errors.Wrapf(err, "Failed to get client")
	}

	req := kubeClient.CoreV1().RESTClient().Post().Resource("pods").Name(podName).
		Namespace(ns).SubResource("exec")
	option := &v1.PodExecOptions{
		Command: cmd,
		Stdin:   false,
		Stdout:  true,
		Stderr:  true,
		// BUG FIX: TTY must be false when stderr is captured separately; with
		// a TTY allocated the API server merges stderr into stdout, so the
		// stderr return value would always be empty and callers checking it
		// could never detect failures.
		TTY: false,
	}
	req.VersionedParams(
		option,
		scheme.ParameterCodec,
	)
	exec, err := remotecommand.NewSPDYExecutor(restConfig, "POST", req.URL())
	if err != nil {
		return "", "", errors.Wrapf(err, "Failed executing command %s on %s/%s", command, ns, podName)
	}

	buf := &bytes.Buffer{}
	errBuf := &bytes.Buffer{}

	logrus.Infof("Executing command, %s, on pod, %s", command, podName)

	// Stream blocks until the remote command exits.
	err = exec.Stream(remotecommand.StreamOptions{
		Stdout: buf,
		Stderr: errBuf,
	})
	if err != nil {
		return "", "", errors.Wrapf(err, "Failed executing command %s on %s/%s", command, ns, podName)
	}

	return buf.String(), errBuf.String(), nil
}

// getClient builds a kubernetes clientset together with the rest config it
// was created from, using the standard kubeconfig loading rules
// (in-cluster config, $KUBECONFIG, ~/.kube/config).
func getClient() (*kube.Clientset, *restclient.Config, error) {
	kubeCfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		clientcmd.NewDefaultClientConfigLoadingRules(),
		&clientcmd.ConfigOverrides{},
	)

	// BUG FIX: this error was previously ignored and immediately overwritten
	// by the NewForConfig call, so a broken or missing kubeconfig surfaced as
	// a confusing downstream failure instead of a clear load error.
	restCfg, err := kubeCfg.ClientConfig()
	if err != nil {
		return nil, nil, errors.Wrapf(err, "Failed to load client config")
	}

	kubeClient, err := kube.NewForConfig(restCfg)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "Failed to generate new client")
	}
	return kubeClient, restCfg, nil
}
Expand Up @@ -25,8 +25,6 @@ Namespaces for manual deletion:
- redhat-rhmi-3scale
- redhat-rhmi-3scale-operator

**Note known bug:** 3scale is being stucked in "in progress" state after ns deletion - workaround: https://github.com/RHCloudServices/integreatly-help/blob/master/sops/2.x/backup_restore/restore_namespace.md#3scale

## Steps

1. By default, this test is not run as part of the functional test suite. To run the test as part of the functional test suite, run the following `makefile` command from the RHMI operator repo against a target cluster:
Expand Down
5 changes: 2 additions & 3 deletions test/common/namespace_restoration.go
Expand Up @@ -71,9 +71,8 @@ var (
FuseOperatorNamespace,
RHSSOUserProductOperatorNamespace,
RHSSOUserOperatorNamespace,
// Removing 3scale product ns from test as "bundle exec rake zync:resync:domains" needs to be executed in system-app pod terminal to restore properly
// ThreeScaleProductNamespace,
// ThreeScaleOperatorNamespace,
ThreeScaleProductNamespace,
ThreeScaleOperatorNamespace,
UPSProductNamespace,
UPSOperatorNamespace,
},
Expand Down