Backport of Proxy Lifecycle helm, connect-inject and acceptance tests into release/1.2.x (#2233) (#2482)

* Proxy Lifecycle helm, connect-inject and acceptance tests (#2233)


* disable lifecycle test

---------

Co-authored-by: Curt Bushko <cbushko@gmail.com>
Co-authored-by: Mike Morris <mikemorris@users.noreply.github.com>
Co-authored-by: Nitya Dhanushkodi <nitya@hashicorp.com>
4 people committed Jun 29, 2023
1 parent 9efbcba commit d4c4afb
Showing 21 changed files with 1,364 additions and 256 deletions.
3 changes: 3 additions & 0 deletions .changelog/2233.txt
@@ -0,0 +1,3 @@
```release-note:feature
Add support for configuring graceful shutdown proxy lifecycle management settings.
```
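
For context, these settings surface in the Helm chart under `connectInject.sidecarProxy.lifecycle`. A minimal values sketch: the key names are taken from the constants in the acceptance test added by this commit, and the values are illustrative (the test's comment suggests a chart default of 30 seconds for the grace period):

```yaml
# values.yaml (illustrative; key names taken from the test constants below)
connectInject:
  sidecarProxy:
    lifecycle:
      # Drain the proxy's inbound listeners when the pod begins terminating.
      defaultEnableShutdownDrainListeners: true
      # How long the sidecar keeps proxying traffic after shutdown begins.
      defaultShutdownGracePeriodSeconds: 30
```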
15 changes: 9 additions & 6 deletions acceptance/framework/config/config.go
@@ -46,12 +46,14 @@ type TestConfig struct {

DisablePeering bool

- HelmChartVersion     string
- ConsulImage          string
- ConsulK8SImage       string
- ConsulVersion        *version.Version
- EnvoyImage           string
- ConsulCollectorImage string
+ HelmChartVersion       string
+ ConsulImage            string
+ ConsulK8SImage         string
+ ConsulDataplaneImage   string
+ ConsulVersion          *version.Version
+ ConsulDataplaneVersion *version.Version
+ EnvoyImage             string
+ ConsulCollectorImage   string

HCPResourceID string

@@ -110,6 +112,7 @@ func (t *TestConfig) HelmValuesFromConfig() (map[string]string, error) {
setIfNotEmpty(helmValues, "global.image", t.ConsulImage)
setIfNotEmpty(helmValues, "global.imageK8S", t.ConsulK8SImage)
setIfNotEmpty(helmValues, "global.imageEnvoy", t.EnvoyImage)
setIfNotEmpty(helmValues, "global.imageConsulDataplane", t.ConsulDataplaneImage)

return helmValues, nil
}
34 changes: 19 additions & 15 deletions acceptance/framework/connhelper/connect_helper.go
@@ -50,8 +50,8 @@ type ConnectHelper struct {
// consulCluster is the cluster to use for the test.
consulCluster consul.Cluster

- // consulClient is the client used to test service mesh connectivity.
- consulClient *api.Client
+ // ConsulClient is the client used to test service mesh connectivity.
+ ConsulClient *api.Client
}

// Setup creates a new cluster using the New*Cluster function and assigns it
@@ -69,14 +69,14 @@ func (c *ConnectHelper) Setup(t *testing.T) {
func (c *ConnectHelper) Install(t *testing.T) {
logger.Log(t, "Installing Consul cluster")
c.consulCluster.Create(t)
- c.consulClient, _ = c.consulCluster.SetupConsulClient(t, c.Secure)
+ c.ConsulClient, _ = c.consulCluster.SetupConsulClient(t, c.Secure)
}

// Upgrade uses the existing Consul cluster and upgrades it using Helm values
// set by the Secure, AutoEncrypt, and HelmValues fields.
func (c *ConnectHelper) Upgrade(t *testing.T) {
require.NotNil(t, c.consulCluster, "consulCluster must be set before calling Upgrade (Call Install first).")
- require.NotNil(t, c.consulClient, "consulClient must be set before calling Upgrade (Call Install first).")
+ require.NotNil(t, c.ConsulClient, "ConsulClient must be set before calling Upgrade (Call Install first).")

logger.Log(t, "upgrading Consul cluster")
c.consulCluster.Upgrade(t, c.helmValues())
@@ -94,9 +94,9 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) {
// deployments because golang will execute them in reverse order
// (i.e. the last registered cleanup function will be executed first).
t.Cleanup(func() {
retrier := &retry.Timer{Timeout: 30 * time.Second, Wait: 100 * time.Millisecond}
retrier := &retry.Timer{Timeout: 60 * time.Second, Wait: 100 * time.Millisecond}
retry.RunWith(retrier, t, func(r *retry.R) {
- tokens, _, err := c.consulClient.ACL().TokenList(nil)
+ tokens, _, err := c.ConsulClient.ACL().TokenList(nil)
require.NoError(r, err)
for _, token := range tokens {
require.NotContains(r, token.Description, StaticServerName)
@@ -117,14 +117,18 @@ func (c *ConnectHelper) DeployClientAndServer(t *testing.T) {

// Check that both static-server and static-client have been injected and
// now have 2 containers.
- for _, labelSelector := range []string{"app=static-server", "app=static-client"} {
- 	podList, err := c.Ctx.KubernetesClient(t).CoreV1().Pods(c.Ctx.KubectlOptions(t).Namespace).List(context.Background(), metav1.ListOptions{
- 		LabelSelector: labelSelector,
- 	})
- 	require.NoError(t, err)
- 	require.Len(t, podList.Items, 1)
- 	require.Len(t, podList.Items[0].Spec.Containers, 2)
- }
+ retrier := &retry.Timer{Timeout: 300 * time.Second, Wait: 100 * time.Millisecond}
+ retry.RunWith(retrier, t, func(r *retry.R) {
+ 	for _, labelSelector := range []string{"app=static-server", "app=static-client"} {
+ 		podList, err := c.Ctx.KubernetesClient(t).CoreV1().Pods(c.Ctx.KubectlOptions(t).Namespace).List(context.Background(), metav1.ListOptions{
+ 			LabelSelector: labelSelector,
+ 		})
+ 		require.NoError(r, err)
+ 		require.Len(r, podList.Items, 1)
+ 		require.Len(r, podList.Items[0].Spec.Containers, 2)
+ 	}
+ })
}
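
The change above replaces a one-shot assertion with polling. A minimal sketch of the pattern, assuming a hypothetical `listPods` helper in place of the Kubernetes List call; the key point is that assertions take the `*retry.R`, so a failed check schedules another attempt instead of failing the test immediately the way `*testing.T` would:

```go
// Poll until the checks pass or the timer expires (300s here, re-checking every 100ms).
retrier := &retry.Timer{Timeout: 300 * time.Second, Wait: 100 * time.Millisecond}
retry.RunWith(retrier, t, func(r *retry.R) {
	podList, err := listPods() // hypothetical stand-in for the List call above
	require.NoError(r, err)
	require.Len(r, podList.Items, 1)                    // pod has been created
	require.Len(r, podList.Items[0].Spec.Containers, 2) // sidecar has been injected
})
```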

// TestConnectionFailureWithoutIntention ensures the connection to the static
@@ -142,7 +146,7 @@ func (c *ConnectHelper) TestConnectionFailureWithoutIntention(t *testing.T) {
// the static-client pod.
func (c *ConnectHelper) CreateIntention(t *testing.T) {
logger.Log(t, "creating intention")
- _, _, err := c.consulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{
+ _, _, err := c.ConsulClient.ConfigEntries().Set(&api.ServiceIntentionsConfigEntry{
Kind: api.ServiceIntentions,
Name: StaticServerName,
Sources: []*api.SourceIntention{
42 changes: 24 additions & 18 deletions acceptance/framework/flags/flags.go
@@ -34,14 +34,16 @@ type TestFlags struct {

flagEnableTransparentProxy bool

- flagHelmChartVersion      string
- flagConsulImage           string
- flagConsulK8sImage        string
- flagConsulVersion         string
- flagEnvoyImage            string
- flagConsulCollectorImage  string
- flagVaultHelmChartVersion string
- flagVaultServerVersion    string
+ flagHelmChartVersion       string
+ flagConsulImage            string
+ flagConsulK8sImage         string
+ flagConsulVersion          string
+ flagEnvoyImage             string
+ flagConsulCollectorImage   string
+ flagVaultHelmChartVersion  string
+ flagVaultServerVersion     string
+ flagConsulDataplaneImage   string
+ flagConsulDataplaneVersion string

flagHCPResourceID string

@@ -75,7 +77,9 @@ func (t *TestFlags) init() {

flag.StringVar(&t.flagConsulImage, "consul-image", "", "The Consul image to use for all tests.")
flag.StringVar(&t.flagConsulK8sImage, "consul-k8s-image", "", "The consul-k8s image to use for all tests.")
flag.StringVar(&t.flagConsulDataplaneImage, "consul-dataplane-image", "", "The consul-dataplane image to use for all tests.")
flag.StringVar(&t.flagConsulVersion, "consul-version", "", "The consul version used for all tests.")
flag.StringVar(&t.flagConsulDataplaneVersion, "consul-dataplane-version", "", "The consul-dataplane version used for all tests.")
flag.StringVar(&t.flagHelmChartVersion, "helm-chart-version", config.HelmChartPath, "The helm chart used for all tests.")
flag.StringVar(&t.flagEnvoyImage, "envoy-image", "", "The Envoy image to use for all tests.")
flag.StringVar(&t.flagConsulCollectorImage, "consul-collector-image", "", "The consul collector image to use for all tests.")
@@ -155,6 +159,7 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig {

// if the Version is empty consulVersion will be nil
consulVersion, _ := version.NewVersion(t.flagConsulVersion)
+ consulDataplaneVersion, _ := version.NewVersion(t.flagConsulDataplaneVersion)
//vaultserverVersion, _ := version.NewVersion(t.flagVaultServerVersion)

return &config.TestConfig{
@@ -180,16 +185,17 @@ func (t *TestFlags) TestConfigFromFlags() *config.TestConfig {

DisablePeering: t.flagDisablePeering,

- HelmChartVersion:      t.flagHelmChartVersion,
- ConsulImage:           t.flagConsulImage,
- ConsulK8SImage:        t.flagConsulK8sImage,
- ConsulVersion:         consulVersion,
- EnvoyImage:            t.flagEnvoyImage,
- ConsulCollectorImage:  t.flagConsulCollectorImage,
- VaultHelmChartVersion: t.flagVaultHelmChartVersion,
- VaultServerVersion:    t.flagVaultServerVersion,
-
- HCPResourceID: t.flagHCPResourceID,
+ HelmChartVersion:       t.flagHelmChartVersion,
+ ConsulImage:            t.flagConsulImage,
+ ConsulK8SImage:         t.flagConsulK8sImage,
+ ConsulVersion:          consulVersion,
+ EnvoyImage:             t.flagEnvoyImage,
+ ConsulCollectorImage:   t.flagConsulCollectorImage,
+ VaultHelmChartVersion:  t.flagVaultHelmChartVersion,
+ VaultServerVersion:     t.flagVaultServerVersion,
+ ConsulDataplaneImage:   t.flagConsulDataplaneImage,
+ ConsulDataplaneVersion: consulDataplaneVersion,
+ HCPResourceID:          t.flagHCPResourceID,

NoCleanupOnFailure: t.flagNoCleanupOnFailure,
DebugDirectory: tempDir,
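End to end, a dataplane image passed on the command line flows through these flags into `TestConfig` and then into the `global.imageConsulDataplane` Helm value set in `HelmValuesFromConfig` above. A hypothetical invocation; the test path and image tags are placeholders:

```sh
go test ./acceptance/tests/connect/... \
  -run TestConnectInject_ProxyLifecycleShutdown \
  -consul-dataplane-image=hashicorp/consul-dataplane:1.2.0 \
  -consul-dataplane-version=1.2.0
```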
207 changes: 207 additions & 0 deletions acceptance/tests/connect/connect_proxy_lifecycle_test.go
@@ -0,0 +1,207 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package connect

import (
"context"
"fmt"
"strconv"
"strings"
"testing"
"time"

"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/hashicorp/consul-k8s/acceptance/framework/connhelper"
"github.com/hashicorp/consul-k8s/acceptance/framework/consul"
"github.com/hashicorp/consul-k8s/acceptance/framework/helpers"
"github.com/hashicorp/consul-k8s/acceptance/framework/logger"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type LifecycleShutdownConfig struct {
secure bool
helmValues map[string]string
}

const (
helmDrainListenersKey = "connectInject.sidecarProxy.lifecycle.defaultEnableShutdownDrainListeners"
helmGracePeriodSecondsKey = "connectInject.sidecarProxy.lifecycle.defaultShutdownGracePeriodSeconds"
)

// Test the endpoints controller cleans up force-killed pods.
func TestConnectInject_ProxyLifecycleShutdown(t *testing.T) {
t.Skipf("skiping this test, will be re-added in a future commit")
cfg := suite.Config()

for _, testCfg := range []LifecycleShutdownConfig{
{secure: false, helmValues: map[string]string{
helmDrainListenersKey: "true",
helmGracePeriodSecondsKey: "15",
}},
{secure: true, helmValues: map[string]string{
helmDrainListenersKey: "true",
helmGracePeriodSecondsKey: "15",
}},
{secure: false, helmValues: map[string]string{
helmDrainListenersKey: "false",
helmGracePeriodSecondsKey: "15",
}},
{secure: true, helmValues: map[string]string{
helmDrainListenersKey: "false",
helmGracePeriodSecondsKey: "15",
}},
{secure: false, helmValues: map[string]string{
helmDrainListenersKey: "false",
helmGracePeriodSecondsKey: "0",
}},
{secure: true, helmValues: map[string]string{
helmDrainListenersKey: "false",
helmGracePeriodSecondsKey: "0",
}},
} {
// Determine if listeners should be expected to drain inbound connections
var drainListenersEnabled bool
var err error
val, ok := testCfg.helmValues[helmDrainListenersKey]
if ok {
drainListenersEnabled, err = strconv.ParseBool(val)
require.NoError(t, err)
}

// Determine expected shutdown grace period
var gracePeriodSeconds int64
val, ok = testCfg.helmValues[helmGracePeriodSecondsKey]
if ok {
gracePeriodSeconds, err = strconv.ParseInt(val, 10, 64)
require.NoError(t, err)
} else {
// Half of the Helm default, to speed tests up
gracePeriodSeconds = 15
}

name := fmt.Sprintf("secure: %t, drainListeners: %t, gracePeriodSeconds: %d", testCfg.secure, drainListenersEnabled, gracePeriodSeconds)
t.Run(name, func(t *testing.T) {
ctx := suite.Environment().DefaultContext(t)
releaseName := helpers.RandomName()

connHelper := connhelper.ConnectHelper{
ClusterKind: consul.Helm,
Secure: testCfg.secure,
ReleaseName: releaseName,
Ctx: ctx,
Cfg: cfg,
HelmValues: testCfg.helmValues,
}

connHelper.Setup(t)
connHelper.Install(t)
connHelper.DeployClientAndServer(t)

// TODO: should this move into connhelper.DeployClientAndServer?
logger.Log(t, "waiting for static-client and static-server to be registered with Consul")
retry.Run(t, func(r *retry.R) {
for _, name := range []string{
"static-client",
"static-client-sidecar-proxy",
"static-server",
"static-server-sidecar-proxy",
} {
logger.Logf(t, "checking for %s service in Consul catalog", name)
instances, _, err := connHelper.ConsulClient.Catalog().Service(name, "", nil)
r.Check(err)

if len(instances) != 1 {
r.Errorf("expected 1 instance of %s", name)
}
}
})

if testCfg.secure {
connHelper.TestConnectionFailureWithoutIntention(t)
connHelper.CreateIntention(t)
}

connHelper.TestConnectionSuccess(t)

// Get static-client pod name
ns := ctx.KubectlOptions(t).Namespace
pods, err := ctx.KubernetesClient(t).CoreV1().Pods(ns).List(
context.Background(),
metav1.ListOptions{
LabelSelector: "app=static-client",
},
)
require.NoError(t, err)
require.Len(t, pods.Items, 1)
clientPodName := pods.Items[0].Name

var terminationGracePeriod int64 = 60
logger.Logf(t, "killing the %q pod with %dseconds termination grace period", clientPodName, terminationGracePeriod)
err = ctx.KubernetesClient(t).CoreV1().Pods(ns).Delete(context.Background(), clientPodName, metav1.DeleteOptions{GracePeriodSeconds: &terminationGracePeriod})
require.NoError(t, err)

// Exec into terminating pod, not just any static-client pod
args := []string{"exec", clientPodName, "-c", connhelper.StaticClientName, "--", "curl", "-vvvsSf"}

if cfg.EnableTransparentProxy {
args = append(args, "http://static-server")
} else {
args = append(args, "http://localhost:1234")
}

if gracePeriodSeconds > 0 {
// Ensure outbound requests are still successful during grace
// period.
retry.RunWith(&retry.Timer{Timeout: time.Duration(gracePeriodSeconds) * time.Second, Wait: 2 * time.Second}, t, func(r *retry.R) {
output, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), args...)
require.NoError(r, err)
require.Condition(r, func() bool {
exists := false
if strings.Contains(output, "curl: (7) Failed to connect") {
exists = true
}
return !exists
})
})

// If listener draining is enabled, ensure inbound
// requests are rejected during grace period.
// connHelper.TestConnectionSuccess(t)
} else {
// Ensure outbound requests fail because proxy has terminated
retry.RunWith(&retry.Timer{Timeout: time.Duration(terminationGracePeriod) * time.Second, Wait: 2 * time.Second}, t, func(r *retry.R) {
output, err := k8s.RunKubectlAndGetOutputE(t, ctx.KubectlOptions(t), args...)
require.Error(r, err)
require.Condition(r, func() bool {
exists := false
if strings.Contains(output, "curl: (7) Failed to connect") {
exists = true
}
return exists
})
})
}

logger.Log(t, "ensuring pod is deregistered after termination")
retry.Run(t, func(r *retry.R) {
for _, name := range []string{
"static-client",
"static-client-sidecar-proxy",
} {
logger.Logf(t, "checking for %s service in Consul catalog", name)
instances, _, err := connHelper.ConsulClient.Catalog().Service(name, "", nil)
r.Check(err)

for _, instance := range instances {
if strings.Contains(instance.ServiceID, clientPodName) {
r.Errorf("%s is still registered", instance.ServiceID)
}
}
}
})
})
}
}
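
To reproduce the kill step by hand, the `Delete` call with `GracePeriodSeconds` above corresponds roughly to the following kubectl command; the pod name and namespace are placeholders:

```sh
kubectl delete pod static-client-xxxxx --grace-period=60 --namespace <test-namespace>
```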
