Add integration test for AddPackageRepository with TLS config (#4376)
* attempt #2

* add TLS integration test for AddPackageRepository
  re-organize integration tests by functionality

* remove Chart.lock

* moved test-related files a bit so they don't all crowd the testdata directory
gfichtenholt committed Mar 4, 2022
1 parent 8c7d8ac commit bb11130
Showing 40 changed files with 941 additions and 787 deletions.
@@ -0,0 +1,241 @@
// Copyright 2021-2022 the Kubeapps contributors.
// SPDX-License-Identifier: Apache-2.0

package main

import (
"context"
"fmt"
"strings"
"testing"
"time"

corev1 "github.com/kubeapps/kubeapps/cmd/kubeapps-apis/gen/core/packages/v1alpha1"
"golang.org/x/sync/semaphore"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/sets"
)

// This is an integration test: it tests the full integration of the flux plugin with the flux back-end.
// To run these tests, set the ENABLE_FLUX_INTEGRATION_TESTS environment variable.
// Pre-requisites for these tests to run:
// 1) a kind cluster with flux deployed
// 2) the kubeapps-apis apiserver service running with the fluxv2 plug-in enabled, port-forwarded to 8080, e.g.
// kubectl -n kubeapps port-forward svc/kubeapps-internal-kubeappsapis 8080:8080
// 3) run './kind-cluster-setup.sh deploy' once prior to these tests
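// For example (the exact flags here are illustrative, not prescribed by this commit):
// ENABLE_FLUX_INTEGRATION_TESTS=true go test -count=1 -timeout 60m -run TestKindCluster ./...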

// This integration test is meant to test a scenario where the redis cache is configured with a maxmemory
// too small to fit all the repos needed to satisfy the request for GetAvailablePackageSummaries,
// so redis cache eviction kicks in. Also, the kubeapps-apis pod should have a large memory limit (1Gb) set.
// To set up such an environment one can use the "-f ./docs/user/manifests/kubeapps-local-dev-redis-tiny-values.yaml"
// option when installing kubeapps via "helm upgrade".
// It is worth noting that exactly how many copies of the bitnami repo can be held in the cache at any given time varies,
// because the size of the index.yaml we get from bitnami fluctuates quite a bit over time:
// [kubeapps]$ ls -l bitnami_index.yaml
// -rw-r--r--@ 1 gfichtenholt staff 8432962 Jun 20 02:35 bitnami_index.yaml
// [kubeapps]$ ls -l bitnami_index.yaml
// -rw-rw-rw-@ 1 gfichtenholt staff 10394218 Nov 7 19:41 bitnami_index.yaml
// Also, we now cache the helm charts themselves for each repo, so that affects how many repos fit too.
func TestKindClusterGetAvailablePackageSummariesForLargeReposAndTinyRedis(t *testing.T) {
fluxPlugin, _ := checkEnv(t)

redisCli, err := newRedisClientForIntegrationTest(t)
if err != nil {
t.Fatalf("%+v", err)
}

// assume 30Mb redis cache for now. See comment above
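// (31457280 bytes = 30 * 1024 * 1024, i.e. a 30MiB maxmemory setting)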
if err = redisCheckTinyMaxMemory(t, redisCli, "31457280"); err != nil {
t.Fatalf("%v", err)
}

// ref https://redis.io/topics/notifications
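// "E" enables keyevent notifications, published on "__keyevent@<db>__:<event>" channels;
// "A" is an alias for all event classes, so evictions and expirations are included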
if err = redisCli.ConfigSet(redisCli.Context(), "notify-keyspace-events", "EA").Err(); err != nil {
t.Fatalf("%+v", err)
}
t.Cleanup(func() {
t.Logf("Resetting notify-keyspace-events")
if err = redisCli.ConfigSet(redisCli.Context(), "notify-keyspace-events", "").Err(); err != nil {
t.Logf("%v", err)
}
})

if err = initNumberOfChartsInBitnamiCatalog(t); err != nil {
t.Errorf("Failed to get number of charts in bitnami catalog due to: %v", err)
}

const MAX_REPOS_NEVER = 100
var totalRepos = 0
// ref https://stackoverflow.com/questions/32840687/timeout-for-waitgroup-wait
evictedRepos := sets.String{}

// do this part in a func so we can defer subscribe.Close
func() {
// ref https://medium.com/nerd-for-tech/redis-getting-notified-when-a-key-is-expired-or-changed-ca3e1f1c7f0a
subscribe := redisCli.PSubscribe(redisCli.Context(), "__keyevent@0__:*")
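// this pattern matches every keyevent notification on redis DB 0,
// e.g. "__keyevent@0__:set" and "__keyevent@0__:evicted"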
defer subscribe.Close()

sem := semaphore.NewWeighted(MAX_REPOS_NEVER)
if err := sem.Acquire(context.Background(), MAX_REPOS_NEVER); err != nil {
t.Fatalf("%v", err)
}

go redisReceiveNotificationsLoop(t, subscribe.Channel(), sem, &evictedRepos)
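// the semaphore starts fully acquired, so each sem.Acquire(ctx, 1) below blocks
// until the notification loop releases a permit, presumably when it sees an event
// indicating a repo has landed in the cache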

// now load some large repos (bitnami)
// I didn't want to store a large (>10MB) copy of the bitnami repo in our git,
// so for now let it fetch directly from the bitnami website.
// We'll keep adding repos one at a time, until we get an event from redis
// about the first evicted repo entry
for ; totalRepos < MAX_REPOS_NEVER && evictedRepos.Len() == 0; totalRepos++ {
repo := fmt.Sprintf("bitnami-%d", totalRepos)
// make sure we allow enough time for the repository to be created and reach the ready state
if err = kubeAddHelmRepository(t, repo, "https://charts.bitnami.com/bitnami", "default", ""); err != nil {
t.Fatalf("%v", err)
}
t.Cleanup(func() {
if err = kubeDeleteHelmRepository(t, repo, "default"); err != nil {
t.Logf("%v", err)
}
})
// wait up to 10 minutes for this repo to be indexed and cached
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10)
defer cancel()
if err := sem.Acquire(ctx, 1); err != nil {
t.Fatalf("Timed out waiting for Redis event: %v", err)
}
}
t.Logf("Done with first part of the test, total repos: [%d], evicted repos: [%d]",
totalRepos, len(evictedRepos))
}()

if evictedRepos.Len() == 0 {
t.Fatalf("Failing because redis did not evict any entries")
}

if keys, err := redisCli.Keys(redisCli.Context(), "helmrepositories:*").Result(); err != nil {
t.Fatalf("%v", err)
} else {
// the cache should only be big enough to hold at most (totalRepos-1) of the keys;
// one (or more) entries may have been evicted
if len(keys) > totalRepos-1 {
t.Fatalf("Expected at most [%d] keys in cache but got: %s", totalRepos-1, keys)
}
}

// one particular code path I'd like to test:
// make sure that GetAvailablePackageVersions() works w.r.t. a cache entry that's been evicted
grpcContext := newGrpcAdminContext(t, "test-create-admin")

// copy the evicted set, because the notification goroutine started below will keep modifying it
evictedCopy := sets.StringKeySet(evictedRepos)
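// (sets.StringKeySet builds a new set from the keys of the given map, so evictedCopy
// is a point-in-time snapshot)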

// do this part in a func so we can defer subscribe.Close
func() {
subscribe := redisCli.PSubscribe(redisCli.Context(), "__keyevent@0__:*")
defer subscribe.Close()

go redisReceiveNotificationsLoop(t, subscribe.Channel(), nil, &evictedRepos)

for _, k := range evictedCopy.List() {
name := strings.Split(k, ":")[2]
t.Logf("Checking apache version in repo [%s]...", name)
grpcContext, cancel := context.WithTimeout(grpcContext, defaultContextTimeout)
defer cancel()
resp, err := fluxPlugin.GetAvailablePackageVersions(
grpcContext, &corev1.GetAvailablePackageVersionsRequest{
AvailablePackageRef: &corev1.AvailablePackageReference{
Context: &corev1.Context{
Namespace: "default",
},
Identifier: name + "/apache",
},
})
if err != nil {
t.Fatalf("%v", err)
} else if len(resp.PackageAppVersions) < 5 {
t.Fatalf("Expected at least 5 versions for apache chart, got: %s", resp)
}
}

t.Logf("Done with second part of the test")
}()

// do this part in a func so we can defer subscribe.Close
func() {
subscribe := redisCli.PSubscribe(redisCli.Context(), "__keyevent@0__:*")
defer subscribe.Close()

// the loop above should have caused a few more entries to be evicted, but just to be sure, let's
// load a few more copies of the bitnami repo into the cache. The goal of this for loop is
// to force redis to evict more repo(s)
sem := semaphore.NewWeighted(MAX_REPOS_NEVER)
if err := sem.Acquire(context.Background(), MAX_REPOS_NEVER); err != nil {
t.Fatalf("%v", err)
}
go redisReceiveNotificationsLoop(t, subscribe.Channel(), sem, &evictedRepos)

for ; totalRepos < MAX_REPOS_NEVER && evictedRepos.Len() == evictedCopy.Len(); totalRepos++ {
repo := fmt.Sprintf("bitnami-%d", totalRepos)
// make sure we allow enough time for the repository to be created and reach the ready state
if err = kubeAddHelmRepository(t, repo, "https://charts.bitnami.com/bitnami", "default", ""); err != nil {
t.Fatalf("%v", err)
}
t.Cleanup(func() {
if err = kubeDeleteHelmRepository(t, repo, "default"); err != nil {
t.Logf("%v", err)
}
})
// wait up to 10 minutes for this repo to be indexed and cached
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10)
defer cancel()
if err := sem.Acquire(ctx, 1); err != nil {
t.Fatalf("Timed out waiting for Redis event: %v", err)
}
}

t.Logf("Done with third part of the test")
}()

if keys, err := redisCli.Keys(redisCli.Context(), "helmrepositories:*").Result(); err != nil {
t.Fatalf("%v", err)
} else {
// the cache should only be big enough to hold at most (totalRepos-1) of the keys;
// one (or more) entries MUST have been evicted
if len(keys) > totalRepos-1 {
t.Fatalf("Expected at most %d keys in cache but got [%s]", totalRepos-1, keys)
}
}

// not related to low maxmemory, but while we are here we might as well check that
// there is an Unauthenticated failure when there are no credentials in the request
_, err = fluxPlugin.GetAvailablePackageSummaries(context.TODO(), &corev1.GetAvailablePackageSummariesRequest{})
if err == nil || status.Code(err) != codes.Unauthenticated {
t.Fatalf("Expected Unauthenticated, got %v", err)
}

grpcContext, cancel := context.WithTimeout(grpcContext, 60*time.Second)
defer cancel()
resp2, err := fluxPlugin.GetAvailablePackageSummaries(grpcContext, &corev1.GetAvailablePackageSummariesRequest{})
if err != nil {
t.Fatalf("%v", err)
}

// we need to make sure that the response contains packages from all existing repositories,
// regardless of whether they're in the cache or not
expected := sets.String{}
for i := 0; i < totalRepos; i++ {
repo := fmt.Sprintf("bitnami-%d", i)
expected.Insert(repo)
}
for _, s := range resp2.AvailablePackageSummaries {
id := strings.Split(s.AvailablePackageRef.Identifier, "/")
expected.Delete(id[0])
}

if expected.Len() != 0 {
t.Fatalf("Expected to get packages from these repositories: %s, but did not get any",
expected.List())
}
}
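
A note on the helper used above: redisReceiveNotificationsLoop is referenced by this test but defined elsewhere in the package, outside this diff. The following is a minimal sketch of what such a loop could look like, inferred only from its call sites (the channel comes from go-redis v8's PSubscribe, the semaphore may be nil, and the evicted-key set is shared with the caller). The event names, filtering, and logging are assumptions, not the actual kubeapps implementation.

// Sketch only -- NOT the actual helper from this commit. Assumes the
// "github.com/go-redis/redis/v8" import for redis.Message.
func redisReceiveNotificationsLoopSketch(t *testing.T, ch <-chan *redis.Message,
	sem *semaphore.Weighted, evictedRepos *sets.String) {
	// msg.Channel carries the event name (e.g. "__keyevent@0__:evicted") and
	// msg.Payload carries the affected key (e.g. "helmrepositories:default:bitnami-0")
	for msg := range ch {
		t.Logf("Redis event: channel [%s] payload [%s]", msg.Channel, msg.Payload)
		if msg.Channel == "__keyevent@0__:evicted" {
			evictedRepos.Insert(msg.Payload)
		} else if sem != nil && msg.Channel == "__keyevent@0__:set" {
			// a repo entry was (re)written to the cache: release one permit so
			// the sem.Acquire(ctx, 1) in the test loop can proceed
			sem.Release(1)
		}
	}
}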
24 changes: 5 additions & 19 deletions cmd/kubeapps-apis/plugins/fluxv2/packages/v1alpha1/chart_test.go
@@ -95,15 +95,7 @@ func TestGetAvailablePackageDetail(t *testing.T) {

// these will be used further on for TLS-related scenarios. Init
// byte arrays up front so they can be re-used in multiple places later
var ca, pub, priv []byte
var err error
if ca, err = ioutil.ReadFile("testdata/rootCA.crt"); err != nil {
t.Fatalf("%+v", err)
} else if pub, err = ioutil.ReadFile("testdata/crt.pem"); err != nil {
t.Fatalf("%+v", err)
} else if priv, err = ioutil.ReadFile("testdata/key.pem"); err != nil {
t.Fatalf("%+v", err)
}
ca, pub, priv := getCertsForTesting(t)

for _, tc := range testCases {
t.Run(tc.testName, func(t *testing.T) {
@@ -140,12 +132,6 @@ func TestGetAvailablePackageDetail(t *testing.T) {
}
var ts *httptest.Server
if tc.tls {
// I cheated a bit in this test. Instead of generating my own certificates
// and keys using the openssl tool, which I found time-consuming and overly complicated,
// I just copied the ones used by the helm.sh tool for testing purposes
// from https://github.com/helm/helm/tree/main/testdata
// in order to save some time. Shouldn't affect any functionality of production
// code
ts = httptest.NewUnstartedServer(handler)
tlsConf, err := httpclient.NewClientTLS(pub, priv, ca)
if err != nil {
@@ -694,7 +680,7 @@ func TestChartCacheResyncNotIdle(t *testing.T) {
}

// what I need is a single repo with a whole bunch of unique charts (packages)
tarGzBytes, err := ioutil.ReadFile("./testdata/redis-14.4.0.tgz")
tarGzBytes, err := ioutil.ReadFile("./testdata/charts/redis-14.4.0.tgz")
if err != nil {
t.Fatalf("%+v", err)
}
@@ -733,7 +719,7 @@ func TestChartCacheResyncNotIdle(t *testing.T) {
repoName := "multitude-of-charts"
repoNamespace := "default"
replaceUrls := make(map[string]string)
replaceUrls["{{testdata/redis-14.4.0.tgz}}"] = ts.URL
replaceUrls["{{testdata/charts/redis-14.4.0.tgz}}"] = ts.URL
ts2, r, err := newRepoWithIndex(
tmpFile.Name(), repoName, repoNamespace, replaceUrls, "")
if err != nil {
@@ -977,12 +963,12 @@ func compareActualVsExpectedAvailablePackageDetail(t *testing.T, actual *corev1.
var redis_charts_spec = []testSpecChartWithFile{
{
name: "redis",
tgzFile: "testdata/redis-14.4.0.tgz",
tgzFile: "testdata/charts/redis-14.4.0.tgz",
revision: "14.4.0",
},
{
name: "redis",
tgzFile: "testdata/redis-14.3.4.tgz",
tgzFile: "testdata/charts/redis-14.3.4.tgz",
revision: "14.3.4",
},
}
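The getCertsForTesting helper that this commit introduces (called near the top of TestGetAvailablePackageDetail above) is not shown in this excerpt. Here is a plausible sketch, reconstructed from the inline code it replaces; the testdata file names are taken from the removed lines and may have been relocated by this commit's testdata re-organization.

// Sketch only -- reconstructed from the replaced inline code, not necessarily
// the actual helper. Assumes the same "io/ioutil" import as the original code.
func getCertsForTesting(t *testing.T) (ca, pub, priv []byte) {
	var err error
	if ca, err = ioutil.ReadFile("testdata/rootCA.crt"); err != nil {
		t.Fatalf("%+v", err)
	} else if pub, err = ioutil.ReadFile("testdata/crt.pem"); err != nil {
		t.Fatalf("%+v", err)
	} else if priv, err = ioutil.ReadFile("testdata/key.pem"); err != nil {
		t.Fatalf("%+v", err)
	}
	return ca, pub, priv
}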
@@ -51,6 +51,19 @@ const (
// EnvvarFluxIntegrationTests enables tests that run against a local kind cluster
envVarFluxIntegrationTests = "ENABLE_FLUX_INTEGRATION_TESTS"
defaultContextTimeout = 30 * time.Second

// This is a local copy of the first few entries
// of "https://stefanprodan.github.io/podinfo/index.yaml" as of Sept 10 2021, with the chart
// URLs modified to link to .tgz files also within the local cluster.
// If we want other repos, we'll have to add directories and tinker with ./Dockerfile and the NGINX conf.
// This relies on the fluxv2plugin-testdata-svc service stood up by testdata/kind-cluster-setup.sh
podinfo_repo_url = "http://fluxv2plugin-testdata-svc.default.svc.cluster.local:80/podinfo"

// same as above but requires HTTP basic authentication: user: foo, password: bar
podinfo_basic_auth_repo_url = "http://fluxv2plugin-testdata-svc.default.svc.cluster.local:80/podinfo-basic-auth"

// same as above but requires TLS
podinfo_tls_repo_url = "https://fluxv2plugin-testdata-ssl-svc.default.svc.cluster.local:443"
)
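
// For illustration, here is a hedged sketch of how these constants are typically
// consumed by the integration tests in this package. The helpers it calls (checkEnv,
// kubeAddHelmRepository, kubeDeleteHelmRepository, newGrpcAdminContext) all appear
// elsewhere in this diff; the test body itself is hypothetical and NOT part of the commit.
func TestKindClusterPodinfoRepoSketch(t *testing.T) {
	fluxPlugin, _ := checkEnv(t)

	// the last argument is "" at every call site in this diff; its exact meaning is not assumed here
	if err := kubeAddHelmRepository(t, "podinfo", podinfo_repo_url, "default", ""); err != nil {
		t.Fatalf("%v", err)
	}
	t.Cleanup(func() {
		if err := kubeDeleteHelmRepository(t, "podinfo", "default"); err != nil {
			t.Logf("%v", err)
		}
	})

	grpcContext := newGrpcAdminContext(t, "test-podinfo-admin")
	ctx, cancel := context.WithTimeout(grpcContext, defaultContextTimeout)
	defer cancel()
	resp, err := fluxPlugin.GetAvailablePackageSummaries(ctx,
		&corev1.GetAvailablePackageSummariesRequest{})
	if err != nil {
		t.Fatalf("%v", err)
	}
	t.Logf("Got [%d] package summaries", len(resp.AvailablePackageSummaries))
}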

func checkEnv(t *testing.T) (fluxplugin.FluxV2PackagesServiceClient, fluxplugin.FluxV2RepositoriesServiceClient) {
