Skip to content

Commit

Permalink
pkg/clustermesh: Replace gocheck with built-in go test
Browse files Browse the repository at this point in the history
Co-authored-by: Marco Iorio <marco.iorio@isovalent.com>
Signed-off-by: Tam Mach <tam.mach@cilium.io>
  • Loading branch information
sayboras and giorio94 committed May 6, 2024
1 parent b4a5a37 commit eed2112
Show file tree
Hide file tree
Showing 2 changed files with 186 additions and 225 deletions.
177 changes: 75 additions & 102 deletions pkg/clustermesh/common/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,26 +11,25 @@ import (
"testing"
"time"

. "github.com/cilium/checkmate"
"github.com/cilium/hive/hivetest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"

"github.com/cilium/cilium/pkg/clustermesh/types"
"github.com/cilium/cilium/pkg/kvstore"
"github.com/cilium/cilium/pkg/testutils"
)

// Minimal etcd client configuration payloads written to the watched
// cluster config files; the two variants differ only in the endpoint
// port (2379 vs 2380) so the tests can exercise content-change detection.
const (
content1 = "endpoints:\n- https://cluster1.cilium-etcd.cilium.svc:2379\n"
content2 = "endpoints:\n- https://cluster1.cilium-etcd.cilium.svc:2380\n"
)

func Test(t *testing.T) {
TestingT(t)
}

type ClusterMeshTestSuite struct{}

var _ = Suite(&ClusterMeshTestSuite{})
// Configure a generous timeout to prevent flakes when running in a noisy CI environment.
var (
tick = 10 * time.Millisecond
timeout = 5 * time.Second
)

// fakeRemoteCluster is a no-op stub standing in for a real remote-cluster
// implementation; the watcher tests only need placeholder instances.
// NOTE(review): additional methods appear to be hidden by the collapsed
// diff region below — confirm the full set against the complete file.
type fakeRemoteCluster struct{}

Expand All @@ -41,81 +40,73 @@ func (*fakeRemoteCluster) ClusterConfigRequired() bool { return false }
// Stop and Remove are intentionally empty: the tests never exercise
// remote-cluster teardown behavior.
func (*fakeRemoteCluster) Stop() {}
func (*fakeRemoteCluster) Remove() {}

func writeFile(c *C, name, content string) {
func writeFile(t *testing.T, name, content string) {
t.Helper()

err := os.WriteFile(name, []byte(content), 0644)
c.Assert(err, IsNil)
require.NoError(t, err)
}

func expectExists(c *C, cm *clusterMesh, name string) {
cm.mutex.RLock()
defer cm.mutex.RUnlock()
c.Assert(cm.clusters[name], Not(IsNil))
}
func expectChange(t *testing.T, cm *clusterMesh, name string) {
t.Helper()

func expectChange(c *C, cm *clusterMesh, name string) {
cm.mutex.RLock()
require.Contains(t, cm.clusters, name)
cluster := cm.clusters[name]
cm.mutex.RUnlock()
c.Assert(cluster, Not(IsNil))

select {
case <-cluster.changed:
case <-time.After(time.Second):
c.Fatal("timeout while waiting for changed event")
t.Fatal("timeout while waiting for changed event")
}
}

func expectNoChange(c *C, cm *clusterMesh, name string) {
func expectNoChange(t *testing.T, cm *clusterMesh, name string) {
t.Helper()

cm.mutex.RLock()
cluster := cm.clusters[name]
cm.mutex.RUnlock()
c.Assert(cluster, Not(IsNil))
require.NotNil(t, cluster)

select {
case <-cluster.changed:
c.Fatal("unexpected changed event detected")
t.Fatal("unexpected changed event detected")
case <-time.After(100 * time.Millisecond):
}
}

func expectNotExist(c *C, cm *clusterMesh, name string) {
cm.mutex.RLock()
defer cm.mutex.RUnlock()
c.Assert(cm.clusters[name], IsNil)
}

// NOTE(review): this span is a rendered diff interleaving the removed
// gocheck lines (c *C, c.Assert, testutils.WaitUntil) with their testify
// replacements (t *testing.T, require/assert, EventuallyWithT); only one
// variant of each statement belongs in the real file. The Configuration
// fields hidden behind the collapsed "Expand All" marker below cannot be
// recovered from this view — confirm against the full file before editing.
//
// The test simulates kubelet-style ConfigMap/Secret mounts (a "..data"
// symlink indirection) and checks that clusters appear, disappear, and
// emit change events as config files are created, removed, renamed, and
// atomically swapped.
func (s *ClusterMeshTestSuite) TestWatchConfigDirectory(c *C) {
func TestWatchConfigDirectory(t *testing.T) {
skipKvstoreConnection = true
defer func() {
skipKvstoreConnection = false
}()

baseDir, err := os.MkdirTemp("", "multicluster")
c.Assert(err, IsNil)
defer os.RemoveAll(baseDir)
baseDir := t.TempDir()

// Mimic the kubelet mount layout: "..data" is a symlink to a versioned
// data directory, and per-cluster files are symlinks through "..data".
dataDir := path.Join(baseDir, "..data")
dataDirTmp := path.Join(baseDir, "..data_tmp")
dataDir1 := path.Join(baseDir, "..data-1")
dataDir2 := path.Join(baseDir, "..data-2")
dataDir3 := path.Join(baseDir, "..data-3")

c.Assert(os.Symlink(dataDir1, dataDir), IsNil)
c.Assert(os.Mkdir(dataDir1, 0755), IsNil)
c.Assert(os.Mkdir(dataDir2, 0755), IsNil)
c.Assert(os.Mkdir(dataDir3, 0755), IsNil)
require.NoError(t, os.Symlink(dataDir1, dataDir))
require.NoError(t, os.Mkdir(dataDir1, 0755))
require.NoError(t, os.Mkdir(dataDir2, 0755))
require.NoError(t, os.Mkdir(dataDir3, 0755))

file1 := path.Join(baseDir, "cluster1")
file2 := path.Join(baseDir, "cluster2")
file3 := path.Join(baseDir, "cluster3")

writeFile(c, file1, content1)
writeFile(c, path.Join(dataDir1, "cluster2"), content1)
writeFile(c, path.Join(dataDir2, "cluster2"), content2)
writeFile(c, path.Join(dataDir3, "cluster2"), content1)
writeFile(t, file1, content1)
writeFile(t, path.Join(dataDir1, "cluster2"), content1)
writeFile(t, path.Join(dataDir2, "cluster2"), content2)
writeFile(t, path.Join(dataDir3, "cluster2"), content1)

// Create an indirect link, as in case of Kubernetes ConfigMaps/Secrets mounted inside pods.
c.Assert(os.Symlink(path.Join(dataDir, "cluster2"), file2), IsNil)
require.NoError(t, os.Symlink(path.Join(dataDir, "cluster2"), file2))

gcm := NewClusterMesh(Configuration{
Config: Config{ClusterMeshConfig: baseDir},
Expand All @@ -124,114 +115,96 @@ func (s *ClusterMeshTestSuite) TestWatchConfigDirectory(c *C) {
Metrics: MetricsProvider("clustermesh")(),
})
cm := gcm.(*clusterMesh)
hivetest.Lifecycle(c).Append(cm)
hivetest.Lifecycle(t).Append(cm)

// wait for cluster1 and cluster2 to appear
c.Assert(testutils.WaitUntil(func() bool {
require.EventuallyWithT(t, func(c *assert.CollectT) {
cm.mutex.RLock()
defer cm.mutex.RUnlock()
return len(cm.clusters) == 2
}, time.Second), IsNil)
expectExists(c, cm, "cluster1")
expectExists(c, cm, "cluster2")
expectNotExist(c, cm, "cluster3")
assert.ElementsMatch(c, maps.Keys(cm.clusters), []string{"cluster1", "cluster2"})
}, timeout, tick)

err = os.RemoveAll(file1)
c.Assert(err, IsNil)
require.NoError(t, os.RemoveAll(file1))

// wait for cluster1 to disappear
c.Assert(testutils.WaitUntil(func() bool {
require.EventuallyWithT(t, func(c *assert.CollectT) {
cm.mutex.RLock()
defer cm.mutex.RUnlock()
return len(cm.clusters) == 1
}, time.Second), IsNil)
assert.ElementsMatch(c, maps.Keys(cm.clusters), []string{"cluster2"})
}, timeout, tick)

writeFile(c, file3, content1)
writeFile(t, file3, content1)

// wait for cluster3 to appear
c.Assert(testutils.WaitUntil(func() bool {
require.EventuallyWithT(t, func(c *assert.CollectT) {
cm.mutex.RLock()
defer cm.mutex.RUnlock()
return len(cm.clusters) == 2
}, time.Second), IsNil)
expectNotExist(c, cm, "cluster1")
expectExists(c, cm, "cluster2")
expectExists(c, cm, "cluster3")
assert.ElementsMatch(c, maps.Keys(cm.clusters), []string{"cluster2", "cluster3"})
}, timeout, tick)

// Test renaming of file from cluster3 to cluster1
err = os.Rename(file3, file1)
c.Assert(err, IsNil)
require.NoError(t, os.Rename(file3, file1))

// wait for cluster1 to appear
c.Assert(testutils.WaitUntil(func() bool {
require.EventuallyWithT(t, func(c *assert.CollectT) {
cm.mutex.RLock()
defer cm.mutex.RUnlock()
return cm.clusters["cluster1"] != nil
}, time.Second), IsNil)
expectExists(c, cm, "cluster2")
expectNotExist(c, cm, "cluster3")
assert.ElementsMatch(c, maps.Keys(cm.clusters), []string{"cluster1", "cluster2"})
}, timeout, tick)

// touch file: a pure mtime change must not be reported as a content change
c.Assert(os.Chtimes(file1, time.Now(), time.Now()), IsNil)
expectNoChange(c, cm, "cluster1")
require.NoError(t, os.Chtimes(file1, time.Now(), time.Now()))
expectNoChange(t, cm, "cluster1")

// update file content changing the symlink target, adopting
// the same approach of the kubelet on ConfigMap/Secret update
c.Assert(os.Symlink(dataDir2, dataDirTmp), IsNil)
c.Assert(os.Rename(dataDirTmp, dataDir), IsNil)
c.Assert(os.RemoveAll(dataDir1), IsNil)
expectChange(c, cm, "cluster2")
require.NoError(t, os.Symlink(dataDir2, dataDirTmp))
require.NoError(t, os.Rename(dataDirTmp, dataDir))
require.NoError(t, os.RemoveAll(dataDir1))
expectChange(t, cm, "cluster2")

// update file content once more
c.Assert(os.Symlink(dataDir3, dataDirTmp), IsNil)
c.Assert(os.Rename(dataDirTmp, dataDir), IsNil)
c.Assert(os.RemoveAll(dataDir2), IsNil)
expectChange(c, cm, "cluster2")
require.NoError(t, os.Symlink(dataDir3, dataDirTmp))
require.NoError(t, os.Rename(dataDirTmp, dataDir))
require.NoError(t, os.RemoveAll(dataDir2))
expectChange(t, cm, "cluster2")

err = os.RemoveAll(file1)
c.Assert(err, IsNil)
err = os.RemoveAll(file2)
c.Assert(err, IsNil)
require.NoError(t, os.RemoveAll(file1))
require.NoError(t, os.RemoveAll(file2))

// wait for all clusters to disappear
c.Assert(testutils.WaitUntil(func() bool {
require.EventuallyWithT(t, func(c *assert.CollectT) {
cm.mutex.RLock()
defer cm.mutex.RUnlock()
return len(cm.clusters) == 0
}, time.Second), IsNil)
expectNotExist(c, cm, "cluster1")
expectNotExist(c, cm, "cluster2")
expectNotExist(c, cm, "cluster3")
assert.Empty(c, cm.clusters)
}, timeout, tick)

// Ensure that per-config watches are removed properly
wl := cm.configWatcher.watcher.WatchList()
c.Assert(wl, HasLen, 1)
c.Assert(wl[0], Equals, baseDir)
require.ElementsMatch(t, wl, []string{baseDir})
}

func (s *ClusterMeshTestSuite) TestIsEtcdConfigFile(c *C) {
dir, err := os.MkdirTemp("", "etcdconfig")
c.Assert(err, IsNil)
defer os.RemoveAll(dir)
func TestIsEtcdConfigFile(t *testing.T) {
dir := t.TempDir()

validPath := path.Join(dir, "valid")
content := []byte("endpoints:\n- https://cluster1.cilium-etcd.cilium.svc:2379\n")
err = os.WriteFile(validPath, content, 0644)
c.Assert(err, IsNil)
err := os.WriteFile(validPath, content, 0644)
require.NoError(t, err)

isConfig, hash := isEtcdConfigFile(validPath)
c.Assert(isConfig, Equals, true)
c.Assert(hash, Equals, fhash(sha256.Sum256(content)))
require.True(t, isConfig)
require.Equal(t, fhash(sha256.Sum256(content)), hash)

invalidPath := path.Join(dir, "valid")
err = os.WriteFile(invalidPath, []byte("sf324kj234lkjsdvl\nwl34kj23l4k\nendpoints"), 0644)
c.Assert(err, IsNil)
require.NoError(t, err)

isConfig, hash = isEtcdConfigFile(validPath)
c.Assert(isConfig, Equals, false)
c.Assert(hash, Equals, fhash{})
require.False(t, isConfig)
require.Equal(t, fhash{}, hash)

isConfig, hash = isEtcdConfigFile(dir)
c.Assert(isConfig, Equals, false)
c.Assert(hash, Equals, fhash{})
require.False(t, isConfig)
require.Equal(t, fhash{}, hash)
}

0 comments on commit eed2112

Please sign in to comment.