diff --git a/pkg/anonymization/anonymizer.go b/pkg/anonymization/anonymizer.go
index ebb4335a7..87ba9262d 100644
--- a/pkg/anonymization/anonymizer.go
+++ b/pkg/anonymization/anonymizer.go
@@ -9,7 +9,7 @@
 // `cluster-api.openshift.example.com` will become `cluster-api.<CLUSTER_BASE_DOMAIN>`
 // - IPv4 addresses. Using a config client, it retrieves cluster networks and uses them to anonymize IP addresses
 // preserving subnet information. For example, if you have the following networks in your cluster:
-// "10.128.0.0/14", "172.30.0.0/16", "127.0.0.1/8"(added by default) the anonymization will handle the IPs like this:
+// "10.128.0.0/14", "172.30.0.0/16", "127.0.0.0/8"(added by default) the anonymization will handle the IPs like this:
 // - 10.128.0.0 -> 10.128.0.0 // subnetwork itself won't be anonymized
 // - 10.128.0.55 -> 10.128.0.1
 // - 10.128.0.56 -> 10.128.0.2
@@ -30,6 +30,7 @@ import (
 	"strings"
 
 	configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
+	networkv1client "github.com/openshift/client-go/network/clientset/versioned/typed/network/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
@@ -83,7 +84,7 @@ type ConfigProvider interface {
 
 // NewAnonymizer creates a new instance of anonymizer with a provided config observer and sensitive data
 func NewAnonymizer(clusterBaseDomain string, networks []string, secretsClient corev1client.SecretInterface) (*Anonymizer, error) {
-	networks = append(networks, "127.0.0.1/8")
+	networks = append(networks, "127.0.0.0/8")
 
 	cidrs, err := k8snet.ParseCIDRs(networks)
 	if err != nil {
@@ -110,7 +111,10 @@ func NewAnonymizer(clusterBaseDomain string, networks []string, secretsClient co
 
 // NewAnonymizerFromConfigClient creates a new instance of anonymizer with a provided openshift config client
 func NewAnonymizerFromConfigClient(
-	ctx context.Context, kubeClient kubernetes.Interface, configClient configv1client.ConfigV1Interface,
+	ctx context.Context,
+	kubeClient kubernetes.Interface,
+	configClient configv1client.ConfigV1Interface,
+	networkClient networkv1client.NetworkV1Interface,
 ) (*Anonymizer, error) {
 	baseDomain, err := utils.GetClusterBaseDomain(ctx, configClient)
 	if err != nil {
@@ -136,13 +140,25 @@ func NewAnonymizerFromConfigClient(
 		return nil, err
 	}
 
-	secretsClient := kubeClient.CoreV1().Secrets(secretNamespace)
-
 	if installConfig, exists := clusterConfigV1.Data["install-config"]; exists {
 		networkRegex := regexp.MustCompile(Ipv4NetworkRegex)
 		networks = append(networks, networkRegex.FindAllString(installConfig, -1)...)
 	}
 
+	// egress subnets
+
+	hostSubnets, err := networkClient.HostSubnets().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	for i := range hostSubnets.Items {
+		hostSubnet := &hostSubnets.Items[i]
+		for _, egressCIDR := range hostSubnet.EgressCIDRs {
+			networks = append(networks, string(egressCIDR))
+		}
+	}
+
 	// we're sorting by subnet lengths, if they are the same, we use subnet itself
 	utils.SortAndRemoveDuplicates(&networks, func(i, j int) bool {
 		if !strings.Contains(networks[i], "/") || !strings.Contains(networks[j], "/") {
@@ -160,6 +176,8 @@ func NewAnonymizerFromConfigClient(
 		return network1[0] > network2[0]
 	})
 
+	secretsClient := kubeClient.CoreV1().Secrets(secretNamespace)
+
 	return NewAnonymizer(baseDomain, networks, secretsClient)
 }
 
@@ -177,7 +195,12 @@ func NewAnonymizerFromConfig(
 		return nil, err
 	}
 
-	return NewAnonymizerFromConfigClient(ctx, kubeClient, configClient)
+	networkClient, err := networkv1client.NewForConfig(kubeConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewAnonymizerFromConfigClient(ctx, kubeClient, configClient, networkClient)
 }
 
 // AnonymizeMemoryRecord takes record.MemoryRecord, removes the sensitive data from it and returns the same object
diff --git a/pkg/anonymization/anonymizer_test.go b/pkg/anonymization/anonymizer_test.go
index 21e620665..d9db2daa1 100644
--- a/pkg/anonymization/anonymizer_test.go
+++ b/pkg/anonymization/anonymizer_test.go
@@ -1,14 +1,19 @@
 package anonymization
 
 import (
+	"context"
 	"fmt"
 	"net"
 	"testing"
 
+	configv1 "github.com/openshift/api/config/v1"
+	networkv1 "github.com/openshift/api/network/v1"
+	configfake "github.com/openshift/client-go/config/clientset/versioned/fake"
+	networkfake "github.com/openshift/client-go/network/clientset/versioned/fake"
+	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-
-	"github.com/stretchr/testify/assert"
 	kubefake "k8s.io/client-go/kubernetes/fake"
 	corefake "k8s.io/client-go/kubernetes/typed/core/v1/fake"
 	clienttesting "k8s.io/client-go/testing"
@@ -241,3 +246,94 @@ func Test_Anonymizer_StoreTranslationTable(t *testing.T) {
 		assert.Equal(t, secret.StringData[fmt.Sprintf("192.168.0.%v", 255-i)], fmt.Sprintf("192.168.0.%v", i+1))
 	}
 }
+
+func TestAnonymizer_NewAnonymizerFromConfigClient(t *testing.T) {
+	const testClusterBaseDomain = "example.com"
+	localhostCIDR := "127.0.0.0/8"
+	_, localhostNet, err := net.ParseCIDR(localhostCIDR)
+	assert.NoError(t, err)
+	cidr1 := "55.44.0.0/16"
+	_, net1, err := net.ParseCIDR(cidr1)
+	assert.NoError(t, err)
+	cidr2 := "192.168.0.0/16"
+	_, net2, err := net.ParseCIDR(cidr2)
+	assert.NoError(t, err)
+	egressCIDR := "10.0.0.0/8"
+	_, egressNet, err := net.ParseCIDR(egressCIDR)
+	assert.NoError(t, err)
+	testNetworks := []subnetInformation{
+		{
+			network: *egressNet,
+			lastIP:  net.IPv4(10, 0, 0, 0),
+		},
+		{
+			network: *net1,
+			lastIP:  net.IPv4(55, 44, 0, 0),
+		},
+		{
+			network: *net2,
+			lastIP:  net.IPv4(192, 168, 0, 0),
+		},
+		{
+			network: *localhostNet,
+			lastIP:  net.IPv4(127, 0, 0, 0),
+		},
+	}
+
+	kubeClient := kubefake.NewSimpleClientset()
+	coreClient := kubeClient.CoreV1()
+	networkClient := networkfake.NewSimpleClientset().NetworkV1()
+	configClient := configfake.NewSimpleClientset().ConfigV1()
+	ctx := context.TODO()
+
+	// create fake resources
+	_, err = configClient.DNSes().Create(ctx, &configv1.DNS{
+		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+		Spec:       configv1.DNSSpec{BaseDomain: testClusterBaseDomain},
+	}, metav1.CreateOptions{})
+	assert.NoError(t, err)
+
+	_, err = configClient.Networks().Create(context.TODO(), &configv1.Network{
+		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
+		Spec: configv1.NetworkSpec{
+			ClusterNetwork: []configv1.ClusterNetworkEntry{{CIDR: cidr1}},
+			ServiceNetwork: []string{cidr2},
+			ExternalIP:     &configv1.ExternalIPConfig{Policy: &configv1.ExternalIPPolicy{}},
+		},
+	}, metav1.CreateOptions{})
+	assert.NoError(t, err)
+
+	_, err = coreClient.ConfigMaps("kube-system").Create(ctx, &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{Name: "cluster-config-v1"},
+	}, metav1.CreateOptions{})
+	assert.NoError(t, err)
+
+	_, err = networkClient.HostSubnets().Create(ctx, &networkv1.HostSubnet{
+		EgressCIDRs: []networkv1.HostSubnetEgressCIDR{networkv1.HostSubnetEgressCIDR(egressCIDR)},
+	}, metav1.CreateOptions{})
+	assert.NoError(t, err)
+
+	// test that everything was initialized correctly
+
+	anonymizer, err := NewAnonymizerFromConfigClient(
+		context.TODO(),
+		kubeClient,
+		configClient,
+		networkClient,
+	)
+	assert.NoError(t, err)
+	assert.NotNil(t, anonymizer)
+
+	assert.Equal(t, testClusterBaseDomain, anonymizer.clusterBaseDomain)
+	assert.Empty(t, anonymizer.translationTable)
+	assert.NotNil(t, anonymizer.ipNetworkRegex)
+	assert.NotNil(t, anonymizer.secretsClient)
+
+	assert.Equal(t, len(testNetworks), len(anonymizer.networks))
+	// the networks are already sorted in anonymizer
+	for i, subnetInfo := range anonymizer.networks {
+		expectedSubnetInfo := testNetworks[i]
+		assert.Equal(t, expectedSubnetInfo.network.Network(), subnetInfo.network.Network())
+		assert.Equal(t, expectedSubnetInfo.lastIP.String(), subnetInfo.lastIP.String())
+	}
+}