From 43012dafab5a7ab4a5ae2384add596b983541a84 Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Wed, 2 Feb 2022 14:26:21 +0200 Subject: [PATCH 1/9] masquerade: Don't configure IPv4 on single stack IPv6 - iptables/nftables - k6t-eth0 bridge gateway - dhcp configutation Signed-off-by: Alona Kaplan --- pkg/network/dhcp/masquerade.go | 17 +++- pkg/network/dhcp/masquerade_test.go | 83 +++++++++++----- pkg/network/driver/common.go | 65 ++++++++----- pkg/network/driver/generated_mock_common.go | 19 +++- pkg/network/infraconfigurators/masquerade.go | 50 ++++++---- .../infraconfigurators/masquerade_test.go | 95 +++++++++++-------- 6 files changed, 218 insertions(+), 111 deletions(-) diff --git a/pkg/network/dhcp/masquerade.go b/pkg/network/dhcp/masquerade.go index 1e3ddb16fdc4..0549a9ad1c70 100644 --- a/pkg/network/dhcp/masquerade.go +++ b/pkg/network/dhcp/masquerade.go @@ -48,15 +48,22 @@ func (d *MasqueradeConfigGenerator) Generate() (*cache.DHCPConfig, error) { dhcpConfig.Subdomain = d.subdomain dhcpConfig.Mtu = uint16(podNicLink.Attrs().MTU) - ipv4Gateway, ipv4, err := virtnetlink.GenerateMasqueradeGatewayAndVmIPAddrs(d.vmiSpecNetwork, iptables.ProtocolIPv4) + ipv4Enabled, err := d.handler.HasIPv4GlobalUnicastAddress(d.podInterfaceName) if err != nil { + log.Log.Reason(err).Errorf("failed to verify whether ipv4 is configured on %s", d.podInterfaceName) return nil, err } - dhcpConfig.IP = *ipv4 - dhcpConfig.AdvertisingIPAddr = ipv4Gateway.IP.To4() - dhcpConfig.Gateway = ipv4Gateway.IP.To4() + if ipv4Enabled { + ipv4Gateway, ipv4, err := virtnetlink.GenerateMasqueradeGatewayAndVmIPAddrs(d.vmiSpecNetwork, iptables.ProtocolIPv4) + if err != nil { + return nil, err + } + dhcpConfig.IP = *ipv4 + dhcpConfig.AdvertisingIPAddr = ipv4Gateway.IP.To4() + dhcpConfig.Gateway = ipv4Gateway.IP.To4() + } - ipv6Enabled, err := d.handler.IsIpv6Enabled(d.podInterfaceName) + ipv6Enabled, err := d.handler.HasIPv6GlobalUnicastAddress(d.podInterfaceName) if err != nil { 
log.Log.Reason(err).Errorf("failed to verify whether ipv6 is configured on %s", d.podInterfaceName) return nil, err diff --git a/pkg/network/dhcp/masquerade_test.go b/pkg/network/dhcp/masquerade_test.go index b6c4a6382023..279d0a381a33 100644 --- a/pkg/network/dhcp/masquerade_test.go +++ b/pkg/network/dhcp/masquerade_test.go @@ -65,16 +65,10 @@ var _ = Describe("Masquerade DHCP configurator", func() { expectedIpv6 = "fd10:0:2::2/120" ) - generateExpectedConfigIPv6Disabled := func(vmiSpecNetwork *v1.Network, macString *string, mtu int, ifaceName string, subdomain string) cache.DHCPConfig { - ipv4, _ := netlink.ParseAddr(expectedIpv4) - ipv4Gateway, _ := netlink.ParseAddr(expectedIpv4Gateway) - + generateExpectedConfig := func(vmiSpecNetwork *v1.Network, macString *string, mtu int, ifaceName string, subdomain string) cache.DHCPConfig { expectedConfig := cache.DHCPConfig{Name: ifaceName, - IP: *ipv4, - Mtu: uint16(mtu), - AdvertisingIPAddr: ipv4Gateway.IP.To4(), - Gateway: ipv4Gateway.IP.To4(), - Subdomain: subdomain, + Mtu: uint16(mtu), + Subdomain: subdomain, } if macString != nil { @@ -85,8 +79,20 @@ var _ = Describe("Masquerade DHCP configurator", func() { return expectedConfig } - generateExpectedConfigIPv6Enabled := func(vmiSpecNetwork *v1.Network, macString *string, mtu int, ifaceName string, subdomain string) cache.DHCPConfig { - expectedConfig := generateExpectedConfigIPv6Disabled(vmiSpecNetwork, macString, mtu, ifaceName, subdomain) + generateExpectedConfigOnlyIPv4Enabled := func(vmiSpecNetwork *v1.Network, macString *string, mtu int, ifaceName string, subdomain string) cache.DHCPConfig { + expectedConfig := generateExpectedConfig(vmiSpecNetwork, macString, mtu, ifaceName, subdomain) + ipv4, _ := netlink.ParseAddr(expectedIpv4) + ipv4Gateway, _ := netlink.ParseAddr(expectedIpv4Gateway) + + expectedConfig.IP = *ipv4 + expectedConfig.AdvertisingIPAddr = ipv4Gateway.IP.To4() + expectedConfig.Gateway = ipv4Gateway.IP.To4() + + return expectedConfig + } + + 
generateExpectedConfigOnlyIPv6Enabled := func(vmiSpecNetwork *v1.Network, macString *string, mtu int, ifaceName string, subdomain string) cache.DHCPConfig { + expectedConfig := generateExpectedConfig(vmiSpecNetwork, macString, mtu, ifaceName, subdomain) ipv6, _ := netlink.ParseAddr(expectedIpv6) ipv6Gateway, _ := netlink.ParseAddr(expectedIpv6Gateway) @@ -96,6 +102,16 @@ var _ = Describe("Masquerade DHCP configurator", func() { return expectedConfig } + generateExpectedConfigOnlyIPv4AndIPv6Enabled := func(vmiSpecNetwork *v1.Network, macString *string, mtu int, ifaceName string, subdomain string) cache.DHCPConfig { + expectedConfig := generateExpectedConfigOnlyIPv4Enabled(vmiSpecNetwork, macString, mtu, ifaceName, subdomain) + ipv6ExpectedConfig := generateExpectedConfigOnlyIPv6Enabled(vmiSpecNetwork, macString, mtu, ifaceName, subdomain) + + expectedConfig.IPv6 = ipv6ExpectedConfig.IPv6 + expectedConfig.AdvertisingIPv6Addr = ipv6ExpectedConfig.AdvertisingIPv6Addr + + return expectedConfig + } + BeforeEach(func() { vmiSpecNetwork = v1.DefaultPodNetwork() vmiSpecIface = &v1.Interface{Name: "default", InterfaceBindingMethod: v1.InterfaceBindingMethod{Masquerade: &v1.InterfaceMasquerade{}}} @@ -117,34 +133,53 @@ var _ = Describe("Masquerade DHCP configurator", func() { mockHandler.EXPECT().LinkByName(ifaceName).Return(iface, nil) }) - When("IPv6 is enabled", func() { + When("Only Ipv4 is enabled", func() { BeforeEach(func() { - mockHandler.EXPECT().IsIpv6Enabled(ifaceName).Return(true, nil) + mockHandler.EXPECT().HasIPv4GlobalUnicastAddress(ifaceName).Return(true, nil) + mockHandler.EXPECT().HasIPv6GlobalUnicastAddress(ifaceName).Return(false, nil) }) - It("Should return the dhcp configuration", func() { + It("Should return the dhcp configuration with IPv4 only", func() { config, err := generator.Generate() Expect(err).ToNot(HaveOccurred()) - Expect(*config).To(Equal(generateExpectedConfigIPv6Enabled(vmiSpecNetwork, nil, mtu, ifaceName, subdomain))) + 
Expect(*config).To(Equal(generateExpectedConfigOnlyIPv4Enabled(vmiSpecNetwork, nil, mtu, ifaceName, subdomain))) }) }) - When("IPv6 is disabled", func() { + When("Only IPv6 is enabled", func() { BeforeEach(func() { - mockHandler.EXPECT().IsIpv6Enabled(ifaceName).Return(false, nil) + mockHandler.EXPECT().HasIPv4GlobalUnicastAddress(ifaceName).Return(false, nil) + mockHandler.EXPECT().HasIPv6GlobalUnicastAddress(ifaceName).Return(true, nil) }) - It("Should return the dhcp configuration without IPv6", func() { + It("Should return the dhcp configuration with IPv6 only", func() { config, err := generator.Generate() Expect(err).ToNot(HaveOccurred()) - Expect(*config).To(Equal(generateExpectedConfigIPv6Disabled(vmiSpecNetwork, nil, mtu, ifaceName, subdomain))) + Expect(*config).To(Equal(generateExpectedConfigOnlyIPv6Enabled(vmiSpecNetwork, nil, mtu, ifaceName, subdomain))) }) }) - It("Should return an error if the config discovering fails", func() { - vmiSpecNetwork.Pod.VMNetworkCIDR = "abc" + When("Both Ipv4 and IPv6 are enabled", func() { + BeforeEach(func() { + mockHandler.EXPECT().HasIPv4GlobalUnicastAddress(ifaceName).Return(true, nil) + mockHandler.EXPECT().HasIPv6GlobalUnicastAddress(ifaceName).Return(true, nil) + }) + It("Should return the dhcp configuration with both IPv4 and IPv6", func() { + config, err := generator.Generate() + Expect(err).ToNot(HaveOccurred()) + Expect(*config).To(Equal(generateExpectedConfigOnlyIPv4AndIPv6Enabled(vmiSpecNetwork, nil, mtu, ifaceName, subdomain))) + }) + }) + + When("Config discovering fails", func() { + BeforeEach(func() { + mockHandler.EXPECT().HasIPv4GlobalUnicastAddress(ifaceName).Return(true, nil) + }) + It("Should return an error", func() { + vmiSpecNetwork.Pod.VMNetworkCIDR = "abc" - config, err := generator.Generate() - Expect(err).To(HaveOccurred()) - Expect(config).To(BeNil()) + config, err := generator.Generate() + Expect(err).To(HaveOccurred()) + Expect(config).To(BeNil()) + }) }) }) }) diff --git 
a/pkg/network/driver/common.go b/pkg/network/driver/common.go index f4ba17910b36..c96927100b59 100644 --- a/pkg/network/driver/common.go +++ b/pkg/network/driver/common.go @@ -72,7 +72,8 @@ type NetworkHandler interface { LinkSetMaster(link netlink.Link, master *netlink.Bridge) error StartDHCP(nic *cache.DHCPConfig, bridgeInterfaceName string, dhcpOptions *v1.DHCPOptions) error HasNatIptables(proto iptables.Protocol) bool - IsIpv6Enabled(interfaceName string) (bool, error) + HasIPv4GlobalUnicastAddress(interfaceName string) (bool, error) + HasIPv6GlobalUnicastAddress(interfaceName string) (bool, error) IsIpv4Primary() (bool, error) ConfigureIpForwarding(proto iptables.Protocol) error ConfigureIpv4ArpIgnore() error @@ -161,7 +162,25 @@ func (h *NetworkUtilsHandler) ConfigureIpForwarding(proto iptables.Protocol) err return err } -func (h *NetworkUtilsHandler) IsIpv6Enabled(interfaceName string) (bool, error) { +func (h *NetworkUtilsHandler) HasIPv4GlobalUnicastAddress(interfaceName string) (bool, error) { + link, err := h.LinkByName(interfaceName) + if err != nil { + return false, err + } + addrList, err := h.AddrList(link, netlink.FAMILY_V4) + if err != nil { + return false, err + } + + for _, addr := range addrList { + if addr.IP.IsGlobalUnicast() { + return true, nil + } + } + return false, nil +} + +func (h *NetworkUtilsHandler) HasIPv6GlobalUnicastAddress(interfaceName string) (bool, error) { link, err := h.LinkByName(interfaceName) if err != nil { return false, err @@ -349,26 +368,28 @@ func (h *NetworkUtilsHandler) StartDHCP(nic *cache.DHCPConfig, bridgeInterfaceNa searchDomains = append([]string{domain}, searchDomains...) 
} - // panic in case the DHCP server failed during the vm creation - // but ignore dhcp errors when the vm is destroyed or shutting down - go func() { - if err = DHCPServer( - nic.MAC, - nic.IP.IP, - nic.IP.Mask, - bridgeInterfaceName, - nic.AdvertisingIPAddr, - nic.Gateway, - nameservers, - nic.Routes, - searchDomains, - nic.Mtu, - dhcpOptions, - ); err != nil { - log.Log.Errorf("failed to run DHCP: %v", err) - panic(err) - } - }() + if nic.IP.IPNet != nil { + // panic in case the DHCP server failed during the vm creation + // but ignore dhcp errors when the vm is destroyed or shutting down + go func() { + if err = DHCPServer( + nic.MAC, + nic.IP.IP, + nic.IP.Mask, + bridgeInterfaceName, + nic.AdvertisingIPAddr, + nic.Gateway, + nameservers, + nic.Routes, + searchDomains, + nic.Mtu, + dhcpOptions, + ); err != nil { + log.Log.Errorf("failed to run DHCP: %v", err) + panic(err) + } + }() + } if nic.IPv6.IPNet != nil { go func() { diff --git a/pkg/network/driver/generated_mock_common.go b/pkg/network/driver/generated_mock_common.go index 12e9cf36408e..1de3c5f9f282 100644 --- a/pkg/network/driver/generated_mock_common.go +++ b/pkg/network/driver/generated_mock_common.go @@ -223,15 +223,26 @@ func (_mr *_MockNetworkHandlerRecorder) HasNatIptables(arg0 interface{}) *gomock return _mr.mock.ctrl.RecordCall(_mr.mock, "HasNatIptables", arg0) } -func (_m *MockNetworkHandler) IsIpv6Enabled(interfaceName string) (bool, error) { - ret := _m.ctrl.Call(_m, "IsIpv6Enabled", interfaceName) +func (_m *MockNetworkHandler) HasIPv4GlobalUnicastAddress(interfaceName string) (bool, error) { + ret := _m.ctrl.Call(_m, "HasIPv4GlobalUnicastAddress", interfaceName) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } -func (_mr *_MockNetworkHandlerRecorder) IsIpv6Enabled(arg0 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "IsIpv6Enabled", arg0) +func (_mr *_MockNetworkHandlerRecorder) HasIPv4GlobalUnicastAddress(arg0 interface{}) *gomock.Call { + 
return _mr.mock.ctrl.RecordCall(_mr.mock, "HasIPv4GlobalUnicastAddress", arg0) +} + +func (_m *MockNetworkHandler) HasIPv6GlobalUnicastAddress(interfaceName string) (bool, error) { + ret := _m.ctrl.Call(_m, "HasIPv6GlobalUnicastAddress", interfaceName) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockNetworkHandlerRecorder) HasIPv6GlobalUnicastAddress(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "HasIPv6GlobalUnicastAddress", arg0) } func (_m *MockNetworkHandler) IsIpv4Primary() (bool, error) { diff --git a/pkg/network/infraconfigurators/masquerade.go b/pkg/network/infraconfigurators/masquerade.go index 9d2cba4e537c..319c972f2772 100644 --- a/pkg/network/infraconfigurators/masquerade.go +++ b/pkg/network/infraconfigurators/masquerade.go @@ -21,7 +21,7 @@ import ( ) const ( - ipVerifyFailFmt = "failed to verify whether ipv6 is configured on %s" + ipVerifyFailFmt = "failed to verify whether ipv%s is configured on %s" toDest = "--to-destination" src = "--source" dport = "--dport" @@ -63,17 +63,24 @@ func (b *MasqueradePodNetworkConfigurator) DiscoverPodNetworkInterface(podIfaceN } b.podNicLink = link - if err := b.computeIPv4GatewayAndVmIp(); err != nil { + ipv4Enabled, err := b.handler.HasIPv4GlobalUnicastAddress(podIfaceName) + if err != nil { + log.Log.Reason(err).Errorf(ipVerifyFailFmt, "4", podIfaceName) return err } + if ipv4Enabled { + if err := b.computeIPv4GatewayAndVmIp(); err != nil { + return err + } + } - ipv6Enabled, err := b.handler.IsIpv6Enabled(podIfaceName) + ipv6Enabled, err := b.handler.HasIPv6GlobalUnicastAddress(podIfaceName) if err != nil { - log.Log.Reason(err).Errorf(ipVerifyFailFmt, podIfaceName) + log.Log.Reason(err).Errorf(ipVerifyFailFmt, "6", podIfaceName) return err } if ipv6Enabled { - if err := b.discoverIPv6GatewayAndVmIp(); err != nil { + if err := b.computeIPv6GatewayAndVmIp(); err != nil { return err } } @@ -92,7 +99,7 @@ func (b 
*MasqueradePodNetworkConfigurator) computeIPv4GatewayAndVmIp() error { return nil } -func (b *MasqueradePodNetworkConfigurator) discoverIPv6GatewayAndVmIp() error { +func (b *MasqueradePodNetworkConfigurator) computeIPv6GatewayAndVmIp() error { ipv6Gateway, ipv6, err := virtnetlink.GenerateMasqueradeGatewayAndVmIPAddrs(b.vmiSpecNetwork, iptables.ProtocolIPv6) if err != nil { return err @@ -122,15 +129,22 @@ func (b *MasqueradePodNetworkConfigurator) PreparePodNetworkInterface() error { return err } - err = b.createNatRules(iptables.ProtocolIPv4) + ipv4Enabled, err := b.handler.HasIPv4GlobalUnicastAddress(b.podNicLink.Attrs().Name) if err != nil { - log.Log.Reason(err).Errorf("failed to create ipv4 nat rules for vm error: %v", err) + log.Log.Reason(err).Errorf(ipVerifyFailFmt, "4", b.podNicLink.Attrs().Name) return err } + if ipv4Enabled { + err = b.createNatRules(iptables.ProtocolIPv4) + if err != nil { + log.Log.Reason(err).Errorf("failed to create ipv4 nat rules for vm error: %v", err) + return err + } + } - ipv6Enabled, err := b.handler.IsIpv6Enabled(b.podNicLink.Attrs().Name) + ipv6Enabled, err := b.handler.HasIPv6GlobalUnicastAddress(b.podNicLink.Attrs().Name) if err != nil { - log.Log.Reason(err).Errorf(ipVerifyFailFmt, b.podNicLink.Attrs().Name) + log.Log.Reason(err).Errorf(ipVerifyFailFmt, "6", b.podNicLink.Attrs().Name) return err } if ipv6Enabled { @@ -172,16 +186,14 @@ func (b *MasqueradePodNetworkConfigurator) createBridge() error { return err } - if err := b.handler.AddrAdd(bridge, b.vmGatewayAddr); err != nil { - log.Log.Reason(err).Errorf("failed to set bridge IP") - return err - } - ipv6Enabled, err := b.handler.IsIpv6Enabled(b.podNicLink.Attrs().Name) - if err != nil { - log.Log.Reason(err).Errorf(ipVerifyFailFmt, b.podNicLink.Attrs().Name) - return err + if b.vmGatewayAddr != nil { + if err := b.handler.AddrAdd(bridge, b.vmGatewayAddr); err != nil { + log.Log.Reason(err).Errorf("failed to set bridge IP") + return err + } } - if ipv6Enabled { + + 
if b.vmGatewayIpv6Addr != nil { if err := b.handler.AddrAdd(bridge, b.vmGatewayIpv6Addr); err != nil { log.Log.Reason(err).Errorf("failed to set bridge IPv6") return err diff --git a/pkg/network/infraconfigurators/masquerade_test.go b/pkg/network/infraconfigurators/masquerade_test.go index 99ace196e0bb..b39d9e930753 100644 --- a/pkg/network/infraconfigurators/masquerade_test.go +++ b/pkg/network/infraconfigurators/masquerade_test.go @@ -129,7 +129,8 @@ var _ = Describe("Masquerade infrastructure configurator", func() { When("the pod interface has an IPv4 address", func() { When("and is missing an IPv6 address", func() { BeforeEach(func() { - handler.EXPECT().IsIpv6Enabled(ifaceName).Return(false, nil) + handler.EXPECT().HasIPv4GlobalUnicastAddress(ifaceName).Return(true, nil) + handler.EXPECT().HasIPv6GlobalUnicastAddress(ifaceName).Return(false, nil) }) It("should succeed discovering the pod link info", func() { @@ -144,7 +145,8 @@ var _ = Describe("Masquerade infrastructure configurator", func() { When("and we fail to understand if there's an IPv6 configuration", func() { BeforeEach(func() { - handler.EXPECT().IsIpv6Enabled(ifaceName).Return(true, fmt.Errorf("failed to check pod's IPv6 configuration")) + handler.EXPECT().HasIPv4GlobalUnicastAddress(ifaceName).Return(true, nil) + handler.EXPECT().HasIPv6GlobalUnicastAddress(ifaceName).Return(true, fmt.Errorf("failed to check pod's IPv6 configuration")) }) It("should fail to discover the pod's link information", func() { @@ -155,7 +157,8 @@ var _ = Describe("Masquerade infrastructure configurator", func() { When("the pod interface has both IPv4 and IPv6 addresses", func() { BeforeEach(func() { - handler.EXPECT().IsIpv6Enabled(ifaceName).Return(true, nil) + handler.EXPECT().HasIPv4GlobalUnicastAddress(ifaceName).Return(true, nil) + handler.EXPECT().HasIPv6GlobalUnicastAddress(ifaceName).Return(true, nil) }) It("should succeed reading the pod link info", func() { @@ -218,8 +221,8 @@ var _ = Describe("Masquerade 
infrastructure configurator", func() { dhcpConfig = expectedDhcpConfig(ifaceName, podIP, *gatewayAddr, vmIPv6Str, ipv6GwStr, mtu) }) - When("the pod features a properly configured primary link", func() { - table.DescribeTable("should work with", func(vmi *v1.VirtualMachineInstance, mockNetfilterFrontendFunc mockNetfilterFrontend, additionalIPProtocol ...iptables.Protocol) { + When("the pod features a properly configured primary link", func() { // TODO add single stack ipv6 + table.DescribeTable("should work with", func(vmi *v1.VirtualMachineInstance, mockNetfilterFrontendFunc mockNetfilterFrontend, ipProtocols []iptables.Protocol) { masqueradeConfigurator := newMockedMasqueradeConfigurator( vmi, &vmi.Spec.Domain.Devices.Interfaces[0], @@ -233,55 +236,62 @@ var _ = Describe("Masquerade infrastructure configurator", func() { *podIPv6, *gatewayIPv6Addr) mockCreateMasqueradeInfraCreation(handler, inPodBridge, tapDeviceName, queueCount, launcherPID, mtu) - mockVML3Config(*masqueradeConfigurator, ifaceName, inPodBridge, additionalIPProtocol...) - mockNATNetfilterRules(*masqueradeConfigurator, *dhcpConfig, mockNetfilterFrontendFunc, additionalIPProtocol...) 
+ mockVML3Config(masqueradeConfigurator, ifaceName, inPodBridge, ipProtocols) + mockNATNetfilterRules(*masqueradeConfigurator, *dhcpConfig, mockNetfilterFrontendFunc, ipProtocols) Expect(masqueradeConfigurator.PreparePodNetworkInterface()).To(Succeed()) }, table.Entry("NFTables backend on an IPv4 cluster", newVMIMasqueradeInterface(namespace, vmName), - mockNetfilterNFTables), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv4}), table.Entry("IPTables backend on an IPv4 cluster", newVMIMasqueradeInterface(namespace, vmName), - mockNetfilterIPTables), + mockNetfilterIPTables, + []iptables.Protocol{iptables.ProtocolIPv4}), table.Entry("NFTables backend on an IPv4 cluster when specific ports are specified", newVMIMasqueradeInterface(namespace, vmName, 15000, 18000), - mockNetfilterNFTables), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv4}), table.Entry("IPTables backend on an IPv4 cluster when specific ports are specified", newVMIMasqueradeInterface(namespace, vmName, 15000, 18000), - mockNetfilterIPTables), + mockNetfilterIPTables, + []iptables.Protocol{iptables.ProtocolIPv4}), table.Entry("NFTables backend on an IPv4 cluster when *reserved* ports are specified", newVMIMasqueradeInterface(namespace, vmName, getReservedPortList(migrationOverTCP)...), - mockNetfilterNFTables), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv4}), table.Entry("NFTables backend on an IPv4 cluster when using an ISTIO aware VMI", newIstioAwareVMIWithSingleInterface(namespace, vmName), - mockNetfilterNFTables), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv4}), table.Entry("NFTables backend on a dual stack cluster", newVMIMasqueradeInterface(namespace, vmName), mockNetfilterNFTables, - iptables.ProtocolIPv6), + []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6}), table.Entry("IPTables backend on a dual stack cluster", newVMIMasqueradeInterface(namespace, vmName), mockNetfilterIPTables, - 
iptables.ProtocolIPv6), + []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6}), table.Entry("NFTables backend on a dual stack cluster when specific ports are specified", newVMIMasqueradeInterface(namespace, vmName, 15000, 18000), mockNetfilterNFTables, - iptables.ProtocolIPv6), + []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6}), table.Entry("IPTables backend on a dual stack cluster when specific ports are specified", newVMIMasqueradeInterface(namespace, vmName, 15000, 18000), mockNetfilterIPTables, - iptables.ProtocolIPv6), + []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6}), table.Entry("NFTables backend on a dual stack cluster when *reserved* ports are specified", newVMIMasqueradeInterface(namespace, vmName, getReservedPortList(migrationOverTCP)...), mockNetfilterNFTables, - iptables.ProtocolIPv6), + []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6}), table.Entry("NFTables backend on a dual stack cluster when using an ISTIO aware VMI", newIstioAwareVMIWithSingleInterface(namespace, vmName), mockNetfilterNFTables, - iptables.ProtocolIPv6), + []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6}), table.Entry("NFTables backend on an IPv4 cluster with migration over sockets", newVMIMasqueradeMigrateOverSockets(namespace, vmName, getReservedPortList(!migrationOverTCP)...), - mockNetfilterNFTables), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv4}), ) }) }) @@ -345,22 +355,35 @@ func mockCreateMasqueradeInfraCreation(handler *netdriver.MockNetworkHandler, br handler.EXPECT().BindTapDeviceToBridge(tapName, bridge.Name).Return(nil) } -func mockVML3Config(configurator MasqueradePodNetworkConfigurator, podIface string, inPodBridge *netlink.Bridge, optionalIPProtocol ...iptables.Protocol) { - protos := protocols(optionalIPProtocol...) 
- hasIPv6Config := len(protos) > 1 +func mockVML3Config(configurator *MasqueradePodNetworkConfigurator, podIface string, inPodBridge *netlink.Bridge, ipProtocols []iptables.Protocol) { mockedHandler := configurator.handler.(*netdriver.MockNetworkHandler) - mockedHandler.EXPECT().IsIpv6Enabled(podIface).Return(hasIPv6Config, nil).Times(2) // once on create bridge, another on prepare pod network - for _, l3Protocol := range protos { - gatewayAddr := configurator.vmGatewayAddr + var gatewayAddr *netlink.Addr + var gatewayIPv6Addr *netlink.Addr + for _, l3Protocol := range ipProtocols { + if l3Protocol == iptables.ProtocolIPv4 { + gatewayAddr = configurator.vmGatewayAddr + } if l3Protocol == iptables.ProtocolIPv6 { - gatewayAddr = configurator.vmGatewayIpv6Addr + gatewayIPv6Addr = configurator.vmGatewayIpv6Addr } + } + mockedHandler.EXPECT().HasIPv4GlobalUnicastAddress(podIface).Return(gatewayAddr != nil, nil) + mockedHandler.EXPECT().HasIPv6GlobalUnicastAddress(podIface).Return(gatewayIPv6Addr != nil, nil) + + if gatewayAddr != nil { mockedHandler.EXPECT().AddrAdd(inPodBridge, gatewayAddr).Return(nil) + } else { + configurator.vmGatewayAddr = nil + } + if gatewayIPv6Addr != nil { + mockedHandler.EXPECT().AddrAdd(inPodBridge, gatewayIPv6Addr).Return(nil) + } else { + configurator.vmGatewayIpv6Addr = nil } } -func mockNATNetfilterRules(configurator MasqueradePodNetworkConfigurator, dhcpConfig cache.DHCPConfig, mockFrontendFunc mockNetfilterFrontend, optionalIPProtocol ...iptables.Protocol) { +func mockNATNetfilterRules(configurator MasqueradePodNetworkConfigurator, dhcpConfig cache.DHCPConfig, mockFrontendFunc mockNetfilterFrontend, ipProtocols []iptables.Protocol) { getNFTIPString := func(proto iptables.Protocol) string { ipString := "ip" if proto == iptables.ProtocolIPv6 { @@ -372,9 +395,13 @@ func mockNATNetfilterRules(configurator MasqueradePodNetworkConfigurator, dhcpCo handler := configurator.handler.(*netdriver.MockNetworkHandler) portList := 
getVMPrimaryInterfacePortList(*configurator.vmi) isMigrationOverSockets := configurator.vmi.Status.MigrationTransport == v1.MigrationTransportUnix - for _, proto := range protocols(optionalIPProtocol...) { - vmIP := dhcpConfig.IP.IP.String() - gwIP := dhcpConfig.AdvertisingIPAddr.String() + for _, proto := range ipProtocols { + var vmIP, gwIP string + if proto == iptables.ProtocolIPv4 { + vmIP = dhcpConfig.IP.IP.String() + gwIP = dhcpConfig.AdvertisingIPAddr.String() + } + if proto == iptables.ProtocolIPv6 { vmIP = dhcpConfig.IPv6.IP.String() gwIP = dhcpConfig.AdvertisingIPv6Addr.String() @@ -580,12 +607,6 @@ func mockIstioNetfilterCalls(handler *netdriver.MockNetworkHandler, proto iptabl "counter", "dnat", "to", vmIP).Return(nil).Times(0) } -func protocols(optionalIPProtocol ...iptables.Protocol) []iptables.Protocol { - return append( - []iptables.Protocol{iptables.ProtocolIPv4}, - optionalIPProtocol...) -} - func getReservedPortList(isMigrationOverSockets bool) []int { var portList []int for _, port := range portsUsedByLiveMigration(isMigrationOverSockets) { From af5e2ba8093c383b4f30ea56a749a7cfd18d24a8 Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Sun, 6 Feb 2022 12:37:02 +0200 Subject: [PATCH 2/9] masquerade: single stack ipv6 unit tests Add single stack ipv6 unit tests to masquerade. 
Signed-off-by: Alona Kaplan --- .../infraconfigurators/masquerade_test.go | 58 ++++++++++++++++++- 1 file changed, 55 insertions(+), 3 deletions(-) diff --git a/pkg/network/infraconfigurators/masquerade_test.go b/pkg/network/infraconfigurators/masquerade_test.go index b39d9e930753..fb4e8465927a 100644 --- a/pkg/network/infraconfigurators/masquerade_test.go +++ b/pkg/network/infraconfigurators/masquerade_test.go @@ -140,6 +140,7 @@ var _ = Describe("Masquerade infrastructure configurator", func() { Expect(masqueradeConfigurator.vmGatewayAddr).To(Equal(expectedGwIP)) expectedVMIP, _ := netlink.ParseAddr(expectedVMInternalIPStr) Expect(masqueradeConfigurator.vmIPv4Addr).To(Equal(*expectedVMIP)) + Expect(masqueradeConfigurator.vmGatewayIpv6Addr).To(BeNil()) }) }) @@ -174,6 +175,25 @@ var _ = Describe("Masquerade infrastructure configurator", func() { Expect(masqueradeConfigurator.vmIPv6Addr).To(Equal(*expectedVMIPv6)) }) }) + + When("the pod interface has an IPv6 address", func() { + When("and is missing an IPv4 address", func() { + BeforeEach(func() { + handler.EXPECT().HasIPv4GlobalUnicastAddress(ifaceName).Return(false, nil) + handler.EXPECT().HasIPv6GlobalUnicastAddress(ifaceName).Return(true, nil) + }) + + It("should succeed discovering the pod link info", func() { + Expect(masqueradeConfigurator.DiscoverPodNetworkInterface(ifaceName)).To(Succeed()) + Expect(masqueradeConfigurator.podNicLink).To(Equal(podLink)) + expectedGwIPv6, _ := netlink.ParseAddr(expectedVMGatewayIPv6Str) + Expect(masqueradeConfigurator.vmGatewayIpv6Addr).To(Equal(expectedGwIPv6)) + expectedVMIPv6, _ := netlink.ParseAddr(expectedVMInternalIPv6Str) + Expect(masqueradeConfigurator.vmIPv6Addr).To(Equal(*expectedVMIPv6)) + Expect(masqueradeConfigurator.vmGatewayAddr).To(BeNil()) + }) + }) + }) }) When("the pod link information cannot be retrieved", func() { @@ -221,7 +241,7 @@ var _ = Describe("Masquerade infrastructure configurator", func() { dhcpConfig = expectedDhcpConfig(ifaceName, podIP, 
*gatewayAddr, vmIPv6Str, ipv6GwStr, mtu) }) - When("the pod features a properly configured primary link", func() { // TODO add single stack ipv6 + When("the pod features a properly configured primary link", func() { table.DescribeTable("should work with", func(vmi *v1.VirtualMachineInstance, mockNetfilterFrontendFunc mockNetfilterFrontend, ipProtocols []iptables.Protocol) { masqueradeConfigurator := newMockedMasqueradeConfigurator( vmi, @@ -264,6 +284,10 @@ var _ = Describe("Masquerade infrastructure configurator", func() { newIstioAwareVMIWithSingleInterface(namespace, vmName), mockNetfilterNFTables, []iptables.Protocol{iptables.ProtocolIPv4}), + table.Entry("NFTables backend on an IPv4 cluster with migration over sockets", + newVMIMasqueradeMigrateOverSockets(namespace, vmName, getReservedPortList(!migrationOverTCP)...), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv4}), table.Entry("NFTables backend on a dual stack cluster", newVMIMasqueradeInterface(namespace, vmName), mockNetfilterNFTables, @@ -288,10 +312,38 @@ var _ = Describe("Masquerade infrastructure configurator", func() { newIstioAwareVMIWithSingleInterface(namespace, vmName), mockNetfilterNFTables, []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6}), - table.Entry("NFTables backend on an IPv4 cluster with migration over sockets", + table.Entry("NFTables backend on a dual stack cluster with migration over sockets", newVMIMasqueradeMigrateOverSockets(namespace, vmName, getReservedPortList(!migrationOverTCP)...), mockNetfilterNFTables, - []iptables.Protocol{iptables.ProtocolIPv4}), + []iptables.Protocol{iptables.ProtocolIPv4, iptables.ProtocolIPv6}), + table.Entry("NFTables backend on an IPv6 cluster", + newVMIMasqueradeInterface(namespace, vmName), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv6}), + table.Entry("IPTables backend on an IPv6 cluster", + newVMIMasqueradeInterface(namespace, vmName), + mockNetfilterIPTables, + 
[]iptables.Protocol{iptables.ProtocolIPv6}), + table.Entry("NFTables backend on an IPv6 cluster when specific ports are specified", + newVMIMasqueradeInterface(namespace, vmName, 15000, 18000), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv6}), + table.Entry("IPTables backend on an IPv6 cluster when specific ports are specified", + newVMIMasqueradeInterface(namespace, vmName, 15000, 18000), + mockNetfilterIPTables, + []iptables.Protocol{iptables.ProtocolIPv6}), + table.Entry("NFTables backend on an IPv6 cluster when *reserved* ports are specified", + newVMIMasqueradeInterface(namespace, vmName, getReservedPortList(migrationOverTCP)...), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv6}), + table.Entry("NFTables backend on an IPv6 cluster when using an ISTIO aware VMI", + newIstioAwareVMIWithSingleInterface(namespace, vmName), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv6}), + table.Entry("NFTables backend on an IPv6 cluster with migration over sockets", + newVMIMasqueradeMigrateOverSockets(namespace, vmName, getReservedPortList(!migrationOverTCP)...), + mockNetfilterNFTables, + []iptables.Protocol{iptables.ProtocolIPv6}), ) }) }) From 28170c5908b92846578aad9524546cd818a27afd Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Tue, 1 Feb 2022 14:10:22 +0200 Subject: [PATCH 3/9] tests, single stack ipv6: Add skips for clusters with no ipv4 or ipv6 Skip ipv4 tests for cluster with no ipv4. Skip ipv6 tests for cluster with no ipv6. Skip dual stack test for cluster that doesn't support both ipv4 and ipv6. 
Signed-off-by: Alona Kaplan --- tests/libnet/cluster.go | 28 ++++++++++++++++++++++------ tests/libnet/ipv6.go | 4 ++-- tests/libnet/skips.go | 25 +++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 8 deletions(-) diff --git a/tests/libnet/cluster.go b/tests/libnet/cluster.go index 97be6059eda8..2da8986595ca 100644 --- a/tests/libnet/cluster.go +++ b/tests/libnet/cluster.go @@ -14,6 +14,27 @@ import ( ) func IsClusterDualStack(virtClient kubecli.KubevirtClient) (bool, error) { + supportsIpv4, err := ClusterSupportsIpv4(virtClient) + if err != nil { + return false, err + } + + supportsIpv6, err := ClusterSupportsIpv6(virtClient) + if err != nil { + return false, err + } + return supportsIpv4 && supportsIpv6, nil +} + +func ClusterSupportsIpv4(virtClient kubecli.KubevirtClient) (bool, error) { + return clusterAnswersIpCondition(virtClient, netutils.IsIPv4String) +} + +func ClusterSupportsIpv6(virtClient kubecli.KubevirtClient) (bool, error) { + return clusterAnswersIpCondition(virtClient, netutils.IsIPv6String) +} + +func clusterAnswersIpCondition(virtClient kubecli.KubevirtClient, ipCondition func(ip string) bool) (bool, error) { // grab us some neat kubevirt pod; let's say virt-handler is our target. 
targetPodType := "virt-handler" virtHandlerPod, err := getPodByKubeVirtRole(virtClient, targetPodType) @@ -21,13 +42,8 @@ func IsClusterDualStack(virtClient kubecli.KubevirtClient) (bool, error) { return false, err } - hasMultipleIPAddrs := len(virtHandlerPod.Status.PodIPs) > 1 - if !hasMultipleIPAddrs { - return false, nil - } - for _, ip := range virtHandlerPod.Status.PodIPs { - if netutils.IsIPv6String(ip.IP) { + if ipCondition(ip.IP) { return true, nil } } diff --git a/tests/libnet/ipv6.go b/tests/libnet/ipv6.go index 1560d2cc3de2..0a4945e13059 100644 --- a/tests/libnet/ipv6.go +++ b/tests/libnet/ipv6.go @@ -26,12 +26,12 @@ func configureIPv6OnVMI(vmi *v1.VirtualMachineInstance) error { panic(err) } - isClusterDualStack, err := IsClusterDualStack(virtClient) + clusterSupportsIpv6, err := ClusterSupportsIpv6(virtClient) if err != nil { return err } - if !isClusterDualStack || + if !clusterSupportsIpv6 || (vmi.Spec.Domain.Devices.Interfaces == nil || len(vmi.Spec.Domain.Devices.Interfaces) == 0 || vmi.Spec.Domain.Devices.Interfaces[0].InterfaceBindingMethod.Masquerade == nil) || (vmi.Spec.Domain.Devices.AutoattachPodInterface != nil && !*vmi.Spec.Domain.Devices.AutoattachPodInterface) || (!hasEth0Iface() || hasGlobalIPv6()) { diff --git a/tests/libnet/skips.go b/tests/libnet/skips.go index 4c265007ffca..63efbeb1be01 100644 --- a/tests/libnet/skips.go +++ b/tests/libnet/skips.go @@ -3,6 +3,7 @@ package libnet import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + k8sv1 "k8s.io/api/core/v1" "kubevirt.io/client-go/kubecli" ) @@ -14,3 +15,27 @@ func SkipWhenNotDualStackCluster(virtClient kubecli.KubevirtClient) { Skip("This test requires a dual stack network config.") } } + +func SkipWhenClusterNotSupportIpv4(virtClient kubecli.KubevirtClient) { + clusterSupportsIpv4, err := ClusterSupportsIpv4(virtClient) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "should have been able to infer if the cluster supports ipv4") + if !clusterSupportsIpv4 { + Skip("This test requires an ipv4 network config.") + } +} + +func SkipWhenClusterNotSupportIpv6(virtClient kubecli.KubevirtClient) { + clusterSupportsIpv6, err := ClusterSupportsIpv6(virtClient) + ExpectWithOffset(1, err).NotTo(HaveOccurred(), "should have been able to infer if the cluster supports ipv6") + if !clusterSupportsIpv6 { + Skip("This test requires an ipv6 network config.") + } +} + +func SkipWhenClusterNotSupportIpFamily(virtClient kubecli.KubevirtClient, ipFamily k8sv1.IPFamily) { + if ipFamily == k8sv1.IPv4Protocol { + SkipWhenClusterNotSupportIpv4(virtClient) + } else { + SkipWhenClusterNotSupportIpv6(virtClient) + } +} From 34a61add6d682fe3aaa736f85f414a9384d7f192 Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Sun, 6 Feb 2022 14:27:20 +0200 Subject: [PATCH 4/9] tests, single stack ipv6: skip tests that are not supported with no RA Skip tests that are not supported until RA is introduced - issue- https://github.com/kubevirt/kubevirt/issues/7184 Signed-off-by: Alona Kaplan --- tests/network/vmi_subdomain.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/network/vmi_subdomain.go b/tests/network/vmi_subdomain.go index d9716c1e4797..80f0d8fb54a6 100644 --- a/tests/network/vmi_subdomain.go +++ b/tests/network/vmi_subdomain.go @@ -55,6 +55,9 @@ var _ = SIGDescribe("Subdomain", func() { virtClient, err = kubecli.GetKubevirtClient() Expect(err).NotTo(HaveOccurred(), "Should successfully initialize an API client") + // Should be skipped 
as long as masquerade binding doesn't have dhcpv6 + ra (issue- https://github.com/kubevirt/kubevirt/issues/7184) + libnet.SkipWhenClusterNotSupportIpv4(virtClient) + tests.BeforeTestCleanup() }) From 3c6567015802fb283904280da86cf18280fdc562 Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Sun, 13 Feb 2022 15:36:45 +0200 Subject: [PATCH 5/9] tests, single stack ipv6: skip-dual-stack-test flag should be true skip-dual-stack-test flag should be true in single stack ipv6 cluster. Signed-off-by: Alona Kaplan --- hack/functests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/functests.sh b/hack/functests.sh index 95dc56c8a136..d59f01900032 100755 --- a/hack/functests.sh +++ b/hack/functests.sh @@ -55,7 +55,7 @@ function functest() { -conn-check-dns=${conn_check_dns} \ -migration-network-nic=${migration_network_nic} \ ${KUBEVIRT_FUNC_TEST_SUITE_ARGS}" - if [[ ${KUBEVIRT_PROVIDER} =~ .*(k8s-1\.16)|(k8s-1\.17)|k8s-sriov.* ]]; then + if [[ ${KUBEVIRT_PROVIDER} =~ .*(k8s-1\.16)|(k8s-1\.17)|(k8s-sriov)|(ipv6).* ]]; then echo "Will skip test asserting the cluster is in dual-stack mode." KUBEVIRT_FUNC_TEST_SUITE_ARGS="-skip-dual-stack-test ${KUBEVIRT_FUNC_TEST_SUITE_ARGS}" fi From ee48c0449185f0f5ac1c06bf9323698501cd0c2b Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Mon, 14 Feb 2022 10:50:20 +0200 Subject: [PATCH 6/9] tests, single stack ipv6: increase login timeout to cirros The login is taking a bit longer since discovering ipv4 address fails and it takes a ~minute until the discovery gives up. Using cloud-init-nocloud userdata to turn off dhcp on eth0 interface doesn't help to solve the issue since the userdata is invoked after the networking. cloudinit NetworkData or metadata.network-interfaces doesn't help either, since it is not properly working on cirros. This is a temporary solution. A follow-up solution will be to use alpine instead of cirros.
Signed-off-by: Alona Kaplan --- tests/console/BUILD.bazel | 1 + tests/console/login.go | 19 +++++++++++++++-- tests/libnet/BUILD.bazel | 2 +- tests/libnet/cluster/BUILD.bazel | 16 ++++++++++++++ tests/libnet/{ => cluster}/cluster.go | 30 ++++++++++++++++++++------- tests/libnet/ipv6.go | 4 +++- tests/libnet/skips.go | 8 ++++--- tests/network/BUILD.bazel | 1 + tests/network/dual_stack_cluster.go | 5 +++-- tests/network/expose.go | 3 ++- 10 files changed, 71 insertions(+), 18 deletions(-) create mode 100644 tests/libnet/cluster/BUILD.bazel rename tests/libnet/{ => cluster}/cluster.go (64%) diff --git a/tests/console/BUILD.bazel b/tests/console/BUILD.bazel index f0e3350de5ca..1ab102d59a3d 100644 --- a/tests/console/BUILD.bazel +++ b/tests/console/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "//staging/src/kubevirt.io/api/core/v1:go_default_library", "//staging/src/kubevirt.io/client-go/kubecli:go_default_library", "//staging/src/kubevirt.io/client-go/log:go_default_library", + "//tests/libnet/cluster:go_default_library", "//vendor/github.com/google/goexpect:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "@org_golang_google_grpc//codes:go_default_library", diff --git a/tests/console/login.go b/tests/console/login.go index 3e555d12d2b3..8d4ff9bcf9e8 100644 --- a/tests/console/login.go +++ b/tests/console/login.go @@ -13,6 +13,7 @@ import ( "kubevirt.io/client-go/kubecli" "kubevirt.io/client-go/log" "kubevirt.io/kubevirt/pkg/util/net/dns" + "kubevirt.io/kubevirt/tests/libnet/cluster" ) // LoginToFunction represents any of the LoginTo* functions @@ -50,7 +51,7 @@ func LoginToCirros(vmi *v1.VirtualMachineInstance) error { &expect.BExp{R: "Password:"}, &expect.BSnd{S: "gocubsgo\n"}, &expect.BExp{R: PromptExpression}}) - resp, err := expecter.ExpectBatch(b, 180*time.Second) + resp, err := expecter.ExpectBatch(b, 240*time.Second) if err != nil { log.DefaultLogger().Object(vmi).Infof("Login: %v", resp) @@ -97,7 +98,21 @@ func LoginToAlpine(vmi 
*v1.VirtualMachineInstance) error { &expect.BExp{R: "localhost login:"}, &expect.BSnd{S: "root\n"}, &expect.BExp{R: PromptExpression}}) - res, err := expecter.ExpectBatch(b, 180*time.Second) + + timeout := 180 * time.Second + + clusterSupportsIpv4, err := cluster.SupportsIpv4(virtClient) + if err != nil { + return err + } + clusterSupportsIpv6, err := cluster.SupportsIpv6(virtClient) + if err != nil { + return err + } + if !clusterSupportsIpv4 && clusterSupportsIpv6 { + timeout = 240 * time.Second + } + res, err := expecter.ExpectBatch(b, timeout) if err != nil { log.DefaultLogger().Object(vmi).Infof("Login: %v", res) return err diff --git a/tests/libnet/BUILD.bazel b/tests/libnet/BUILD.bazel index 1d4d7658dd68..cb504fb9500d 100644 --- a/tests/libnet/BUILD.bazel +++ b/tests/libnet/BUILD.bazel @@ -4,7 +4,6 @@ go_library( name = "go_default_library", srcs = [ "cloudinit.go", - "cluster.go", "dns.go", "expose_util.go", "ipaddress.go", @@ -24,6 +23,7 @@ go_library( "//tests/console:go_default_library", "//tests/flags:go_default_library", "//tests/framework/cleanup:go_default_library", + "//tests/libnet/cluster:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", diff --git a/tests/libnet/cluster/BUILD.bazel b/tests/libnet/cluster/BUILD.bazel new file mode 100644 index 000000000000..6be673ffefca --- /dev/null +++ b/tests/libnet/cluster/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["cluster.go"], + importpath = "kubevirt.io/kubevirt/tests/libnet/cluster", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/kubevirt.io/api/core/v1:go_default_library", + "//staging/src/kubevirt.io/client-go/kubecli:go_default_library", + "//tests/flags:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + 
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/utils/net:go_default_library", + ], +) diff --git a/tests/libnet/cluster.go b/tests/libnet/cluster/cluster.go similarity index 64% rename from tests/libnet/cluster.go rename to tests/libnet/cluster/cluster.go index 2da8986595ca..8d73416df365 100644 --- a/tests/libnet/cluster.go +++ b/tests/libnet/cluster/cluster.go @@ -1,8 +1,9 @@ -package libnet +package cluster import ( "context" "fmt" + "sync" k8sv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -13,25 +14,38 @@ import ( "kubevirt.io/kubevirt/tests/flags" ) -func IsClusterDualStack(virtClient kubecli.KubevirtClient) (bool, error) { - supportsIpv4, err := ClusterSupportsIpv4(virtClient) +var onceIPv4 sync.Once +var clusterSupportsIpv4 bool +var errIPv4 error +var onceIPv6 sync.Once +var clusterSupportsIpv6 bool +var errIPv6 error + +func DualStack(virtClient kubecli.KubevirtClient) (bool, error) { + supportsIpv4, err := SupportsIpv4(virtClient) if err != nil { return false, err } - supportsIpv6, err := ClusterSupportsIpv6(virtClient) + supportsIpv6, err := SupportsIpv6(virtClient) if err != nil { return false, err } return supportsIpv4 && supportsIpv6, nil } -func ClusterSupportsIpv4(virtClient kubecli.KubevirtClient) (bool, error) { - return clusterAnswersIpCondition(virtClient, netutils.IsIPv4String) +func SupportsIpv4(virtClient kubecli.KubevirtClient) (bool, error) { + onceIPv4.Do(func() { + clusterSupportsIpv4, errIPv4 = clusterAnswersIpCondition(virtClient, netutils.IsIPv4String) + }) + return clusterSupportsIpv4, errIPv4 } -func ClusterSupportsIpv6(virtClient kubecli.KubevirtClient) (bool, error) { - return clusterAnswersIpCondition(virtClient, netutils.IsIPv6String) +func SupportsIpv6(virtClient kubecli.KubevirtClient) (bool, error) { + onceIPv6.Do(func() { + clusterSupportsIpv6, errIPv6 = clusterAnswersIpCondition(virtClient, netutils.IsIPv6String) + }) + return clusterSupportsIpv6, errIPv6 } 
func clusterAnswersIpCondition(virtClient kubecli.KubevirtClient, ipCondition func(ip string) bool) (bool, error) { diff --git a/tests/libnet/ipv6.go b/tests/libnet/ipv6.go index 0a4945e13059..4eb8ca1d4826 100644 --- a/tests/libnet/ipv6.go +++ b/tests/libnet/ipv6.go @@ -3,6 +3,8 @@ package libnet import ( "time" + "kubevirt.io/kubevirt/tests/libnet/cluster" + v1 "kubevirt.io/api/core/v1" "kubevirt.io/client-go/kubecli" "kubevirt.io/client-go/log" @@ -26,7 +28,7 @@ func configureIPv6OnVMI(vmi *v1.VirtualMachineInstance) error { panic(err) } - clusterSupportsIpv6, err := ClusterSupportsIpv6(virtClient) + clusterSupportsIpv6, err := cluster.SupportsIpv6(virtClient) if err != nil { return err } diff --git a/tests/libnet/skips.go b/tests/libnet/skips.go index 63efbeb1be01..7f6ad3c6b45f 100644 --- a/tests/libnet/skips.go +++ b/tests/libnet/skips.go @@ -5,11 +5,13 @@ import ( . "github.com/onsi/gomega" k8sv1 "k8s.io/api/core/v1" + "kubevirt.io/kubevirt/tests/libnet/cluster" + "kubevirt.io/client-go/kubecli" ) func SkipWhenNotDualStackCluster(virtClient kubecli.KubevirtClient) { - isClusterDualStack, err := IsClusterDualStack(virtClient) + isClusterDualStack, err := cluster.DualStack(virtClient) ExpectWithOffset(1, err).NotTo(HaveOccurred(), "should have been able to infer if the cluster is dual stack") if !isClusterDualStack { Skip("This test requires a dual stack network config.") @@ -17,7 +19,7 @@ func SkipWhenNotDualStackCluster(virtClient kubecli.KubevirtClient) { } func SkipWhenClusterNotSupportIpv4(virtClient kubecli.KubevirtClient) { - clusterSupportsIpv4, err := ClusterSupportsIpv4(virtClient) + clusterSupportsIpv4, err := cluster.SupportsIpv4(virtClient) ExpectWithOffset(1, err).NotTo(HaveOccurred(), "should have been able to infer if the cluster supports ipv4") if !clusterSupportsIpv4 { Skip("This test requires an ipv4 network config.") @@ -25,7 +27,7 @@ func SkipWhenClusterNotSupportIpv4(virtClient kubecli.KubevirtClient) { } func 
SkipWhenClusterNotSupportIpv6(virtClient kubecli.KubevirtClient) { - clusterSupportsIpv6, err := ClusterSupportsIpv6(virtClient) + clusterSupportsIpv6, err := cluster.SupportsIpv6(virtClient) ExpectWithOffset(1, err).NotTo(HaveOccurred(), "should have been able to infer if the cluster supports ipv6") if !clusterSupportsIpv6 { Skip("This test requires an ipv6 network config.") diff --git a/tests/network/BUILD.bazel b/tests/network/BUILD.bazel index 8bea49a93614..c670b619e201 100644 --- a/tests/network/BUILD.bazel +++ b/tests/network/BUILD.bazel @@ -40,6 +40,7 @@ go_library( "//tests/flags:go_default_library", "//tests/framework/checks:go_default_library", "//tests/libnet:go_default_library", + "//tests/libnet/cluster:go_default_library", "//tests/libnet/service:go_default_library", "//tests/libvmi:go_default_library", "//tests/util:go_default_library", diff --git a/tests/network/dual_stack_cluster.go b/tests/network/dual_stack_cluster.go index 758fc8b7e90e..0c2f0c029ab3 100644 --- a/tests/network/dual_stack_cluster.go +++ b/tests/network/dual_stack_cluster.go @@ -4,9 +4,10 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "kubevirt.io/kubevirt/tests/libnet/cluster" + "kubevirt.io/client-go/kubecli" "kubevirt.io/kubevirt/tests/flags" - "kubevirt.io/kubevirt/tests/libnet" ) var _ = SIGDescribe("Dual stack cluster network configuration", func() { @@ -24,7 +25,7 @@ var _ = SIGDescribe("Dual stack cluster network configuration", func() { Skip("user requested the dual stack check on the live cluster to be skipped") } - isClusterDualStack, err := libnet.IsClusterDualStack(virtClient) + isClusterDualStack, err := cluster.DualStack(virtClient) Expect(err).NotTo(HaveOccurred(), "must be able to infer the dual stack configuration from the live cluster") Expect(isClusterDualStack).To(BeTrue(), "the live cluster should be in dual stack mode") }) diff --git a/tests/network/expose.go b/tests/network/expose.go index 721775571fb3..adad2b73c33d 100644 --- a/tests/network/expose.go +++ b/tests/network/expose.go @@ -8,6 +8,7 @@ import ( "time" "kubevirt.io/kubevirt/tests/framework/checks" + "kubevirt.io/kubevirt/tests/libnet/cluster" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/extensions/table" @@ -325,7 +326,7 @@ var _ = SIGDescribe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:c case k8sv1.IPFamilyPolicySingleStack: return 1 case k8sv1.IPFamilyPolicyPreferDualStack: - isClusterDualStack, err := libnet.IsClusterDualStack(virtClient) + isClusterDualStack, err := cluster.DualStack(virtClient) ExpectWithOffset(1, err).NotTo(HaveOccurred(), "should have been able to infer if the cluster is dual stack") if isClusterDualStack { return 2 From b7a26ab0ff4fd804ad08bc8ee06a7166c9bd2a71 Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Tue, 1 Feb 2022 16:15:16 +0200 Subject: [PATCH 7/9] tests, single stack ipv6: Don't assume the cluster always supports IPv4 Till now, our tests assumes ipv4 is always supported. Now that we support single stack ipv6 this assumption is wrong. This commit fixes the tests to run ipv4 tests only if ipv4 is supported by the cluster. 
Signed-off-by: Alona Kaplan --- tests/infra_test.go | 36 +++++++--------------------- tests/network/expose.go | 31 +++++++++++++----------- tests/network/primary_pod_network.go | 1 + tests/network/probes.go | 5 ++-- tests/network/services.go | 4 ++-- tests/network/vmi_lifecycle.go | 2 ++ tests/network/vmi_networking.go | 23 +++++++++++------- tests/network/vmi_slirp_interface.go | 3 +++ tests/storage/storage.go | 9 +++---- 9 files changed, 53 insertions(+), 61 deletions(-) diff --git a/tests/infra_test.go b/tests/infra_test.go index ea1eac3091ce..38acf1cadcad 100644 --- a/tests/infra_test.go +++ b/tests/infra_test.go @@ -867,9 +867,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { }) table.DescribeTable("should throttle the Prometheus metrics access", func(family k8sv1.IPFamily) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(handlerMetricIPs, family) @@ -915,9 +913,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { ) table.DescribeTable("should include the metrics for a running VM", func(family k8sv1.IPFamily) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(handlerMetricIPs, family) @@ -933,9 +929,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { ) table.DescribeTable("should include the storage metrics for a running VM", func(family k8sv1.IPFamily, metricSubstring, operator string) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(handlerMetricIPs, family) @@ -973,9 +967,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { ) table.DescribeTable("should include metrics for a running VM", func(family k8sv1.IPFamily, 
metricSubstring, operator string) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(handlerMetricIPs, family) @@ -1001,9 +993,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { ) table.DescribeTable("should include VMI infos for a running VM", func(family k8sv1.IPFamily) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(handlerMetricIPs, family) @@ -1040,9 +1030,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { ) table.DescribeTable("should include VMI phase metrics for all running VMs", func(family k8sv1.IPFamily) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(handlerMetricIPs, family) @@ -1061,9 +1049,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { ) table.DescribeTable("should include VMI eviction blocker status for all running VMs", func(family k8sv1.IPFamily) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(controllerMetricIPs, family) @@ -1081,9 +1067,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { ) table.DescribeTable("should include kubernetes labels to VMI metrics", func(family k8sv1.IPFamily) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(handlerMetricIPs, family) @@ -1106,9 +1090,7 @@ var _ = Describe("[Serial][sig-compute]Infrastructure", func() { // explicit test fo swap metrics as test_id:4144 doesn't catch if they are missing 
table.DescribeTable("should include swap metrics", func(family k8sv1.IPFamily) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) ip := getSupportedIP(handlerMetricIPs, family) diff --git a/tests/network/expose.go b/tests/network/expose.go index adad2b73c33d..06088b96ead3 100644 --- a/tests/network/expose.go +++ b/tests/network/expose.go @@ -66,11 +66,11 @@ func isDualStack(ipFamily ipFamily) bool { return ipFamily == dualIPv4Primary || ipFamily == dualIPv6Primary } -func doesSupportIpv6(ipFamily ipFamily) bool { +func inlcudesIpv6(ipFamily ipFamily) bool { return ipFamily != ipv4 } -func doesSupportIpv4(ipFamily ipFamily) bool { +func includesIpv4(ipFamily ipFamily) bool { return ipFamily != ipv6 } @@ -108,16 +108,19 @@ var _ = SIGDescribe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:c } skipIfNotSupportedCluster := func(ipFamily ipFamily) { - if doesSupportIpv6(ipFamily) { - libnet.SkipWhenNotDualStackCluster(virtClient) - if isDualStack(ipFamily) { - checks.SkipIfVersionBelow("Dual stack service requires v1.20 and above", "1.20") - } + if includesIpv4(ipFamily) { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) + } + if inlcudesIpv6(ipFamily) { + libnet.SkipWhenClusterNotSupportIpv6(virtClient) + } + if isDualStack(ipFamily) { + checks.SkipIfVersionBelow("Dual stack service requires v1.20 and above", "1.20") } } appendIpFamilyToExposeArgs := func(ipFamily ipFamily, vmiExposeArgs []string) []string { - if doesSupportIpv6(ipFamily) { + if inlcudesIpv6(ipFamily) { vmiExposeArgs = append(vmiExposeArgs, "--ip-family", string(ipFamily)) } return vmiExposeArgs @@ -350,7 +353,7 @@ var _ = SIGDescribe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:c By("Validating the num of cluster ips") Expect(len(svc.Spec.ClusterIPs)).To(Equal(calcNumOfClusterIPs())) }, - table.Entry("over SingleStack IPv4 IP family policy", 
k8sv1.IPFamilyPolicySingleStack), + table.Entry("over SingleStack IP family policy", k8sv1.IPFamilyPolicySingleStack), table.Entry("over PreferDualStack IP family policy", k8sv1.IPFamilyPolicyPreferDualStack), table.Entry("over RequireDualStack IP family policy", k8sv1.IPFamilyPolicyRequireDualStack), ) @@ -395,13 +398,13 @@ var _ = SIGDescribe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:c nodeIP := node.Status.Addresses[0].Address var ipv6NodeIP string - if doesSupportIpv4(ipFamily) { + if includesIpv4(ipFamily) { By("Connecting to IPv4 node IP") assert.XFail(xfailError, func() { Expect(createAndWaitForJobToSucceed(tests.NewHelloWorldJobTCP, tcpVM.Namespace, nodeIP, strconv.Itoa(int(nodePort)), fmt.Sprintf("NodePort using %s node ip", ipFamily))).To(Succeed()) }, ipFamily == dualIPv6Primary) } - if doesSupportIpv6(ipFamily) { + if inlcudesIpv6(ipFamily) { ipv6NodeIP, err = resolveNodeIPAddrByFamily( virtClient, libvmi.GetPodByVirtualMachineInstance(tcpVM, tcpVM.GetNamespace()), @@ -517,7 +520,7 @@ var _ = SIGDescribe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:c nodeIP := node.Status.Addresses[0].Address var ipv6NodeIP string - if doesSupportIpv6(ipFamily) { + if inlcudesIpv6(ipFamily) { ipv6NodeIP, err = resolveNodeIPAddrByFamily( virtClient, libvmi.GetPodByVirtualMachineInstance(udpVM, udpVM.GetNamespace()), @@ -527,13 +530,13 @@ var _ = SIGDescribe("[rfe_id:253][crit:medium][vendor:cnv-qe@redhat.com][level:c Expect(ipv6NodeIP).NotTo(BeEmpty(), "must have been able to resolve the IPv6 address of the node") } - if doesSupportIpv4(ipFamily) { + if includesIpv4(ipFamily) { By("Connecting to IPv4 node IP") assert.XFail(xfailError, func() { Expect(createAndWaitForJobToSucceed(tests.NewHelloWorldJobUDP, udpVM.Namespace, nodeIP, strconv.Itoa(int(nodePort)), "NodePort ipv4 address")).To(Succeed()) }, ipFamily == dualIPv6Primary) } - if doesSupportIpv6(ipFamily) { + if inlcudesIpv6(ipFamily) { By("Connecting to IPv6 node IP") 
assert.XFail(xfailError, func() { Expect(createAndWaitForJobToSucceed(tests.NewHelloWorldJobUDP, udpVM.Namespace, ipv6NodeIP, strconv.Itoa(int(nodePort)), "NodePort ipv6 address")).To(Succeed()) diff --git a/tests/network/primary_pod_network.go b/tests/network/primary_pod_network.go index 0fa82fe6df42..42f3dec3c763 100644 --- a/tests/network/primary_pod_network.go +++ b/tests/network/primary_pod_network.go @@ -90,6 +90,7 @@ var _ = SIGDescribe("Primary Pod Network", func() { } ) BeforeEach(func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) var err error vmi, err = newFedoraWithGuestAgentAndDefaultInterface(libvmi.InterfaceDeviceWithBridgeBinding(libvmi.DefaultInterfaceName)) diff --git a/tests/network/probes.go b/tests/network/probes.go index 10f05c93d27e..d960a996b310 100644 --- a/tests/network/probes.go +++ b/tests/network/probes.go @@ -69,8 +69,8 @@ var _ = SIGDescribe("[ref_id:1182]Probes", func() { guestAgentPingProbe := createGuestAgentPingProbe(period, initialSeconds) table.DescribeTable("should succeed", func(readinessProbe *v1.Probe, ipFamily corev1.IPFamily, isExecProbe bool, disableEnableCycle bool) { + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, ipFamily) if ipFamily == corev1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) By("Create a support pod which will reply to kubelet's probes ...") probeBackendPod, supportPodCleanupFunc := buildProbeBackendPodSpec(readinessProbe) defer func() { @@ -163,9 +163,8 @@ var _ = SIGDescribe("[ref_id:1182]Probes", func() { httpProbe := createHTTPProbe(period, initialSeconds, port) table.DescribeTable("should not fail the VMI", func(livenessProbe *v1.Probe, ipFamily corev1.IPFamily, isExecProbe bool) { - + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, ipFamily) if ipFamily == corev1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) By("Create a support pod which will reply to kubelet's probes ...") probeBackendPod, supportPodCleanupFunc := 
buildProbeBackendPodSpec(livenessProbe) diff --git a/tests/network/services.go b/tests/network/services.go index 169a7dd6963c..9a26037ef708 100644 --- a/tests/network/services.go +++ b/tests/network/services.go @@ -149,6 +149,7 @@ var _ = SIGDescribe("Services", func() { } BeforeEach(func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) subdomain := "vmi" hostname := "inbound" @@ -273,9 +274,8 @@ var _ = SIGDescribe("Services", func() { table.DescribeTable("[Conformance] should be able to reach the vmi based on labels specified on the vmi", func(ipFamily k8sv1.IPFamily) { serviceName := "myservice" By("setting up resources to expose the VMI via a service", func() { + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, ipFamily) if ipFamily == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - serviceName = serviceName + "v6" service = netservice.BuildIPv6Spec(serviceName, servicePort, servicePort, selectorLabelKey, selectorLabelValue) } else { diff --git a/tests/network/vmi_lifecycle.go b/tests/network/vmi_lifecycle.go index 58dd39264611..bc95bef3489a 100644 --- a/tests/network/vmi_lifecycle.go +++ b/tests/network/vmi_lifecycle.go @@ -57,6 +57,7 @@ var _ = SIGDescribe("[crit:high][arm64][vendor:cnv-qe@redhat.com][level:componen Describe("[crit:high][vendor:cnv-qe@redhat.com][level:component]Creating a VirtualMachineInstance", func() { Context("when virt-handler is responsive", func() { It("[Serial]VMIs with Bridge Networking shouldn't fail after the kubelet restarts", func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) bridgeVMI := vmi // Remove the masquerade interface to use the default bridge one bridgeVMI.Spec.Domain.Devices.Interfaces = nil @@ -93,6 +94,7 @@ var _ = SIGDescribe("[crit:high][arm64][vendor:cnv-qe@redhat.com][level:componen }) It("VMIs with Bridge Networking should work with Duplicate Address Detection (DAD)", func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) bridgeVMI := 
tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n") // Remove the masquerade interface to use the default bridge one bridgeVMI.Spec.Domain.Devices.Interfaces = nil diff --git a/tests/network/vmi_networking.go b/tests/network/vmi_networking.go index b5df7fbfc2d4..08bb3761db1c 100644 --- a/tests/network/vmi_networking.go +++ b/tests/network/vmi_networking.go @@ -125,6 +125,9 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c var inboundVMIWithPodNetworkSet *v1.VirtualMachineInstance var inboundVMIWithCustomMacAddress *v1.VirtualMachineInstance + BeforeEach(func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) + }) Context("with a test outbound VMI", func() { BeforeEach(func() { inboundVMI = libvmi.NewCirros() @@ -228,7 +231,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c inboundVMI = libvmi.NewCirros() inboundVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(inboundVMI) Expect(err).ToNot(HaveOccurred()) - inboundVMI = tests.WaitUntilVMIReady(inboundVMI, libnet.WithIPv6(console.LoginToCirros)) + inboundVMI = tests.WaitUntilVMIReady(inboundVMI, console.LoginToCirros) tests.StartTCPServer(inboundVMI, testPort) }) @@ -492,6 +495,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c Context("VirtualMachineInstance with learning disabled on pod interface", func() { It("[test_id:1777]should disable learning on pod iface", func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) By("checking learning flag") learningDisabledVMI := libvmi.NewAlpine() learningDisabledVMI, err = virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(learningDisabledVMI) @@ -504,6 +508,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c Context("VirtualMachineInstance with dhcp options", func() { It("[test_id:1778]should offer extra dhcp 
options to pod iface", func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) dhcpVMI := tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskFedoraTestTooling)) tests.AddExplicitPodNetworkInterface(dhcpVMI) @@ -549,6 +554,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c Context("VirtualMachineInstance with custom dns", func() { It("[test_id:1779]should have custom resolv.conf", func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) userData := "#cloud-config\n" dnsVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), userData) @@ -681,6 +687,8 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c } table.DescribeTable("ipv4", func(ports []v1.Port, tcpPort int, networkCIDR string) { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) + var clientVMI *v1.VirtualMachineInstance var serverVMI *v1.VirtualMachineInstance @@ -719,6 +727,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c ) It("[outside_connectivity]should be able to reach the outside world [IPv4]", func() { + libnet.SkipWhenClusterNotSupportIpv4(virtClient) ipv4Address := "8.8.8.8" if flags.IPV4ConnectivityCheckAddress != "" { ipv4Address = flags.IPV4ConnectivityCheckAddress @@ -739,7 +748,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c }) table.DescribeTable("IPv6", func(ports []v1.Port, tcpPort int, networkCIDR string) { - libnet.SkipWhenNotDualStackCluster(virtClient) + libnet.SkipWhenClusterNotSupportIpv6(virtClient) var serverVMI *v1.VirtualMachineInstance var clientVMI *v1.VirtualMachineInstance @@ -776,7 +785,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c ) It("[outside_connectivity]should be able to reach the outside world [IPv6]", func() { - libnet.SkipWhenNotDualStackCluster(virtClient) + libnet.SkipWhenClusterNotSupportIpv6(virtClient) // Cluster 
nodes subnet (docker network gateway) // Docker network subnet cidr definition: // https://github.com/kubevirt/project-infra/blob/master/github/ci/shared-deployments/files/docker-daemon-mirror.conf#L5 @@ -853,9 +862,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c }) table.DescribeTable("[Conformance] preserves connectivity", func(ipFamily k8sv1.IPFamily, ports []v1.Port) { - if ipFamily == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, ipFamily) var err error var loginMethod console.LoginToFunction @@ -960,9 +967,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c }) table.DescribeTable("should have the correct MTU", func(ipFamily k8sv1.IPFamily) { - if ipFamily == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, ipFamily) By("checking k6t-eth0 MTU inside the pod") vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, vmi.Namespace) diff --git a/tests/network/vmi_slirp_interface.go b/tests/network/vmi_slirp_interface.go index 9b37a2c25ca2..cf4d3faa32d0 100644 --- a/tests/network/vmi_slirp_interface.go +++ b/tests/network/vmi_slirp_interface.go @@ -40,6 +40,7 @@ import ( "kubevirt.io/kubevirt/tests/console" cd "kubevirt.io/kubevirt/tests/containerdisk" "kubevirt.io/kubevirt/tests/flags" + "kubevirt.io/kubevirt/tests/libnet" ) var _ = SIGDescribe("Slirp Networking", func() { @@ -75,6 +76,8 @@ var _ = SIGDescribe("Slirp Networking", func() { virtClient, err = kubecli.GetKubevirtClient() util.PanicOnError(err) + libnet.SkipWhenClusterNotSupportIpv4(virtClient) + kv := util.GetCurrentKv(virtClient) currentConfiguration = kv.Spec.Configuration }) diff --git a/tests/storage/storage.go b/tests/storage/storage.go index a684fe13e54f..25a85a533a5a 100644 --- a/tests/storage/storage.go +++ b/tests/storage/storage.go @@ -253,9 +253,7 @@ var _ 
= SIGDescribe("Storage", func() { } }) table.DescribeTable("started", func(newVMI VMICreationFunc, storageEngine string, family k8sv1.IPFamily, imageOwnedByQEMU bool) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) var nodeName string // Start the VirtualMachineInstance with the PVC attached @@ -550,9 +548,8 @@ var _ = SIGDescribe("Storage", func() { // The following case is mostly similar to the alpine PVC test above, except using different VirtualMachineInstance. table.DescribeTable("started", func(newVMI VMICreationFunc, storageEngine string, family k8sv1.IPFamily) { - if family == k8sv1.IPv6Protocol { - libnet.SkipWhenNotDualStackCluster(virtClient) - } + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, family) + // Start the VirtualMachineInstance with the PVC attached if storageEngine == "nfs" { nfsPod = storageframework.InitNFS(tests.HostPathAlpine, "") From c059a62ddc8b24d2b9cd1ee70b9c94636ab511fc Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Thu, 17 Feb 2022 10:18:39 +0200 Subject: [PATCH 8/9] tests, ipv6 only: port forwarding Change the tests to test both ipv4 and ipv6. Skip the ipv6 tests since VM port-forwarding over ipv6 is not supported yet. 
Tracking issue https://github.com/kubevirt/kubevirt/issues/7276 Signed-off-by: Alona Kaplan --- tests/libnet/ipaddress.go | 19 ++++++++++ tests/network/port_forward.go | 65 +++++++++++++++++++++++++---------- tests/vmi_servers.go | 14 +++++--- 3 files changed, 76 insertions(+), 22 deletions(-) diff --git a/tests/libnet/ipaddress.go b/tests/libnet/ipaddress.go index 56c3be61dd56..0423b06dccfa 100644 --- a/tests/libnet/ipaddress.go +++ b/tests/libnet/ipaddress.go @@ -1,6 +1,9 @@ package libnet import ( + "fmt" + "net" + k8sv1 "k8s.io/api/core/v1" netutils "k8s.io/utils/net" @@ -34,3 +37,19 @@ func getFamily(ip string) k8sv1.IPFamily { } return k8sv1.IPv4Protocol } + +func GetLoopbackAddress(family k8sv1.IPFamily) string { + if family == k8sv1.IPv4Protocol { + return "127.0.0.1" + + } + return net.IPv6loopback.String() +} + +func GetLoopbackAddressForUrl(family k8sv1.IPFamily) string { + address := GetLoopbackAddress(family) + if family == k8sv1.IPv6Protocol { + address = fmt.Sprintf("[%s]", address) + } + return address +} diff --git a/tests/network/port_forward.go b/tests/network/port_forward.go index 81bf97207f83..281c0ddc7590 100644 --- a/tests/network/port_forward.go +++ b/tests/network/port_forward.go @@ -26,6 +26,7 @@ import ( "time" "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -38,9 +39,12 @@ import ( "kubevirt.io/client-go/kubecli" "kubevirt.io/kubevirt/tests" "kubevirt.io/kubevirt/tests/console" + "kubevirt.io/kubevirt/tests/libnet" "kubevirt.io/kubevirt/tests/libvmi" ) +const skipIPv6Message = "port-forwarding over ipv6 is not supported yet. 
Tracking issue https://github.com/kubevirt/kubevirt/issues/7276" + var _ = SIGDescribe("Port-forward", func() { var ( err error @@ -62,9 +66,15 @@ var _ = SIGDescribe("Port-forward", func() { vmiDeclaredPorts []v1.Port ) - JustBeforeEach(func() { + setup := func(ipFamily k8sv1.IPFamily) { + libnet.SkipWhenClusterNotSupportIpFamily(virtClient, ipFamily) + + if ipFamily == k8sv1.IPv6Protocol { + Skip(skipIPv6Message) + } + vmi := createCirrosVMIWithPortsAndBlockUntilReady(virtClient, vmiDeclaredPorts) - tests.StartHTTPServer(vmi, vmiHttpServerPort) + tests.StartHTTPServerWithSourceIp(vmi, vmiHttpServerPort, getMasqueradeInternalAddress(ipFamily)) localPort = 1500 + config.GinkgoConfig.ParallelNode vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, util.NamespaceTestDefault) @@ -75,8 +85,8 @@ var _ = SIGDescribe("Port-forward", func() { stdout, err := portForwardCmd.StdoutPipe() Expect(err).NotTo(HaveOccurred()) Expect(portForwardCmd.Start()).To(Succeed()) - waitForPortForwardCmd(stdout, localPort, vmiHttpServerPort) - }) + waitForPortForwardCmd(ipFamily, stdout, localPort, vmiHttpServerPort) + } AfterEach(func() { Expect(killPortForwardCommand(portForwardCmd)).To(Succeed()) @@ -89,10 +99,14 @@ var _ = SIGDescribe("Port-forward", func() { vmiHttpServerPort = declaredPort }) - It("should reach the vmi", func() { + table.DescribeTable("should reach the vmi", func(ipFamily k8sv1.IPFamily) { + setup(ipFamily) By(fmt.Sprintf("checking that service running on port %d can be reached", declaredPort)) - Expect(testConnectivityThroughLocalPort(localPort)).To(Succeed()) - }) + Expect(testConnectivityThroughLocalPort(ipFamily, localPort)).To(Succeed()) + }, + table.Entry("IPv4", k8sv1.IPv4Protocol), + table.Entry("IPv6", k8sv1.IPv6Protocol), + ) }) When("performing port-forward from a local port to a VMI with no declared ports", func() { @@ -102,10 +116,14 @@ var _ = SIGDescribe("Port-forward", func() { vmiHttpServerPort = nonDeclaredPort }) - It("should reach the vmi", 
func() { + table.DescribeTable("should reach the vmi", func(ipFamily k8sv1.IPFamily) { + setup(ipFamily) By(fmt.Sprintf("checking that service running on port %d can be reached", nonDeclaredPort)) - Expect(testConnectivityThroughLocalPort(localPort)).To(Succeed()) - }) + Expect(testConnectivityThroughLocalPort(ipFamily, localPort)).To(Succeed()) + }, + table.Entry("IPv4", k8sv1.IPv4Protocol), + table.Entry("IPv6", k8sv1.IPv6Protocol), + ) }) When("performing port-forward from a local port to a VMI's non-declared port", func() { @@ -116,10 +134,14 @@ var _ = SIGDescribe("Port-forward", func() { vmiHttpServerPort = nonDeclaredPort }) - It("should not reach the vmi", func() { + table.DescribeTable("should not reach the vmi", func(ipFamily k8sv1.IPFamily) { + setup(ipFamily) By(fmt.Sprintf("checking that service running on port %d can not be reached", nonDeclaredPort)) - Expect(testConnectivityThroughLocalPort(localPort)).ToNot(Succeed()) - }) + Expect(testConnectivityThroughLocalPort(ipFamily, localPort)).ToNot(Succeed()) + }, + table.Entry("IPv4", k8sv1.IPv4Protocol), + table.Entry("IPv6", k8sv1.IPv6Protocol), + ) }) }) }) @@ -148,20 +170,27 @@ func createCirrosVMIWithPortsAndBlockUntilReady(virtClient kubecli.KubevirtClien vmi, err := virtClient.VirtualMachineInstance(util.NamespaceTestDefault).Create(vmi) Expect(err).ToNot(HaveOccurred()) - vmi = tests.WaitUntilVMIReady(vmi, console.LoginToCirros) + vmi = tests.WaitUntilVMIReady(vmi, libnet.WithIPv6(console.LoginToCirros)) return vmi } -func testConnectivityThroughLocalPort(portNumber int) error { - return exec.Command("curl", fmt.Sprintf("127.0.0.1:%d", portNumber)).Run() +func testConnectivityThroughLocalPort(ipFamily k8sv1.IPFamily, portNumber int) error { + return exec.Command("curl", fmt.Sprintf("%s:%d", libnet.GetLoopbackAddressForUrl(ipFamily), portNumber)).Run() } -func waitForPortForwardCmd(stdout io.ReadCloser, src, dst int) { +func waitForPortForwardCmd(ipFamily k8sv1.IPFamily, stdout io.ReadCloser, src, 
dst int) { Eventually(func() string { tmp := make([]byte, 1024) _, err := stdout.Read(tmp) Expect(err).NotTo(HaveOccurred()) return string(tmp) - }, 30*time.Second, 1*time.Second).Should(ContainSubstring(fmt.Sprintf("Forwarding from 127.0.0.1:%d -> %d", src, dst))) + }, 30*time.Second, 1*time.Second).Should(ContainSubstring(fmt.Sprintf("Forwarding from %s:%d -> %d", libnet.GetLoopbackAddressForUrl(ipFamily), src, dst))) +} + +func getMasqueradeInternalAddress(ipFamily k8sv1.IPFamily) string { + if ipFamily == k8sv1.IPv4Protocol { + return "10.0.2.2" + } + return "fd10:0:2::2" } diff --git a/tests/vmi_servers.go b/tests/vmi_servers.go index 90acfc867381..5940a10b7cff 100644 --- a/tests/vmi_servers.go +++ b/tests/vmi_servers.go @@ -2,6 +2,7 @@ package tests import ( "fmt" + "strings" "time" . "github.com/onsi/gomega" @@ -18,8 +19,8 @@ const ( HTTPServer = server("\"HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!\"\n") ) -func (s server) composeNetcatServerCommand(port int) string { - return fmt.Sprintf("screen -d -m sudo nc -klp %d -e echo -e %s", port, string(s)) +func (s server) composeNetcatServerCommand(port int, extraArgs ...string) string { + return fmt.Sprintf("screen -d -m sudo nc %s -klp %d -e echo -e %s", strings.Join(extraArgs, " "), port, string(s)) } func StartTCPServer(vmi *v1.VirtualMachineInstance, port int) { @@ -32,11 +33,16 @@ func StartHTTPServer(vmi *v1.VirtualMachineInstance, port int) { HTTPServer.Start(vmi, port) } +func StartHTTPServerWithSourceIp(vmi *v1.VirtualMachineInstance, port int, sourceIP string) { + libnet.WithIPv6(console.LoginToCirros)(vmi) + HTTPServer.Start(vmi, port, fmt.Sprintf("-s %s", sourceIP)) +} + func StartPythonHttpServer(vmi *v1.VirtualMachineInstance, port int) { serverCommand := fmt.Sprintf("python3 -m http.server %d --bind ::0 &\n", port) Expect(console.RunCommand(vmi, serverCommand, 60*time.Second)).To(Succeed()) } -func (s server) Start(vmi *v1.VirtualMachineInstance, port int) { - 
Expect(console.RunCommand(vmi, s.composeNetcatServerCommand(port), 60*time.Second)).To(Succeed()) +func (s server) Start(vmi *v1.VirtualMachineInstance, port int, extraArgs ...string) { + Expect(console.RunCommand(vmi, s.composeNetcatServerCommand(port, extraArgs...), 60*time.Second)).To(Succeed()) } From 5dafb326b3cd8e73c3b12b348c2cf87a4337322a Mon Sep 17 00:00:00 2001 From: Alona Kaplan Date: Mon, 28 Feb 2022 13:33:57 +0200 Subject: [PATCH 9/9] tests, single stack ipv6: skip istio tests ipv6 support is in alpha stage on Istio u/s and is not supported yet by kubevirt. Note: even once ipv6 will be fully supported by istio, kubevirt may have troubles to make it work for VMs because there is no IPv6 equivalent to "sysctl net.ipv4.conf.all.route_localnet". Signed-off-by: Alona Kaplan --- tests/network/vmi_istio.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/network/vmi_istio.go b/tests/network/vmi_istio.go index 1b48e1b7730c..7914e9500922 100644 --- a/tests/network/vmi_istio.go +++ b/tests/network/vmi_istio.go @@ -26,6 +26,8 @@ import ( "strings" "time" + k8sv1 "k8s.io/api/core/v1" + "kubevirt.io/kubevirt/tests/framework/checks" expect "github.com/google/goexpect" @@ -101,7 +103,7 @@ var _ = SIGDescribe("[Serial] Istio", func() { By("Getting back the VMI IP") vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) - vmiIP := vmi.Status.Interfaces[0].IP + vmiIP := libnet.GetVmiPrimaryIpByFamily(vmi, k8sv1.IPv4Protocol) By("Running job to send a request to the server") return virtClient.BatchV1().Jobs(util.NamespaceTestDefault).Create( @@ -116,6 +118,8 @@ var _ = SIGDescribe("[Serial] Istio", func() { virtClient, err = kubecli.GetKubevirtClient() util.PanicOnError(err) + libnet.SkipWhenClusterNotSupportIpv4(virtClient) + By("Create NetworkAttachmentDefinition") nad := generateIstioCNINetworkAttachmentDefinition() _, err =
virtClient.NetworkClient().K8sCniCncfIoV1().NetworkAttachmentDefinitions(util.NamespaceTestDefault).Create(context.TODO(), nad, metav1.CreateOptions{}) @@ -219,7 +223,7 @@ var _ = SIGDescribe("[Serial] Istio", func() { By("Getting the VMI IP") vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) - vmiIP := vmi.Status.Interfaces[0].IP + vmiIP := libnet.GetVmiPrimaryIpByFamily(vmi, k8sv1.IPv4Protocol) Expect( checkSSHConnection(bastionVMI, "fedora", vmiIP), @@ -234,7 +238,7 @@ var _ = SIGDescribe("[Serial] Istio", func() { By("Getting the VMI IP") vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{}) Expect(err).ShouldNot(HaveOccurred()) - vmiIP := vmi.Status.Interfaces[0].IP + vmiIP := libnet.GetVmiPrimaryIpByFamily(vmi, k8sv1.IPv4Protocol) Expect( checkSSHConnection(bastionVMI, "fedora", vmiIP),